code stringlengths 3 1.01M | repo_name stringlengths 5 116 | path stringlengths 3 311 | language stringclasses 30 values | license stringclasses 15 values | size int64 3 1.01M |
|---|---|---|---|---|---|
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""""Implementation of Spatial Transformer networks core components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from itertools import chain
# Dependency imports
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from sonnet.python.modules import base
from sonnet.python.modules import basic
import tensorflow as tf
class GridWarper(base.AbstractModule):
  """Grid warper interface class.

  An object implementing the `GridWarper` interface generates a reference grid
  of feature points at construction time, and warps it via a parametric
  transformation model, specified at run time by an input parameter Tensor.

  Grid warpers must then implement a `create_features` function used to generate
  the reference grid to be warped in the forward pass (according to a determined
  warping model).
  """

  def __init__(self, source_shape, output_shape, num_coeff, name, **kwargs):
    """Constructs a GridWarper module and initializes the source grid params.

    `source_shape` and `output_shape` are used to define the size of the source
    and output signal domains, as opposed to the shape of the respective
    Tensors. For example, for an image of size `width=W` and `height=H`,
    `{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
    and `depth=D`, `{source,output}_shape=[H, W, D]`.

    Args:
      source_shape: Iterable of integers determining the size of the source
        signal domain.
      output_shape: Iterable of integers determining the size of the destination
        resampled signal domain.
      num_coeff: Number of coefficients parametrizing the grid warp.
        For example, a 2D affine transformation will be defined by the 6
        parameters populating the corresponding 2x3 affine matrix.
      name: Name of Module.
      **kwargs: Extra kwargs to be forwarded to the `_create_features` function,
        instantiating the source grid parameters.

    Raises:
      Error: If `len(output_shape) > len(source_shape)`.
      TypeError: If `output_shape` and `source_shape` are not both iterable.
    """
    super(GridWarper, self).__init__(name=name)
    self._source_shape = tuple(source_shape)
    self._output_shape = tuple(output_shape)
    if len(self._output_shape) > len(self._source_shape):
      raise base.Error('Output domain dimensionality ({}) must be equal or '
                       'smaller than source domain dimensionality ({})'
                       .format(len(self._output_shape),
                               len(self._source_shape)))
    self._num_coeff = num_coeff
    # Subclasses build the reference grid features here; **kwargs lets each
    # warper receive its own construction-time parameters (e.g. constraints).
    self._psi = self._create_features(**kwargs)

  @abc.abstractmethod
  def _create_features(self, **kwargs):
    """Generates matrix of features, of size `[num_coeff, num_points]`."""
    pass

  @property
  def n_coeff(self):
    """Returns number of coefficients of warping function."""
    # Bug fix: the constructor stores this value as `_num_coeff`; the previous
    # implementation returned the non-existent attribute `_n_coeff`, which
    # raised AttributeError on every access of this property.
    return self._num_coeff

  @property
  def psi(self):
    """Returns a list of features used to compute the grid warp."""
    return self._psi

  @property
  def source_shape(self):
    """Returns a tuple containing the shape of the source signal."""
    return self._source_shape

  @property
  def output_shape(self):
    """Returns a tuple containing the shape of the output grid."""
    return self._output_shape
def _create_affine_features(output_shape, source_shape):
"""Generates n-dimensional homogenous coordinates for a given grid definition.
`source_shape` and `output_shape` are used to define the size of the source
and output signal domains, as opposed to the shape of the respective
Tensors. For example, for an image of size `width=W` and `height=H`,
`{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
and `depth=D`, `{source,output}_shape=[H, W, D]`.
Args:
output_shape: Iterable of integers determining the shape of the grid to be
warped.
source_shape: Iterable of integers determining the domain of the signal to be
resampled.
Returns:
List of flattened numpy arrays of coordinates in range `[-1, 1]^N`, for
example:
```
[[x_0_0, .... , x_0_{n-1}],
....
[x_{M-1}_0, .... , x_{M-1}_{n-1}],
[x_{M}_0=0, .... , x_{M}_{n-1}=0],
...
[x_{N-1}_0=0, .... , x_{N-1}_{n-1}=0],
[1, ..., 1]]
```
where N is the dimensionality of the sampled space, M is the
dimensionality of the output space, i.e. 2 for images
and 3 for volumes, and n is the number of points in the output grid.
When the dimensionality of `output_shape` is smaller that that of
`source_shape` the last rows before [1, ..., 1] will be filled with 0.
"""
ranges = [np.linspace(-1, 1, x, dtype=np.float32)
for x in reversed(output_shape)]
psi = [x.reshape(-1) for x in np.meshgrid(*ranges, indexing='xy')]
dim_gap = len(source_shape) - len(output_shape)
for _ in xrange(dim_gap):
psi.append(np.zeros_like(psi[0], dtype=np.float32))
psi.append(np.ones_like(psi[0], dtype=np.float32))
return psi
class AffineGridWarper(GridWarper):
  """Affine Grid Warper class.

  The affine grid warper generates a reference grid of n-dimensional points
  and warps it via an affine transformation model determined by an input
  parameter Tensor. Some of the transformation parameters can be fixed at
  construction time via an `AffineWarpConstraints` object.
  """

  def __init__(self,
               source_shape,
               output_shape,
               constraints=None,
               name='affine_grid_warper'):
    """Constructs an AffineGridWarper.

    `source_shape` and `output_shape` are used to define the size of the source
    and output signal domains, as opposed to the shape of the respective
    Tensors. For example, for an image of size `width=W` and `height=H`,
    `{source,output}_shape=[H, W]`; for a volume of size `width=W`, `height=H`
    and `depth=D`, `{source,output}_shape=[H, W, D]`.

    Args:
      source_shape: Iterable of integers determining the size of the source
        signal domain.
      output_shape: Iterable of integers determining the size of the destination
        resampled signal domain.
      constraints: Either a double list of shape `[N, N+1]` defining constraints
        on the entries of a matrix defining an affine transformation in N
        dimensions, or an `AffineWarpConstraints` object. If the double list is
        passed, a numeric value bakes in a constraint on the corresponding
        entry in the transformation matrix, whereas `None` implies that the
        corresponding entry will be specified at run time.
      name: Name of module.

    Raises:
      Error: If constraints fully define the affine transformation; or if
        input grid shape and constraints have different dimensionality.
      TypeError: If output_shape and source_shape are not both iterable.
    """
    # Shapes are stored before calling the parent constructor because the
    # parent's __init__ invokes `_create_features`, which reads them.
    self._source_shape = tuple(source_shape)
    self._output_shape = tuple(output_shape)
    num_dim = len(source_shape)
    # Normalize `constraints` into an AffineWarpConstraints instance.
    if isinstance(constraints, AffineWarpConstraints):
      self._constraints = constraints
    elif constraints is None:
      self._constraints = AffineWarpConstraints.no_constraints(num_dim)
    else:
      self._constraints = AffineWarpConstraints(constraints=constraints)
    if self._constraints.num_free_params == 0:
      raise base.Error('Transformation is fully constrained.')
    if self._constraints.num_dim != num_dim:
      raise base.Error('Incompatible set of constraints provided: '
                       'input grid shape and constraints have different '
                       'dimensionality.')
    # NOTE(review): `num_coeff=6` is the 2D (2x3) affine parameter count even
    # though the rest of this class handles N dimensions -- `inverse()` below
    # relies on this value to reject non-2D warpers; confirm before changing.
    super(AffineGridWarper, self).__init__(source_shape=source_shape,
                                           output_shape=output_shape,
                                           num_coeff=6,
                                           name=name,
                                           constraints=self._constraints)

  def _create_features(self, constraints):
    """Creates all the matrices needed to compute the output warped grids.

    Args:
      constraints: An `AffineWarpConstraints` instance (or a nested iterable
        convertible to one) describing which affine entries are fixed.

    Returns:
      A list of `2 * N + N` entries (N = number of output dimensions):
      the first N are per-row feature matrices for the free parameters (or
      `None` when a row is fully constrained), the next N are precomputed
      offsets from the constrained entries (or `None` when a row has no
      constrained entries), and the final N are the per-axis scale factors
      used as decentering offsets.
    """
    affine_warp_constraints = constraints
    if not isinstance(affine_warp_constraints, AffineWarpConstraints):
      affine_warp_constraints = AffineWarpConstraints(affine_warp_constraints)
    mask = affine_warp_constraints.mask
    psi = _create_affine_features(output_shape=self._output_shape,
                                  source_shape=self._source_shape)
    # Map normalized [-1, 1] coordinates into source pixel space:
    # scale (and, reused below, offset) is (size - 1) / 2 per axis.
    scales = [(x - 1.0) * .5 for x in reversed(self._source_shape)]
    offsets = scales
    # Transforming a point x's i-th coordinate via an affine transformation
    # is performed via the following dot product:
    #
    #  x_i' = s_i * (T_i * x) + t_i                                        (1)
    #
    # where Ti is the i-th row of an affine matrix, and the scalars s_i and t_i
    # define a decentering and global scaling into the source space.
    # In the AffineGridWarper some of the entries of Ti are provided via the
    # input, some others are instead fixed, according to the constraints
    # assigned in the constructor.
    # In create_features the internal dot product (1) is accordingly broken down
    # into two parts:
    #
    #  x_i' = Ti[uncon_i] * x[uncon_i, :] + offset(con_var)                (2)
    #
    # i.e. the sum of the dot product of the free parameters (coming
    # from the input) indexed by uncond_i and an offset obtained by
    # precomputing the fixed part of (1) according to the constraints.
    # This step is implemented by analyzing row by row the constraints matrix
    # and saving into a list the x[uncon_i] and offset(con_var) data matrices
    # for each output dimension.
    features = []
    # Part 1: for each row, stack the grid features of the *free* entries,
    # pre-scaled into source space. `None` marks a fully constrained row.
    for row, scale in zip(mask, scales):
      x_i = np.array([x for x, is_active in zip(psi, row) if is_active])
      features.append(x_i * scale if len(x_i) else None)

    # Part 2: for each row, precompute the contribution of the *constrained*
    # entries as a single offset vector.
    for row_i, row in enumerate(mask):
      x_i = None
      s = scales[row_i]
      for i, is_active in enumerate(row):
        if is_active:
          continue

        # In principle a whole row of the affine matrix can be fully
        # constrained. In that case the corresponding dot product between input
        # parameters and grid coordinates doesn't need to be implemented in the
        # computation graph since it can be precomputed.
        # When a whole row is constrained, x_i - which is initialized to
        # None - will still be None at the end of the loop when it is appended
        # to the features list; this value is then used to detect this setup
        # in the build function where the graph is assembled.
        if x_i is None:
          x_i = np.array(psi[i]) * affine_warp_constraints[row_i][i] * s
        else:
          x_i += np.array(psi[i]) * affine_warp_constraints[row_i][i] * s
      features.append(x_i)
    # Part 3: append the per-axis decentering offsets t_i.
    features += offsets
    return features

  def _build(self, inputs):
    """Assembles the module network and adds it to the graph.

    The internal computation graph is assembled according to the set of
    constraints provided at construction time.

    Args:
      inputs: Tensor containing a batch of transformation parameters.

    Returns:
      A batch of warped grids.

    Raises:
      Error: If the input tensor size is not consistent with the constraints
        passed at construction time.
    """
    input_shape = tf.shape(inputs)
    input_dtype = inputs.dtype.as_numpy_dtype
    batch_size = tf.expand_dims(input_shape[0], 0)
    number_of_params = inputs.get_shape()[1]
    if number_of_params != self._constraints.num_free_params:
      raise base.Error('Input size is not consistent with constraint '
                       'definition: {} parameters expected, {} provided.'
                       .format(self._constraints.num_free_params,
                               number_of_params))
    # `_create_features` produced 3 entries per output dimension (free-entry
    # features, constrained-entry offsets, decentering offsets).
    num_output_dimensions = len(self._psi) // 3

    def get_input_slice(start, size):
      """Extracts a subset of columns from the input 2D Tensor."""
      return basic.SliceByDim([1], [start], [size])(inputs)

    warped_grid = []
    # Tracks which input columns hold the free parameters of the current row.
    var_index_offset = 0
    number_of_points = np.prod(self._output_shape)
    for i in xrange(num_output_dimensions):
      if self._psi[i] is not None:
        # The i-th output dimension is not fully specified by the constraints,
        # the graph is setup to perform matrix multiplication in batch mode.
        grid_coord = self._psi[i].astype(input_dtype)

        num_active_vars = self._psi[i].shape[0]
        active_vars = get_input_slice(var_index_offset, num_active_vars)
        warped_coord = tf.matmul(active_vars, grid_coord)
        warped_coord = tf.expand_dims(warped_coord, 1)
        var_index_offset += num_active_vars
        offset = self._psi[num_output_dimensions + i]
        if offset is not None:
          offset = offset.astype(input_dtype)
          # Some entries in the i-th row of the affine matrix were constrained
          # and the corresponding matrix multiplications have been precomputed.
          tiling_params = tf.concat(
              [
                  batch_size, tf.constant(
                      1, shape=(1,)), tf.ones_like(offset.shape)
              ],
              0)
          offset = offset.reshape((1, 1) + offset.shape)
          warped_coord += tf.tile(offset, tiling_params)

      else:
        # The i-th output dimension is fully specified by the constraints, and
        # the corresponding matrix multiplications have been precomputed.
        warped_coord = self._psi[num_output_dimensions + i].astype(input_dtype)
        tiling_params = tf.concat(
            [
                batch_size, tf.constant(
                    1, shape=(1,)), tf.ones_like(warped_coord.shape)
            ],
            0)
        warped_coord = warped_coord.reshape((1, 1) + warped_coord.shape)
        warped_coord = tf.tile(warped_coord, tiling_params)

      # Add the decentering offset t_i (third group of `_psi` entries).
      warped_coord += self._psi[i + 2 * num_output_dimensions]
      # Need to help TF figuring out shape inference since tiling information
      # is held in Tensors which are not known until run time.
      warped_coord.set_shape([None, 1, number_of_points])
      warped_grid.append(warped_coord)

    # Reshape all the warped coordinates tensors to match the specified output
    # shape and concatenate  into a single matrix.
    grid_shape = self._output_shape + (1,)
    warped_grid = [basic.BatchReshape(grid_shape)(grid) for grid in warped_grid]
    return tf.concat(warped_grid, len(grid_shape))

  @property
  def constraints(self):
    """Returns the `AffineWarpConstraints` object attached to this warper."""
    return self._constraints

  def inverse(self, name=None):
    """Returns a `sonnet` module to compute inverse affine transforms.

      The function first assembles a network that given the constraints of the
      current AffineGridWarper and a set of input parameters, retrieves the
      coefficients of the corresponding inverse affine transform, then feeds its
      output into a new AffineGridWarper setup to correctly warp the `output`
      space into the `source` space.

    Args:
      name: Name of module implementing the inverse grid transformation.

    Returns:
      A `sonnet` module performing the inverse affine transform of a reference
      grid of points via an AffineGridWarper module.

    Raises:
      tf.errors.UnimplementedError: If the function is called on a non 2D
        instance of AffineGridWarper.
    """
    if self._num_coeff != 6:
      raise tf.errors.UnimplementedError('AffineGridWarper currently supports'
                                         'inversion only for the 2D case.')
    def _affine_grid_warper_inverse(inputs):
      """Assembles network to compute inverse affine transformation.

      Each `inputs` row potentially contains [a, b, tx, c, d, ty]
      corresponding to an affine matrix:

        A = [a, b, tx],
            [c, d, ty]

      We want to generate a tensor containing the coefficients of the
      corresponding inverse affine transformation in a constraints-aware
      fashion.
      Calling M:

        M = [a, b]
            [c, d]

      the affine matrix for the inverse transform is:

         A_in = [M^(-1), M^(-1) * [-tx, -ty]^T]

      where

        M^(-1) = (ad - bc)^(-1) * [ d, -b]
                                  [-c,  a]

      Args:
        inputs: Tensor containing a batch of transformation parameters.

      Returns:
        A tensorflow graph performing the inverse affine transformation
        parametrized by the input coefficients.
      """
      batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
      constant_shape = tf.concat([batch_size, tf.convert_to_tensor((1,))], 0)

      # `index` walks through the input columns; only unconstrained entries
      # consume an input column, constrained ones become constant tensors.
      index = iter(range(6))
      def get_variable(constraint):
        if constraint is None:
          i = next(index)
          return inputs[:, i:i+1]
        else:
          return tf.fill(constant_shape, tf.constant(constraint,
                                                     dtype=inputs.dtype))

      constraints = chain.from_iterable(self.constraints)
      a, b, tx, c, d, ty = (get_variable(constr) for constr in constraints)

      # Closed-form 2x2 inverse scaled by the determinant ad - bc.
      det = a * d - b * c
      a_inv = d / det
      b_inv = -b / det
      c_inv = -c / det
      d_inv = a / det

      m_inv = basic.BatchReshape(
          [2, 2])(tf.concat([a_inv, b_inv, c_inv, d_inv], 1))

      txy = tf.expand_dims(tf.concat([tx, ty], 1), 2)

      txy_inv = basic.BatchFlatten()(tf.matmul(m_inv, txy))
      tx_inv = txy_inv[:, 0:1]
      ty_inv = txy_inv[:, 1:2]

      inverse_gw_inputs = tf.concat(
          [a_inv, b_inv, -tx_inv, c_inv, d_inv, -ty_inv], 1)

      # Note the swapped shapes: the inverse warps `output` back into `source`.
      agw = AffineGridWarper(self.output_shape,
                             self.source_shape)


      return agw(inverse_gw_inputs)  # pylint: disable=not-callable

    if name is None:
      name = self.module_name + '_inverse'
    return base.Module(_affine_grid_warper_inverse, name=name)
class AffineWarpConstraints(object):
  """Affine warp constraints class.

  `AffineWarpConstraints` allow for very succinct definitions of constraints on
  the values of entries in affine transform matrices.
  """

  def __init__(self, constraints=((None,) * 3,) * 2):
    """Creates a constraint definition for an affine transformation.

    Args:
      constraints: A doubly-nested iterable of shape `[N, N+1]` defining
        constraints on the entries of a matrix that represents an affine
        transformation in `N` dimensions. A numeric value bakes in a constraint
        on the corresponding entry in the transformation matrix, whereas `None`
        implies that the corresponding entry will be specified at run time.

    Raises:
      TypeError: If `constraints` is not a nested iterable.
      ValueError: If the double iterable `constraints` has inconsistent
        dimensions.
    """
    try:
      self._constraints = tuple(tuple(x) for x in constraints)
    except TypeError:
      raise TypeError('constraints must be a nested iterable.')

    # Number of rows == dimensionality N of the affine transform.
    self._num_dim = len(self._constraints)
    expected_num_cols = self._num_dim + 1
    if any(len(x) != expected_num_cols for x in self._constraints):
      raise ValueError('The input list must define a Nx(N+1) matrix of '
                       'constraints.')

  def _calc_mask(self):
    """Computes a boolean mask from the user defined constraints."""
    mask = []
    for row in self._constraints:
      # `True` marks a free (unconstrained) entry specified at run time.
      mask.append(tuple(x is None for x in row))
    return tuple(mask)

  def _calc_num_free_params(self):
    """Computes number of non constrained parameters."""
    return sum(row.count(None) for row in self._constraints)

  @property
  def num_free_params(self):
    """Number of entries that must be supplied at run time."""
    return self._calc_num_free_params()

  @property
  def mask(self):
    """Boolean `[N, N+1]` mask where `True` marks unconstrained entries."""
    return self._calc_mask()

  @property
  def constraints(self):
    """The `[N, N+1]` tuple of constraint values (`None` == unconstrained)."""
    return self._constraints

  @property
  def num_dim(self):
    """Dimensionality `N` of the affine transformation."""
    return self._num_dim

  def __getitem__(self, i):
    """Returns the list of constraints for the i-th row of the affine matrix."""
    return self._constraints[i]

  def _combine(self, x, y):
    """Combines two constraints, raising an error if they are not compatible.

    Args:
      x: A constraint value: a number or `None`.
      y: A constraint value: a number or `None`.

    Returns:
      The merged constraint value.

    Raises:
      ValueError: If `x` and `y` are both numeric and differ.
    """
    # Bug fix: compare against `None` explicitly. The previous implementation
    # returned `x or y`, which discarded falsy-but-valid numeric constraints:
    # combining a fixed value of 0 (e.g. from `no_shear_2d`) with an
    # unconstrained entry incorrectly produced `None`.
    if x is None:
      return y
    if y is None:
      return x
    if x != y:
      raise ValueError('Incompatible set of constraints provided.')
    return x

  def __and__(self, rhs):
    """Combines two sets of constraints into a coherent single set."""
    return self.combine_with(rhs)

  def combine_with(self, additional_constraints):
    """Combines two sets of constraints into a coherent single set.

    Args:
      additional_constraints: An `AffineWarpConstraints` instance, or a
        doubly-nested iterable of matching `[N, N+1]` shape.

    Returns:
      A new `AffineWarpConstraints` merging both sets entry by entry.

    Raises:
      ValueError: If any pair of corresponding entries is incompatible.
    """
    other = additional_constraints
    if not isinstance(other, AffineWarpConstraints):
      other = AffineWarpConstraints(other)
    new_constraints = []
    for left, right in zip(self._constraints, other.constraints):
      new_constraints.append([self._combine(x, y) for x, y in zip(left, right)])
    return AffineWarpConstraints(new_constraints)

  # Collection of utilities to initialize an AffineGridWarper in 2D and 3D.
  @classmethod
  def no_constraints(cls, num_dim=2):
    """Empty set of constraints for a num_dim-ensional affine transform."""
    return cls(((None,) * (num_dim + 1),) * num_dim)

  @classmethod
  def translation_2d(cls, x=None, y=None):
    """Assign constraints on translation components of affine transform in 2d."""
    return cls([[None, None, x],
                [None, None, y]])

  @classmethod
  def translation_3d(cls, x=None, y=None, z=None):
    """Assign constraints on translation components of affine transform in 3d."""
    return cls([[None, None, None, x],
                [None, None, None, y],
                [None, None, None, z]])

  @classmethod
  def scale_2d(cls, x=None, y=None):
    """Assigns constraints on scaling components of affine transform in 2d."""
    return cls([[x, None, None],
                [None, y, None]])

  @classmethod
  def scale_3d(cls, x=None, y=None, z=None):
    """Assigns constraints on scaling components of affine transform in 3d."""
    return cls([[x, None, None, None],
                [None, y, None, None],
                [None, None, z, None]])

  @classmethod
  def shear_2d(cls, x=None, y=None):
    """Assigns constraints on shear components of affine transform in 2d."""
    return cls([[None, x, None],
                [y, None, None]])

  @classmethod
  def no_shear_2d(cls):
    """Disables 2d shear by constraining both shear entries to 0."""
    return cls.shear_2d(x=0, y=0)

  @classmethod
  def no_shear_3d(cls):
    """Assigns constraints on shear components of affine transform in 3d."""
    return cls([[None, 0, 0, None],
                [0, None, 0, None],
                [0, 0, None, None]])
| mumuwoyou/vnpy-master | sonnet/python/modules/spatial_transformer.py | Python | mit | 23,304 |
/**
* Copyright (c) 2010-2019 Contributors to the openHAB project
*
* See the NOTICE file(s) distributed with this work for additional
* information.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
*/
package org.openhab.binding.enocean.internal.eep.A5_08;
import static org.openhab.binding.enocean.internal.EnOceanBindingConstants.*;
import org.eclipse.smarthome.config.core.Configuration;
import org.eclipse.smarthome.core.library.types.OnOffType;
import org.eclipse.smarthome.core.library.types.QuantityType;
import org.eclipse.smarthome.core.library.unit.SIUnits;
import org.eclipse.smarthome.core.library.unit.SmartHomeUnits;
import org.eclipse.smarthome.core.types.State;
import org.eclipse.smarthome.core.types.UnDefType;
import org.openhab.binding.enocean.internal.eep.Base._4BSMessage;
import org.openhab.binding.enocean.internal.messages.ERP1Message;
/**
*
* @author Daniel Weber - Initial contribution
*/
public abstract class A5_08 extends _4BSMessage {

    public A5_08(ERP1Message packet) {
        super(packet);
    }

    // Raw (unscaled) sensor value ranges; subclasses with a different raw
    // resolution may override these.
    protected double getUnscaledTemperatureMin() {
        return 0;
    }

    protected double getUnscaledTemperatureMax() {
        return 255;
    }

    protected double getUnscaledIlluminationMin() {
        return 0;
    }

    protected double getUnscaledIlluminationMax() {
        return 255;
    }

    // Scaled (physical) ranges, defined by each concrete A5-08 sub type.
    protected abstract double getScaledTemperatureMin();

    protected abstract double getScaledTemperatureMax();

    protected abstract double getScaledIlluminationMin();

    protected abstract double getScaledIlluminationMax();

    protected int getUnscaledTemperatureValue() {
        return getDB_1Value();
    }

    protected int getUnscaledIlluminationValue() {
        return getDB_2Value();
    }

    /**
     * Maps a raw reading linearly from the raw range onto the scaled range:
     * scaledMin + raw * (scaledMax - scaledMin) / (rawMax - rawMin).
     */
    private double rescale(int rawValue, double rawMin, double rawMax, double scaledMin, double scaledMax) {
        return scaledMin + ((rawValue * (scaledMax - scaledMin)) / (rawMax - rawMin));
    }

    @Override
    protected State convertToStateImpl(String channelId, String channelTypeId, State currentState,
            Configuration config) {
        if (!isValid()) {
            return UnDefType.UNDEF;
        }

        if (channelId.equals(CHANNEL_TEMPERATURE)) {
            return new QuantityType<>(rescale(getUnscaledTemperatureValue(), getUnscaledTemperatureMin(),
                    getUnscaledTemperatureMax(), getScaledTemperatureMin(), getScaledTemperatureMax()),
                    SIUnits.CELSIUS);
        }

        if (channelId.equals(CHANNEL_ILLUMINATION)) {
            return new QuantityType<>(rescale(getUnscaledIlluminationValue(), getUnscaledIlluminationMin(),
                    getUnscaledIlluminationMax(), getScaledIlluminationMin(), getScaledIlluminationMax()),
                    SmartHomeUnits.LUX);
        }

        if (channelId.equals(CHANNEL_MOTIONDETECTION)) {
            // DB0 bit 1 set maps to OFF, cleared to ON.
            return getBit(getDB_0(), 1) ? OnOffType.OFF : OnOffType.ON;
        }

        if (channelId.equals(CHANNEL_OCCUPANCY)) {
            // DB0 bit 0 set maps to OFF, cleared to ON.
            return getBit(getDB_0(), 0) ? OnOffType.OFF : OnOffType.ON;
        }

        return UnDefType.UNDEF;
    }
}
| clinique/openhab2 | bundles/org.openhab.binding.enocean/src/main/java/org/openhab/binding/enocean/internal/eep/A5_08/A5_08.java | Java | epl-1.0 | 3,271 |
#
# Copyright (C) 2008-2010 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk

# Basic package metadata: Mesa 7.2, fetched from the mesa3d SourceForge
# project and verified against the MD5 checksum below.
PKG_NAME:=Mesa
PKG_RELEASE:=2
PKG_VERSION:=7.2
PKG_SOURCE_URL:=@SF/mesa3d
PKG_SOURCE:=$(PKG_NAME)Lib-$(PKG_VERSION).tar.bz2
PKG_MD5SUM:=04d379292e023df0b0266825cb0dbde5

# Run the libtool fixup on the build output; glproto is needed at build
# time only (protocol headers), not at run time.
PKG_FIXUP:=libtool
PKG_BUILD_DEPENDS:=glproto

include $(INCLUDE_DIR)/package.mk

PKG_INSTALL:=1

# Main OpenGL library package (libGL).
define Package/libgl-mesa
  SECTION:=xorg-libraries
  CATEGORY:=Xorg
  SUBMENU:=libraries
  DEPENDS:=+libX11 +libXext +libXfixes +libXdamage +libXxf86vm +libdrm +libexpat
  TITLE:=Mesa OpenGL library
  URL:=http://mesa3d.org
endef

# GLU utility library; requires the main GL package.
define Package/libglu-mesa
  SECTION:=xorg-libraries
  CATEGORY:=Xorg
  SUBMENU:=libraries
  DEPENDS:=+libgl-mesa
  TITLE:=Mesa OpenGL utility library
  URL:=http://mesa3d.org
endef

# Template producing one package definition per DRI driver $(1). Every
# driver except the software rasterizer (swrast) is restricted to x86.
define Package/libgl-mesa-dri/Default
 define Package/libgl-mesa-dri-$(1)
  SECTION:=xorg-libraries
  CATEGORY:=Xorg
  SUBMENU:=libraries
  DEPENDS:=libgl-mesa @DISPLAY_SUPPORT $(if $(findstring swrast,$(1)),,@TARGET_x86)
  TITLE:=mesa dri $(1)
  URL:=http://mesa3d.org
 endef
endef

DRIDRIVERS:=i810 i915 i965 mach64 mga r128 r200 r300 radeon s3v \
	savage sis tdfx trident unichrome ffb swrast

# Instantiate the package-definition template for every DRI driver.
$(foreach dri,$(DRIDRIVERS),$(eval $(call Package/libgl-mesa-dri/Default,$(dri))))

# Tie the configure stamp to the package selection so the tree is
# reconfigured whenever the set of selected driver packages changes.
STAMP_CONFIGURED:=$(STAMP_CONFIGURED)_$(call confvar,CONFIG_PACKAGE_libgl-mesa \
	CONFIG_PACKAGE_libglu-mesa \
	$(foreach dri,$(DRIDRIVERS),CONFIG_PACKAGE_libgl-mesa-dri-$(dri)))

# Configure with DRI support, building only the selected drivers.
define Build/Configure
	$(call Build/Configure/Default, \
		--disable-glw \
		--with-driver=dri \
		--with-dri-drivers="$(foreach dri,$(DRIDRIVERS),$(if $(CONFIG_PACKAGE_libgl-mesa-dri-$(dri)),$(dri)))" \
	)
endef

# Copy headers, shared objects and pkg-config files into the staging dir
# for other packages to build against.
define Build/InstallDev
	$(INSTALL_DIR) $(1)/usr/{include/GL,lib/pkgconfig}
	$(CP) \
		$(PKG_INSTALL_DIR)/usr/include/GL/* \
		$(1)/usr/include/GL
	$(CP) \
		$(PKG_INSTALL_DIR)/usr/lib/*.so* \
		$(1)/usr/lib
	$(INSTALL_DATA) \
		$(PKG_INSTALL_DIR)/usr/lib/pkgconfig/* \
		$(1)/usr/lib/pkgconfig
endef

define Package/libgl-mesa/install
	$(INSTALL_DIR) $(1)/usr/lib
	$(CP) \
		$(PKG_INSTALL_DIR)/usr/lib/libGL.so* \
		$(1)/usr/lib/
endef

define Package/libglu-mesa/install
	$(INSTALL_DIR) $(1)/usr/lib
	$(CP) \
		$(PKG_INSTALL_DIR)/usr/lib/libGLU.so* \
		$(1)/usr/lib/
endef

# Template producing one install rule per DRI driver $(1). `$$(1)` is
# escaped so it expands when the generated rule runs, not here.
define Package/libgl-mesa-dri/install/Default
 define Package/libgl-mesa-dri-$(1)/install
	$(INSTALL_DIR) $$(1)/usr/lib/dri/
	$(CP) \
		$(PKG_INSTALL_DIR)/usr/lib/dri/$(1)_dri.so* \
		$$(1)/usr/lib/dri
 endef
endef

# Instantiate the install-rule template for every DRI driver.
$(foreach dri,$(DRIDRIVERS),$(eval $(call Package/libgl-mesa-dri/install/Default,$(dri))))

$(eval $(call BuildPackage,libgl-mesa))
$(eval $(call BuildPackage,libglu-mesa))
$(foreach dri,$(DRIDRIVERS),$(eval $(call BuildPackage,libgl-mesa-dri-$(dri))))
/*
 * Crypto-JS v2.5.3
 * http://code.google.com/p/crypto-js/
 * (c) 2009-2012 by Jeff Mott. All rights reserved.
 * http://code.google.com/p/crypto-js/wiki/License
 */
// NOTE(review): vendored, minified Crypto-JS. Do not hand-edit the minified
// statements below; to upgrade, replace this file wholesale from upstream.
// First statement: defines `window.Crypto` with `util` (bit rotation,
// endianness, hex/base64/byte conversions), `charenc.UTF8` and
// `charenc.Binary` helpers, unless a Crypto object already exists.
(typeof Crypto=="undefined"||!Crypto.util)&&function(){var d=window.Crypto={},m=d.util={rotl:function(a,c){return a<<c|a>>>32-c},rotr:function(a,c){return a<<32-c|a>>>c},endian:function(a){if(a.constructor==Number)return m.rotl(a,8)&16711935|m.rotl(a,24)&4278255360;for(var c=0;c<a.length;c++)a[c]=m.endian(a[c]);return a},randomBytes:function(a){for(var c=[];a>0;a--)c.push(Math.floor(Math.random()*256));return c},bytesToWords:function(a){for(var c=[],b=0,i=0;b<a.length;b++,i+=8)c[i>>>5]|=(a[b]&255)<<
24-i%32;return c},wordsToBytes:function(a){for(var c=[],b=0;b<a.length*32;b+=8)c.push(a[b>>>5]>>>24-b%32&255);return c},bytesToHex:function(a){for(var c=[],b=0;b<a.length;b++)c.push((a[b]>>>4).toString(16)),c.push((a[b]&15).toString(16));return c.join("")},hexToBytes:function(a){for(var c=[],b=0;b<a.length;b+=2)c.push(parseInt(a.substr(b,2),16));return c},bytesToBase64:function(a){if(typeof btoa=="function")return btoa(f.bytesToString(a));for(var c=[],b=0;b<a.length;b+=3)for(var i=a[b]<<16|a[b+1]<<
8|a[b+2],l=0;l<4;l++)b*8+l*6<=a.length*8?c.push("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(i>>>6*(3-l)&63)):c.push("=");return c.join("")},base64ToBytes:function(a){if(typeof atob=="function")return f.stringToBytes(atob(a));for(var a=a.replace(/[^A-Z0-9+\/]/ig,""),c=[],b=0,i=0;b<a.length;i=++b%4)i!=0&&c.push(("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".indexOf(a.charAt(b-1))&Math.pow(2,-2*i+8)-1)<<i*2|"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".indexOf(a.charAt(b))>>>
6-i*2);return c}},d=d.charenc={};d.UTF8={stringToBytes:function(a){return f.stringToBytes(unescape(encodeURIComponent(a)))},bytesToString:function(a){return decodeURIComponent(escape(f.bytesToString(a)))}};var f=d.Binary={stringToBytes:function(a){for(var c=[],b=0;b<a.length;b++)c.push(a.charCodeAt(b)&255);return c},bytesToString:function(a){for(var c=[],b=0;b<a.length;b++)c.push(String.fromCharCode(a[b]));return c.join("")}}}();
// Second statement: defines `Crypto.SHA1` (digest of a string or byte array,
// returned as hex, bytes or binary string) and the internal `_sha1` core.
(function(){var d=Crypto,m=d.util,f=d.charenc,a=f.UTF8,c=f.Binary,b=d.SHA1=function(a,l){var g=m.wordsToBytes(b._sha1(a));return l&&l.asBytes?g:l&&l.asString?c.bytesToString(g):m.bytesToHex(g)};b._sha1=function(b){b.constructor==String&&(b=a.stringToBytes(b));var c=m.bytesToWords(b),g=b.length*8,b=[],d=1732584193,h=-271733879,j=-1732584194,k=271733878,f=-1009589776;c[g>>5]|=128<<24-g%32;c[(g+64>>>9<<4)+15]=g;for(g=0;g<c.length;g+=16){for(var o=d,p=h,q=j,r=k,s=f,e=0;e<80;e++){if(e<16)b[e]=c[g+e];else{var n=
b[e-3]^b[e-8]^b[e-14]^b[e-16];b[e]=n<<1|n>>>31}n=(d<<5|d>>>27)+f+(b[e]>>>0)+(e<20?(h&j|~h&k)+1518500249:e<40?(h^j^k)+1859775393:e<60?(h&j|h&k|j&k)-1894007588:(h^j^k)-899497514);f=k;k=j;j=h<<30|h>>>2;h=d;d=n}d+=o;h+=p;j+=q;k+=r;f+=s}return[d,h,j,k,f]};b._blocksize=16;b._digestsize=20})();
| DIA-NZ/nlnz-epubviewer | src/main/webapp/lib/thirdparty/crypto-sha1.js | JavaScript | gpl-2.0 | 2,963 |
#ifndef CYGONCE_PLF_CACHE_H
#define CYGONCE_PLF_CACHE_H
//=============================================================================
//
// plf_cache.h
//
// HAL cache control API
//
//=============================================================================
// ####ECOSGPLCOPYRIGHTBEGIN####
// -------------------------------------------
// This file is part of eCos, the Embedded Configurable Operating System.
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
//
// eCos is free software; you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 or (at your option) any later
// version.
//
// eCos is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License
// along with eCos; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
//
// As a special exception, if other files instantiate templates or use
// macros or inline functions from this file, or you compile this file
// and link it with other works to produce a work based on this file,
// this file does not by itself cause the resulting work to be covered by
// the GNU General Public License. However the source code for this file
// must still be made available in accordance with section (3) of the GNU
// General Public License v2.
//
// This exception does not invalidate any other reasons why a work based
// on this file might be covered by the GNU General Public License.
// -------------------------------------------
// ####ECOSGPLCOPYRIGHTEND####
//=============================================================================
//#####DESCRIPTIONBEGIN####
//
// Author(s): nickg
// Contributors: nickg
// Date: 1998-02-17
// Purpose: Cache control API
// Description: The macros defined here provide the HAL APIs for handling
// cache control operations.
// Usage:
// #include <cyg/hal/plf_cache.h>
// ...
//
//
//####DESCRIPTIONEND####
//
//=============================================================================
//=============================================================================
// Nothing here at present.
//-----------------------------------------------------------------------------
#endif // ifndef CYGONCE_PLF_CACHE_H
// End of plf_cache.h
| reille/proj_ecos | src/ecos/packages/hal/mips/ref4955/current/include/plf_cache.h | C | gpl-2.0 | 3,089 |
/*
*
* Copyright (c) Matthew Wilcox for Hewlett Packard 2003
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**********************************************************
*
* TEST IDENTIFIER : flock06
*
* EXECUTED BY : anyone
*
* TEST TITLE : Error condition test for flock(2)
*
* TEST CASE TOTAL : 1
*
* AUTHOR : Matthew Wilcox <willy@debian.org>
*
* SIGNALS
* Uses SIGUSR1 to pause before test if option set.
* (See the parse_opts(3) man page).
*
* DESCRIPTION
* This test verifies that flock locks held on one fd conflict with
* flock locks held on a different fd.
*
* Test:
* The process opens two file descriptors on the same file.
* It acquires an exclusive flock on the first descriptor,
* checks that attempting to acquire an flock on the second
* descriptor fails. Then it removes the first descriptor's
* lock and attempts to acquire an exclusive lock on the
* second descriptor.
*
* USAGE: <for command-line>
* flock06 [-c n] [-e] [-i n] [-I x] [-P x] [-t] [-h] [-f] [-p]
* where, -c n : Run n copies concurrently
* -f : Turn off functional testing
* -e : Turn on errno logging
* -h : Show help screen
* -i n : Execute test n times
* -I x : Execute test for x seconds
* -p : Pause for SIGUSR1 before starting
* -P x : Pause for x seconds between iterations
* -t : Turn on syscall timing
*
****************************************************************/
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/wait.h>
#include "test.h"
void setup(void);
void cleanup(void);
/* LTP test case identifier reported in results. */
char *TCID = "flock06";
/* NOTE(review): header comment says "TEST CASE TOTAL : 1" but main() emits
 * four tst_resm() results per iteration; 3 may be stale — confirm intent. */
int TST_TOTAL = 3;
/* Name of the temporary test file, filled in by setup(). */
char filename[100];
/*
 * Verify that flock() locks held on one fd conflict with flock() locks
 * attempted on a second fd open on the same file, and that unlocking the
 * first fd allows the second to acquire the lock.
 *
 * Fix: the split string literals "...open the" "file..." concatenated to
 * "thefile" — a space was missing at the join point.
 */
int main(int argc, char **argv)
{
	int lc;
	tst_parse_opts(argc, argv, NULL, NULL);
	setup();
	/* The following loop checks looping state if -i option given */
	for (lc = 0; TEST_LOOPING(lc); lc++) {
		int fd1, fd2;
		/* reset tst_count in case we are looping */
		tst_count = 0;
		fd1 = open(filename, O_RDWR);
		if (fd1 == -1)
			tst_brkm(TFAIL, cleanup, "failed to open the "
				 "file, errno %d", errno);
		/* First exclusive lock on fd1 must succeed. */
		TEST(flock(fd1, LOCK_EX | LOCK_NB));
		if (TEST_RETURN != 0)
			tst_resm(TFAIL, "First attempt to flock() failed, "
				 "errno %d", TEST_ERRNO);
		else
			tst_resm(TPASS, "First attempt to flock() passed");
		fd2 = open(filename, O_RDWR);
		if (fd2 == -1)
			tst_brkm(TFAIL, cleanup, "failed to open the "
				 "file, errno %d", errno);
		/* Second fd on the same file must be denied while fd1 holds the lock. */
		TEST(flock(fd2, LOCK_EX | LOCK_NB));
		if (TEST_RETURN == -1)
			tst_resm(TPASS, "Second attempt to flock() denied");
		else
			tst_resm(TFAIL, "Second attempt to flock() succeeded!");
		/* Release fd1's lock ... */
		TEST(flock(fd1, LOCK_UN));
		if (TEST_RETURN == -1)
			tst_resm(TFAIL, "Failed to unlock fd1, errno %d",
				 TEST_ERRNO);
		else
			tst_resm(TPASS, "Unlocked fd1");
		/* ... after which fd2 must be able to take the exclusive lock. */
		TEST(flock(fd2, LOCK_EX | LOCK_NB));
		if (TEST_RETURN == -1)
			tst_resm(TFAIL, "Third attempt to flock() denied!");
		else
			tst_resm(TPASS, "Third attempt to flock() succeeded");
		close(fd1);
		close(fd2);
	}
	cleanup();
	tst_exit();
}
/*
* setup()
* performs all ONE TIME setup for this test
*/
/*
 * setup()
 *	performs all ONE TIME setup for this test:
 *	installs signal handling, honours -p/-i pause options, creates a
 *	unique temporary directory and an empty mode-0666 test file in it.
 */
void setup(void)
{
	int fd;
	tst_sig(FORK, DEF_HANDLER, cleanup);
	/* Pause if that option was specified
	 * TEST_PAUSE contains the code to fork the test with the -i option.
	 * You want to make sure you do this before you create your temporary
	 * directory.
	 */
	TEST_PAUSE;
	/* Create a unique temporary directory and chdir() to it. */
	tst_tmpdir();
	/* Per-process filename so concurrent copies (-c) do not collide. */
	sprintf(filename, "flock06.%d", getpid());
	/* creating temporary file */
	fd = creat(filename, 0666);
	if (fd < 0)
		tst_brkm(TBROK, tst_rmdir, "creating a new file failed");
	close(fd);
}
/*
* cleanup()
* performs all ONE TIME cleanup for this test at
* completion or premature exit
*/
/*
 * cleanup()
 *	performs all ONE TIME cleanup for this test at
 *	completion or premature exit: remove the test file, then the
 *	temporary directory (order matters — the directory must be empty).
 */
void cleanup(void)
{
	unlink(filename);
	tst_rmdir();
}
| itnihao/ltp | testcases/kernel/syscalls/flock/flock06.c | C | gpl-2.0 | 4,864 |
/*
* Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#define DM_MSG_PREFIX "core"
/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
static const char *_name = DM_NAME;
/* 'major' is the requested major; '_major' is the one actually registered
 * (local_init() lets the block layer pick one when 'major' is 0). */
static unsigned int major = 0;
static unsigned int _major = 0;
/* Minor-number allocator and the spinlock guarding it and open/close races. */
static DEFINE_IDR(_minor_idr);
static DEFINE_SPINLOCK(_minor_lock);
/* Deferred-remove machinery: work item + dedicated ordered workqueue. */
static void do_deferred_remove(struct work_struct *w);
static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
static struct workqueue_struct *deferred_remove_workqueue;
/* Global event counter and waitqueue woken by dm_issue_global_event(). */
atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
/* Bump the global event counter and wake anyone sleeping on dm_global_eventq. */
void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}
/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;	/* next sector to map */
	unsigned sector_count;	/* sectors remaining in the original bio */
};
/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;		/* DM_TIO_MAGIC; distinguishes tio from io layout */
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;	/* true if this tio is the one embedded in dm_io */
	struct bio clone;	/* must be last: per-bio data lives in front of it */
};
/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned magic;		/* DM_IO_MAGIC */
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;	/* outstanding clones + 1; see dec_pending() */
	struct bio *orig_bio;
	unsigned long start_time;
	spinlock_t endio_lock;	/* serializes status updates from clone endios */
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};
/*
 * Return a pointer to the target's per-bio data region, which is laid out
 * in memory immediately before the clone bio (and, for the tio embedded in
 * a dm_io, before the dm_io as well).
 */
void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);
/*
 * Inverse of dm_per_bio_data(): recover the clone bio from a per-bio data
 * pointer.  The magic value directly after the data tells us whether a
 * dm_io or a bare dm_target_io follows.
 */
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
/* Which duplicate (num_*bios) this clone is, for targets that issue several. */
unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
/* Sentinel stored in the minor idr while allocation is still in progress. */
#define MINOR_ALLOCED ((void *)-1)
/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;
/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;	/* clone bios */
	struct bio_set io_bs;	/* dm_io + embedded first clone */
};
/* One per underlying device a table references; refcounted by mode+dev. */
struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};
/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS 16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
/*
 * Read an int module parameter and clamp it to [min, max].  If the stored
 * value was out of range, opportunistically write the clamped value back
 * (best effort: the cmpxchg may lose a race, which is fine — we still
 * return a valid value).
 */
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int clamped = param;

	if (param < min)
		clamped = min;
	else if (param > max)
		clamped = max;

	if (clamped != param) {
		(void)cmpxchg(module_param, param, clamped);
		param = clamped;
	}

	return param;
}
/*
 * Read an unsigned module parameter: 0 means "use the default", values
 * above max are capped.  As with the int variant, any correction is
 * written back best-effort via cmpxchg.
 */
unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned replacement = 0;

	if (!param)
		replacement = def;
	else if (param > max)
		replacement = max;

	if (replacement) {
		(void)cmpxchg(module_param, param, replacement);
		param = replacement;
	}

	return param;
}
/* Number of reserved bio-based IOs, validated against DM_RESERVED_MAX_IOS. */
unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
/* NUMA node for md allocations, clamped to the valid online-node range. */
static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}
/*
 * Module-local initialization: uevent subsystem, the deferred-remove
 * workqueue, and block-device major registration.  Unwinds in reverse
 * order on failure.
 */
static int __init local_init(void)
{
	int r;
	r = dm_uevent_init();
	if (r)
		return r;
	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}
	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;
	/* major == 0 asks the block layer to pick one; it is returned in r. */
	if (!_major)
		_major = r;
	return 0;
out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
	return r;
}
/* Tear down everything local_init() set up, flushing pending work first. */
static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();
	_major = 0;
	DMINFO("cleaned up");
}
/* Sub-system constructors, run in order by dm_init(); _exits mirrors them
 * one-to-one so partial initialization can be unwound in reverse. */
static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};
static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};
/*
 * Module init: run each constructor in _inits[]; on failure, run the
 * matching destructors for everything already initialized (in reverse).
 */
static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;
	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}
	return 0;
bad:
	while (i--)
		_exits[i]();
	return r;
}
/* Module exit: run all destructors in reverse, then free the minor idr. */
static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);
	while (i--)
		_exits[i]();
	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}
/*
* Block device functions
*/
/* True once dm_lock_for_deletion() has marked the device for deletion. */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}
/*
 * Block-device open: take a reference on the md and count the opener,
 * refusing devices that are being freed or deleted.  _minor_lock closes
 * the race against teardown.
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;
	spin_lock(&_minor_lock);
	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;
	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);
	return md ? 0 : -ENXIO;
}
/*
 * Block-device release: drop the open count; if this was the last opener
 * and a deferred remove is pending, kick the deferred-remove work.
 */
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;
	spin_lock(&_minor_lock);
	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;
	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);
	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}
/* Current number of block-layer openers of this md. */
int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}
/*
* Guarantees nothing is using the device before it's deleted.
*/
/*
 * Atomically mark the device DELETING if nothing has it open.  With
 * mark_deferred, a busy device is instead flagged for deferred removal
 * (-EBUSY returned).  With only_deferred, refuse (-EEXIST) unless a
 * deferred removal was already requested.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;
	spin_lock(&_minor_lock);
	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);
	spin_unlock(&_minor_lock);
	return r;
}
/* Withdraw a pending deferred removal, unless deletion already started. */
int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;
	spin_lock(&_minor_lock);
	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
	spin_unlock(&_minor_lock);
	return r;
}
/* Workqueue handler for deferred removals queued by dm_blk_close(). */
static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}
/* Device size in sectors, as reported by the gendisk. */
sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}
/* The md's request queue. */
struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}
/* The md's statistics container. */
struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}
/* HDIO_GETGEO: report the geometry stored via dm_set_geometry(). */
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	return dm_get_geometry(md, geo);
}
#ifdef CONFIG_BLK_DEV_ZONED
/*
 * Zone-report callback helper for targets: translate a zone reported by the
 * underlying device into the target's position in the dm device, then pass
 * it on to the original caller's callback.
 */
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct dm_report_zones_args *args = data;
	/* Offset between the target's start in the dm device and the start
	 * sector of this report on the underlying device. */
	sector_t sector_diff = args->tgt->begin - args->start;
	/*
	 * Ignore zones beyond the target range.
	 */
	if (zone->start >= args->start + args->tgt->len)
		return 0;
	/*
	 * Remap the start sector and write pointer position of the zone
	 * to match its position in the target range.
	 */
	zone->start += sector_diff;
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
		/* Full/empty zones pin the write pointer to the zone edges. */
		if (zone->cond == BLK_ZONE_COND_FULL)
			zone->wp = zone->start + zone->len;
		else if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->wp = zone->start;
		else
			zone->wp += sector_diff;
	}
	args->next_sector = zone->start + zone->len;
	return args->orig_cb(zone, args->zone_idx++, args->orig_data);
}
EXPORT_SYMBOL_GPL(dm_report_zones_cb);
/*
 * report_zones block-device op: walk targets starting at @sector, asking
 * each target that implements ->report_zones to report up to the remaining
 * number of zones.  Returns the number of zones reported or a negative
 * errno.
 */
static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct mapped_device *md = disk->private_data;
	struct dm_table *map;
	int srcu_idx, ret;
	struct dm_report_zones_args args = {
		.next_sector = sector,
		.orig_data = data,
		.orig_cb = cb,
	};
	if (dm_suspended_md(md))
		return -EAGAIN;
	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return -EIO;
	do {
		struct dm_target *tgt;
		tgt = dm_table_find_target(map, args.next_sector);
		if (WARN_ON_ONCE(!tgt->type->report_zones)) {
			ret = -EIO;
			goto out;
		}
		args.tgt = tgt;
		ret = tgt->type->report_zones(tgt, &args,
					      nr_zones - args.zone_idx);
		if (ret < 0)
			goto out;
	} while (args.zone_idx < nr_zones &&
		 args.next_sector < get_capacity(disk));
	ret = args.zone_idx;
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
#else
#define dm_blk_report_zones NULL
#endif /* CONFIG_BLK_DEV_ZONED */
/*
 * Resolve the block device an ioctl should be forwarded to.  Only single
 * target tables that implement ->prepare_ioctl are supported.  On success
 * the SRCU read side stays held (released by dm_unprepare_ioctl()); retries
 * while the target reports -ENOTCONN unless a fatal signal is pending.
 */
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
	__acquires(md->io_barrier)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;
retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;
	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;
	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;
	if (dm_suspended_md(md))
		return -EAGAIN;
	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		/* Drop the table reference before sleeping and retrying. */
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}
	return r;
}
/* Release the SRCU reference taken by dm_prepare_ioctl(). */
static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
	__releases(md->io_barrier)
{
	dm_put_live_table(md, srcu_idx);
}
/*
 * Block-device ioctl: resolve the underlying bdev via the single target,
 * then forward the ioctl to its driver.  A positive return from
 * dm_prepare_ioctl() means the target maps only part of the device, in
 * which case CAP_SYS_RAWIO is required to forward the ioctl.
 */
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;
	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;
	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT(
	"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}
	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
static void start_io_acct(struct dm_io *io);
/*
 * Allocate the dm_io tracking structure for an original bio.  The dm_io is
 * carved out of the bio allocated from md->io_bs (the clone bio is the last
 * member of the embedded dm_target_io).  Starts I/O accounting.
 */
static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;
	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;
	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	/* tio->io == NULL marks the embedded tio as free for alloc_tio(). */
	tio->io = NULL;
	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);
	start_io_acct(io);
	return io;
}
/* Free a dm_io by dropping the embedded clone bio's reference. */
static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}
/*
 * Allocate a dm_target_io for one clone.  The first clone of an original
 * bio reuses the tio embedded in the dm_io; further clones come from the
 * md's bio_set.
 */
static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;
	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;
		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}
	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;
	return tio;
}
/* Free a tio; the embedded one is released with its dm_io, not here. */
static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}
/* Start time (ns) of the original I/O a clone bio belongs to. */
u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	return jiffies_to_nsecs(io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
/* Begin block-layer and (optionally) dm-stats accounting for an original bio. */
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	io->start_time = bio_start_io_acct(bio);
	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}
/*
 * Finish accounting for an original bio and wake any waiter on md->wait
 * (e.g. suspend waiting for in-flight I/O to drain).
 */
static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned long duration = jiffies - io->start_time;
	bio_end_io_acct(bio, io->start_time);
	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);
	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}
/*
* Add the bio to the list of deferred io.
*/
/*
 * Add the bio to the list of deferred io.
 * Queued bios are reissued later by the md->work worker.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;
	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}
/*
* Everyone (including functions in this file), should use this
* function to access the md->map field, and make sure they call
* dm_put_live_table() when finished.
*/
/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);
	return srcu_dereference(md->map, &md->io_barrier);
}
/* Release the SRCU read-side reference taken by dm_get_live_table(). */
void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}
/* Wait for all current table readers (both SRCU and RCU fast-path). */
void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}
/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}
static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
/* Holder cookie used when exclusively claiming underlying block devices. */
static char *_dm_claim_ptr = "I belong to device-mapper";
/*
 * Open a table device so we can use it as a map destination.
 * Claims the bdev exclusively, links it to the dm disk as a holder, and
 * looks up its dax device (may be NULL for non-dax devices).
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;
	int r;
	BUG_ON(td->dm_dev.bdev);
	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}
	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}
/*
 * Close a table device that we've been using.
 * Reverses open_table_device(); a NULL bdev means it was never opened.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;
	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}
/* Look up an already-open table device by (dev, mode); NULL if absent. */
static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;
	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;
	return NULL;
}
/*
 * Get (or create) a refcounted handle on an underlying device for use as a
 * table destination.  Devices are shared per (dev, mode) pair; the lookup
 * and list manipulation are serialized by md->table_devices_lock.
 */
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;
	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}
		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;
		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}
		format_dev_t(td->dm_dev.name, dev);
		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);
	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);
/* Drop a reference from dm_get_table_device(); closes and frees on last put. */
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);
	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);
/*
 * Free any table devices still on the list at md destruction time,
 * warning about each leaked reference.
 */
static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;
	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);
		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}
/*
* Get the geometry associated with a dm device
*/
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;
	return 0;
}
/*
 * Set the geometry of a device.
 * Rejects a start sector beyond cylinders * heads * sectors.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}
	md->geometry = *geo;
	return 0;
}
/* True while a noflush suspend is in progress on this md. */
static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
* Decrements the number of outstanding ios that a bio has been
* cloned into, completing the original io if necc.
*/
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 *
 * BLK_STS_DM_REQUEUE during a noflush suspend pushes the bio back onto the
 * deferred list instead of completing it; a flush-with-data that finishes
 * its preflush is reissued without REQ_PREFLUSH.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;
	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}
	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, io->orig_bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}
		io_error = io->status;
		bio = io->orig_bio;
		end_io_acct(io);
		free_io(md, io);
		if (io_error == BLK_STS_DM_REQUEUE)
			return;
		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}
/* Called when a target reports BLK_STS_TARGET for an op the queue claimed
 * to support: zero the corresponding limit so the op is no longer issued. */
void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);
	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}
void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);
	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}
void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);
	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}
/*
 * Completion handler for every clone bio: downgrade queue features the
 * device turned out not to support, fix up zone-append sectors, give the
 * target's ->end_io a chance to requeue or defer, then drop this clone's
 * reference on the dm_io.
 */
static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;
	struct bio *orig_bio = io->orig_bio;
	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bio->bi_disk->queue->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}
	/*
	 * For zone-append bios get offset in zone of the written
	 * sector and add that to the original bio sector pos.
	 */
	if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
		sector_t written_sector = bio->bi_iter.bi_sector;
		struct request_queue *q = orig_bio->bi_disk->queue;
		u64 mask = (u64)blk_queue_zone_sectors(q) - 1;
		orig_bio->bi_iter.bi_sector += written_sector & mask;
	}
	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}
	free_tio(tio);
	dec_pending(io, error);
}
/*
* Return maximum size of I/O possible at the supplied sector up to the current
* target boundary.
*/
/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	return ti->len - target_offset;
}
/*
 * As above, but additionally capped so the I/O does not cross a
 * ti->max_io_len boundary (power-of-two lengths take the mask fast path;
 * sector_div handles the general case and leaves the remainder in 'offset').
 */
static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;
	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;
		if (len > max_len)
			len = max_len;
	}
	return len;
}
/*
 * Set a target's max_io_len, rejecting values that do not fit the 32-bit
 * field.  Returns 0 or -EINVAL (with ti->error set).
 */
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}
	ti->max_io_len = (uint32_t) len;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
/*
 * Find the live target covering @sector, holding the SRCU read side on
 * success (caller must dm_put_live_table()).  Returns NULL if there is no
 * live table or no target at that sector.
 */
static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;
	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;
	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;
	return ti;
}
/*
 * dax_operations ->direct_access: delegate to the target covering the
 * pgoff, clamping nr_pages to the target/max_io_len boundary.
 */
static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;
	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
 out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
/*
 * dax_operations ->dax_supported: true only if every device in the live
 * table supports dax at the given block size.
 */
static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
			     int blocksize, sector_t start, sector_t len)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	struct dm_table *map;
	int srcu_idx;
	bool ret;
	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return false;
	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);
	dm_put_live_table(md, srcu_idx);
	return ret;
}
/*
 * dax_operations ->copy_from_iter: delegate to the target covering pgoff,
 * falling back to a plain copy_from_iter() if the target has no hook.
 */
static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;
	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
/* Mirror of dm_dax_copy_from_iter() for the copy-to-iter direction. */
static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;
	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
/*
 * dax_operations ->zero_page_range: delegate to the target covering pgoff.
 * Unlike the copy hooks there is no fallback — every dax target must
 * implement dax_zero_page_range.
 */
static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;
	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is mandatory dax operation. If we are
		 *  here, something is wrong.
		 */
		dm_put_live_table(md, srcu_idx);
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
 out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
* REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
* sent in a next bio.
*
* A diagram that explains the arithmetics:
* +--------------------+---------------+-------+
* | 1 | 2 | 3 |
* +--------------------+---------------+-------+
*
* <-------------- *tio->len_ptr --------------->
* <------- bi_size ------->
* <-- n_sectors -->
*
* Region 1 was already iterated over with bio_advance or similar function.
* (it may be empty if the target doesn't use bio_advance)
* Region 2 is the remaining bio size that the target wants to process.
* (it may be empty if region 1 is non-empty, although there is no reason
* to make it empty)
* The target requires that region 3 is to be sent in the next bio.
*
* If the target wants to receive multiple copies of the bio (via num_*bios, etc),
* the partially processed part (the sum of regions 1+2) must be the same for all
* copies of the bio.
*/
/*
 * Truncate the clone to the first n_sectors and shrink *tio->len_ptr so
 * that region 3 (see the diagram above) is reissued in a follow-up bio.
 * Not allowed on REQ_PREFLUSH bios.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
/*
 * Hand the clone to the target's ->map hook and act on the DM_MAPIO_*
 * return code: dispatch remapped bios, free and fail/requeue rejected
 * ones, or do nothing if the target took ownership.
 */
static blk_qc_t __map_bio(struct dm_target_io *tio)
{
 int r;
 sector_t sector;
 struct bio *clone = &tio->clone;
 struct dm_io *io = tio->io;
 struct dm_target *ti = tio->ti;
 blk_qc_t ret = BLK_QC_T_NONE;
 clone->bi_end_io = clone_endio;
 /*
  * Map the clone. If r == 0 we don't need to do
  * anything, the target has assumed ownership of
  * this io.
  */
 atomic_inc(&io->io_count);
 /* Record the pre-map sector for the remap tracepoint below. */
 sector = clone->bi_iter.bi_sector;
 r = ti->type->map(ti, clone);
 switch (r) {
 case DM_MAPIO_SUBMITTED:
  break;
 case DM_MAPIO_REMAPPED:
  /* the bio has been remapped so dispatch it */
  trace_block_bio_remap(clone->bi_disk->queue, clone,
        bio_dev(io->orig_bio), sector);
  ret = submit_bio_noacct(clone);
  break;
 case DM_MAPIO_KILL:
  /* Target rejected the bio: drop the clone and fail the io. */
  free_tio(tio);
  dec_pending(io, BLK_STS_IOERR);
  break;
 case DM_MAPIO_REQUEUE:
  /* Target wants the bio retried later. */
  free_tio(tio);
  dec_pending(io, BLK_STS_DM_REQUEUE);
  break;
 default:
  DMWARN("unimplemented target map return value: %d", r);
  BUG();
 }
 return ret;
}
/* Point @bio at @sector and limit its payload to @len sectors. */
static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
 bio->bi_iter.bi_size = to_bytes(len);
 bio->bi_iter.bi_sector = sector;
}
/*
 * Creates a bio that consists of range of complete bvecs.
 *
 * Fast-clones @bio into tio->clone, carries over crypto and (if the
 * target supports it) integrity metadata, then trims the clone to
 * [@sector, @sector + @len). Returns 0 or a negative errno.
 */
static int clone_bio(struct dm_target_io *tio, struct bio *bio,
       sector_t sector, unsigned len)
{
 struct bio *clone = &tio->clone;
 __bio_clone_fast(clone, bio);
 bio_crypt_clone(clone, bio, GFP_NOIO);
 if (bio_integrity(bio)) {
  int r;
  /* Integrity data must not silently pass through unsupporting targets. */
  if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
        !dm_target_passes_integrity(tio->ti->type))) {
   DMWARN("%s: the target %s doesn't support integrity data.",
    dm_device_name(tio->io->md),
    tio->ti->type->name);
   return -EIO;
  }
  r = bio_integrity_clone(clone, bio, GFP_NOIO);
  if (r < 0)
   return r;
 }
 /* Skip the part of the original already handled, then cap the length. */
 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
 clone->bi_iter.bi_size = to_bytes(len);
 if (bio_integrity(bio))
  bio_integrity_trim(clone);
 return 0;
}
/*
 * Allocate @num_bios tio clones onto @blist. A single bio may simply
 * block in the mempool (GFP_NOIO). For multiple bios we first try
 * non-blocking GFP_NOWAIT allocations; if that cannot satisfy all of
 * them we free the partial set and retry under table_devices_lock with
 * GFP_NOIO — the lock serializes blocking allocators so one of them
 * cannot starve the mempool for the others.
 */
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
      struct dm_target *ti, unsigned num_bios)
{
 struct dm_target_io *tio;
 int try;
 if (!num_bios)
  return;
 if (num_bios == 1) {
  tio = alloc_tio(ci, ti, 0, GFP_NOIO);
  bio_list_add(blist, &tio->clone);
  return;
 }
 /* try 0: opportunistic GFP_NOWAIT; try 1: locked, blocking GFP_NOIO. */
 for (try = 0; try < 2; try++) {
  int bio_nr;
  struct bio *bio;
  if (try)
   mutex_lock(&ci->io->md->table_devices_lock);
  for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
   tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
   if (!tio)
    break;
   bio_list_add(blist, &tio->clone);
  }
  if (try)
   mutex_unlock(&ci->io->md->table_devices_lock);
  if (bio_nr == num_bios)
   return;
  /* Partial success is useless: release everything and retry. */
  while ((bio = bio_list_pop(blist))) {
   tio = container_of(bio, struct dm_target_io, clone);
   free_tio(tio);
  }
 }
}
/*
 * Fast-clone ci->bio into @tio and map it. When @len is NULL the clone
 * keeps the original's sector/size (e.g. empty flushes); otherwise it is
 * repositioned to ci->sector with *len sectors.
 */
static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
        struct dm_target_io *tio, unsigned *len)
{
 struct bio *clone = &tio->clone;
 tio->len_ptr = len;
 __bio_clone_fast(clone, ci->bio);
 if (len)
  bio_setup_sector(clone, ci->sector, *len);
 return __map_bio(tio);
}
/*
 * Send @num_bios identical clones of ci->bio to @ti (used for flushes
 * and the "abnormal" ops where a target requests multiple copies).
 */
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
      unsigned num_bios, unsigned *len)
{
 struct bio_list blist = BIO_EMPTY_LIST;
 struct bio *bio;
 struct dm_target_io *tio;
 /* Allocate all clones up front, then map them one by one. */
 alloc_multiple_bios(&blist, ci, ti, num_bios);
 while ((bio = bio_list_pop(&blist))) {
  tio = container_of(bio, struct dm_target_io, clone);
  (void) __clone_and_map_simple_bio(ci, tio, len);
 }
}
/*
 * Replicate an empty flush bio to every target in the live table,
 * honouring each target's num_flush_bios. Always returns 0.
 */
static int __send_empty_flush(struct clone_info *ci)
{
 unsigned target_nr = 0;
 struct dm_target *ti;
 /*
  * Empty flush uses a statically initialized bio, as the base for
  * cloning. However, blkg association requires that a bdev is
  * associated with a gendisk, which doesn't happen until the bdev is
  * opened. So, blkg association is done at issue time of the flush
  * rather than when the device is created in alloc_dev().
  */
 bio_set_dev(ci->bio, ci->io->md->bdev);
 BUG_ON(bio_has_data(ci->bio));
 while ((ti = dm_table_get_target(ci->map, target_nr++)))
  __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
 return 0;
}
/*
 * Clone *len sectors of ci->bio starting at @sector and map the clone
 * to @ti. Returns 0 on success or the negative errno from clone_bio().
 */
static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
        sector_t sector, unsigned *len)
{
 struct bio *bio = ci->bio;
 struct dm_target_io *tio;
 int r;
 tio = alloc_tio(ci, ti, 0, GFP_NOIO);
 tio->len_ptr = len;
 r = clone_bio(tio, bio, sector, *len);
 if (r < 0) {
  /* Clone setup failed (e.g. integrity mismatch): drop the tio. */
  free_tio(tio);
  return r;
 }
 (void) __map_bio(tio);
 return 0;
}
/* Accessors for the per-target bio replication counts of abnormal ops. */
typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
static unsigned get_num_discard_bios(struct dm_target *ti)
{
 return ti->num_discard_bios;
}
static unsigned get_num_secure_erase_bios(struct dm_target *ti)
{
 return ti->num_secure_erase_bios;
}
static unsigned get_num_write_same_bios(struct dm_target *ti)
{
 return ti->num_write_same_bios;
}
static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
{
 return ti->num_write_zeroes_bios;
}
/*
 * Issue an abnormal op (discard/secure-erase/write-same/write-zeroes)
 * to @ti as @num_bios duplicate clones, capped at the target boundary,
 * and advance the clone_info past the covered range. Returns
 * -EOPNOTSUPP when the target does not take this op at all.
 */
static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
           unsigned num_bios)
{
 unsigned len;
 /*
  * Even though the device advertised support for this type of
  * request, that does not mean every target supports it, and
  * reconfiguration might also have changed that since the
  * check was performed.
  */
 if (!num_bios)
  return -EOPNOTSUPP;
 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
 __send_duplicate_bios(ci, ti, num_bios, &len);
 ci->sector += len;
 ci->sector_count -= len;
 return 0;
}
/* Per-op wrappers binding each abnormal op to its replication count. */
static int __send_discard(struct clone_info *ci, struct dm_target *ti)
{
 return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti));
}
static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
{
 return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti));
}
static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
{
 return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti));
}
static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
{
 return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti));
}
/*
 * Report whether @bio carries one of the "abnormal" operations
 * (discard, secure erase, write-same, write-zeroes) that take the
 * duplicate-bio path instead of normal data cloning.
 */
static bool is_abnormal_io(struct bio *bio)
{
 switch (bio_op(bio)) {
 case REQ_OP_DISCARD:
 case REQ_OP_SECURE_ERASE:
 case REQ_OP_WRITE_SAME:
 case REQ_OP_WRITE_ZEROES:
  return true;
 default:
  return false;
 }
}
/*
 * Dispatch an abnormal op to its sender, storing the outcome in
 * *result. Returns true when the bio was one of the abnormal ops
 * (i.e. it has been handled here), false for normal I/O.
 */
static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
      int *result)
{
 switch (bio_op(ci->bio)) {
 case REQ_OP_DISCARD:
  *result = __send_discard(ci, ti);
  break;
 case REQ_OP_SECURE_ERASE:
  *result = __send_secure_erase(ci, ti);
  break;
 case REQ_OP_WRITE_SAME:
  *result = __send_write_same(ci, ti);
  break;
 case REQ_OP_WRITE_ZEROES:
  *result = __send_write_zeroes(ci, ti);
  break;
 default:
  return false;
 }
 return true;
}
/*
 * Select the correct strategy for processing a non-flush bio.
 *
 * Looks up the target for ci->sector, routes abnormal ops through
 * __process_abnormal_io, otherwise clones and maps up to the target's
 * max_io_len and advances the clone_info. Returns 0 or a negative errno.
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
 struct dm_target *ti;
 unsigned len;
 int r;
 ti = dm_table_find_target(ci->map, ci->sector);
 if (!ti)
  return -EIO;
 if (__process_abnormal_io(ci, ti, &r))
  return r;
 /* Only as much as fits within this target's I/O boundary. */
 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
 r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
 if (r < 0)
  return r;
 ci->sector += len;
 ci->sector_count -= len;
 return 0;
}
/*
 * Prepare a clone_info for splitting @bio against @map: allocates the
 * tracking dm_io and seeds the starting sector. sector_count is set by
 * the callers according to the bio type.
 */
static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
       struct dm_table *map, struct bio *bio)
{
 ci->map = map;
 ci->io = alloc_io(md, bio);
 ci->sector = bio->bi_iter.bi_sector;
}
/* Subtract @subnd from a partition stat counter (used under
 * part_stat_lock() to undo double-counted sectors after a split). */
#define __dm_part_stat_sub(part, field, subnd) \
 (part_stat_get(part, field) -= (subnd))
/*
 * Entry point to split a bio into clones and submit them to the targets.
 *
 * Flushes are replicated to all targets from an on-stack bio; zone
 * management ops map 1:1; everything else is split target-by-target,
 * with any unprocessable remainder chained back through
 * submit_bio_noacct(). Returns the cookie of the last submission.
 */
static blk_qc_t __split_and_process_bio(struct mapped_device *md,
     struct dm_table *map, struct bio *bio)
{
 struct clone_info ci;
 blk_qc_t ret = BLK_QC_T_NONE;
 int error = 0;
 init_clone_info(&ci, md, map, bio);
 if (bio->bi_opf & REQ_PREFLUSH) {
  struct bio flush_bio;
  /*
   * Use an on-stack bio for this, it's safe since we don't
   * need to reference it after submit. It's just used as
   * the basis for the clone(s).
   */
  bio_init(&flush_bio, NULL, 0);
  flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
  ci.bio = &flush_bio;
  ci.sector_count = 0;
  error = __send_empty_flush(&ci);
  bio_uninit(ci.bio);
  /* dec_pending submits any data associated with flush */
 } else if (op_is_zone_mgmt(bio_op(bio))) {
  /* Zone mgmt ops carry no data payload; map them as-is. */
  ci.bio = bio;
  ci.sector_count = 0;
  error = __split_and_process_non_flush(&ci);
 } else {
  ci.bio = bio;
  ci.sector_count = bio_sectors(bio);
  while (ci.sector_count && !error) {
   error = __split_and_process_non_flush(&ci);
   if (current->bio_list && ci.sector_count && !error) {
    /*
     * Remainder must be passed to submit_bio_noacct()
     * so that it gets handled *after* bios already submitted
     * have been completely processed.
     * We take a clone of the original to store in
     * ci.io->orig_bio to be used by end_io_acct() and
     * for dec_pending to use for completion handling.
     */
    struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
         GFP_NOIO, &md->queue->bio_split);
    ci.io->orig_bio = b;
    /*
     * Adjust IO stats for each split, otherwise upon queue
     * reentry there will be redundant IO accounting.
     * NOTE: this is a stop-gap fix, a proper fix involves
     * significant refactoring of DM core's bio splitting
     * (by eliminating DM's splitting and just using bio_split)
     */
    part_stat_lock();
    __dm_part_stat_sub(&dm_disk(md)->part0,
         sectors[op_stat_group(bio_op(bio))], ci.sector_count);
    part_stat_unlock();
    bio_chain(b, bio);
    trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
    ret = submit_bio_noacct(bio);
    break;
   }
  }
 }
 /* drop the extra reference count */
 dec_pending(ci.io, errno_to_blk_status(error));
 return ret;
}
/*
 * Optimized variant of __split_and_process_bio that leverages the
 * fact that targets that use it do _not_ have a need to split bios.
 *
 * Used for DM_TYPE_NVME_BIO_BASED devices with an immutable target:
 * the whole bio maps with a single simple clone (splitting, if any,
 * already happened in dm_queue_split()).
 */
static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
         struct bio *bio, struct dm_target *ti)
{
 struct clone_info ci;
 blk_qc_t ret = BLK_QC_T_NONE;
 int error = 0;
 init_clone_info(&ci, md, map, bio);
 if (bio->bi_opf & REQ_PREFLUSH) {
  struct bio flush_bio;
  /*
   * Use an on-stack bio for this, it's safe since we don't
   * need to reference it after submit. It's just used as
   * the basis for the clone(s).
   */
  bio_init(&flush_bio, NULL, 0);
  flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
  ci.bio = &flush_bio;
  ci.sector_count = 0;
  error = __send_empty_flush(&ci);
  bio_uninit(ci.bio);
  /* dec_pending submits any data associated with flush */
 } else {
  struct dm_target_io *tio;
  ci.bio = bio;
  ci.sector_count = bio_sectors(bio);
  if (__process_abnormal_io(&ci, ti, &error))
   goto out;
  /* Whole bio maps in one shot — no per-target splitting needed. */
  tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
  ret = __clone_and_map_simple_bio(&ci, tio, NULL);
 }
 out:
 /* drop the extra reference count */
 dec_pending(ci.io, errno_to_blk_status(error));
 return ret;
}
/*
 * If *bio exceeds the target's max_io_len, split off the leading
 * mappable piece, resubmit the remainder via submit_bio_noacct(), and
 * hand back the split piece in *bio for immediate processing.
 */
static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
{
 unsigned len, sector_count;
 sector_count = bio_sectors(*bio);
 len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count);
 if (sector_count > len) {
  struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);
  /* Chain so the parent completes only after the split part does. */
  bio_chain(split, *bio);
  trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
  submit_bio_noacct(*bio);
  *bio = split;
 }
}
/*
 * Route @bio through the device's live table: resolve the target
 * (immutable target shortcut when present), apply queue-limit-aware
 * splitting when called from ->submit_bio context, then take either the
 * NVMe fast path or the general split-and-process path.
 */
static blk_qc_t dm_process_bio(struct mapped_device *md,
          struct dm_table *map, struct bio *bio)
{
 blk_qc_t ret = BLK_QC_T_NONE;
 struct dm_target *ti = md->immutable_target;
 if (unlikely(!map)) {
  /* No live table: nothing can service this bio. */
  bio_io_error(bio);
  return ret;
 }
 if (!ti) {
  ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
  if (unlikely(!ti)) {
   bio_io_error(bio);
   return ret;
  }
 }
 /*
  * If in ->queue_bio we need to use blk_queue_split(), otherwise
  * queue_limits for abnormal requests (e.g. discard, writesame, etc)
  * won't be imposed.
  */
 if (current->bio_list) {
  if (is_abnormal_io(bio))
   blk_queue_split(&bio);
  else
   dm_queue_split(md, ti, &bio);
 }
 if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
  return __process_bio(md, map, bio, ti);
 else
  return __split_and_process_bio(md, map, bio);
}
/*
 * ->submit_bio entry point. Request-based devices defer straight to
 * blk-mq; bio-based devices take the live table under SRCU, queueing
 * the bio for later if the device is suspended.
 */
static blk_qc_t dm_submit_bio(struct bio *bio)
{
 struct mapped_device *md = bio->bi_disk->private_data;
 blk_qc_t ret = BLK_QC_T_NONE;
 int srcu_idx;
 struct dm_table *map;
 if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
  /*
   * We are called with a live reference on q_usage_counter, but
   * that one will be released as soon as we return. Grab an
   * extra one as blk_mq_submit_bio expects to be able to consume
   * a reference (which lives until the request is freed in case a
   * request is allocated).
   */
  percpu_ref_get(&bio->bi_disk->queue->q_usage_counter);
  return blk_mq_submit_bio(bio);
 }
 map = dm_get_live_table(md, &srcu_idx);
 /* if we're suspended, we have to queue this io for later */
 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
  dm_put_live_table(md, srcu_idx);
  /* Readahead is best-effort: fail it rather than queue it. */
  if (!(bio->bi_opf & REQ_RAHEAD))
   queue_io(md, bio);
  else
   bio_io_error(bio);
  return ret;
 }
 ret = dm_process_bio(md, map, bio);
 dm_put_live_table(md, srcu_idx);
 return ret;
}
/*-----------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
*---------------------------------------------------------------*/
/* Return @minor to the pool by dropping its IDR slot. */
static void free_minor(int minor)
{
 spin_lock(&_minor_lock);
 idr_remove(&_minor_idr, minor);
 spin_unlock(&_minor_lock);
}
/*
 * See if the device with a specific minor # is free.
 *
 * Reserves exactly @minor in the IDR (marked with the MINOR_ALLOCED
 * placeholder until alloc_dev() installs the real md). Returns 0,
 * -EBUSY if taken, or another negative errno.
 */
static int specific_minor(int minor)
{
 int r;
 if (minor >= (1 << MINORBITS))
  return -EINVAL;
 /* Preload outside the spinlock so the NOWAIT alloc can't fail on memory. */
 idr_preload(GFP_KERNEL);
 spin_lock(&_minor_lock);
 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
 spin_unlock(&_minor_lock);
 idr_preload_end();
 if (r < 0)
  return r == -ENOSPC ? -EBUSY : r;
 return 0;
}
/*
 * Reserve the lowest free minor number, storing it in *minor.
 * Returns 0 on success or a negative errno.
 */
static int next_free_minor(int *minor)
{
 int r;
 idr_preload(GFP_KERNEL);
 spin_lock(&_minor_lock);
 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
 spin_unlock(&_minor_lock);
 idr_preload_end();
 if (r < 0)
  return r;
 *minor = r;
 return 0;
}
static const struct block_device_operations dm_blk_dops;
static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
/*
 * Tear down everything alloc_dev() may have set up; each step is
 * guarded so this is safe to call from partially-constructed state.
 */
static void cleanup_mapped_device(struct mapped_device *md)
{
 if (md->wq)
  destroy_workqueue(md->wq);
 bioset_exit(&md->bs);
 bioset_exit(&md->io_bs);
 if (md->dax_dev) {
  kill_dax(md->dax_dev);
  put_dax(md->dax_dev);
  md->dax_dev = NULL;
 }
 if (md->disk) {
  /* Sever the disk->md link under _minor_lock so open racers see NULL. */
  spin_lock(&_minor_lock);
  md->disk->private_data = NULL;
  spin_unlock(&_minor_lock);
  del_gendisk(md->disk);
  put_disk(md->disk);
 }
 if (md->queue)
  blk_cleanup_queue(md->queue);
 cleanup_srcu_struct(&md->io_barrier);
 if (md->bdev) {
  bdput(md->bdev);
  md->bdev = NULL;
 }
 mutex_destroy(&md->suspend_lock);
 mutex_destroy(&md->type_lock);
 mutex_destroy(&md->table_devices_lock);
 dm_mq_cleanup_mapped_device(md);
}
/*
 * Allocate and initialise a blank device with a given minor.
 *
 * Returns the new mapped_device (installed in _minor_idr) or NULL on
 * failure, with all partially-created resources released.
 *
 * Fix: when alloc_dax() fails it returns an ERR_PTR, which was left in
 * md->dax_dev; cleanup_mapped_device() only checks for non-NULL and
 * would hand that error pointer to kill_dax()/put_dax(). Reset the
 * field to NULL before jumping to the error path.
 */
static struct mapped_device *alloc_dev(int minor)
{
 int r, numa_node_id = dm_get_numa_node();
 struct mapped_device *md;
 void *old_md;
 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
 if (!md) {
  DMWARN("unable to allocate device, out of memory.");
  return NULL;
 }
 if (!try_module_get(THIS_MODULE))
  goto bad_module_get;
 /* get a minor number for the dev */
 if (minor == DM_ANY_MINOR)
  r = next_free_minor(&minor);
 else
  r = specific_minor(minor);
 if (r < 0)
  goto bad_minor;
 r = init_srcu_struct(&md->io_barrier);
 if (r < 0)
  goto bad_io_barrier;
 md->numa_node_id = numa_node_id;
 md->init_tio_pdu = false;
 md->type = DM_TYPE_NONE;
 mutex_init(&md->suspend_lock);
 mutex_init(&md->type_lock);
 mutex_init(&md->table_devices_lock);
 spin_lock_init(&md->deferred_lock);
 atomic_set(&md->holders, 1);
 atomic_set(&md->open_count, 0);
 atomic_set(&md->event_nr, 0);
 atomic_set(&md->uevent_seq, 0);
 INIT_LIST_HEAD(&md->uevent_list);
 INIT_LIST_HEAD(&md->table_devices);
 spin_lock_init(&md->uevent_lock);
 /*
  * default to bio-based until DM table is loaded and md->type
  * established. If request-based table is loaded: blk-mq will
  * override accordingly.
  */
 md->queue = blk_alloc_queue(numa_node_id);
 if (!md->queue)
  goto bad;
 md->disk = alloc_disk_node(1, md->numa_node_id);
 if (!md->disk)
  goto bad;
 init_waitqueue_head(&md->wait);
 INIT_WORK(&md->work, dm_wq_work);
 init_waitqueue_head(&md->eventq);
 init_completion(&md->kobj_holder.completion);
 md->disk->major = _major;
 md->disk->first_minor = minor;
 md->disk->fops = &dm_blk_dops;
 md->disk->queue = md->queue;
 md->disk->private_data = md;
 sprintf(md->disk->disk_name, "dm-%d", minor);
 if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
  md->dax_dev = alloc_dax(md, md->disk->disk_name,
    &dm_dax_ops, 0);
  if (IS_ERR(md->dax_dev)) {
   /*
    * Don't leave the ERR_PTR behind: cleanup_mapped_device()
    * treats any non-NULL md->dax_dev as live and would pass
    * the error pointer to kill_dax()/put_dax().
    */
   md->dax_dev = NULL;
   goto bad;
  }
 }
 add_disk_no_queue_reg(md->disk);
 format_dev_t(md->name, MKDEV(_major, minor));
 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
 if (!md->wq)
  goto bad;
 md->bdev = bdget_disk(md->disk, 0);
 if (!md->bdev)
  goto bad;
 dm_stats_init(&md->stats);
 /* Populate the mapping, nobody knows we exist yet */
 spin_lock(&_minor_lock);
 old_md = idr_replace(&_minor_idr, md, minor);
 spin_unlock(&_minor_lock);
 BUG_ON(old_md != MINOR_ALLOCED);
 return md;
 bad:
 cleanup_mapped_device(md);
 bad_io_barrier:
 free_minor(minor);
 bad_minor:
 module_put(THIS_MODULE);
 bad_module_get:
 kvfree(md);
 return NULL;
}
static void unlock_fs(struct mapped_device *md);
/* Fully destroy @md: thaw any frozen fs, release all resources, and
 * return the minor number. Counterpart of alloc_dev(). */
static void free_dev(struct mapped_device *md)
{
 int minor = MINOR(disk_devt(md->disk));
 unlock_fs(md);
 cleanup_mapped_device(md);
 free_table_devices(&md->table_devices);
 dm_stats_cleanup(&md->stats);
 free_minor(minor);
 module_put(THIS_MODULE);
 kvfree(md);
}
/*
 * Take ownership of the mempools prepared for table @t, reinitializing
 * md's biosets from them. Bio-based tables always reload (front_pad may
 * differ); request-based tables keep existing biosets. The table's
 * mempools are always freed before returning.
 */
static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
 struct dm_md_mempools *p = dm_table_get_md_mempools(t);
 int ret = 0;
 if (dm_table_bio_based(t)) {
  /*
   * The md may already have mempools that need changing.
   * If so, reload bioset because front_pad may have changed
   * because a different table was loaded.
   */
  bioset_exit(&md->bs);
  bioset_exit(&md->io_bs);
 } else if (bioset_initialized(&md->bs)) {
  /*
   * There's no need to reload with request-based dm
   * because the size of front_pad doesn't change.
   * Note for future: If you are to reload bioset,
   * prep-ed requests in the queue may refer
   * to bio from the old bioset, so you must walk
   * through the queue to unprep.
   */
  goto out;
 }
 BUG_ON(!p ||
        bioset_initialized(&md->bs) ||
        bioset_initialized(&md->io_bs));
 ret = bioset_init_from_src(&md->bs, &p->bs);
 if (ret)
  goto out;
 ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
 if (ret)
  /* Keep biosets pairwise consistent on partial failure. */
  bioset_exit(&md->bs);
 out:
 /* mempool bind completed, no longer need any mempools in the table */
 dm_table_free_md_mempools(t);
 return ret;
}
/*
 * Table event callback (registered via dm_table_event_callback() in
 * __bind): drains the pending uevent list to userspace and wakes
 * anyone waiting on the event counter.
 */
static void event_callback(void *context)
{
 unsigned long flags;
 LIST_HEAD(uevents);
 struct mapped_device *md = (struct mapped_device *) context;
 /* Detach the pending list under the lock, deliver outside it. */
 spin_lock_irqsave(&md->uevent_lock, flags);
 list_splice_init(&md->uevent_list, &uevents);
 spin_unlock_irqrestore(&md->uevent_lock, flags);
 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 atomic_inc(&md->event_nr);
 wake_up(&md->eventq);
 dm_issue_global_event();
}
/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 *
 * Update both the gendisk capacity (in sectors) and the bdev inode
 * size (in bytes) to @size.
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
 lockdep_assert_held(&md->suspend_lock);
 set_capacity(md->disk, size);
 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}
/*
 * Returns old map, which caller must destroy.
 *
 * Binds table @t to @md under md->suspend_lock: resizes the device,
 * installs the event callback, takes over mempools, publishes the new
 * map via RCU and applies @limits. On mempool failure returns an
 * ERR_PTR and leaves the old map in place.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
          struct queue_limits *limits)
{
 struct dm_table *old_map;
 struct request_queue *q = md->queue;
 bool request_based = dm_table_request_based(t);
 sector_t size;
 int ret;
 lockdep_assert_held(&md->suspend_lock);
 size = dm_table_get_size(t);
 /*
  * Wipe any geometry if the size of the table changed.
  */
 if (size != dm_get_size(md))
  memset(&md->geometry, 0, sizeof(md->geometry));
 __set_size(md, size);
 dm_table_event_callback(t, event_callback, md);
 /*
  * The queue hasn't been stopped yet, if the old table type wasn't
  * for request-based during suspension. So stop it to prevent
  * I/O mapping before resume.
  * This must be done before setting the queue restrictions,
  * because request-based dm may be run just after the setting.
  */
 if (request_based)
  dm_stop_queue(q);
 if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
  /*
   * Leverage the fact that request-based DM targets and
   * NVMe bio based targets are immutable singletons
   * - used to optimize both dm_request_fn and dm_mq_queue_rq;
   * and __process_bio.
   */
  md->immutable_target = dm_table_get_immutable_target(t);
 }
 ret = __bind_mempools(md, t);
 if (ret) {
  old_map = ERR_PTR(ret);
  goto out;
 }
 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
 rcu_assign_pointer(md->map, (void *)t);
 md->immutable_target_type = dm_table_get_immutable_target_type(t);
 dm_table_set_restrictions(t, q, limits);
 /* Wait out readers of the old map before handing it to the caller. */
 if (old_map)
  dm_sync_table(md);
 out:
 return old_map;
}
/*
 * Returns unbound table for the caller to free.
 *
 * Clears the live map pointer, detaches the event callback, and waits
 * for in-flight SRCU readers before returning the old table.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
 struct dm_table *map = rcu_dereference_protected(md->map, 1);
 if (!map)
  return NULL;
 dm_table_event_callback(map, NULL, NULL);
 RCU_INIT_POINTER(md->map, NULL);
 dm_sync_table(md);
 return map;
}
/*
 * Constructor for a new device.
 *
 * Allocates a mapped_device on @minor (or any free minor for
 * DM_ANY_MINOR) and registers its sysfs presence. Returns 0 with
 * *result set, or a negative errno.
 */
int dm_create(int minor, struct mapped_device **result)
{
 int r;
 struct mapped_device *md;
 md = alloc_dev(minor);
 if (!md)
  return -ENXIO;
 r = dm_sysfs_init(md);
 if (r) {
  free_dev(md);
  return r;
 }
 *result = md;
 return 0;
}
/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
/* Acquire the lock protecting md->type. */
void dm_lock_md_type(struct mapped_device *md)
{
 mutex_lock(&md->type_lock);
}
/* Release the lock protecting md->type. */
void dm_unlock_md_type(struct mapped_device *md)
{
 mutex_unlock(&md->type_lock);
}
/* Set md->type; caller must hold md->type_lock (enforced by BUG_ON). */
void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
 BUG_ON(!mutex_is_locked(&md->type_lock));
 md->type = type;
}
/* Read md->type (lockless read of the queue mode). */
enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
 return md->type;
}
/* Return the target type cached by __bind() for immutable-target tables. */
struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
 return md->immutable_target_type;
}
/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
 /* Catch callers that let their md reference lapse. */
 BUG_ON(!atomic_read(&md->holders));
 return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
/*
 * Setup the DM device's queue based on md's type
 *
 * Initializes blk-mq for request-based devices, computes and applies
 * the table's queue limits, and registers the queue with sysfs.
 * Returns 0 or a negative errno.
 */
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
 int r;
 struct queue_limits limits;
 enum dm_queue_mode type = dm_get_md_type(md);
 switch (type) {
 case DM_TYPE_REQUEST_BASED:
  r = dm_mq_init_request_queue(md, t);
  if (r) {
   DMERR("Cannot initialize queue for request-based dm-mq mapped device");
   return r;
  }
  break;
 case DM_TYPE_BIO_BASED:
 case DM_TYPE_DAX_BIO_BASED:
 case DM_TYPE_NVME_BIO_BASED:
  /* Bio-based queues need no extra setup here. */
  break;
 case DM_TYPE_NONE:
  /* Type must have been decided before the queue is set up. */
  WARN_ON_ONCE(true);
  break;
 }
 r = dm_calculate_queue_limits(t, &limits);
 if (r) {
  DMERR("Cannot calculate initial queue limits");
  return r;
 }
 dm_table_set_restrictions(t, md->queue, &limits);
 blk_register_queue(md->disk);
 return 0;
}
/*
 * Look up the mapped_device for @dev and take a reference on it.
 * Returns NULL if the minor is unknown, still being allocated, or the
 * device is being freed/deleted.
 */
struct mapped_device *dm_get_md(dev_t dev)
{
 struct mapped_device *md;
 unsigned minor = MINOR(dev);
 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
  return NULL;
 spin_lock(&_minor_lock);
 md = idr_find(&_minor_idr, minor);
 /* Reject placeholders, stale IDR entries, and dying devices. */
 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
     test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
  md = NULL;
  goto out;
 }
 dm_get(md);
 out:
 spin_unlock(&_minor_lock);
 return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);
/* Get the opaque interface pointer stashed on @md. */
void *dm_get_mdptr(struct mapped_device *md)
{
 return md->interface_ptr;
}
/* Stash an opaque interface pointer on @md. */
void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
 md->interface_ptr = ptr;
}
/* Take a reference on @md; must not be called once freeing has begun. */
void dm_get(struct mapped_device *md)
{
 atomic_inc(&md->holders);
 BUG_ON(test_bit(DMF_FREEING, &md->flags));
}
/*
 * Like dm_get(), but safe against concurrent destruction: fails with
 * -EBUSY instead of BUG-ing when the device is being freed.
 */
int dm_hold(struct mapped_device *md)
{
 spin_lock(&_minor_lock);
 if (test_bit(DMF_FREEING, &md->flags)) {
  spin_unlock(&_minor_lock);
  return -EBUSY;
 }
 dm_get(md);
 spin_unlock(&_minor_lock);
 return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);
/* Return the device's "major:minor" name string. */
const char *dm_device_name(struct mapped_device *md)
{
 return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
/*
 * Destroy @md: mark it freeing, suspend the table if needed, then
 * (optionally) wait for all holders to drop before releasing it.
 */
static void __dm_destroy(struct mapped_device *md, bool wait)
{
 struct dm_table *map;
 int srcu_idx;
 might_sleep();
 /* Re-insert the placeholder so new lookups can't find this md. */
 spin_lock(&_minor_lock);
 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
 set_bit(DMF_FREEING, &md->flags);
 spin_unlock(&_minor_lock);
 blk_set_queue_dying(md->queue);
 /*
  * Take suspend_lock so that presuspend and postsuspend methods
  * do not race with internal suspend.
  */
 mutex_lock(&md->suspend_lock);
 map = dm_get_live_table(md, &srcu_idx);
 if (!dm_suspended_md(md)) {
  dm_table_presuspend_targets(map);
  set_bit(DMF_SUSPENDED, &md->flags);
  set_bit(DMF_POST_SUSPENDING, &md->flags);
  dm_table_postsuspend_targets(map);
 }
 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
 dm_put_live_table(md, srcu_idx);
 mutex_unlock(&md->suspend_lock);
 /*
  * Rare, but there may be I/O requests still going to complete,
  * for example. Wait for all references to disappear.
  * No one should increment the reference count of the mapped_device,
  * after the mapped_device state becomes DMF_FREEING.
  */
 if (wait)
  while (atomic_read(&md->holders))
   msleep(1);
 else if (atomic_read(&md->holders))
  DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
         dm_device_name(md), atomic_read(&md->holders));
 dm_sysfs_exit(md);
 dm_table_destroy(__unbind(md));
 free_dev(md);
}
/* Destroy @md, waiting for all holders to drop their references. */
void dm_destroy(struct mapped_device *md)
{
 __dm_destroy(md, true);
}
/* Destroy @md without waiting for remaining holders (forced removal). */
void dm_destroy_immediate(struct mapped_device *md)
{
 __dm_destroy(md, false);
}
/* Drop a reference taken by dm_get()/dm_hold(). */
void dm_put(struct mapped_device *md)
{
 atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);
/*
 * True while any bio is in flight on @md, computed by summing the
 * per-cpu in_flight counters for both directions across all CPUs.
 */
static bool md_in_flight_bios(struct mapped_device *md)
{
 int cpu;
 struct hd_struct *part = &dm_disk(md)->part0;
 long sum = 0;
 for_each_possible_cpu(cpu) {
  sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
  sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
 }
 return sum != 0;
}
/*
 * Sleep on md->wait until no bios are in flight. Returns 0, or -EINTR
 * if @task_state allows signals and one arrives.
 */
static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
{
 int r = 0;
 DEFINE_WAIT(wait);
 while (true) {
  prepare_to_wait(&md->wait, &wait, task_state);
  if (!md_in_flight_bios(md))
   break;
  if (signal_pending_state(task_state, current)) {
   r = -EINTR;
   break;
  }
  io_schedule();
 }
 finish_wait(&md->wait, &wait);
 return r;
}
/*
 * Wait until @md has no outstanding I/O: bio-based devices wait on the
 * in-flight bio counters; request-based devices poll blk-mq inflight
 * state. Returns 0 or -EINTR on signal.
 */
static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
 int r = 0;
 if (!queue_is_mq(md->queue))
  return dm_wait_for_bios_completion(md, task_state);
 while (true) {
  if (!blk_mq_queue_inflight(md->queue))
   break;
  if (signal_pending_state(task_state, current)) {
   r = -EINTR;
   break;
  }
  /* No waitqueue for blk-mq inflight counts; poll instead. */
  msleep(5);
 }
 return r;
}
/*
 * Process the deferred bios
 *
 * Workqueue handler: drains md->deferred under the live table,
 * resubmitting each bio, until empty or the device is (re)suspended.
 */
static void dm_wq_work(struct work_struct *work)
{
 struct mapped_device *md = container_of(work, struct mapped_device,
      work);
 struct bio *c;
 int srcu_idx;
 struct dm_table *map;
 map = dm_get_live_table(md, &srcu_idx);
 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
  spin_lock_irq(&md->deferred_lock);
  c = bio_list_pop(&md->deferred);
  spin_unlock_irq(&md->deferred_lock);
  if (!c)
   break;
  if (dm_request_based(md))
   (void) submit_bio_noacct(c);
  else
   (void) dm_process_bio(md, map, c);
 }
 dm_put_live_table(md, srcu_idx);
}
/* Re-enable I/O after suspend and kick the worker to drain deferred bios. */
static void dm_queue_flush(struct mapped_device *md)
{
 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
 /* Make the cleared bit visible before the work runs. */
 smp_mb__after_atomic();
 queue_work(md->wq, &md->work);
}
/*
 * Swap in a new table, returning the old one for the caller to destroy.
 *
 * The device must already be suspended. Returns the previous table, or
 * an ERR_PTR on failure.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
 struct queue_limits limits;
 int r;
 mutex_lock(&md->suspend_lock);
 /* device must be suspended */
 if (!dm_suspended_md(md))
  goto out;
 /*
  * If the new table has no data devices, retain the existing limits.
  * This helps multipath with queue_if_no_path if all paths disappear,
  * then new I/O is queued based on these limits, and then some paths
  * reappear.
  */
 if (dm_table_has_no_data_devices(table)) {
  live_map = dm_get_live_table_fast(md);
  if (live_map)
   limits = md->queue->limits;
  dm_put_live_table_fast(md);
 }
 if (!live_map) {
  r = dm_calculate_queue_limits(table, &limits);
  if (r) {
   map = ERR_PTR(r);
   goto out;
  }
 }
 map = __bind(md, table, &limits);
 dm_issue_global_event();
 out:
 mutex_unlock(&md->suspend_lock);
 return map;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
/*
 * Freeze the filesystem on md->bdev (flushes and blocks fs writes),
 * remembering the superblock for unlock_fs(). Returns 0 or the
 * errno from freeze_bdev().
 */
static int lock_fs(struct mapped_device *md)
{
 int r;
 WARN_ON(md->frozen_sb);
 md->frozen_sb = freeze_bdev(md->bdev);
 if (IS_ERR(md->frozen_sb)) {
  r = PTR_ERR(md->frozen_sb);
  md->frozen_sb = NULL;
  return r;
 }
 set_bit(DMF_FROZEN, &md->flags);
 return 0;
}
/* Thaw the filesystem frozen by lock_fs(); no-op if it wasn't frozen. */
static void unlock_fs(struct mapped_device *md)
{
 if (!test_bit(DMF_FROZEN, &md->flags))
  return;
 thaw_bdev(md->bdev, md->frozen_sb);
 md->frozen_sb = NULL;
 clear_bit(DMF_FROZEN, &md->flags);
}
/*
 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
 *
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
   unsigned suspend_flags, long task_state,
   int dmf_suspended_flag)
{
 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
 int r;
 lockdep_assert_held(&md->suspend_lock);
 /*
  * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
  * This flag is cleared before dm_suspend returns.
  */
 if (noflush)
  set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
 else
  DMDEBUG("%s: suspending with flush", dm_device_name(md));
 /*
  * This gets reverted if there's an error later and the targets
  * provide the .presuspend_undo hook.
  */
 dm_table_presuspend_targets(map);
 /*
  * Flush I/O to the device.
  * Any I/O submitted after lock_fs() may not be flushed.
  * noflush takes precedence over do_lockfs.
  * (lock_fs() flushes I/Os and waits for them to complete.)
  */
 if (!noflush && do_lockfs) {
  r = lock_fs(md);
  if (r) {
   dm_table_presuspend_undo_targets(map);
   return r;
  }
 }
 /*
  * Here we must make sure that no processes are submitting requests
  * to target drivers i.e. no one may be executing
  * __split_and_process_bio. This is called from dm_request and
  * dm_wq_work.
  *
  * To get all processes out of __split_and_process_bio in dm_request,
  * we take the write lock. To prevent any process from reentering
  * __split_and_process_bio from dm_request and quiesce the thread
  * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
  * flush_workqueue(md->wq).
  */
 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
 if (map)
  synchronize_srcu(&md->io_barrier);
 /*
  * Stop md->queue before flushing md->wq in case request-based
  * dm defers requests to md->wq from md->queue.
  */
 if (dm_request_based(md))
  dm_stop_queue(md->queue);
 flush_workqueue(md->wq);
 /*
  * At this point no more requests are entering target request routines.
  * We call dm_wait_for_completion to wait for all existing requests
  * to finish.
  */
 r = dm_wait_for_completion(md, task_state);
 if (!r)
  set_bit(dmf_suspended_flag, &md->flags);
 if (noflush)
  clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
 if (map)
  synchronize_srcu(&md->io_barrier);
 /* were we interrupted ? */
 if (r < 0) {
  /* Roll back: resume I/O, restart the queue, thaw the fs. */
  dm_queue_flush(md);
  if (dm_request_based(md))
   dm_start_queue(md->queue);
  unlock_fs(md);
  dm_table_presuspend_undo_targets(map);
  /* pushback list is already flushed, so skip flush */
 }
 return r;
}
/*
* We need to be able to change a mapping table under a mounted
* filesystem. For example we might want to move some data in
* the background. Before the table can be swapped with
* dm_bind_table, dm_suspend must be called to flush any in
* flight bios and ensure that any further io gets deferred.
*/
/*
* Suspend mechanism in request-based dm.
*
* 1. Flush all I/Os by lock_fs() if needed.
* 2. Stop dispatching any I/O by stopping the request_queue.
* 3. Wait for all in-flight I/Os to be completed or requeued.
*
* To abort suspend, start the request_queue.
*/
/*
 * Suspend the device (userspace-driven path).
 *
 * @suspend_flags is passed through to __dm_suspend() and selects lockfs
 * and/or noflush behaviour.  All waiting here uses TASK_INTERRUPTIBLE.
 *
 * Return: 0 on success, -EINVAL if the device is already suspended, or a
 * negative errno from wait_on_bit()/__dm_suspend() if interrupted/failed.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}
	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;	/* re-take the lock and re-check both flags */
	}
	/* suspend_lock is held, so md->map cannot change underneath us */
	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
	if (r)
		goto out_unlock;
	/* targets can observe this window via dm_post_suspending() */
	set_bit(DMF_POST_SUSPENDING, &md->flags);
	dm_table_postsuspend_targets(map);
	clear_bit(DMF_POST_SUSPENDING, &md->flags);
out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
/*
 * Undo __dm_suspend(): resume the targets, requeue any deferred bios and
 * restart the request queue / unfreeze the filesystem.  Called with
 * md->suspend_lock held (see dm_resume()/__dm_internal_resume()).
 *
 * Return: 0 on success or the error from dm_table_resume_targets().
 */
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}
	dm_queue_flush(md);
	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		dm_start_queue(md->queue);
	unlock_fs(md);
	return 0;
}
/*
 * Resume a device previously suspended with dm_suspend().
 *
 * Return: 0 on success, -EINVAL if the device is not suspended or has no
 * usable table, or a negative errno if interrupted while waiting for an
 * internal resume.
 */
int dm_resume(struct mapped_device *md)
{
	int r;
	struct dm_table *map = NULL;
retry:
	r = -EINVAL;
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
	if (!dm_suspended_md(md))
		goto out;
	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;	/* re-take the lock and re-check both flags */
	}
	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;	/* nothing to resume; still returns -EINVAL */
	r = __dm_resume(md, map);
	if (r)
		goto out;
	clear_bit(DMF_SUSPENDED, &md->flags);
out:
	mutex_unlock(&md->suspend_lock);
	return r;
}
/*
* Internal suspend/resume works like userspace-driven suspend. It waits
* until all bios finish and prevents issuing new bios to the target drivers.
* It may be used only from the kernel.
*/
/*
 * Kernel-internal counterpart of dm_suspend().  Nests: only the first
 * caller (internal_suspend_count 0 -> 1) actually suspends; later callers
 * just bump the count.  Caller must hold md->suspend_lock.
 */
static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	lockdep_assert_held(&md->suspend_lock);
	if (md->internal_suspend_count++)
		return; /* nested internal suspend */
	if (dm_suspended_md(md)) {
		/* device already suspended by userspace: just mark it */
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}
	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
			    DMF_SUSPENDED_INTERNALLY);
	set_bit(DMF_POST_SUSPENDING, &md->flags);
	dm_table_postsuspend_targets(map);
	clear_bit(DMF_POST_SUSPENDING, &md->flags);
}
/*
 * Kernel-internal counterpart of dm_resume().  Only the last caller
 * (internal_suspend_count 1 -> 0) actually resumes; it then wakes anyone
 * blocked in wait_on_bit(DMF_SUSPENDED_INTERNALLY).
 */
static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);
	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */
	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */
	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);
done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	/* pair the clear_bit with the wake-up for wait_on_bit() waiters */
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}
/* Public wrapper: internal noflush suspend under md->suspend_lock. */
void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
/* Public wrapper: internal resume under md->suspend_lock. */
void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);
/*
* Fast variants of internal suspend/resume hold md->suspend_lock,
* which prevents interaction with userspace-driven suspend.
*/
/*
 * Fast internal suspend: block and drain I/O without target
 * presuspend/postsuspend hooks.
 *
 * NOTE: returns with md->suspend_lock HELD (on both paths) -- it is
 * released by the matching dm_internal_resume_fast().
 */
void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;	/* already quiesced; keep the lock for resume_fast */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
/*
 * Counterpart of dm_internal_suspend_fast().  Entered with
 * md->suspend_lock already held (taken by suspend_fast) and releases it.
 */
void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;
	dm_queue_flush(md);
done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
/*-----------------------------------------------------------------
* Event notification.
*---------------------------------------------------------------*/
/*
 * Emit a udev event for the device's gendisk.  A non-zero @cookie is
 * exported to udev via the DM_COOKIE environment variable.
 *
 * Return: result of kobject_uevent()/kobject_uevent_env().
 */
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	int r;
	unsigned noio_flag;
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };
	/* NOTE(review): allocations below run in NOIO scope -- presumably to
	 * avoid recursing into I/O while the device may be suspended. */
	noio_flag = memalloc_noio_save();
	if (!cookie)
		r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
				       action, envp);
	}
	memalloc_noio_restore(noio_flag);
	return r;
}
/* Atomically allocate the next uevent sequence number for @md. */
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}
/* Read the current event counter (compared against by dm_wait_event()). */
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}
/*
 * Sleep until md->event_nr differs from @event_nr.
 * Return: 0 when woken by an event, -ERESTARTSYS if interrupted.
 */
int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
/* Queue @elist onto md->uevent_list under the uevent spinlock. */
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;
	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
* The gendisk is only valid as long as you have a reference
* count on 'md'.
*/
/* Return the device's gendisk (valid only while a ref on md is held). */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);
/* Return the sysfs kobject embedded in @md's kobj_holder. */
struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}
/*
 * Map a sysfs kobject back to its mapped_device and take a reference.
 * Returns NULL if the device is being freed or deleted (checked under
 * _minor_lock so the ref cannot race with teardown).
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;
	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);
	return md;
}
/* Non-zero if the device is suspended (userspace-driven suspend). */
int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}
/* Non-zero while postsuspend hooks are running (see dm_suspend()). */
static int dm_post_suspending_md(struct mapped_device *md)
{
	return test_bit(DMF_POST_SUSPENDING, &md->flags);
}
/* Non-zero if the device is suspended via the kernel-internal path. */
int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}
/* Non-zero if removal of the device has been deferred. */
int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}
/* Target-facing helper: is the device owning @ti suspended? */
int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);
/* Target-facing helper: is the device owning @ti in postsuspend? */
int dm_post_suspending(struct dm_target *ti)
{
	return dm_post_suspending_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_post_suspending);
/* Target-facing helper: is a noflush suspend in progress for @ti's device? */
int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
/*
 * Allocate the per-device bio sets sized for the given queue @type.
 * Bio-based types get both io_bs (dm_io front pad) and bs (dm_target_io
 * front pad); request-based only gets bs with a clone-bio front pad.
 *
 * Return: pools on success, NULL on allocation failure (partial
 * allocations are torn down via dm_free_md_mempools()).
 */
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_io_data_size,
					    unsigned min_pool_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	unsigned int pool_size = 0;
	unsigned int front_pad, io_front_pad;
	int ret;
	if (!pools)
		return NULL;
	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
	case DM_TYPE_NVME_BIO_BASED:
		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
		/* embed per-io data + dm_target_io ahead of each cloned bio */
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
		io_front_pad = roundup(front_pad,  __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
		if (ret)
			goto out;
		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
			goto out;
		break;
	case DM_TYPE_REQUEST_BASED:
		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
	default:
		BUG();
	}
	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
	if (ret)
		goto out;
	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
		goto out;
	return pools;
out:
	dm_free_md_mempools(pools);
	return NULL;
}
/*
 * Tear down the bio sets owned by @pools and free the container.
 * A NULL @pools is a no-op, mirroring kfree() semantics.
 */
void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (pools) {
		bioset_exit(&pools->bs);
		bioset_exit(&pools->io_bs);
		kfree(pools);
	}
}
/* Argument bundle threaded through dm_call_pr() iterate_devices callbacks. */
struct dm_pr {
	u64	old_key;	/* key to replace (PR register) */
	u64	new_key;	/* replacement key; 0 means unregister */
	u32	flags;		/* PR_* flags passed to the lower device */
	bool	fail_early;	/* set for the initial register attempt */
};
/*
 * Run @fn over the devices of the live table's single target.
 *
 * Return: -ENOTTY if there is no usable table, -EINVAL if the table has
 * more than one target or the target lacks iterate_devices, otherwise
 * whatever @fn returns.
 */
static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;
	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;
	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);
	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;
	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
/*
* For register / unregister we need to manually call out to every path.
*/
/*
 * iterate_devices callback: forward a PR register to one underlying path.
 * Return: -EOPNOTSUPP if the path has no pr_register op.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}
/*
 * pr_ops.pr_register: register @new_key on every path.  If any path fails
 * while registering a real key, roll back by unregistering on all paths
 * (best effort -- the rollback's own result is deliberately ignored) and
 * return the original error.
 */
static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key	= old_key,
		.new_key	= new_key,
		.flags		= flags,
		.fail_early	= true,
	};
	int ret;
	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}
	return ret;
}
/*
 * pr_ops.pr_reserve: forward a PR reserve to the single underlying
 * device resolved via dm_prepare_ioctl().
 */
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;
	/* on success @bdev is redirected to the underlying device */
	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;
	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
/* pr_ops.pr_release: forward a PR release to the underlying device. */
static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;
	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;
	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
/* pr_ops.pr_preempt: forward a PR preempt(-and-abort) to the underlying device. */
static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;
	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;
	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
/* pr_ops.pr_clear: forward a PR clear to the underlying device. */
static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;
	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;
	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
/* Persistent-reservation entry points exposed through dm_blk_dops below. */
static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};
/* block_device_operations for every dm gendisk. */
static const struct block_device_operations dm_blk_dops = {
	.submit_bio = dm_submit_bio,
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.report_zones = dm_blk_report_zones,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};
/* DAX (direct access) entry points for dax-capable dm devices. */
static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
	.dax_supported = dm_dax_supported,
	.copy_from_iter = dm_dax_copy_from_iter,
	.copy_to_iter = dm_dax_copy_to_iter,
	.zero_page_range = dm_dax_zero_page_range,
};
/*
* module hooks
*/
module_init(dm_init);
module_exit(dm_exit);
/* major=0 requests dynamic major allocation */
module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
| penberg/linux | drivers/md/dm.c | C | gpl-2.0 | 77,041 |
#include "keycodes.h"
/* Per-key state tracked by the key-handling system. */
typedef struct {
	qboolean	down;
	int			repeats;		// if > 1, it is autorepeating
	char		*binding;		/* command string bound to this key */
} qkey_t;
#define	MAX_EDIT_LINE	256
#define	COMMAND_HISTORY	32
/* An editable text line (console input, chat, etc.). */
typedef struct {
	int		cursor;			/* caret position within buffer */
	int		scroll;			/* first visible character (horizontal scroll) */
	int		widthInChars;	/* visible width of the field */
	char	buffer[MAX_EDIT_LINE];
} field_t;
/* Aggregate global key/console-history state (see extern kg below). */
typedef struct keyGlobals_s
{
	field_t		historyEditLines[COMMAND_HISTORY];
	int			nextHistoryLine;	// the last line in the history buffer, not masked
	int			historyLine;		// the line being displayed from history buffer
									// will be <= nextHistoryLine
	field_t		g_consoleField;
	qboolean	anykeydown;
	qboolean	key_overstrikeMode;
	int			keyDownCount;
	qkey_t		keys[MAX_KEYS];
} keyGlobals_t;
/* Static name table entry mapping key names to keynums. */
typedef struct
{
	word		upper;		/* upper-case character code */
	word		lower;		/* lower-case character code */
	char		*name;		/* printable key name */
	int			keynum;
	bool		menukey;	/* usable inside menus */
} keyname_t;
extern	keyGlobals_t	kg;
extern	keyname_t keynames[MAX_KEYS];
/* Editable-field helpers. */
void Field_Clear( field_t *edit );
void Field_KeyDownEvent( field_t *edit, int key );
void Field_Draw( field_t *edit, int x, int y, int width, qboolean showCursor );
void Field_BigDraw( field_t *edit, int x, int y, int width, qboolean showCursor );
extern field_t chatField;
/* Key binding persistence and queries. */
void Key_WriteBindings( fileHandle_t f );
void Key_SetBinding( int keynum, const char *binding );
char *Key_GetBinding( int keynum );
qboolean Key_IsDown( int keynum );
qboolean Key_GetOverstrikeMode( void );
void Key_SetOverstrikeMode( qboolean state );
void Key_ClearStates( void );
| aarongraham9/jediOutcast | code/client/keys.h | C | gpl-2.0 | 1,485 |
; Project name : Assembly Library
; Description : Functions for initializing menu system.
;
; XTIDE Universal BIOS and Associated Tools
; Copyright (C) 2009-2010 by Tomi Tilli, 2011-2013 by XTIDE Universal BIOS Team.
;
; This program is free software; you can redistribute it and/or modify
; it under the terms of the GNU General Public License as published by
; the Free Software Foundation; either version 2 of the License, or
; (at your option) any later version.
;
; This program is distributed in the hope that it will be useful,
; but WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; GNU General Public License for more details.
; Visit http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
;
; Section containing code
SECTION .text
;--------------------------------------------------------------------
; MenuInit_DisplayMenuWithHandlerInBXandUserDataInDXAX
;	Parameters
;		DX:AX:	User specified data
;		BX:		Menu event handler
;	Returns:
;		AX:		Index of selected item or NO_ITEM_SELECTED
;	Corrupts registers:
;		All except segments
;--------------------------------------------------------------------
ALIGN MENU_JUMP_ALIGN
MenuInit_DisplayMenuWithHandlerInBXandUserDataInDXAX:
	push	es
	push	ds
	xchg	cx, ax			; Backup user data
	CALL_DISPLAY_LIBRARY PushDisplayContext

	; Create MENU struct to stack (CX = MENU_size for the zero-fill below)
	mov		ax, MENU_size
	eENTER_STRUCT ax
	xchg	ax, cx			; Restore user data to AX
	call	Memory_ZeroSSBPwithSizeInCX

	; Display menu (blocks until the menu is exited)
	call	MenuInit_EnterMenuWithHandlerInBXandUserDataInDXAX

	; Get menu selection and destroy menu variables from stack
	mov		dx, [bp+MENUINIT.wHighlightedItem]
	eLEAVE_STRUCT MENU_size
	CALL_DISPLAY_LIBRARY PopDisplayContext
	xchg	ax, dx			; Return highlighted item in AX
	pop		ds
	pop		es
	ret
;--------------------------------------------------------------------
; EnterMenuWithHandlerInBXandUserDataInDXAX
;	Parameters
;		DX:AX:	User specified data
;		BX:		Menu event handler
;		SS:BP:	Ptr to MENU
;	Returns:
;		Nothing
;	Corrupts registers:
;		All, except SS:BP
;--------------------------------------------------------------------
ALIGN MENU_JUMP_ALIGN
MenuInit_EnterMenuWithHandlerInBXandUserDataInDXAX:
	; Store handler and the 32-bit user data into the MENU struct
	mov		[bp+MENU.fnEventHandler], bx
	mov		[bp+MENU.dwUserData], ax
	mov		[bp+MENU.dwUserData+2], dx
	mov		ax, CURSOR_HIDDEN
	CALL_DISPLAY_LIBRARY SetCursorShapeFromAX
	call	MenuEvent_InitializeMenuinit	; User initialization
%ifndef USE_186
	call	MenuInit_RefreshMenuWindow
	jmp		MenuLoop_Enter
%else
	; 186+: push return address so the fall-through returns into MenuLoop_Enter
	push	MenuLoop_Enter
	; Fall to MenuInit_RefreshMenuWindow
%endif
;--------------------------------------------------------------------
; MenuInit_RefreshMenuWindow
;	Parameters
;		SS:BP:	Ptr to MENU
;	Returns:
;		Nothing
;	Corrupts registers:
;		AX, BX, CX, DX, SI, DI
;--------------------------------------------------------------------
ALIGN MENU_JUMP_ALIGN
MenuInit_RefreshMenuWindow:
	call	MenuBorders_RefreshAll			; Draw borders
	call	MenuText_RefreshTitle			; Draw title strings
	call	MenuText_RefreshAllItems		; Draw item strings
	jmp		MenuText_RefreshInformation		; Draw information strings (tail call; its RET returns to our caller)
;--------------------------------------------------------------------
; MenuInit_CloseMenuIfExitEventAllows
;	Parameters
;		SS:BP:	Ptr to MENU
;	Returns:
;		Nothing
;	Corrupts registers:
;		AX, BX, DX
;--------------------------------------------------------------------
%ifndef EXCLUDE_FROM_XTIDE_UNIVERSAL_BIOS
ALIGN MENU_JUMP_ALIGN
MenuInit_CloseMenuIfExitEventAllows:
	call	MenuEvent_ExitMenu
	jc		SHORT MenuInit_CloseMenuWindow	; CF set = event handler allows exit
	ret
%endif

;--------------------------------------------------------------------
; MenuInit_CloseMenuWindow
;	Parameters
;		SS:BP:	Ptr to MENU
;	Returns:
;		Nothing
;	Corrupts registers:
;		Nothing
;--------------------------------------------------------------------
ALIGN MENU_JUMP_ALIGN
MenuInit_CloseMenuWindow:
	; Flag only; the menu loop notices FLG_MENU_EXIT and tears down
	or		BYTE [bp+MENU.bFlags], FLG_MENU_EXIT
	ret
%ifndef EXCLUDE_FROM_XTIDE_UNIVERSAL_BIOS
;--------------------------------------------------------------------
; MenuInit_HighlightItemFromAX
;	Parameters
;		AX:		Item to highlight
;		SS:BP:	Ptr to MENU
;	Returns:
;		Nothing
;	Corrupts registers:
;		AX, BX, CX, DX, SI, DI
;--------------------------------------------------------------------
ALIGN MENU_JUMP_ALIGN
MenuInit_HighlightItemFromAX:
	; Convert absolute index to a delta from the current highlight
	sub		ax, [bp+MENUINIT.wHighlightedItem]
	jmp		MenuScrollbars_MoveHighlightedItemByAX

;--------------------------------------------------------------------
; MenuInit_GetHighlightedItemToAX
;	Parameters
;		SS:BP:	Ptr to MENU
;	Returns:
;		AX:		Index of highlighted item or NO_ITEM_HIGHLIGHTED
;	Corrupts registers:
;		Nothing
;--------------------------------------------------------------------
ALIGN MENU_JUMP_ALIGN
MenuInit_GetHighlightedItemToAX:
	mov		ax, [bp+MENUINIT.wHighlightedItem]
	ret
%endif ; EXCLUDE_FROM_XTIDE_UNIVERSAL_BIOS
;--------------------------------------------------------------------
; MenuInit_SetTitleHeightFromAL
; MenuInit_SetInformationHeightFromAL
; MenuInit_SetTotalItemsFromAX
;	Parameters
;		AX/AL:	Parameter
;		SS:BP:	Ptr to MENU
;	Returns:
;		Nothing
;	Corrupts registers:
;		Nothing
;--------------------------------------------------------------------
%ifndef EXCLUDE_FROM_XTIDE_UNIVERSAL_BIOS
ALIGN MENU_JUMP_ALIGN
MenuInit_SetTitleHeightFromAL:
	mov		[bp+MENUINIT.bTitleLines], al
	ret

ALIGN MENU_JUMP_ALIGN
MenuInit_SetInformationHeightFromAL:
	mov		[bp+MENUINIT.bInfoLines], al
	ret

ALIGN MENU_JUMP_ALIGN
MenuInit_SetTotalItemsFromAX:
	mov		[bp+MENUINIT.wItems], ax
	ret
%endif
;--------------------------------------------------------------------
; MenuInit_SetUserDataFromDSSI
; MenuInit_GetUserDataToDSSI
;	Parameters
;		DS:SI:	User data (MenuInit_SetUserDataFromDSSI)
;		SS:BP:	Ptr to MENU
;	Returns:
;		DS:SI:	User data (MenuInit_GetUserDataToDSSI)
;	Corrupts registers:
;		Nothing
;--------------------------------------------------------------------
%ifndef EXCLUDE_FROM_XTIDE_UNIVERSAL_BIOS
ALIGN MENU_JUMP_ALIGN
MenuInit_SetUserDataFromDSSI:
	; Store far pointer DS:SI into the MENU struct (offset, then segment)
	mov		[bp+MENU.dwUserData], si
	mov		[bp+MENU.dwUserData+2], ds
	ret

ALIGN MENU_JUMP_ALIGN
MenuInit_GetUserDataToDSSI:
	lds		si, [bp+MENU.dwUserData]
	ret
%endif
| orinocoz/xtideuniversalbios | Assembly_Library/Src/Menu/MenuInit.asm | Assembly | gpl-2.0 | 6,427 |
/*
* Copyright (C) 2001 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include "dm-core.h"
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>
#define DM_MSG_PREFIX "table"
#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
/*
 * In-core representation of a device-mapper table: the targets, the
 * btree index used for sector -> target lookup, and the devices in use.
 */
struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;	/* bio-based / request-based / dax / none */
	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];
	unsigned int num_targets;	/* targets actually added */
	unsigned int num_allocated;	/* capacity of highs/targets arrays */
	sector_t *highs;		/* last sector of each target (see alloc_targets) */
	struct dm_target *targets;
	struct target_type *immutable_target_type;
	bool integrity_supported:1;
	bool singleton:1;
	bool all_blk_mq:1;
	unsigned integrity_added:1;
	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;
	/* a list of devices used by this table */
	struct list_head devices;
	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
	struct dm_md_mempools *mempools;
	struct list_head target_callbacks;
};
/*
* Similar to ceiling(log_size(n))
*/
static unsigned int int_log(unsigned int n, unsigned int base)
{
int result = 0;
while (n > 1) {
n = dm_div_up(n, base);
result++;
}
return result;
}
/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}
/*
 * Return the n'th node of level l from table t.
 * (Each node holds KEYS_PER_NODE keys, stored contiguously per level.)
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}
/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 * Walks down the right-most child chain to the leaf level; returns
 * (sector_t)-1 if the node does not exist at that level.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);
	if (n >= t->counts[l])
		return (sector_t) - 1;
	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
/*
 * Fills in a level of the btree based on the highs of the level
 * below it.  Always returns 0 (kept int for the caller's convention).
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;
	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);
		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}
	return 0;
}
/*
 * Zeroed virtual allocation of @nmemb elements of @elem_size bytes each.
 * Returns NULL if the size computation would overflow or vzalloc() fails.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;	/* nmemb * elem_size would wrap */
	return vzalloc(nmemb * elem_size);
}
EXPORT_SYMBOL(dm_vcalloc);
/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 * Allocates @num entries (plus one sentinel high) and installs the new
 * arrays; only t->highs is carried over for freeing -- targets live in
 * the same allocation.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;
	n_targets = (struct dm_target *) (n_highs + num);
	/* all-ones highs mark slots as unused */
	memset(n_highs, -1, sizeof(*n_highs) * num);
	vfree(t->highs);
	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;
	return 0;
}
/*
 * Allocate an empty table for @md with capacity for @num_targets targets
 * (rounded up to a whole btree node; 0 means one node's worth).
 *
 * Return: 0 and *result set on success, -ENOMEM on allocation failure or
 * if rounding num_targets up overflowed to zero.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;
	INIT_LIST_HEAD(&t->devices);
	INIT_LIST_HEAD(&t->target_callbacks);
	if (!num_targets)
		num_targets = KEYS_PER_NODE;
	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
	if (!num_targets) {
		/* dm_round_up overflowed */
		kfree(t);
		return -ENOMEM;
	}
	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}
	t->type = DM_TYPE_NONE;
	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}
/*
 * Drop any devices still on the table's device list at destroy time.
 * Reaching here means a target forgot a dm_put_device() -- warn per entry.
 */
static void free_devices(struct list_head *devices, struct mapped_device *md)
{
	struct list_head *tmp, *next;
	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
		       dm_device_name(md), dd->dm_dev->name);
		dm_put_table_device(md, dd->dm_dev);
		kfree(dd);
	}
}
/*
 * Free a table: btree index, targets (calling each target's dtr and
 * dropping its type ref), leaked devices and mempools.  NULL is a no-op.
 */
void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;
	if (!t)
		return;
	/* free the indexes (levels 0..depth-2 share one allocation) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);
	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;
		if (tgt->type->dtr)
			tgt->type->dtr(tgt);
		dm_put_target_type(tgt->type);
	}
	vfree(t->highs);	/* also frees t->targets (same allocation) */
	/* free the device list */
	free_devices(&t->devices, t->md);
	dm_free_md_mempools(t->mempools);
	kfree(t);
}
/*
 * See if we've already got a device in the list.
 * Matches on dev_t; returns NULL if not present.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;
	list_for_each_entry (dd, l, list)
		if (dd->dm_dev->bdev->bd_dev == dev)
			return dd;
	return NULL;
}
/*
 * If possible, this checks an area of a destination device is invalid.
 * Returns 1 (invalid) if the [start, start+len) range falls outside the
 * device, breaks zone alignment on zoned devices, or is not aligned to
 * the stacked logical block size; 0 means "not known to be invalid".
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q;
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size =
		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;
	char b[BDEVNAME_SIZE];
	/*
	 * Some devices exist without request functions,
	 * such as loop devices not yet bound to backing files.
	 * Forbid the use of such devices.
	 */
	q = bdev_get_queue(bdev);
	if (!q || !q->make_request_fn) {
		DMWARN("%s: %s is not yet initialised: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}
	if (!dev_size)
		return 0;	/* zero-sized device: nothing to range-check */
	if ((start >= dev_size) || (start + len > dev_size)) {
		DMWARN("%s: %s too small for target: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}
	/*
	 * If the target is mapped to zoned block device(s), check
	 * that the zones are not partially mapped.
	 */
	if (bdev_zoned_model(bdev) != BLK_ZONED_NONE) {
		unsigned int zone_sectors = bdev_zone_sectors(bdev);
		if (start & (zone_sectors - 1)) {
			DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
			       dm_device_name(ti->table->md),
			       (unsigned long long)start,
			       zone_sectors, bdevname(bdev, b));
			return 1;
		}
		/*
		 * Note: The last zone of a zoned block device may be smaller
		 * than other zones. So for a target mapping the end of a
		 * zoned block device with such a zone, len would not be zone
		 * aligned. We do not allow such last smaller zone to be part
		 * of the mapping here to ensure that mappings with multiple
		 * devices do not end up with a smaller zone in the middle of
		 * the sector range.
		 */
		if (len & (zone_sectors - 1)) {
			DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
			       dm_device_name(ti->table->md),
			       (unsigned long long)len,
			       zone_sectors, bdevname(bdev, b));
			return 1;
		}
	}
	if (logical_block_size_sectors <= 1)
		return 0;	/* 512b logical blocks: any sector is aligned */
	if (start & (logical_block_size_sectors - 1)) {
		DMWARN("%s: start=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)start,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}
	if (len & (logical_block_size_sectors - 1)) {
		DMWARN("%s: len=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)len,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}
	return 0;
}
/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev *old_dev, *new_dev;
	old_dev = dd->dm_dev;
	/* open a second handle with the widened mode before dropping the old one */
	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
				dd->dm_dev->mode | new_mode, &new_dev);
	if (r)
		return r;
	dd->dm_dev = new_dev;
	dm_put_table_device(md, old_dev);
	return 0;
}
/*
 * Convert the path to a device.
 * Tries a block-device path lookup first, then falls back to
 * name_to_dev_t() (e.g. "major:minor" syntax).  Returns 0 on failure.
 */
dev_t dm_get_dev_t(const char *path)
{
	dev_t dev;
	struct block_device *bdev;
	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		dev = name_to_dev_t(path);
	else {
		dev = bdev->bd_dev;
		bdput(bdev);
	}
	return dev;
}
EXPORT_SYMBOL_GPL(dm_get_dev_t);
/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.  If the device is already open with a narrower
 * mode, it is reopened with the union of the modes.
 *
 * Return: 0 with *result set, or a negative errno.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev_internal *dd;
	struct dm_table *t = ti->table;
	BUG_ON(!t);
	dev = dm_get_dev_t(path);
	if (!dev)
		return -ENODEV;
	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;
		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
			kfree(dd);
			return r;
		}
		refcount_set(&dd->count, 1);
		list_add(&dd->list, &t->devices);
	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
		/* existing open lacks some of the requested mode bits */
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
		refcount_inc(&dd->count);
	}
	*result = dd->dm_dev;
	return 0;
}
EXPORT_SYMBOL(dm_get_device);
/*
 * iterate_devices callback: fold one underlying device's queue limits
 * into the table-wide limits in @data.  Alignment inconsistencies are
 * only warned about, never treated as fatal.  Always returns 0.
 */
static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char b[BDEVNAME_SIZE];
	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return 0;
	}
	if (bdev_stack_limits(limits, bdev, start) < 0)
		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);
	limits->zoned = blk_queue_zoned_model(q);
	return 0;
}
/*
 * Decrement a device's use count and remove it if necessary.
 * Warns (and does nothing) if @d is not on the table's device list.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	int found = 0;
	struct list_head *devices = &ti->table->devices;
	struct dm_dev_internal *dd;
	list_for_each_entry(dd, devices, list) {
		if (dd->dm_dev == d) {
			found = 1;
			break;
		}
	}
	if (!found) {
		DMWARN("%s: device %s not in table devices list",
		       dm_device_name(ti->table->md), d->name);
		return;
	}
	if (refcount_dec_and_test(&dd->count)) {
		/* last user: close the device and drop the list entry */
		dm_put_table_device(ti->table->md, d);
		list_del(&dd->list);
		kfree(dd);
	}
}
EXPORT_SYMBOL(dm_put_device);
/*
 * A target adjoins the table when it starts exactly where the previous
 * target ends (or at sector 0 when the table is still empty).
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *last;

	if (!table->num_targets)
		return !ti->begin;

	last = &table->targets[table->num_targets - 1];
	return ti->begin == last->begin + last->len;
}
/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 *
 * Returns the (re)allocated vector, or NULL on allocation failure, in
 * which case *array_size is left unchanged. @old_argv is always freed.
 *
 * Fixes: don't memcpy from a NULL @old_argv on the very first call, and
 * use kmalloc_array() so the size multiplication is overflow-checked.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;
	gfp_t gfp;

	if (*array_size) {
		new_size = *array_size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
	if (argv && old_argv)
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
	if (argv)
		*array_size = new_size;

	kfree(old_argv);
	return argv;
}
/*
 * Destructively splits up the argument list to pass to ctr.
 *
 * Whitespace separates tokens; a backslash quotes the following character.
 * @input is modified in place: NULs terminate each token and quoting
 * backslashes are squeezed out. On success *argvp points into @input via a
 * kmalloc'd pointer vector (caller kfrees the vector, not the strings) and
 * *argc holds the token count. Returns 0 or -ENOMEM.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	/* A NULL parameter string means no arguments at all. */
	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
/*
 * Impose necessary and sufficient conditions on a devices's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully. If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 *
 * Returns 0 when every target boundary is aligned, -EINVAL otherwise.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
						     struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i;

	/*
	 * Check each entry in the table in turn.
	 */
	for (i = 0; i < dm_table_get_num_targets(table); i++) {
		ti = dm_table_get_target(table, i);

		blk_set_stacking_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry are they compatible with its logical_block_size?
		 * (power-of-two mask works because block sizes are powers of 2)
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	/* A non-zero carry-over after the loop means a misaligned boundary. */
	if (remaining) {
		DMWARN("%s: table line %u (start sect %llu len %llu) "
		       "not aligned to h/w logical block size %u",
		       dm_device_name(table->md), i,
		       (unsigned long long) ti->begin,
		       (unsigned long long) ti->len,
		       limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}
/*
 * Parse one table line and append the resulting target to @t.
 *
 * @type is the target type name, @start/@len the mapped sector range and
 * @params the space-separated constructor arguments. Enforces singleton,
 * immutable, read-only and contiguity (no gaps) constraints before calling
 * the target's ctr. Returns 0 on success or a negative errno; on failure
 * the target-type reference taken here is dropped.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	/* A singleton target already present forbids any additions. */
	if (t->singleton) {
		DMERR("%s: target type %s must appear alone in table",
		      dm_device_name(t->md), t->targets->type->name);
		return -EINVAL;
	}

	BUG_ON(t->num_targets >= t->num_allocated);

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	/* Takes a reference on the target type; dropped on the bad path. */
	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
		return -EINVAL;
	}

	if (dm_target_needs_singleton(tgt->type)) {
		if (t->num_targets) {
			tgt->error = "singleton target type must appear alone in table";
			goto bad;
		}
		t->singleton = true;
	}

	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
		tgt->error = "target type may not be included in a read-only table";
		goto bad;
	}

	if (t->immutable_target_type) {
		if (t->immutable_target_type != tgt->type) {
			tgt->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
	} else if (dm_target_is_immutable(tgt->type)) {
		if (t->num_targets) {
			tgt->error = "immutable target type cannot be mixed with other target types";
			goto bad;
		}
		t->immutable_target_type = tgt->type;
	}

	if (dm_target_has_integrity(tgt->type))
		t->integrity_added = 1;

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	/* Target constructor may overwrite tgt->error on failure. */
	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	/* Record the target's last sector for the btree leaf layer. */
	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	if (!tgt->num_discard_bios && tgt->discards_supported)
		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
		       dm_device_name(t->md), type);

	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}
/*
 * Target argument parsing helpers.
 *
 * Consume the next argument and validate it as an unsigned int within
 * [arg->min, arg->max]. With @grouped set, additionally require that at
 * least *value further arguments remain. On any failure *error is set to
 * arg->error and -EINVAL is returned.
 */
static int validate_next_arg(const struct dm_arg *arg,
			     struct dm_arg_set *arg_set,
			     unsigned *value, char **error, unsigned grouped)
{
	char dummy;
	const char *arg_str = dm_shift_arg(arg_set);

	/* Must parse as a plain unsigned int with nothing trailing. */
	if (!arg_str || sscanf(arg_str, "%u%c", value, &dummy) != 1)
		goto invalid;

	/* Must lie inside the declared range. */
	if (*value < arg->min || *value > arg->max)
		goto invalid;

	/* Grouped args must leave enough arguments to consume. */
	if (grouped && arg_set->argc < *value)
		goto invalid;

	return 0;

invalid:
	*error = arg->error;
	return -EINVAL;
}
/*
 * Read and validate a single (non-grouped) unsigned argument.
 * Thin wrapper around validate_next_arg() with grouped == 0.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);
/*
 * Read and validate a group-count argument: the value read must be
 * followed by at least that many further arguments in @arg_set.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);
/*
 * Pop and return the next argument from @as, or NULL when none remain.
 */
const char *dm_shift_arg(struct dm_arg_set *as)
{
	char *arg;

	if (!as->argc)
		return NULL;

	as->argc--;
	arg = *as->argv++;
	return arg;
}
EXPORT_SYMBOL(dm_shift_arg);
/*
 * Skip @num_args arguments in @as. It is a bug to consume more
 * arguments than remain.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
	BUG_ON(num_args > as->argc);

	as->argv += num_args;
	as->argc -= num_args;
}
EXPORT_SYMBOL(dm_consume_args);
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
return (table_type == DM_TYPE_BIO_BASED ||
table_type == DM_TYPE_DAX_BIO_BASED);
}
static bool __table_type_request_based(enum dm_queue_mode table_type)
{
return (table_type == DM_TYPE_REQUEST_BASED ||
table_type == DM_TYPE_MQ_REQUEST_BASED);
}
/*
 * Let a target pre-set the table's queue mode before
 * dm_table_determine_type() runs (see its DM_TYPE_NONE check).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{
	t->type = type;
}
EXPORT_SYMBOL_GPL(dm_table_set_type);
/* iterate_devices callback: does this device's queue advertise DAX? */
static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
			       sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q)
		return 0;
	return blk_queue_dax(q) ? 1 : 0;
}
/* True only if every target (and each of its devices) supports DAX. */
static bool dm_table_supports_dax(struct dm_table *t)
{
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		struct dm_target *ti = dm_table_get_target(t, i++);

		/* The target itself must implement direct_access... */
		if (!ti->type->direct_access)
			return false;

		/* ...and every underlying device must be DAX-capable. */
		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, device_supports_dax, NULL))
			return false;
	}

	return true;
}
/*
 * Decide this table's queue mode (bio-based, DAX bio-based, request-based
 * or blk-mq request-based) from its targets and underlying devices, or
 * verify a mode a target already set via dm_table_set_type().
 * Returns 0 on success, -EINVAL on an inconsistent table.
 */
static int dm_table_determine_type(struct dm_table *t)
{
	unsigned i;
	unsigned bio_based = 0, request_based = 0, hybrid = 0;
	unsigned sq_count = 0, mq_count = 0;
	struct dm_target *tgt;
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);

	if (t->type != DM_TYPE_NONE) {
		/* target already set the table's type */
		if (t->type == DM_TYPE_BIO_BASED)
			return 0;
		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
		goto verify_rq_based;
	}

	/* Classify every target; mixing bio- and request-based is illegal. */
	for (i = 0; i < t->num_targets; i++) {
		tgt = t->targets + i;
		if (dm_target_hybrid(tgt))
			hybrid = 1;
		else if (dm_target_request_based(tgt))
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMWARN("Inconsistent table: different target types"
			       " can't be mixed up");
			return -EINVAL;
		}
	}

	if (hybrid && !bio_based && !request_based) {
		/*
		 * The targets can work either way.
		 * Determine the type from the live device.
		 * Default to bio-based if device is new.
		 */
		if (__table_type_request_based(live_md_type))
			request_based = 1;
		else
			bio_based = 1;
	}

	if (bio_based) {
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		/* Upgrade to DAX mode when possible (or inherited from live md). */
		if (dm_table_supports_dax(t) ||
		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED))
			t->type = DM_TYPE_DAX_BIO_BASED;
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

	/*
	 * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
	 * having a compatible target use dm_table_set_type.
	 */
	t->type = DM_TYPE_REQUEST_BASED;

verify_rq_based:
	/*
	 * Request-based dm supports only tables that have a single target now.
	 * To support multiple targets, request splitting support is needed,
	 * and that needs lots of changes in the block-layer.
	 * (e.g. request completion process for partial completion.)
	 */
	if (t->num_targets > 1) {
		DMWARN("Request-based dm doesn't support multiple targets yet");
		return -EINVAL;
	}

	if (list_empty(devices)) {
		int srcu_idx;
		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);

		/* inherit live table's type and all_blk_mq */
		if (live_table) {
			t->type = live_table->type;
			t->all_blk_mq = live_table->all_blk_mq;
		}
		dm_put_live_table(t->md, srcu_idx);
		return 0;
	}

	/* Non-request-stackable devices can't be used for request-based dm */
	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);

		if (!queue_is_rq_based(q)) {
			DMERR("table load rejected: including"
			      " non-request-stackable devices");
			return -EINVAL;
		}

		/* Tally legacy (sq) vs blk-mq devices; mixing is rejected below. */
		if (q->mq_ops)
			mq_count++;
		else
			sq_count++;
	}
	if (sq_count && mq_count) {
		DMERR("table load rejected: not all devices are blk-mq request-stackable");
		return -EINVAL;
	}
	t->all_blk_mq = mq_count > 0;

	if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
		DMERR("table load rejected: all devices are not blk-mq request-stackable");
		return -EINVAL;
	}

	return 0;
}
/* Return the table's queue mode as set by dm_table_determine_type(). */
enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{
	return t->type;
}
/* Return the immutable target type recorded at load time, or NULL. */
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
	return t->immutable_target_type;
}
/*
 * Return the table's single immutable target, or NULL.
 *
 * Fix: also bail out on an empty table — the original only rejected
 * num_targets > 1, so num_targets == 0 read the uninitialized
 * t->targets[0].type slot.
 */
struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
	/* Immutable target is implicitly a singleton */
	if (t->num_targets != 1 ||
	    !dm_target_is_immutable(t->targets[0].type))
		return NULL;

	return t->targets;
}
/* Return the first wildcard target in table order, or NULL if none. */
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		struct dm_target *ti = dm_table_get_target(t, i++);

		if (dm_target_is_wildcard(ti->type))
			return ti;
	}

	return NULL;
}
/* True when the table's queue mode is bio-based (plain or DAX). */
bool dm_table_bio_based(struct dm_table *t)
{
	return __table_type_bio_based(dm_table_get_type(t));
}
/* True when the table's queue mode is request-based (legacy or blk-mq). */
bool dm_table_request_based(struct dm_table *t)
{
	return __table_type_request_based(dm_table_get_type(t));
}
/* True when every underlying device is blk-mq (set in determine_type). */
bool dm_table_all_blk_mq_devices(struct dm_table *t)
{
	return t->all_blk_mq;
}
/*
 * Allocate the table's mempools for the determined queue mode.
 * For bio-based tables the pools are sized for the largest
 * per_io_data_size any target requests. Returns 0, -EINVAL if the type
 * was never set, or -ENOMEM.
 */
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
	enum dm_queue_mode type = dm_table_get_type(t);
	unsigned per_io_data_size = 0;
	struct dm_target *tgt;
	unsigned i;

	if (unlikely(type == DM_TYPE_NONE)) {
		DMWARN("no table type is set, can't allocate mempools");
		return -EINVAL;
	}

	/* Size per-bio data for the most demanding target. */
	if (__table_type_bio_based(type))
		for (i = 0; i < t->num_targets; i++) {
			tgt = t->targets + i;
			per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
		}

	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
	if (!t->mempools)
		return -ENOMEM;

	return 0;
}
/* Release the table's mempools and clear the pointer. */
void dm_table_free_md_mempools(struct dm_table *t)
{
	dm_free_md_mempools(t->mempools);
	t->mempools = NULL;
}
/* Accessor for the table's mempools (may be NULL before allocation). */
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
	return t->mempools;
}
/*
 * Allocate and wire up the internal (non-leaf) levels of the btree.
 * Level t->depth - 1 (the leaves, t->highs) is set up by the caller;
 * here each higher level gets one key slot per child node, carved from
 * a single vcalloc'd array. Returns 0 or -ENOMEM.
 */
static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}
/*
 * Builds the btree to index the map.
 *
 * The tree depth is derived from the number of leaf nodes needed to
 * hold one key per target; single-level tables need no internal nodes.
 */
static int dm_table_build_index(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	/* internal levels only exist for multi-level trees */
	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
static bool integrity_profile_exists(struct gendisk *disk)
{
return !!blk_get_integrity(disk);
}
/*
 * Get a disk whose integrity profile reflects the table's profile.
 * Returns NULL if integrity support was inconsistent or unavailable.
 *
 * Every target must pass integrity through, and every underlying disk
 * must have a profile compatible with the others.
 */
static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *dd = NULL;
	struct gendisk *prev_disk = NULL, *template_disk = NULL;
	unsigned i;

	/* All targets must pass integrity metadata through. */
	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		struct dm_target *ti = dm_table_get_target(t, i);
		if (!dm_target_passes_integrity(ti->type))
			goto no_integrity;
	}

	/* All devices must carry mutually compatible profiles. */
	list_for_each_entry(dd, devices, list) {
		template_disk = dd->dm_dev->bdev->bd_disk;
		if (!integrity_profile_exists(template_disk))
			goto no_integrity;
		else if (prev_disk &&
			 blk_integrity_compare(prev_disk, template_disk) < 0)
			goto no_integrity;
		prev_disk = template_disk;
	}

	return template_disk;

no_integrity:
	if (prev_disk)
		DMWARN("%s: integrity not set: %s and %s profile mismatch",
		       dm_device_name(t->md),
		       prev_disk->disk_name,
		       template_disk->disk_name);
	return NULL;
}
/*
 * Register the mapped device for blk_integrity support if the
 * underlying devices have an integrity profile. But all devices may
 * not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integity
 * profile). Consequently, stacked DM devices force a 2 stage integrity
 * profile validation: First pass during table load, final pass during
 * resume.
 *
 * Returns 0 on success (including the benign "nothing to register"
 * cases) and 1 when a conflicting profile already exists on the md.
 */
static int dm_table_register_integrity(struct dm_table *t)
{
	struct mapped_device *md = t->md;
	struct gendisk *template_disk = NULL;

	/* If target handles integrity itself do not register it here. */
	if (t->integrity_added)
		return 0;

	template_disk = dm_table_get_integrity_disk(t);
	if (!template_disk)
		return 0;

	if (!integrity_profile_exists(dm_disk(md))) {
		t->integrity_supported = true;
		/*
		 * Register integrity profile during table load; we can do
		 * this because the final profile must match during resume.
		 */
		blk_integrity_register(dm_disk(md),
				       blk_get_integrity(template_disk));
		return 0;
	}

	/*
	 * If DM device already has an initialized integrity
	 * profile the new profile should not conflict.
	 */
	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
		DMWARN("%s: conflict with existing integrity profile: "
		       "%s profile mismatch",
		       dm_device_name(t->md),
		       template_disk->disk_name);
		return 1;
	}

	/* Preserve existing integrity profile */
	t->integrity_supported = true;
	return 0;
}
/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 *
 * Steps run in order and the first failure aborts with its errno.
 */
int dm_table_complete(struct dm_table *t)
{
	int r;

	r = dm_table_determine_type(t);
	if (r) {
		DMERR("unable to determine table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_register_integrity(t);
	if (r) {
		DMERR("could not register integrity profile.");
		return r;
	}

	r = dm_table_alloc_md_mempools(t, t->md);
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}
/* Serializes setting (dm_table_event_callback) and firing (dm_table_event)
 * of the table's event callback. */
static DEFINE_MUTEX(_event_lock);
/*
 * Install (or clear, with fn == NULL) the callback fired by
 * dm_table_event(). Protected by _event_lock against concurrent firing.
 */
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}
/*
 * Fire the table's registered event callback, if any.
 * Must be called from process context (takes a mutex).
 */
void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);
/*
 * Total mapped size in sectors: one past the last target's final sector
 * (highs[] stores each target's last sector), or 0 for an empty table.
 */
sector_t dm_table_get_size(struct dm_table *t)
{
	if (!t->num_targets)
		return 0;

	return t->highs[t->num_targets - 1] + 1;
}
EXPORT_SYMBOL(dm_table_get_size);
/* Return target @index, or NULL when the index is out of range. */
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	return index < t->num_targets ? t->targets + index : NULL;
}
/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 *
 * Each level holds the last sector of every child; descending picks the
 * first key >= sector at each level, ending on a leaf slot that maps
 * directly to a target index.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
/* iterate_devices callback: bump the counter passed via *data. */
static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	unsigned *count = data;

	*count += 1;

	return 0;
}
/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *table)
{
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(table); i++) {
		struct dm_target *ti = dm_table_get_target(table, i);
		unsigned num_devices = 0;

		if (!ti->type->iterate_devices)
			return false;

		ti->type->iterate_devices(ti, count_device, &num_devices);
		if (num_devices)
			return false;
	}

	return true;
}
/* iterate_devices callback: does the device match *data's zoned model? */
static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
				 sector_t start, sector_t len, void *data)
{
	enum blk_zoned_model *zoned_model = data;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q)
		return 0;
	return blk_queue_zoned_model(q) == *zoned_model;
}
/*
 * True when every target (and every device it uses) is consistent with
 * @zoned_model. Host-managed zones additionally require explicit
 * target-level support.
 */
static bool dm_table_supports_zoned_model(struct dm_table *t,
					  enum blk_zoned_model zoned_model)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		/* Host-managed zones demand a zone-aware target. */
		if (zoned_model == BLK_ZONED_HM &&
		    !dm_target_supports_zoned_hm(ti->type))
			return false;

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
			return false;
	}

	return true;
}
/* iterate_devices callback: does the device's zone size match *data? */
static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
				       sector_t start, sector_t len, void *data)
{
	unsigned int *zone_sectors = data;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q)
		return 0;
	return blk_queue_zone_sectors(q) == *zone_sectors;
}
/* True when every device of every target reports @zone_sectors. */
static bool dm_table_matches_zone_sectors(struct dm_table *t,
					  unsigned int zone_sectors)
{
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		struct dm_target *ti = dm_table_get_target(t, i++);

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
			return false;
	}

	return true;
}
/*
 * Verify that all of the table's devices agree on @zoned_model and
 * @zone_sectors. A BLK_ZONED_NONE model needs no validation; otherwise
 * the zone size must be a power of two and uniform across devices.
 * Returns 0 or -EINVAL.
 */
static int validate_hardware_zoned_model(struct dm_table *table,
					 enum blk_zoned_model zoned_model,
					 unsigned int zone_sectors)
{
	if (zoned_model == BLK_ZONED_NONE)
		return 0;

	if (!dm_table_supports_zoned_model(table, zoned_model)) {
		DMERR("%s: zoned model is not consistent across all devices",
		      dm_device_name(table->md));
		return -EINVAL;
	}

	/* Check zone size validity and compatibility */
	if (!zone_sectors || !is_power_of_2(zone_sectors))
		return -EINVAL;

	if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
		DMERR("%s: zone sectors is not consistent across all devices",
		      dm_device_name(table->md));
		return -EINVAL;
	}

	return 0;
}
/*
 * Establish the new table's queue_limits and validate them.
 *
 * For each target: stack its devices' limits, apply its .io_hints, check
 * device areas, then merge into *limits. Finally validate zoned-model
 * consistency and logical-block alignment. Returns 0 or -EINVAL.
 */
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits)
{
	struct dm_target *ti;
	struct queue_limits ti_limits;
	unsigned i;
	enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
	unsigned int zone_sectors = 0;

	blk_set_stacking_limits(limits);

	for (i = 0; i < dm_table_get_num_targets(table); i++) {
		blk_set_stacking_limits(&ti_limits);

		ti = dm_table_get_target(table, i);

		if (!ti->type->iterate_devices)
			goto combine_limits;

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

		/* Remember the first non-NONE zoned model before .io_hints runs. */
		if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
			/*
			 * After stacking all limits, validate all devices
			 * in table support this zoned model and zone sectors.
			 */
			zoned_model = ti_limits.zoned;
			zone_sectors = ti_limits.chunk_sectors;
		}

		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
		 */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
			DMWARN("%s: adding target device "
			       "(start sect %llu len %llu) "
			       "caused an alignment inconsistency",
			       dm_device_name(table->md),
			       (unsigned long long) ti->begin,
			       (unsigned long long) ti->len);

		/*
		 * FIXME: this should likely be moved to blk_stack_limits(), would
		 * also eliminate limits->zoned stacking hack in dm_set_device_limits()
		 */
		if (limits->zoned == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
			/*
			 * By default, the stacked limits zoned model is set to
			 * BLK_ZONED_NONE in blk_set_stacking_limits(). Update
			 * this model using the first target model reported
			 * that is not BLK_ZONED_NONE. This will be either the
			 * first target device zoned model or the model reported
			 * by the target .io_hints.
			 */
			limits->zoned = ti_limits.zoned;
		}
	}

	/*
	 * Verify that the zoned model and zone sectors, as determined before
	 * any .io_hints override, are the same across all devices in the table.
	 * - this is especially relevant if .io_hints is emulating a disk-managed
	 *   zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
	 * BUT...
	 */
	if (limits->zoned != BLK_ZONED_NONE) {
		/*
		 * ...IF the above limits stacking determined a zoned model
		 * validate that all of the table's devices conform to it.
		 */
		zoned_model = limits->zoned;
		zone_sectors = limits->chunk_sectors;
	}
	if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
		return -EINVAL;

	return validate_hardware_logical_block_alignment(table, limits);
}
/*
 * Verify that all devices have an integrity profile that matches the
 * DM device's registered integrity profile. If the profiles don't
 * match then unregister the DM device's integrity profile.
 *
 * This is the "second pass" of the 2-stage validation described above
 * dm_table_register_integrity(), run at resume time.
 */
static void dm_table_verify_integrity(struct dm_table *t)
{
	struct gendisk *template_disk = NULL;

	/* Targets handling integrity themselves are left alone. */
	if (t->integrity_added)
		return;

	if (t->integrity_supported) {
		/*
		 * Verify that the original integrity profile
		 * matches all the devices in this table.
		 */
		template_disk = dm_table_get_integrity_disk(t);
		if (template_disk &&
		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
			return;
	}

	/* Mismatch or no support: drop the md's registered profile. */
	if (integrity_profile_exists(dm_disk(t->md))) {
		DMWARN("%s: unable to establish an integrity profile",
		       dm_device_name(t->md));
		blk_integrity_unregister(dm_disk(t->md));
	}
}
/* iterate_devices callback: are the queue-flag bits in *data set? */
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	unsigned long flush_flags = (unsigned long) data;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q)
		return 0;
	return (q->queue_flags & flush_flags) != 0;
}
/*
 * Require at least one underlying device to support flushes.
 * t->devices includes internal dm devices such as mirror logs
 * so we need to use iterate_devices here, which targets
 * supporting flushes must provide.
 */
static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		struct dm_target *ti = dm_table_get_target(t, i++);

		if (!ti->num_flush_bios)
			continue;

		if (ti->flush_supported)
			return true;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
			return true;
	}

	return false;
}
/* iterate_devices callback: DAX device present with write cache enabled? */
static int device_dax_write_cache_enabled(struct dm_target *ti,
					  struct dm_dev *dev, sector_t start,
					  sector_t len, void *data)
{
	struct dax_device *dax_dev = dev->dax_dev;

	return dax_dev && dax_write_cache_enabled(dax_dev);
}
/* True when any device of any target has a DAX write cache enabled. */
static int dm_table_supports_dax_write_cache(struct dm_table *t)
{
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		struct dm_target *ti = dm_table_get_target(t, i++);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti,
				device_dax_write_cache_enabled, NULL))
			return true;
	}

	return false;
}
/* iterate_devices callback: is the device non-rotational (SSD-like)? */
static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q)
		return 0;
	return blk_queue_nonrot(q) ? 1 : 0;
}
/* iterate_devices callback: device does NOT contribute entropy? */
static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q)
		return 0;
	return blk_queue_add_random(q) ? 0 : 1;
}
/* iterate_devices callback: does the queue allow SG merging? */
static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q)
		return 0;
	return test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) ? 0 : 1;
}
/* True when @func holds for every device of every target in @t. */
static bool dm_table_all_devices_attribute(struct dm_table *t,
					   iterate_devices_callout_fn func)
{
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		struct dm_target *ti = dm_table_get_target(t, i++);

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, func, NULL))
			return false;
	}

	return true;
}
/* iterate_devices callback: device lacks WRITE SAME support? */
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
					 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q)
		return 0;
	return q->limits.max_write_same_sectors == 0;
}
/* True only if every target issues WRITE SAME and all its devices can. */
static bool dm_table_supports_write_same(struct dm_table *t)
{
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		struct dm_target *ti = dm_table_get_target(t, i++);

		if (!ti->num_write_same_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
			return false;
	}

	return true;
}
/* iterate_devices callback: device lacks WRITE ZEROES support? */
static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q)
		return 0;
	return q->limits.max_write_zeroes_sectors == 0;
}
/* True only if every target issues WRITE ZEROES and all its devices can. */
static bool dm_table_supports_write_zeroes(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->num_write_zeroes_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL))
			return false;
	}

	return true;
}
/* iterate_devices callback: device lacks discard support? */
static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				      sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q)
		return 0;
	return blk_queue_discard(q) ? 0 : 1;
}
/* True only if every target can service discards. */
static bool dm_table_supports_discards(struct dm_table *t)
{
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		struct dm_target *ti = dm_table_get_target(t, i++);

		if (!ti->num_discard_bios)
			return false;

		/*
		 * Either the target provides discard support (as implied by setting
		 * 'discards_supported') or it relies on _all_ data devices having
		 * discard support.
		 */
		if (!ti->discards_supported &&
		    (!ti->type->iterate_devices ||
		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
			return false;
	}

	return true;
}
/*
 * Apply the table's computed limits and capability flags (discard, flush,
 * DAX write cache, rotational, WRITE SAME/ZEROES, SG merge, entropy,
 * integrity) to the mapped device's request_queue.
 */
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits)
{
	bool wc = false, fua = false;

	/*
	 * Copy table's limits to the DM device's request_queue
	 */
	q->limits = *limits;

	if (!dm_table_supports_discards(t)) {
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
		/* Must also clear discard limits... */
		q->limits.max_discard_sectors = 0;
		q->limits.max_hw_discard_sectors = 0;
		q->limits.discard_granularity = 0;
		q->limits.discard_alignment = 0;
		q->limits.discard_misaligned = 0;
	} else
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

	/* FUA only matters when a volatile write cache exists at all. */
	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
		wc = true;
		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
			fua = true;
	}
	blk_queue_write_cache(q, wc, fua);

	if (dm_table_supports_dax_write_cache(t))
		dax_write_cache(t->md->dax_dev, true);

	/* Ensure that all underlying devices are non-rotational. */
	if (dm_table_all_devices_attribute(t, device_is_nonrot))
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);

	if (!dm_table_supports_write_same(t))
		q->limits.max_write_same_sectors = 0;
	if (!dm_table_supports_write_zeroes(t))
		q->limits.max_write_zeroes_sectors = 0;

	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);

	dm_table_verify_integrity(t);

	/*
	 * Determine whether or not this queue's I/O timings contribute
	 * to the entropy pool, Only request-based targets use this.
	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
	 * have it set.
	 */
	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
}
/* Number of targets currently loaded into the table. */
unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}
/* List head of the table's dm_dev_internal entries. */
struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}
/* The open mode (read/write flags) the table was created with. */
fmode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}
EXPORT_SYMBOL(dm_table_get_mode);
/* Which per-target suspend hook suspend_targets() should invoke. */
enum suspend_mode {
	PRESUSPEND,
	PRESUSPEND_UNDO,
	POSTSUSPEND,
};
/*
 * Invoke the suspend hook selected by @mode on every target, in table
 * order. Caller must hold md->suspend_lock.
 */
static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	lockdep_assert_held(&t->md->suspend_lock);

	while (i--) {
		switch (mode) {
		case PRESUSPEND:
			if (ti->type->presuspend)
				ti->type->presuspend(ti);
			break;
		case PRESUSPEND_UNDO:
			if (ti->type->presuspend_undo)
				ti->type->presuspend_undo(ti);
			break;
		case POSTSUSPEND:
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
			break;
		}
		ti++;
	}
}
/*
 * Run every target's presuspend hook.  A NULL table (no active map) is a
 * no-op, so callers need not check first.
 */
void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;
	suspend_targets(t, PRESUSPEND);
}
/*
 * Undo a previous presuspend on every target (e.g. after a failed
 * suspend attempt).  A NULL table is a no-op.
 */
void dm_table_presuspend_undo_targets(struct dm_table *t)
{
	if (!t)
		return;
	suspend_targets(t, PRESUSPEND_UNDO);
}
/*
 * Run every target's postsuspend hook after I/O has been quiesced.
 * A NULL table is a no-op.
 */
void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;
	suspend_targets(t, POSTSUSPEND);
}
/*
 * Resume all targets in two phases: first give every target a chance to
 * veto via ->preresume; only if all of them succeed run the ->resume
 * hooks.  Returns 0 on success or the first preresume error.
 */
int dm_table_resume_targets(struct dm_table *t)
{
	int i, r = 0;

	lockdep_assert_held(&t->md->suspend_lock);

	/* Phase 1: any preresume failure aborts the whole resume. */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
		if (r) {
			DMERR("%s: %s: preresume failed, error = %d",
			      dm_device_name(t->md), ti->type->name, r);
			return r;
		}
	}

	/* Phase 2: all targets agreed, actually resume them. */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}

	return 0;
}
/*
 * Register a callback structure (e.g. a congestion callback) with the
 * table; consulted later by dm_table_any_congested().
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
{
	list_add(&cb->list, &t->target_callbacks);
}
EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
/*
 * Report whether any underlying device queue, or any registered target
 * callback, is congested.  @bdi_bits selects the kind of congestion
 * being queried; the per-device results are OR-ed together.
 */
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_target_callbacks *cb;
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
		char b[BDEVNAME_SIZE];

		/* A device may have disappeared; warn instead of oopsing. */
		if (likely(q))
			r |= bdi_congested(q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
				     bdevname(dd->dm_dev->bdev, b));
	}

	list_for_each_entry(cb, &t->target_callbacks, list)
		if (cb->congested_fn)
			r |= cb->congested_fn(cb, bdi_bits);

	return r;
}
/* Return the mapped device that owns this table. */
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}
EXPORT_SYMBOL(dm_table_get_md);
/*
 * Kick the mapped device's request queue asynchronously.  Only meaningful
 * for request-based tables; bio-based tables return immediately.
 */
void dm_table_run_md_queue_async(struct dm_table *t)
{
	struct mapped_device *md;
	struct request_queue *queue;
	unsigned long flags;

	if (!dm_table_request_based(t))
		return;

	md = dm_table_get_md(t);
	queue = dm_get_md_queue(md);
	if (queue) {
		if (queue->mq_ops)
			/* blk-mq path: restart the hardware queues. */
			blk_mq_run_hw_queues(queue, true);
		else {
			/* Legacy path: must run under the queue lock. */
			spin_lock_irqsave(queue->queue_lock, flags);
			blk_run_queue_async(queue);
			spin_unlock_irqrestore(queue->queue_lock, flags);
		}
	}
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);
| michael2012z/myKernel | drivers/md/dm-table.c | C | gpl-2.0 | 48,154 |
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>1055304</title>
</head>
<body>
<img src="xx.png" alt="" ismap>
<a href="aa"><img src="xx.png" alt="" ismap></a>
</body>
</html> | geoffmcl/tidy-test | test/input/in_1055304.html | HTML | gpl-2.0 | 251 |
<?php
/* Icinga Web 2 | (c) 2013-2015 Icinga Development Team | GPLv2+ */
namespace Icinga\Module\Setup;
use ArrayIterator;
use IteratorAggregate;
use Icinga\Module\Setup\Exception\SetupException;
/**
* Container for multiple configuration steps
*/
class Setup implements IteratorAggregate
{
    /** @var Step[] Registered configuration steps, in execution order */
    protected $steps;

    /** @var bool Result of the last run() invocation */
    protected $state;

    public function __construct()
    {
        $this->steps = array();
    }

    /**
     * Iterate over the registered steps.
     *
     * @return ArrayIterator
     */
    public function getIterator()
    {
        return new ArrayIterator($this->getSteps());
    }

    /**
     * Register a single configuration step.
     *
     * @param Step $step
     */
    public function addStep(Step $step)
    {
        $this->steps[] = $step;
    }

    /**
     * Register multiple configuration steps at once.
     *
     * @param Step[] $steps
     */
    public function addSteps(array $steps)
    {
        foreach ($steps as $step) {
            $this->addStep($step);
        }
    }

    /**
     * Return all registered steps.
     *
     * @return Step[]
     */
    public function getSteps()
    {
        return $this->steps;
    }

    /**
     * Run the configuration and return whether it succeeded
     *
     * @return bool
     */
    public function run()
    {
        $this->state = true;
        try {
            foreach ($this->steps as $step) {
                // Combine with a logical AND instead of the previous
                // bitwise "&=", which silently coerced $this->state to an
                // integer (0/1) and broke the documented bool return type.
                // apply() is evaluated first so every step still runs even
                // after an earlier one has failed.
                $this->state = $step->apply() && $this->state;
            }
        } catch (SetupException $_) {
            $this->state = false;
        }
        return $this->state;
    }

    /**
     * Return a summary of all actions designated to run
     *
     * @return array An array of HTML strings
     */
    public function getSummary()
    {
        $summaries = array();
        foreach ($this->steps as $step) {
            $summaries[] = $step->getSummary();
        }
        return $summaries;
    }

    /**
     * Return a report of all actions that were run
     *
     * @return array An array of arrays of strings
     */
    public function getReport()
    {
        $reports = array();
        foreach ($this->steps as $step) {
            $report = $step->getReport();
            if (! empty($report)) {
                $reports[] = $report;
            }
        }
        return $reports;
    }
}
| nE0sIghT/icingaweb2 | modules/setup/library/Setup/Setup.php | PHP | gpl-2.0 | 1,977 |
var AppDispatcher = require('../dispatcher/app-dispatcher'),
	EventEmitter = require('events').EventEmitter,
	JPSConstants = require('../constants/jetpack-onboarding-constants');

// Event name emitted whenever the spinner state changes.
var CHANGE_EVENT = 'change';

// Module-private spinner state: visibility flag plus an optional message.
var visible = false;
var message = null;

function show(msg) {
	visible = true;
	message = msg;
}

function hide() {
	visible = false;
	message = null;
}

// Flux store exposing the spinner state; mixes EventEmitter into a plain
// object (via the globally available underscore/lodash `_`) so views can
// subscribe to change notifications.
var SpinnerStore = _.extend({}, EventEmitter.prototype, {
	showing: function() {
		return visible;
	},
	getMessage: function() {
		return message;
	},
	addChangeListener: function(callback) {
		this.on(CHANGE_EVENT, callback);
	},
	removeChangeListener: function(callback) {
		this.removeListener(CHANGE_EVENT, callback);
	},
	emitChange: function() {
		this.emit(CHANGE_EVENT);
	},
});

// Mutate the store in response to dispatched spinner actions; all other
// action types are ignored.
AppDispatcher.register(function(action) {
	if (action.actionType === JPSConstants.SHOW_SPINNER) {
		show(action.message);
		SpinnerStore.emitChange();
	} else if (action.actionType === JPSConstants.HIDE_SPINNER) {
		hide();
		SpinnerStore.emitChange();
	}
});

module.exports = SpinnerStore;
module.exports = SpinnerStore; | sarahwilkes/beaunoise | wp-content/plugins/mojo-marketplace-wp-plugin/vendor/jetpack/jetpack-onboarding/client/stores/spinner-store.js | JavaScript | gpl-2.0 | 1,163 |
/* wd.c: A WD80x3 ethernet driver for linux. */
/*
Written 1993-94 by Donald Becker.
Copyright 1993 United States Government as represented by the
Director, National Security Agency.
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
Annapolis MD 21403
This is a driver for WD8003 and WD8013 "compatible" ethercards.
Thanks to Russ Nelson (nelson@crnwyr.com) for loaning me a WD8013.
Changelog:
Paul Gortmaker : multiple card support for module users, support
for non-standard memory sizes.
*/
static const char version[] =
"wd.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/io.h>
#include <asm/system.h>
#include "8390.h"
#define DRV_NAME "wd"
/* A zero-terminated list of I/O addresses to be probed. */
static unsigned int wd_portlist[] __initdata =
{0x300, 0x280, 0x380, 0x240, 0};
static int wd_probe1(struct net_device *dev, int ioaddr);
static int wd_open(struct net_device *dev);
static void wd_reset_8390(struct net_device *dev);
static void wd_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page);
static void wd_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset);
static void wd_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static int wd_close(struct net_device *dev);
#define WD_START_PG 0x00 /* First page of TX buffer */
#define WD03_STOP_PG 0x20 /* Last page +1 of RX ring */
#define WD13_STOP_PG 0x40 /* Last page +1 of RX ring */
#define WD_CMDREG 0 /* Offset to ASIC command register. */
#define WD_RESET 0x80 /* Board reset, in WD_CMDREG. */
#define WD_MEMENB 0x40 /* Enable the shared memory. */
#define WD_CMDREG5 5 /* Offset to 16-bit-only ASIC register 5. */
#define ISA16 0x80 /* Enable 16 bit access from the ISA bus. */
#define NIC16 0x40 /* Enable 16 bit access from the 8390. */
#define WD_NIC_OFFSET 16 /* Offset to the 8390 from the base_addr. */
#define WD_IO_EXTENT 32
/* Probe for the WD8003 and WD8013. These cards have the station
address PROM at I/O ports <base>+8 to <base>+13, with a checksum
following. A Soundblaster can have the same checksum as an WDethercard,
so we have an extra exclusionary check for it.
The wd_probe1() routine initializes the card and fills the
station address field. */
/*
 * Probe for a WD80x3 board.  If the user supplied an I/O base address,
 * only that address is checked; a base of 0 triggers an autoprobe over
 * wd_portlist[]; any other value below 0x200 disables probing entirely.
 * Returns 0 on success or a negative errno.
 */
static int __init do_wd_probe(struct net_device *dev)
{
	int i;
	struct resource *r;
	int base_addr = dev->base_addr;
	int irq = dev->irq;
	int mem_start = dev->mem_start;
	int mem_end = dev->mem_end;

	SET_MODULE_OWNER(dev);

	if (base_addr > 0x1ff) {	/* Check a user specified location. */
		r = request_region(base_addr, WD_IO_EXTENT, "wd-probe");
		if ( r == NULL)
			return -EBUSY;
		i = wd_probe1(dev, base_addr);
		if (i != 0)
			release_region(base_addr, WD_IO_EXTENT);
		else
			r->name = dev->name;
		return i;
	}
	else if (base_addr != 0)	/* Don't probe at all. */
		return -ENXIO;

	for (i = 0; wd_portlist[i]; i++) {
		int ioaddr = wd_portlist[i];
		r = request_region(ioaddr, WD_IO_EXTENT, "wd-probe");
		if (r == NULL)
			continue;
		if (wd_probe1(dev, ioaddr) == 0) {
			r->name = dev->name;
			return 0;
		}
		release_region(ioaddr, WD_IO_EXTENT);
		/* A failed probe may have clobbered these; restore the
		   user-supplied values before trying the next port. */
		dev->irq = irq;
		dev->mem_start = mem_start;
		dev->mem_end = mem_end;
	}

	return -ENODEV;
}
/* Undo what wd_probe1 did: release the IRQ and the I/O region. */
static void cleanup_card(struct net_device *dev)
{
	free_irq(dev->irq, dev);
	/* base_addr points at the 8390; the region starts at the ASIC regs. */
	release_region(dev->base_addr - WD_NIC_OFFSET, WD_IO_EXTENT);
}
#ifndef MODULE
/*
 * Built-in (non-module) entry point: allocate an 8390 netdev for unit
 * @unit, probe it and register it.  Returns the device or an ERR_PTR
 * on failure.
 */
struct net_device * __init wd_probe(int unit)
{
	struct net_device *dev = alloc_ei_netdev();
	int err;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	sprintf(dev->name, "eth%d", unit);
	netdev_boot_setup_check(dev);	/* pick up "ether=" boot parameters */

	err = do_wd_probe(dev);
	if (err)
		goto out;
	err = register_netdev(dev);
	if (err)
		goto out1;
	return dev;
out1:
	cleanup_card(dev);
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
#endif
/*
 * Low-level probe at @ioaddr: verify the station-address PROM checksum,
 * identify the board model and bus width, determine the IRQ line and
 * shared-memory window, grab the IRQ and fill in ei_status and the
 * netdev fields.  Returns 0 on success or a negative errno.
 */
static int __init wd_probe1(struct net_device *dev, int ioaddr)
{
	int i;
	int checksum = 0;
	int ancient = 0;		/* An old card without config registers. */
	int word16 = 0;			/* 0 = 8 bit, 1 = 16 bit */
	const char *model_name;
	static unsigned version_printed;

	/* The 8 PROM bytes (address + checksum byte) must sum to 0xFF. */
	for (i = 0; i < 8; i++)
		checksum += inb(ioaddr + 8 + i);
	if (inb(ioaddr + 8) == 0xff	/* Extra check to avoid soundcard. */
	    || inb(ioaddr + 9) == 0xff
	    || (checksum & 0xff) != 0xFF)
		return -ENODEV;

	/* Check for semi-valid mem_start/end values if supplied. */
	if ((dev->mem_start % 0x2000) || (dev->mem_end % 0x2000)) {
		printk(KERN_WARNING "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
		dev->mem_start = 0;
		dev->mem_end = 0;
	}

	if (ei_debug && version_printed++ == 0)
		printk(version);

	printk("%s: WD80x3 at %#3x,", dev->name, ioaddr);
	for (i = 0; i < 6; i++)
		printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i));

	/* The following PureData probe code was contributed by
	   Mike Jagdis <jaggy@purplet.demon.co.uk>. Puredata does software
	   configuration differently from others so we have to check for them.
	   This detects an 8 bit, 16 bit or dumb (Toshiba, jumpered) card.
	   */
	if (inb(ioaddr+0) == 'P' && inb(ioaddr+1) == 'D') {
		unsigned char reg5 = inb(ioaddr+5);

		switch (inb(ioaddr+2)) {
		case 0x03: word16 = 0; model_name = "PDI8023-8"; break;
		case 0x05: word16 = 0; model_name = "PDUC8023"; break;
		case 0x0a: word16 = 1; model_name = "PDI8023-16"; break;
			/* Either 0x01 (dumb) or they've released a new version. */
		default: word16 = 0; model_name = "PDI8023"; break;
		}
		dev->mem_start = ((reg5 & 0x1c) + 0xc0) << 12;
		dev->irq = (reg5 & 0xe0) == 0xe0 ? 10 : (reg5 >> 5) + 1;
	} else {	/* End of PureData probe */
		/* This method of checking for a 16-bit board is borrowed from the
		   we.c driver. A simpler method is just to look in ASIC reg. 0x03.
		   I'm comparing the two method in alpha test to make certain they
		   return the same result. */
		/* Check for the old 8 bit board - it has register 0/8 aliasing.
		   Do NOT check i>=6 here -- it hangs the old 8003 boards! */
		for (i = 0; i < 6; i++)
			if (inb(ioaddr+i) != inb(ioaddr+8+i))
				break;
		if (i >= 6) {
			ancient = 1;
			model_name = "WD8003-old";
			word16 = 0;
		} else {
			int tmp = inb(ioaddr+1);	/* fiddle with 16bit bit */
			outb( tmp ^ 0x01, ioaddr+1 );	/* attempt to clear 16bit bit */
			if (((inb( ioaddr+1) & 0x01) == 0x01)	/* A 16 bit card */
			    && (tmp & 0x01) == 0x01 ) {	/* In a 16 slot. */
				int asic_reg5 = inb(ioaddr+WD_CMDREG5);
				/* Magic to set ASIC to word-wide mode. */
				outb( NIC16 | (asic_reg5&0x1f), ioaddr+WD_CMDREG5);
				outb(tmp, ioaddr+1);
				model_name = "WD8013";
				word16 = 1;		/* We have a 16bit board here! */
			} else {
				model_name = "WD8003";
				word16 = 0;
			}
			outb(tmp, ioaddr+1);	/* Restore original reg1 value. */
		}
#ifndef final_version
		if ( !ancient && (inb(ioaddr+1) & 0x01) != (word16 & 0x01))
			printk("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
			       word16 ? 16 : 8, (inb(ioaddr+1) & 0x01) ? 16 : 8);
#endif
	}

#if defined(WD_SHMEM) && WD_SHMEM > 0x80000
	/* Allow a compile-time override. */
	dev->mem_start = WD_SHMEM;
#else
	if (dev->mem_start == 0) {
		/* Sanity and old 8003 check */
		int reg0 = inb(ioaddr);
		if (reg0 == 0xff || reg0 == 0) {
			/* Future plan: this could check a few likely locations first. */
			dev->mem_start = 0xd0000;
			printk(" assigning address %#lx", dev->mem_start);
		} else {
			int high_addr_bits = inb(ioaddr+WD_CMDREG5) & 0x1f;
			/* Some boards don't have the register 5 -- it returns 0xff. */
			if (high_addr_bits == 0x1f || word16 == 0)
				high_addr_bits = 0x01;
			dev->mem_start = ((reg0&0x3f) << 13) + (high_addr_bits << 19);
		}
	}
#endif

	/* The 8390 isn't at the base address -- the ASIC regs are there! */
	dev->base_addr = ioaddr+WD_NIC_OFFSET;

	if (dev->irq < 2) {
		int irqmap[] = {9,3,5,7,10,11,15,4};
		int reg1 = inb(ioaddr+1);
		int reg4 = inb(ioaddr+4);
		if (ancient || reg1 == 0xff) {	/* Ack!! No way to read the IRQ! */
			short nic_addr = ioaddr+WD_NIC_OFFSET;
			unsigned long irq_mask;

			/* We have an old-style ethercard that doesn't report its IRQ
			   line. Do autoirq to find the IRQ line. Note that this IS NOT
			   a reliable way to trigger an interrupt. */
			outb_p(E8390_NODMA + E8390_STOP, nic_addr);
			outb(0x00, nic_addr+EN0_IMR);	/* Disable all intrs. */

			irq_mask = probe_irq_on();
			outb_p(0xff, nic_addr + EN0_IMR);	/* Enable all interrupts. */
			outb_p(0x00, nic_addr + EN0_RCNTLO);
			outb_p(0x00, nic_addr + EN0_RCNTHI);
			outb(E8390_RREAD+E8390_START, nic_addr); /* Trigger it... */
			mdelay(20);
			dev->irq = probe_irq_off(irq_mask);

			outb_p(0x00, nic_addr+EN0_IMR);	/* Mask all intrs. again. */

			if (ei_debug > 2)
				printk(" autoirq is %d", dev->irq);
			if (dev->irq < 2)
				dev->irq = word16 ? 10 : 5;
		} else
			dev->irq = irqmap[((reg4 >> 5) & 0x03) + (reg1 & 0x04)];
	} else if (dev->irq == 2)	/* Fixup bogosity: IRQ2 is really IRQ9 */
		dev->irq = 9;

	/* Snarf the interrupt now. There's no point in waiting since we cannot
	   share and the board will usually be enabled. */
	i = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
	if (i) {
		printk (" unable to get IRQ %d.\n", dev->irq);
		return i;
	}

	/* OK, were are certain this is going to work. Setup the device. */
	ei_status.name = model_name;
	ei_status.word16 = word16;
	ei_status.tx_start_page = WD_START_PG;
	ei_status.rx_start_page = WD_START_PG + TX_PAGES;

	/* Don't map in the shared memory until the board is actually opened. */
	ei_status.rmem_start = dev->mem_start + TX_PAGES*256;

	/* Some cards (eg WD8003EBT) can be jumpered for more (32k!) memory. */
	if (dev->mem_end != 0) {
		ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
	} else {
		ei_status.stop_page = word16 ? WD13_STOP_PG : WD03_STOP_PG;
		dev->mem_end = dev->mem_start + (ei_status.stop_page - WD_START_PG)*256;
	}
	ei_status.rmem_end = dev->mem_end;

	printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
	       model_name, dev->irq, dev->mem_start, dev->mem_end-1);

	ei_status.reset_8390 = &wd_reset_8390;
	ei_status.block_input = &wd_block_input;
	ei_status.block_output = &wd_block_output;
	ei_status.get_8390_hdr = &wd_get_8390_hdr;
	dev->open = &wd_open;
	dev->stop = &wd_close;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = ei_poll;
#endif
	NS8390_init(dev, 0);

#if 1
	/* Enable interrupt generation on softconfig cards -- M.U */
	/* .. but possibly potentially unsafe - Donald */
	if (inb(ioaddr+14) & 0x20)
		outb(inb(ioaddr+4)|0x80, ioaddr+4);
#endif

	return 0;
}
/*
 * Open the interface: map in the shared packet memory (16-bit access
 * first on 16-bit boards, register 0 last for old-board compatibility)
 * and hand off to the generic 8390 open.
 */
static int
wd_open(struct net_device *dev)
{
	int ioaddr = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */

	/* Map in the shared memory. Always set register 0 last to remain
	   compatible with very old boards. */
	ei_status.reg0 = ((dev->mem_start>>13) & 0x3f) | WD_MEMENB;
	ei_status.reg5 = ((dev->mem_start>>19) & 0x1f) | NIC16;

	if (ei_status.word16)
		outb(ei_status.reg5, ioaddr+WD_CMDREG5);
	outb(ei_status.reg0, ioaddr); /* WD_CMDREG */

	ei_open(dev);
	return 0;
}
/*
 * Hard-reset the 8390 core via the ASIC reset bit, then re-program the
 * ASIC shared-memory registers in case the reset disturbed them.
 */
static void
wd_reset_8390(struct net_device *dev)
{
	int wd_cmd_port = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */

	outb(WD_RESET, wd_cmd_port);
	if (ei_debug > 1) printk("resetting the WD80x3 t=%lu...", jiffies);
	ei_status.txing = 0;

	/* Set up the ASIC registers, just in case something changed them. */
	outb((((dev->mem_start>>13) & 0x3f)|WD_MEMENB), wd_cmd_port);
	if (ei_status.word16)
		outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);

	if (ei_debug > 1) printk("reset done\n");
	/* Redundant trailing "return;" in this void function removed. */
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
   we don't need to be concerned with ring wrap as the header will be at
   the start of a page, so we optimize accordingly. */

static void
wd_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
	int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
	unsigned long hdr_start = dev->mem_start + ((ring_page - WD_START_PG)<<8);

	/* We'll always get a 4 byte header read followed by a packet read, so
	   we enable 16 bit mode before the header, and disable after the body. */
	if (ei_status.word16)
		outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5);

#ifdef __BIG_ENDIAN
	/* Officially this is what we are doing, but the readl() is faster */
	/* unfortunately it isn't endian aware of the struct */
	isa_memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
	hdr->count = le16_to_cpu(hdr->count);
#else
	/* Little-endian: a single 32-bit read fills the packed 4-byte header. */
	((unsigned int*)hdr)[0] = isa_readl(hdr_start);
#endif
}
/* Block input and output are easy on shared memory ethercards, and trivial
   on the Western digital card where there is no choice of how to do it.
   The only complications are that the ring buffer wraps, and need to map
   switch between 8- and 16-bit modes. */

static void
wd_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
	int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
	unsigned long xfer_start = dev->mem_start + ring_offset - (WD_START_PG<<8);

	if (xfer_start + count > ei_status.rmem_end) {
		/* We must wrap the input move. */
		int semi_count = ei_status.rmem_end - xfer_start;
		isa_memcpy_fromio(skb->data, xfer_start, semi_count);
		count -= semi_count;
		/* Continue from the start of the receive ring buffer. */
		isa_memcpy_fromio(skb->data + semi_count, ei_status.rmem_start, count);
	} else {
		/* Packet is in one chunk -- we can copy + cksum. */
		isa_eth_io_copy_and_sum(skb, xfer_start, count, 0);
	}

	/* Turn off 16 bit access so that reboot works. ISA brain-damage */
	if (ei_status.word16)
		outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5);
}
/* Copy a packet to be transmitted into the card's shared memory. */
static void
wd_block_output(struct net_device *dev, int count, const unsigned char *buf,
		int start_page)
{
	int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
	long shmem = dev->mem_start + ((start_page - WD_START_PG)<<8);

	if (ei_status.word16) {
		/* Turn on and off 16 bit access so that reboot works. */
		outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5);
		isa_memcpy_toio(shmem, buf, count);
		outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5);
	} else
		isa_memcpy_toio(shmem, buf, count);
}
/*
 * Close the interface: stop the 8390, then restore 8-bit mode and unmap
 * the shared memory so a subsequent (re)boot sees a sane board.
 */
static int
wd_close(struct net_device *dev)
{
	int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */

	if (ei_debug > 1)
		printk("%s: Shutting down ethercard.\n", dev->name);
	ei_close(dev);

	/* Change from 16-bit to 8-bit shared memory so reboot works. */
	if (ei_status.word16)
		outb(ei_status.reg5, wd_cmdreg + WD_CMDREG5 );

	/* And disable the shared memory. */
	outb(ei_status.reg0 & ~WD_MEMENB, wd_cmdreg);

	return 0;
}
#ifdef MODULE
#define MAX_WD_CARDS 4	/* Max number of wd cards per module */

/* Per-card module parameters; index i configures the i-th card. */
static struct net_device *dev_wd[MAX_WD_CARDS];
static int io[MAX_WD_CARDS];
static int irq[MAX_WD_CARDS];
static int mem[MAX_WD_CARDS];
static int mem_end[MAX_WD_CARDS];	/* for non std. mem size */

module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(mem, int, NULL, 0);
module_param_array(mem_end, int, NULL, 0);
MODULE_PARM_DESC(io, "I/O base address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s) (ignored for PureData boards)");
MODULE_PARM_DESC(mem, "memory base address(es)(ignored for PureData boards)");
MODULE_PARM_DESC(mem_end, "memory end address(es)");
MODULE_DESCRIPTION("ISA Western Digital wd8003/wd8013 ; SMC Elite, Elite16 ethernet driver");
MODULE_LICENSE("GPL");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
int
init_module(void)
{
	struct net_device *dev;
	int this_dev, found = 0;

	for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) {
		if (io[this_dev] == 0)  {
			if (this_dev != 0) break; /* only autoprobe 1st one */
			printk(KERN_NOTICE "wd.c: Presently autoprobing (not recommended) for a single card.\n");
		}
		dev = alloc_ei_netdev();
		if (!dev)
			break;
		dev->irq = irq[this_dev];
		dev->base_addr = io[this_dev];
		dev->mem_start = mem[this_dev];
		dev->mem_end = mem_end[this_dev];
		if (do_wd_probe(dev) == 0) {
			if (register_netdev(dev) == 0) {
				dev_wd[found++] = dev;
				continue;
			}
			cleanup_card(dev);
		}
		free_netdev(dev);
		/* Probe or registration failed: report and stop scanning. */
		printk(KERN_WARNING "wd.c: No wd80x3 card found (i/o = 0x%x).\n", io[this_dev]);
		break;
	}
	if (found)
		return 0;
	return -ENXIO;
}
/* Unregister and free every card that init_module() set up. */
void
cleanup_module(void)
{
	int this_dev;

	for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) {
		struct net_device *dev = dev_wd[this_dev];
		if (dev) {
			unregister_netdev(dev);
			cleanup_card(dev);
			free_netdev(dev);
		}
	}
}
#endif /* MODULE */
| lirunlong/linux-comment | drivers/net/wd.c | C | gpl-2.0 | 17,074 |
/*
Copyright (c) 2003-2010, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckfinder.com/license
*/
// Minified registration of the CKFinder "v1" skin: declares the images to
// preload (with extra sprites on IE < 7) and the skin CSS, then hooks the
// dialog "resize" event to re-measure and re-size the dialog contents.
// Do not edit by hand; this file is generated/minified upstream.
CKFINDER.skins.add('v1',(function(){var a=['images/loaders/16x16.gif','images/loaders/32x32.gif','images/ckffolder.gif','images/ckffolderopened.gif'];if(CKFINDER.env.ie&&CKFINDER.env.version<7)a.push('icons.png','images/sprites_ie6.png');return{preload:a,application:{css:['app.css']},fixMainContentWidth:1,fixMainContentWidthValue:-8,marginSidebarContainer:0,host:{intoHostPage:1,css:['host.css']}};})());(function(){CKFINDER.dialog?a():CKFINDER.on('dialogPluginReady',a);function a(){CKFINDER.dialog.on('resize',function(b){var c=b.data,d=c.width,e=c.height,f=c.dialog,g=f.parts.contents;if(c.skin!='v1')return;g.setStyles({width:d+'px',height:e+'px'});setTimeout(function(){var h=f.parts.dialog.getChild([0,0,0]),i=h.getChild(0),j=h.getChild(2);j.setStyle('width',i.$.offsetWidth+'px');j=h.getChild(7);j.setStyle('width',i.$.offsetWidth-28+'px');j=h.getChild(4);j.setStyle('height',i.$.offsetHeight-31-14+'px');j=h.getChild(5);j.setStyle('height',i.$.offsetHeight-31-14+'px');},100);});};})();
| aakb/metropol-danmark | sites/all/modules/contrib/ckeditor/ckfinder/skins/v1/skin.js | JavaScript | gpl-2.0 | 1,146 |
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*-
*
* Copyright (C) 2009 Richard Hughes <richard@hughsie.com>
*
* Licensed under the GNU Lesser General Public License Version 2.1
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <packagekit-glib2/pk-enum.h>
#include <packagekit-glib2/pk-results.h>
#include <packagekit-glib2/pk-package-id.h>
#include "pk-task-wrapper.h"
static void pk_task_wrapper_finalize (GObject *object);
#define PK_TASK_WRAPPER_GET_PRIVATE(o) (G_TYPE_INSTANCE_GET_PRIVATE ((o), PK_TYPE_TASK_WRAPPER, PkTaskWrapperPrivate))
/**
* PkTaskWrapperPrivate:
*
* Private #PkTaskWrapper data
**/
struct _PkTaskWrapperPrivate
{
gpointer user_data;
};
G_DEFINE_TYPE (PkTaskWrapper, pk_task_wrapper, PK_TYPE_TASK)
/**
* pk_task_wrapper_untrusted_question:
**/
static void
pk_task_wrapper_untrusted_question (PkTask *task, guint request, PkResults *results)
{
PkTaskWrapperPrivate *priv = PK_TASK_WRAPPER(task)->priv;
/* set some user data, for no reason */
priv->user_data = NULL;
g_print ("UNTRUSTED\n");
/* just accept without asking */
pk_task_user_accepted (task, request);
}
/**
* pk_task_wrapper_key_question:
**/
static void
pk_task_wrapper_key_question (PkTask *task, guint request, PkResults *results)
{
PkTaskWrapperPrivate *priv = PK_TASK_WRAPPER(task)->priv;
/* set some user data, for no reason */
priv->user_data = NULL;
/* just accept without asking */
pk_task_user_accepted (task, request);
}
/**
* pk_task_wrapper_eula_question:
**/
static void
pk_task_wrapper_eula_question (PkTask *task, guint request, PkResults *results)
{
PkTaskWrapperPrivate *priv = PK_TASK_WRAPPER(task)->priv;
/* set some user data, for no reason */
priv->user_data = NULL;
/* just accept without asking */
pk_task_user_accepted (task, request);
}
/**
* pk_task_wrapper_media_change_question:
**/
static void
pk_task_wrapper_media_change_question (PkTask *task, guint request, PkResults *results)
{
PkTaskWrapperPrivate *priv = PK_TASK_WRAPPER(task)->priv;
/* set some user data, for no reason */
priv->user_data = NULL;
/* just accept without asking */
pk_task_user_accepted (task, request);
}
/**
* pk_task_wrapper_simulate_question:
**/
static void
pk_task_wrapper_simulate_question (PkTask *task, guint request, PkResults *results)
{
guint i;
GPtrArray *array;
const gchar *package_id;
gchar *printable;
gchar *summary;
PkPackage *package;
PkPackageSack *sack;
PkInfoEnum info;
PkTaskWrapperPrivate *priv = PK_TASK_WRAPPER(task)->priv;
/* set some user data, for no reason */
priv->user_data = NULL;
/* get data */
sack = pk_results_get_package_sack (results);
/* print data */
array = pk_package_sack_get_array (sack);
for (i=0; i<array->len; i++) {
package = g_ptr_array_index (array, i);
g_object_get (package,
"info", &info,
"summary", &summary,
NULL);
package_id = pk_package_get_id (package);
printable = pk_package_id_to_printable (package_id);
g_print ("%s\t%s\t%s\n", pk_info_enum_to_string (info), printable, summary);
g_free (summary);
g_free (printable);
}
/* just accept without asking */
pk_task_user_accepted (task, request);
g_object_unref (sack);
g_ptr_array_unref (array);
}
/**
* pk_task_wrapper_class_init:
**/
static void
pk_task_wrapper_class_init (PkTaskWrapperClass *klass)
{
GObjectClass *object_class = G_OBJECT_CLASS (klass);
PkTaskClass *task_class = PK_TASK_CLASS (klass);
object_class->finalize = pk_task_wrapper_finalize;
task_class->untrusted_question = pk_task_wrapper_untrusted_question;
task_class->key_question = pk_task_wrapper_key_question;
task_class->eula_question = pk_task_wrapper_eula_question;
task_class->media_change_question = pk_task_wrapper_media_change_question;
task_class->simulate_question = pk_task_wrapper_simulate_question;
g_type_class_add_private (klass, sizeof (PkTaskWrapperPrivate));
}
/**
* pk_task_wrapper_init:
* @task_wrapper: This class instance
**/
static void
pk_task_wrapper_init (PkTaskWrapper *task)
{
task->priv = PK_TASK_WRAPPER_GET_PRIVATE (task);
task->priv->user_data = NULL;
}
/**
* pk_task_wrapper_finalize:
* @object: The object to finalize
**/
static void
pk_task_wrapper_finalize (GObject *object)
{
PkTaskWrapper *task = PK_TASK_WRAPPER (object);
task->priv->user_data = NULL;
G_OBJECT_CLASS (pk_task_wrapper_parent_class)->finalize (object);
}
/**
* pk_task_wrapper_new:
*
* Return value: a new PkTaskWrapper object.
**/
PkTaskWrapper *
pk_task_wrapper_new (void)
{
PkTaskWrapper *task;
task = g_object_new (PK_TYPE_TASK_WRAPPER, NULL);
return PK_TASK_WRAPPER (task);
}
| axaxs/PackageKit-0.8.17 | lib/packagekit-glib2/pk-task-wrapper.c | C | gpl-2.0 | 5,384 |
/**
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
package org.cspoker.server.rmi;
import java.io.IOException;
import org.apache.log4j.Logger;
import org.cspoker.common.util.Log4JPropertiesLoader;
import org.cspoker.server.embedded.CSPokerServerImpl;
public class RunRMIServer {

	/**
	 * Keep a static reference to the server so the exported RMI object is
	 * not garbage collected while the JVM runs.
	 */
	static RMIServer server;

	// Load the log4j configuration before the Logger below is created, so
	// the logger picks up the module's logging properties.
	static {
		Log4JPropertiesLoader
				.load("org/cspoker/server/rmi/logging/log4j.properties");
	}

	private final static Logger logger = Logger.getLogger(RunRMIServer.class);

	/**
	 * Start an RMI poker server on the port given as the single command
	 * line argument.
	 */
	public static void main(String[] args) throws NumberFormatException,
			IOException {
		if (args.length != 1) {
			usage();
		}
		final int port = Integer.parseInt(args[0]);
		logger.info("Starting RMI server at port " + port);
		// Assign to the static field first (keeps a reachable reference,
		// preventing GC of the exported server), then start it.
		server = new RMIServer(port, new CSPokerServerImpl());
		server.start();
	}

	/** Print usage information and terminate the JVM. */
	private static void usage() {
		logger.fatal("usage: java -jar cspoker-server-rmi.jar [portnumber]");
		System.exit(0);
	}
}
| BeyondTheBoundary/cspoker | server/rmi/src/main/java/org/cspoker/server/rmi/RunRMIServer.java | Java | gpl-2.0 | 1,647 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_25) on Wed Jul 23 10:47:42 EDT 2014 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Constant Field Values</title>
<meta name="date" content="2014-07-23">
<link rel="stylesheet" type="text/css" href="stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Constant Field Values";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="overview-summary.html">Overview</a></li>
<li>Package</li>
<li>Class</li>
<li><a href="overview-tree.html">Tree</a></li>
<li><a href="deprecated-list.html">Deprecated</a></li>
<li><a href="index-files/index-1.html">Index</a></li>
<li><a href="help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="index.html?constant-values.html" target="_top">Frames</a></li>
<li><a href="constant-values.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h1 title="Constant Field Values" class="title">Constant Field Values</h1>
<h2 title="Contents">Contents</h2>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="overview-summary.html">Overview</a></li>
<li>Package</li>
<li>Class</li>
<li><a href="overview-tree.html">Tree</a></li>
<li><a href="deprecated-list.html">Deprecated</a></li>
<li><a href="index-files/index-1.html">Index</a></li>
<li><a href="help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="index.html?constant-values.html" target="_top">Frames</a></li>
<li><a href="constant-values.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
| cosimoiaia/pyAiml-2.0 | doc/program-ab-reference-doc/constant-values.html | HTML | gpl-2.0 | 3,527 |
#######################################################################
#
# Author: Gabi Roeger
# Modified by: Silvia Richter (silvia.richter@nicta.com.au)
# (C) Copyright 2008: Gabi Roeger and NICTA
#
# This file is part of LAMA.
#
# LAMA is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the license, or (at your option) any later version.
#
# LAMA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
#######################################################################
import string
import conditions
def parse_expression(exp):
    """Parse a PDDL numeric expression.

    exp is either a list ["symbol", term...] (a function application),
    a numeric literal string, or a bare function symbol; returns the
    matching PrimitiveNumericExpression or NumericConstant.
    """
    if isinstance(exp, list):
        functionsymbol = exp[0]
        return PrimitiveNumericExpression(functionsymbol,
                                          [conditions.parse_term(arg) for arg in exp[1:]])
    elif exp.replace(".","").isdigit():
        # float() replaces the long-deprecated string.atof (removed in
        # Python 3) and behaves identically on the digit strings that
        # pass the check above.
        return NumericConstant(float(exp))
    else:
        # A bare symbol is a 0-ary function application.
        return PrimitiveNumericExpression(exp,[])
def parse_assignment(alist):
    """Parse a three-element PDDL assignment list into an effect object."""
    assert len(alist) == 3
    op, head_part, exp_part = alist
    head = parse_expression(head_part)
    exp = parse_expression(exp_part)
    if op == "=":
        return Assign(head, exp)
    if op == "increase":
        return Increase(head, exp)
    assert False, "Assignment operator not supported."
class FunctionalExpression(object):
    """Base class for PDDL numeric expressions.

    Sub-expressions are kept in the ``parts`` tuple; the class provides
    the generic recursive ``dump`` used for debugging output.
    """
    def __init__(self, parts):
        self.parts = tuple(parts)
    def dump(self, indent=" "):
        # Print this node, then all children one indent level deeper.
        print "%s%s" % (indent, self._dump())
        for part in self.parts:
            part.dump(indent + " ")
    def _dump(self):
        # Default node label; subclasses override for richer output.
        return self.__class__.__name__
    def instantiate(self, var_mapping, init_facts):
        # Only normalized subclasses (NumericConstant and
        # PrimitiveNumericExpression) support instantiation.
        raise ValueError("Cannot instantiate condition: not normalized")
class NumericConstant(FunctionalExpression):
    """A literal numeric value inside a PDDL expression."""
    parts = ()  # constants have no sub-expressions
    def __init__(self, value):
        self.value = value
    def __eq__(self, other):
        return (self.__class__ == other.__class__ and self.value == other.value)
    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; without this,
        # "a != b" would fall back to identity comparison and could
        # disagree with __eq__.
        return not self == other
    def __str__(self):
        return "%s %s" % (self.__class__.__name__, self.value)
    def _dump(self):
        return str(self)
    def instantiate(self, var_mapping, init_facts):
        # A constant instantiates to itself.
        return self
class PrimitiveNumericExpression(FunctionalExpression):
    """Application of a function symbol to a tuple of terms (a "PNE")."""
    parts = ()  # arguments are stored in ``args`` rather than ``parts``
    def __init__(self, symbol, args):
        self.symbol = symbol
        self.args = tuple(args)
    def __eq__(self, other):
        # Equal iff class, symbol and all arguments match pairwise.
        if not (self.__class__ == other.__class__ and self.symbol == other.symbol
                and len(self.args) == len(other.args)):
            return False
        else:
            for s,o in zip(self.args, other.args):
                if not s == o:
                    return False
            return True
    def __str__(self):
        return "%s %s(%s)" % ("PNE", self.symbol, ", ".join(map(str, self.args)))
    def dump(self, indent=" "):
        # Print this node, then its argument terms one level deeper.
        print "%s%s" % (indent, self._dump())
        for arg in self.args:
            arg.dump(indent + " ")
    def _dump(self):
        return str(self)
    def instantiate(self, var_mapping, init_facts):
        """Ground the arguments via var_mapping and look the resulting
        fluent up among the initial-state assignments, returning its
        initial value expression.

        Raises AssertionError if no initial assignment matches, or if
        this PNE is the special "total-cost" fluent (which must never be
        instantiated this way).
        """
        args = [conditions.ObjectTerm(var_mapping.get(arg.name, arg.name)) for arg in self.args]
        pne = PrimitiveNumericExpression(self.symbol, args)
        assert not self.symbol == "total-cost"
        # We know this expression is constant. Substitute it by corresponding
        # initialization from task.
        for fact in init_facts:
            if isinstance(fact, FunctionAssignment):
                if fact.fluent == pne:
                    return fact.expression
        assert False, "Could not find instantiation for PNE!"
class FunctionAssignment(object):
    """Base class for effects assigning a value to a numeric fluent.

    Concrete operators are the subclasses Assign ("=") and Increase.
    """
    def __init__(self, fluent, expression):
        # fluent: the PrimitiveNumericExpression being written.
        # expression: the value expression assigned to it.
        self.fluent = fluent
        self.expression = expression
    def __str__(self):
        return "%s %s %s" % (self.__class__.__name__, self.fluent, self.expression)
    def dump(self, indent=" "):
        # Print the operator, then the fluent and value sub-trees.
        print "%s%s" % (indent, self._dump())
        self.fluent.dump(indent + " ")
        self.expression.dump(indent + " ")
    def _dump(self):
        return self.__class__.__name__
    def instantiate(self, var_mapping, init_facts):
        """Return a grounded copy of this assignment.

        Only normalized value expressions are supported; raises
        ValueError otherwise.  Preserves the concrete subclass via
        self.__class__.
        """
        if not (isinstance(self.expression, PrimitiveNumericExpression) or
                isinstance(self.expression, NumericConstant)):
            raise ValueError("Cannot instantiate assignment: not normalized")
        # We know that this assignment is a cost effect of an action (for initial state
        # assignments, "instantiate" is not called). Hence, we know that the fluent is
        # the 0-ary "total-cost" which does not need to be instantiated
        assert self.fluent.symbol == "total-cost"
        fluent = self.fluent
        expression = self.expression.instantiate(var_mapping, init_facts)
        return self.__class__(fluent, expression)
class Assign(FunctionAssignment):
    """Effect that sets ``fluent`` to the value of ``expression``."""
    def __str__(self):
        return "%s := %s" % (self.fluent, self.expression)
class Increase(FunctionAssignment):
    """Effect that increases ``fluent`` by ``expression`` (action costs)."""
    pass
| PlanTool/plantool | wrappingPlanners/Deterministic/LAMA/seq-sat-lama/lama/translate/pddl/f_expression.py | Python | gpl-2.0 | 5,321 |
/**
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
package org.cspoker.ai.opponentmodels.weka;
import org.cspoker.common.elements.player.PlayerId;
import org.cspoker.common.util.Pair;
import org.cspoker.common.util.Triple;
import org.cspoker.ai.opponentmodels.weka.Propositionalizer;
import org.cspoker.ai.opponentmodels.weka.WekaModel;
import org.cspoker.ai.opponentmodels.weka.WekaRegressionModel;
import weka.classifiers.Classifier;
import weka.core.Instance;
/**
 * Opponent model backed by fourteen Weka regression classifiers: one per
 * opponent action pre-flop and post-flop (bet, fold, call, raise), plus one
 * showdown model per partition 0..5.  Each classifier's raw prediction is
 * clamped to [0, 1] before being interpreted as a probability.
 */
public class WekaRegressionModel extends WekaModel {
	// Pre-flop action models (used when props.getRound() is "preflop").
	protected Classifier preBetModel;
	protected Classifier preFoldModel;
	protected Classifier preCallModel;
	protected Classifier preRaiseModel;
	// Post-flop action models (all other rounds).
	protected Classifier postBetModel;
	protected Classifier postFoldModel;
	protected Classifier postCallModel;
	protected Classifier postRaiseModel;
	// Showdown models, one per bucket index 0..5.
	protected Classifier showdown0Model;
	protected Classifier showdown1Model;
	protected Classifier showdown2Model;
	protected Classifier showdown3Model;
	protected Classifier showdown4Model;
	protected Classifier showdown5Model;

	/**
	 * Creates a model from fourteen trained classifiers, in the fixed order:
	 * pre-flop bet/fold/call/raise, post-flop bet/fold/call/raise,
	 * showdown 0 through 5.
	 */
	public WekaRegressionModel(Classifier preBetModel, Classifier preFoldModel, Classifier preCallModel, Classifier preRaiseModel, Classifier postBetModel,
			Classifier postFoldModel, Classifier postCallModel, Classifier postRaiseModel, Classifier showdown0Model, Classifier showdown1Model,
			Classifier showdown2Model, Classifier showdown3Model, Classifier showdown4Model, Classifier showdown5Model) {
		this.preBetModel = preBetModel;
		this.preFoldModel = preFoldModel;
		this.preCallModel = preCallModel;
		this.preRaiseModel = preRaiseModel;
		this.postBetModel = postBetModel;
		this.postFoldModel = postFoldModel;
		this.postCallModel = postCallModel;
		this.postRaiseModel = postRaiseModel;
		this.showdown0Model = showdown0Model;
		this.showdown1Model = showdown1Model;
		this.showdown2Model = showdown2Model;
		this.showdown3Model = showdown3Model;
		this.showdown4Model = showdown4Model;
		this.showdown5Model = showdown5Model;
	}

	/**
	 * Copy constructor; shares (does not clone) the classifier instances of
	 * the given model.
	 */
	public WekaRegressionModel(WekaRegressionModel model) {
		this.preBetModel = model.preBetModel;
		this.preFoldModel = model.preFoldModel;
		this.preCallModel = model.preCallModel;
		this.preRaiseModel = model.preRaiseModel;
		this.postBetModel = model.postBetModel;
		this.postFoldModel = model.postFoldModel;
		this.postCallModel = model.postCallModel;
		this.postRaiseModel = model.postRaiseModel;
		this.showdown0Model = model.showdown0Model;
		this.showdown1Model = model.showdown1Model;
		this.showdown2Model = model.showdown2Model;
		this.showdown3Model = model.showdown3Model;
		this.showdown4Model = model.showdown4Model;
		this.showdown5Model = model.showdown5Model;
	}

	// NOTE(review): the first eight entries append each model's full textual
	// description while the last six append only its string LENGTH -- this
	// looks like leftover debugging; confirm which form is intended.
	@Override
	public String toString() {
		String str = "";
		str += "preBetModel " + preBetModel.toString(); // (preBetModel == null?"NULL":"OK");
		str += "\npreFoldModel " + preFoldModel.toString(); // (preFoldModel == null?"NULL":"OK");
		str += "\npreCallModel " + preCallModel.toString(); // (preCallModel == null?"NULL":"OK");
		str += "\npreRaiseModel " + preRaiseModel.toString(); // (preRaiseModel == null?"NULL":"OK");
		str += "\npostBetModel " + postBetModel.toString(); // (postBetModel == null?"NULL":"OK");
		str += "\npostFoldModel " + postFoldModel.toString(); // (postFoldModel == null?"NULL":"OK");
		str += "\npostCallModel " + postCallModel.toString(); // (postCallModel == null?"NULL":"OK");
		str += "\npostRaiseModel " + postRaiseModel.toString().length(); // (postRaiseModel == null?"NULL":"OK");
		str += "\nshowdown0Model " + showdown0Model.toString().length(); // (showdown0Model == null?"NULL":"OK");
		str += "\nshowdown1Model " + showdown1Model.toString().length(); // (showdown1Model == null?"NULL":"OK");
		str += "\nshowdown2Model " + showdown2Model.toString().length(); // (showdown2Model == null?"NULL":"OK");
		str += "\nshowdown3Model " + showdown3Model.toString().length(); // (showdown3Model == null?"NULL":"OK");
		str += "\nshowdown4Model " + showdown4Model.toString().length(); // (showdown4Model == null?"NULL":"OK");
		str += "\nshowdown5Model " + showdown5Model.toString().length(); // (showdown5Model == null?"NULL":"OK");
		return str;
	}

	/**
	 * Predicts the check/bet behaviour of {@code actor} in the current state.
	 * The bet-model prediction p is clamped to [0, 1]; the returned pair is
	 * (1 - p, p) -- presumably (P(check), P(bet)).
	 *
	 * @throws IllegalStateException if the prediction is NaN/infinite or the
	 *         underlying classifier fails.
	 */
	public Pair<Double, Double> getCheckBetProbabilities(PlayerId actor, Propositionalizer props) {
		Instance instance;
		// Select the feature representation and model for the current round.
		if ("preflop".equals(props.getRound())) {
			instance = getPreCheckBetInstance(actor, props);
		} else {
			instance = getPostCheckBetInstance(actor, props);
		}
		try {
			double prediction;
			if ("preflop".equals(props.getRound())) {
				prediction = preBetModel.classifyInstance(instance);
			} else {
				prediction = postBetModel.classifyInstance(instance);
			}
			// Clamp the raw regression output into [0, 1].
			double prob = Math.min(1, Math.max(0, prediction));
			if (Double.isNaN(prob) || Double.isInfinite(prob)) {
				throw new IllegalStateException("Bad probability: " + prob);
			}
			Pair<Double, Double> result = new Pair<Double, Double>(1 - prob, prob);
			if (logger.isTraceEnabled()) {
				logger.trace(instance + ": " + result);
			}
			return result;
		} catch (Exception e) {
			throw new IllegalStateException(e.toString() + "\n" + actor + " " + props.getRound() + ": " + instance.toString(), e);
		}
	}

	/**
	 * Predicts fold/call/raise behaviour of {@code actor}.  Each of the three
	 * model outputs is clamped to [0, 1] and the triple is normalized so the
	 * probabilities sum to 1.
	 *
	 * @throws IllegalStateException if the clamped predictions sum to 0 or
	 *         NaN/infinity, or a classifier fails.
	 */
	public Triple<Double, Double, Double> getFoldCallRaiseProbabilities(PlayerId actor, Propositionalizer props) {
		Instance instance;
		boolean preflop = "preflop".equals(props.getRound());
		if (preflop) {
			instance = getPreFoldCallRaiseInstance(actor, props);
		} else {
			instance = getPostFoldCallRaiseInstance(actor, props);
		}
		try {
			double probFold;
			if (preflop) {
				probFold = preFoldModel.classifyInstance(instance);
			} else {
				probFold = postFoldModel.classifyInstance(instance);
			}
			probFold = Math.min(1, Math.max(0, probFold));
			double probCall;
			if (preflop) {
				probCall = preCallModel.classifyInstance(instance);
			} else {
				probCall = postCallModel.classifyInstance(instance);
			}
			probCall = Math.min(1, Math.max(0, probCall));
			double probRaise;
			if (preflop) {
				probRaise = preRaiseModel.classifyInstance(instance);
			} else {
				probRaise = postRaiseModel.classifyInstance(instance);
			}
			probRaise = Math.min(1, Math.max(0, probRaise));
			// Renormalize the three clamped predictions to a distribution.
			double sum = probFold + probCall + probRaise;
			if (Double.isNaN(sum) || sum == 0 || Double.isInfinite(sum)) {
				throw new IllegalStateException("Bad probabilities: " + probFold + " (probFold), " + probCall + " (probCall), " + probRaise + " (probRaise)");
			}
			Triple<Double, Double, Double> result = new Triple<Double, Double, Double>(probFold / sum, probCall / sum, probRaise / sum);
			if (logger.isTraceEnabled()) {
				logger.trace(instance + ": " + result);
			}
			return result;
		} catch (Exception e) {
			throw new IllegalStateException(e.toString() + "\n" + actor + " " + props.getRound() + ": " + instance.toString(), e);
		}
	}

	/**
	 * Returns the six showdown-model predictions for {@code actor}, each
	 * clamped to [0, 1].  Unlike fold/call/raise, the array is NOT
	 * renormalized to sum to 1.
	 */
	public double[] getShowdownProbabilities(PlayerId actor, Propositionalizer props) {
		Instance instance = getShowdownInstance(actor, props);
		try {
			double[] prob = {
					Math.min(1,Math.max(0, showdown0Model.classifyInstance(instance))),
					Math.min(1,Math.max(0, showdown1Model.classifyInstance(instance))),
					Math.min(1,Math.max(0, showdown2Model.classifyInstance(instance))),
					Math.min(1,Math.max(0, showdown3Model.classifyInstance(instance))),
					Math.min(1,Math.max(0, showdown4Model.classifyInstance(instance))),
					Math.min(1,Math.max(0, showdown5Model.classifyInstance(instance))),
			};
			if (logger.isTraceEnabled()) {
				logger.trace(instance + ": " + prob);
			}
			return prob;
		} catch (Exception e) {
			throw new IllegalStateException(instance.toString(), e);
		}
	}

	// --- Plain accessors for the fourteen classifiers. ---

	public Classifier getPreBetModel() {
		return preBetModel;
	}
	public void setPreBetModel(Classifier preBetModel) {
		this.preBetModel = preBetModel;
	}
	public Classifier getPreFoldModel() {
		return preFoldModel;
	}
	public void setPreFoldModel(Classifier preFoldModel) {
		this.preFoldModel = preFoldModel;
	}
	public Classifier getPreCallModel() {
		return preCallModel;
	}
	public void setPreCallModel(Classifier preCallModel) {
		this.preCallModel = preCallModel;
	}
	public Classifier getPreRaiseModel() {
		return preRaiseModel;
	}
	public void setPreRaiseModel(Classifier preRaiseModel) {
		this.preRaiseModel = preRaiseModel;
	}
	public Classifier getPostBetModel() {
		return postBetModel;
	}
	public void setPostBetModel(Classifier postBetModel) {
		this.postBetModel = postBetModel;
	}
	public Classifier getPostFoldModel() {
		return postFoldModel;
	}
	public void setPostFoldModel(Classifier postFoldModel) {
		this.postFoldModel = postFoldModel;
	}
	public Classifier getPostCallModel() {
		return postCallModel;
	}
	public void setPostCallModel(Classifier postCallModel) {
		this.postCallModel = postCallModel;
	}
	public Classifier getPostRaiseModel() {
		return postRaiseModel;
	}
	public void setPostRaiseModel(Classifier postRaiseModel) {
		this.postRaiseModel = postRaiseModel;
	}
	public Classifier getShowdown0Model() {
		return showdown0Model;
	}
	public void setShowdown0Model(Classifier showdown0Model) {
		this.showdown0Model = showdown0Model;
	}
	public Classifier getShowdown1Model() {
		return showdown1Model;
	}
	public void setShowdown1Model(Classifier showdown1Model) {
		this.showdown1Model = showdown1Model;
	}
	public Classifier getShowdown2Model() {
		return showdown2Model;
	}
	public void setShowdown2Model(Classifier showdown2Model) {
		this.showdown2Model = showdown2Model;
	}
	public Classifier getShowdown3Model() {
		return showdown3Model;
	}
	public void setShowdown3Model(Classifier showdown3Model) {
		this.showdown3Model = showdown3Model;
	}
	public Classifier getShowdown4Model() {
		return showdown4Model;
	}
	public void setShowdown4Model(Classifier showdown4Model) {
		this.showdown4Model = showdown4Model;
	}
	public Classifier getShowdown5Model() {
		return showdown5Model;
	}
	public void setShowdown5Model(Classifier showdown5Model) {
		this.showdown5Model = showdown5Model;
	}
}
| BeyondTheBoundary/cspoker | ai/opponentmodels/weka/src/main/java/org/cspoker/ai/opponentmodels/weka/WekaRegressionModel.java | Java | gpl-2.0 | 10,668 |
<!--
tagline: Host your own composer repository
-->
# Handling private packages with Satis or Toran Proxy
# Toran Proxy
[Toran Proxy](https://toranproxy.com/) is a commercial alternative to Satis
offering professional support as well as a web UI to manage everything and a
better integration with Composer. It also provides proxying/mirroring for git
repos and package zip files which makes installs faster and independent from
third party systems.
Toran's revenue is also used to pay for Composer and Packagist development and
hosting so using it is a good way to support open source financially. You can
find more information about how to set it up and use it on the [Toran Proxy](https://toranproxy.com/) website.
# Satis
Satis on the other hand is open source but only a static `composer`
repository generator. It is a bit like an ultra-lightweight, static file-based
version of packagist and can be used to host the metadata of your company's
private packages, or your own. You can get it from [GitHub](http://github.com/composer/satis)
or install via CLI:
`php composer.phar create-project composer/satis --stability=dev --keep-vcs`.
## Setup
For example let's assume you have a few packages you want to reuse across your
company but don't really want to open-source. You would first define a Satis
configuration: a json file with an arbitrary name that lists your curated
[repositories](../05-repositories.md).
Here is an example configuration, you see that it holds a few VCS repositories,
but those could be any types of [repositories](../05-repositories.md). Then it
uses `"require-all": true` which selects all versions of all packages in the
repositories you defined.
The default file Satis looks for is `satis.json` in the root of the repository.
```json
{
"name": "My Repository",
"homepage": "http://packages.example.org",
"repositories": [
{ "type": "vcs", "url": "http://github.com/mycompany/privaterepo" },
{ "type": "vcs", "url": "http://svn.example.org/private/repo" },
{ "type": "vcs", "url": "http://github.com/mycompany/privaterepo2" }
],
"require-all": true
}
```
If you want to cherry pick which packages you want, you can list all the packages
you want to have in your satis repository inside the classic composer `require` key,
using a `"*"` constraint to make sure all versions are selected, or another
constraint if you want really specific versions.
```json
{
"repositories": [
{ "type": "vcs", "url": "http://github.com/mycompany/privaterepo" },
{ "type": "vcs", "url": "http://svn.example.org/private/repo" },
{ "type": "vcs", "url": "http://github.com/mycompany/privaterepo2" }
],
"require": {
"company/package": "*",
"company/package2": "*",
"company/package3": "2.0.0"
}
}
```
Once you've done this, you just run `php bin/satis build <configuration file> <build dir>`.
For example `php bin/satis build config.json web/` would read the `config.json`
file and build a static repository inside the `web/` directory.
When you ironed out that process, what you would typically do is run this
command as a cron job on a server. It would then update all your package info
much like Packagist does.
Note that if your private packages are hosted on GitHub, your server should have
an ssh key that gives it access to those packages, and then you should add
the `--no-interaction` (or `-n`) flag to the command to make sure it falls back
to ssh key authentication instead of prompting for a password. This is also a
good trick for continuous integration servers.
Set up a virtual-host that points to that `web/` directory, let's say it is
`packages.example.org`. Alternatively, with PHP >= 5.4.0, you can use the built-in
CLI server `php -S localhost:port -t satis-output-dir/` for a temporary solution.
## Usage
In your projects all you need to add now is your own composer repository using
the `packages.example.org` as URL, then you can require your private packages and
everything should work smoothly. You don't need to copy all your repositories
in every project anymore. Only that one unique repository that will update
itself.
```json
{
"repositories": [ { "type": "composer", "url": "http://packages.example.org/" } ],
"require": {
"company/package": "1.2.0",
"company/package2": "1.5.2",
"company/package3": "dev-master"
}
}
```
### Security
To secure your private repository you can host it over SSH or SSL using a client
certificate. In your project you can use the `options` parameter to specify the
connection options for the server.
Example using a custom repository using SSH (requires the SSH2 PECL extension):
```json
{
"repositories": [
{
"type": "composer",
"url": "ssh2.sftp://example.org",
"options": {
"ssh2": {
"username": "composer",
"pubkey_file": "/home/composer/.ssh/id_rsa.pub",
"privkey_file": "/home/composer/.ssh/id_rsa"
}
}
}
]
}
```
> **Tip:** See [ssh2 context options](http://www.php.net/manual/en/wrappers.ssh2.php#refsect1-wrappers.ssh2-options) for more information.
Example using HTTP over SSL using a client certificate:
```json
{
"repositories": [
{
"type": "composer",
"url": "https://example.org",
"options": {
"ssl": {
"local_cert": "/home/composer/.ssl/composer.pem"
}
}
}
]
}
```
> **Tip:** See [ssl context options](http://www.php.net/manual/en/context.ssl.php) for more information.
### Authentication
When your private repositories are password protected, you can store the authentication details permanently.
The first time Composer needs to authenticate against some domain it will prompt you for a username/password
and then you will be asked whether you want to store it.
The storage can be done either globally in the `COMPOSER_HOME/auth.json` file (`COMPOSER_HOME` defaults to
`~/.composer` or `%APPDATA%/Composer` on Windows) or also in the project directory directly sitting besides your
composer.json.
You can also configure these by hand using the config command if you need to configure a production machine
to be able to run non-interactive installs. For example to enter credentials for example.org one could type:
composer config http-basic.example.org username password
That will store it in the current directory's auth.json, but if you want it available globally you can use the
`--global` (`-g`) flag.
### Downloads
When GitHub or BitBucket repositories are mirrored on your local satis, the build process will include
the location of the downloads these platforms make available. This means that the repository and your setup depend
on the availability of these services.
At the same time, this implies that all code which is hosted somewhere else (on another service or for example in
Subversion) will not have downloads available and thus installations usually take a lot longer.
To enable your satis installation to create downloads for all (Git, Mercurial and Subversion) your packages, add the
following to your `satis.json`:
```json
{
"archive": {
"directory": "dist",
"format": "tar",
"prefix-url": "https://amazing.cdn.example.org",
"skip-dev": true
}
}
```
#### Options explained
* `directory`: the location of the dist files (inside the `output-dir`)
* `format`: optional, `zip` (default) or `tar`
* `prefix-url`: optional, location of the downloads, homepage (from `satis.json`) followed by `directory` by default
* `skip-dev`: optional, `false` by default, when enabled (`true`) satis will not create downloads for branches
Once enabled, all downloads (include those from GitHub and BitBucket) will be replaced with a _local_ version.
#### prefix-url
Prefixing the URL with another host is especially helpful if the downloads end up in a private Amazon S3
bucket or on a CDN host. A CDN would drastically improve download times and therefore package installation.
Example: A `prefix-url` of `http://my-bucket.s3.amazonaws.com` (and `directory` set to `dist`) creates download URLs
which look like the following: `http://my-bucket.s3.amazonaws.com/dist/vendor-package-version-ref.zip`.
### Resolving dependencies
It is possible to make satis automatically resolve and add all dependencies for your projects. This can be used
with the Downloads functionality to have a complete local mirror of packages. Just add the following
to your `satis.json`:
```json
{
"require-dependencies": true,
"require-dev-dependencies": true
}
```
When searching for packages, satis will attempt to resolve all the required packages from the listed repositories.
Therefore, if you are requiring a package from Packagist, you will need to define it in your `satis.json`.
Dev dependencies are packaged only if the `require-dev-dependencies` parameter is set to true.
| FPLD/project0 | vendor/composer/composer/doc/articles/handling-private-packages-with-satis.md | Markdown | gpl-2.0 | 9,101 |
# Kbuild rules for the MSM KGSL GPU driver: a shared core module plus
# the Adreno and Z180 (2D) hardware back-ends.

ccflags-y := -Iinclude/drm

# Core driver objects shared by all back-ends.
msm_kgsl_core-y = \
	kgsl.o \
	kgsl_sharedmem.o \
	kgsl_pwrctrl.o \
	kgsl_pwrscale.o

# Optional core features, selected by kernel config.
msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o
msm_kgsl_core-$(CONFIG_MSM_KGSL_MMU) += kgsl_mmu.o
msm_kgsl_core-$(CONFIG_MSM_KGSL_CFF_DUMP) += kgsl_cffdump.o
msm_kgsl_core-$(CONFIG_MSM_KGSL_DRM) += kgsl_drm.o
msm_kgsl_core-$(CONFIG_MSM_SCM) += kgsl_pwrscale_trustzone.o

# Adreno back-end objects.
msm_adreno-y += \
	adreno_ringbuffer.o \
	adreno_drawctxt.o \
	adreno_postmortem.o \
	adreno_a2xx.o \
	adreno.o
msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o

# Z180 2D back-end.
msm_z180-y += z180.o

msm_kgsl_core-objs = $(msm_kgsl_core-y)
msm_adreno-objs = $(msm_adreno-y)
msm_z180-objs = $(msm_z180-y)

# Module targets.
obj-$(CONFIG_MSM_KGSL) += msm_kgsl_core.o
obj-$(CONFIG_MSM_KGSL) += msm_adreno.o
obj-$(CONFIG_MSM_KGSL_2D) += msm_z180.o
| Ateeq72/hTC_Pico_Kernel | drivers/gpu/msm/Makefile | Makefile | gpl-2.0 | 801 |
/* See http://python-ldap.sourceforge.net for details.
* $Id: ldapmodule.c,v 1.8 2008/03/20 12:24:56 stroeder Exp $ */
#include "common.h"
#include "version.h"
#include "constants.h"
#include "errors.h"
#include "functions.h"
#include "schema.h"
#include "ldapcontrol.h"
#include "LDAPObject.h"
DL_EXPORT(void) init_ldap(void);
/* dummy module methods */
static PyMethodDef methods[] = {
	{ NULL, NULL }	/* sentinel: the module defines no plain top-level methods */
};
/* module initialisation */
/* Module initialisation entry point for the _ldap extension: creates the
 * module object and registers version info, constants, exceptions,
 * functions, schema helpers and LDAP controls into its dict. */
DL_EXPORT(void)
init_ldap()
{
	PyObject *m, *d;

#if defined(MS_WINDOWS) || defined(__CYGWIN__)
	/* On Windows/Cygwin the type's ob_type must be patched at runtime. */
	LDAP_Type.ob_type = &PyType_Type;
#endif

	/* Create the module and add the functions */
	m = Py_InitModule("_ldap", methods);

	/* Add some symbolic constants to the module */
	d = PyModule_GetDict(m);
	LDAPinit_version(d);
	LDAPinit_constants(d);
	LDAPinit_errors(d);
	LDAPinit_functions(d);
	LDAPinit_schema(d);
	LDAPinit_control(d);

	/* Check for errors */
	if (PyErr_Occurred())
		Py_FatalError("can't initialize module _ldap");
}
| fossology/fossology | src/nomos/agent_tests/testdata/NomosTestfiles/See-file/ldapmodule.c | C | gpl-2.0 | 979 |
<?php
/**
* @package JCE
* @copyright Copyright © 2009-2011 Ryan Demmer. All rights reserved.
* @license GNU/GPL 2 or later - http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
* JCE is free software. This version may have been modified pursuant
* to the GNU General Public License, and as distributed it includes or
* is derivative of works licensed under the GNU General Public License or
* other free or open source software licenses.
*/
defined('_JEXEC') or die('RESTRICTED');
// load constants
require_once(dirname(__FILE__) . DS . 'constants.php');
// low level error handler
require_once(WF_ADMINISTRATOR . DS . 'classes' . DS . 'error.php');
// load loader
require_once(dirname(__FILE__) . DS . 'loader.php');
// load text
require_once(WF_ADMINISTRATOR. DS . 'classes' . DS . 'text.php');
// load xml
require_once(WF_ADMINISTRATOR . DS . 'classes' . DS . 'xml.php');
// load parameter
require_once(WF_ADMINISTRATOR . DS . 'classes' . DS . 'parameter.php');
?>
| Victor-arias/slr | administrator/components/com_jce/includes/base.php | PHP | gpl-2.0 | 990 |
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */
#include "arm_mve.h"
/* Calls the explicitly-suffixed (_s16) predicated gather-shifted-offset
   load intrinsic; the dg-final directive checks it emits vldrht.u16.  */
int16x8_t
foo (int16_t const * base, uint16x8_t offset, mve_pred16_t p)
{
  return vldrhq_gather_shifted_offset_z_s16 (base, offset, p);
}
/* { dg-final { scan-assembler "vldrht.u16" } } */
/* Same operation via the polymorphic (overloaded) intrinsic name; must
   resolve to the same vldrht.u16 instruction as the explicit form.  */
int16x8_t
foo1 (int16_t const * base, uint16x8_t offset, mve_pred16_t p)
{
  return vldrhq_gather_shifted_offset_z (base, offset, p);
}
/* { dg-final { scan-assembler "vldrht.u16" } } */
| Gurgel100/gcc | gcc/testsuite/gcc.target/arm/mve/intrinsics/vldrhq_gather_shifted_offset_z_s16.c | C | gpl-2.0 | 539 |
/* ------------------------------------------------------------------------- */
/* */
/* i2c-id.h - identifier values for i2c drivers and adapters */
/* */
/* ------------------------------------------------------------------------- */
/* Copyright (C) 1995-1999 Simon G. Vogl
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
/* ------------------------------------------------------------------------- */
#ifndef LINUX_I2C_ID_H
#define LINUX_I2C_ID_H
/*
* ---- Driver types -----------------------------------------------------
*/
#define I2C_DRIVERID_MSP3400 1
#define I2C_DRIVERID_TUNER 2
#define I2C_DRIVERID_VIDEOTEX 3 /* please rename */
#define I2C_DRIVERID_TDA8425 4 /* stereo sound processor */
#define I2C_DRIVERID_TEA6420 5 /* audio matrix switch */
#define I2C_DRIVERID_TEA6415C 6 /* video matrix switch */
#define I2C_DRIVERID_TDA9840 7 /* stereo sound processor */
#define I2C_DRIVERID_SAA7111A 8 /* video input processor */
#define I2C_DRIVERID_SAA5281 9 /* videotext decoder */
#define I2C_DRIVERID_SAA7112 10 /* video decoder, image scaler */
#define I2C_DRIVERID_SAA7120 11 /* video encoder */
#define I2C_DRIVERID_SAA7121 12 /* video encoder */
#define I2C_DRIVERID_SAA7185B 13 /* video encoder */
#define I2C_DRIVERID_CH7003 14 /* digital pc to tv encoder */
#define I2C_DRIVERID_PCF8574A 15 /* i2c expander - 8 bit in/out */
#define I2C_DRIVERID_PCF8582C 16 /* eeprom */
#define I2C_DRIVERID_AT24Cxx 17 /* eeprom 1/2/4/8/16 K */
#define I2C_DRIVERID_TEA6300 18 /* audio mixer */
#define I2C_DRIVERID_BT829 19 /* pc to tv encoder */
#define I2C_DRIVERID_TDA9850 20 /* audio mixer */
#define I2C_DRIVERID_TDA9855 21 /* audio mixer */
#define I2C_DRIVERID_SAA7110 22 /* video decoder */
#define I2C_DRIVERID_MGATVO 23 /* Matrox TVOut */
#define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */
#define I2C_DRIVERID_PCF8583 25 /* real time clock */
#define I2C_DRIVERID_SAB3036 26 /* SAB3036 tuner */
#define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */
#define I2C_DRIVERID_TVMIXER 28 /* Mixer driver for tv cards */
#define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */
#define I2C_DRIVERID_DPL3518 30 /* Dolby decoder chip */
#define I2C_DRIVERID_TDA9873 31 /* TV sound decoder chip */
#define I2C_DRIVERID_TDA9875 32 /* TV sound decoder chip */
#define I2C_DRIVERID_PIC16C54_PV9 33 /* Audio mux/ir receiver */
#define I2C_DRIVERID_SBATT 34 /* Smart Battery Device */
#define I2C_DRIVERID_SBS 35 /* SB System Manager */
#define I2C_DRIVERID_VES1893 36 /* VLSI DVB-S decoder */
#define I2C_DRIVERID_VES1820 37 /* VLSI DVB-C decoder */
#define I2C_DRIVERID_SAA7113 38 /* video decoder */
#define I2C_DRIVERID_TDA8444 39 /* octuple 6-bit DAC */
#define I2C_DRIVERID_BT819 40 /* video decoder */
#define I2C_DRIVERID_BT856 41 /* video encoder */
#define I2C_DRIVERID_VPX3220 42 /* video decoder+vbi/vtxt */
#define I2C_DRIVERID_DRP3510 43 /* ADR decoder (Astra Radio) */
#define I2C_DRIVERID_SP5055 44 /* Satellite tuner */
#define I2C_DRIVERID_STV0030 45 /* Multipurpose switch */
#define I2C_DRIVERID_SAA7108 46 /* video decoder, image scaler */
#define I2C_DRIVERID_DS1307 47 /* DS1307 real time clock */
#define I2C_DRIVERID_ADV7175 48 /* ADV 7175/7176 video encoder */
#define I2C_DRIVERID_SAA7114 49 /* video decoder */
#define I2C_DRIVERID_ZR36120 50 /* Zoran 36120 video encoder */
#define I2C_DRIVERID_24LC32A 51 /* Microchip 24LC32A 32k EEPROM */
#define I2C_DRIVERID_STM41T00 52 /* real time clock */
#define I2C_DRIVERID_UDA1342 53 /* UDA1342 audio codec */
#define I2C_DRIVERID_ADV7170 54 /* video encoder */
#define I2C_DRIVERID_RADEON 55 /* I2C bus on Radeon boards */
#define I2C_DRIVERID_MAX1617 56 /* temp sensor */
#define I2C_DRIVERID_SAA7191 57 /* video encoder */
#define I2C_DRIVERID_INDYCAM 58 /* SGI IndyCam */
#define I2C_DRIVERID_BT832 59 /* CMOS camera video processor */
#define I2C_DRIVERID_TDA9887 60 /* TDA988x IF-PLL demodulator */
#define I2C_DRIVERID_OVCAMCHIP 61 /* OmniVision CMOS image sens. */
#define I2C_DRIVERID_TDA7313 62 /* TDA7313 audio processor */
#define I2C_DRIVERID_MAX6900 63 /* MAX6900 real-time clock */
#define I2C_DRIVERID_SAA7114H 64 /* video decoder */
#define I2C_DRIVERID_DS1374 65 /* DS1374 real time clock */
#define I2C_DRIVERID_TDA9874 66 /* TV sound decoder */
#define I2C_DRIVERID_SAA6752HS 67 /* MPEG2 encoder */
#define I2C_DRIVERID_TVEEPROM 68 /* TV EEPROM */
#define I2C_DRIVERID_WM8775 69 /* wm8775 audio processor */
#define I2C_DRIVERID_CS53L32A 70 /* cs53l32a audio processor */
#define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */
#define I2C_DRIVERID_SAA7127 72 /* saa7124 video encoder */
#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */
#define I2C_DRIVERID_AKITAIOEXP 74 /* IO Expander on Sharp SL-C1000 */
#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */
#define I2C_DRIVERID_TVP5150 76 /* TVP5150 video decoder */
#define I2C_DRIVERID_WM8739 77 /* wm8739 audio processor */
#define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */
#define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */
#define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */
#define I2C_DRIVERID_DS1672 81 /* Dallas/Maxim DS1672 RTC */
#define I2C_DRIVERID_X1205 82 /* Xicor/Intersil X1205 RTC */
#define I2C_DRIVERID_PCF8563 83 /* Philips PCF8563 RTC */
#define I2C_DRIVERID_RS5C372 84 /* Ricoh RS5C372 RTC */
#define I2C_DRIVERID_BT866 85 /* Conexant bt866 video encoder */
#define I2C_DRIVERID_KS0127 86 /* Samsung ks0127 video decoder */
#define I2C_DRIVERID_TLV320AIC23B 87 /* TI TLV320AIC23B audio codec */
#define I2C_DRIVERID_ISL1208 88 /* Intersil ISL1208 RTC */
#define I2C_DRIVERID_I2CDEV 900
#define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */
#define I2C_DRIVERID_ALERT 903 /* SMBus Alert Responder Client */
/* IDs -- Use DRIVERIDs 1000-1999 for sensors.
These were originally in sensors.h in the lm_sensors package */
#define I2C_DRIVERID_LM78 1002
#define I2C_DRIVERID_LM75 1003
#define I2C_DRIVERID_GL518 1004
#define I2C_DRIVERID_EEPROM 1005
#define I2C_DRIVERID_W83781D 1006
#define I2C_DRIVERID_LM80 1007
#define I2C_DRIVERID_ADM1021 1008
#define I2C_DRIVERID_ADM9240 1009
#define I2C_DRIVERID_LTC1710 1010
#define I2C_DRIVERID_ICSPLL 1012
#define I2C_DRIVERID_BT869 1013
#define I2C_DRIVERID_MAXILIFE 1014
#define I2C_DRIVERID_MATORB 1015
#define I2C_DRIVERID_GL520 1016
#define I2C_DRIVERID_THMC50 1017
#define I2C_DRIVERID_ADM1025 1020
#define I2C_DRIVERID_LM87 1021
#define I2C_DRIVERID_PCF8574 1022
#define I2C_DRIVERID_MTP008 1023
#define I2C_DRIVERID_DS1621 1024
#define I2C_DRIVERID_ADM1024 1025
#define I2C_DRIVERID_IT87 1026
#define I2C_DRIVERID_CH700X 1027 /* single driver for CH7003-7009 digital pc to tv encoders */
#define I2C_DRIVERID_FSCPOS 1028
#define I2C_DRIVERID_FSCSCY 1029
#define I2C_DRIVERID_PCF8591 1030
#define I2C_DRIVERID_LM92 1033
#define I2C_DRIVERID_SMARTBATT 1035
#define I2C_DRIVERID_BMCSENSORS 1036
#define I2C_DRIVERID_FS451 1037
#define I2C_DRIVERID_LM85 1039
#define I2C_DRIVERID_LM83 1040
#define I2C_DRIVERID_LM90 1042
#define I2C_DRIVERID_ASB100 1043
#define I2C_DRIVERID_FSCHER 1046
#define I2C_DRIVERID_W83L785TS 1047
/*
* ---- Adapter types ----------------------------------------------------
*/
/* --- Bit algorithm adapters */
#define I2C_HW_B_LP 0x010000 /* Parallel port Philips style */
#define I2C_HW_B_SER 0x010002 /* Serial line interface */
#define I2C_HW_B_BT848 0x010005 /* BT848 video boards */
#define I2C_HW_B_WNV 0x010006 /* Winnov Videums */
#define I2C_HW_B_VIA 0x010007 /* Via vt82c586b */
#define I2C_HW_B_HYDRA 0x010008 /* Apple Hydra Mac I/O */
#define I2C_HW_B_G400 0x010009 /* Matrox G400 */
#define I2C_HW_B_I810 0x01000a /* Intel I810 */
#define I2C_HW_B_VOO 0x01000b /* 3dfx Voodoo 3 / Banshee */
#define I2C_HW_B_PPORT 0x01000c /* Primitive parallel port adapter */
#define I2C_HW_B_SAVG 0x01000d /* Savage 4 */
#define I2C_HW_B_SCX200 0x01000e /* Nat'l Semi SCx200 I2C */
#define I2C_HW_B_RIVA 0x010010 /* Riva based graphics cards */
#define I2C_HW_B_IOC 0x010011 /* IOC bit-wiggling */
#define I2C_HW_B_TSUNA 0x010012 /* DEC Tsunami chipset */
#define I2C_HW_B_OMAHA 0x010014 /* Omaha I2C interface (ARM) */
#define I2C_HW_B_GUIDE 0x010015 /* Guide bit-basher */
#define I2C_HW_B_IXP2000 0x010016 /* GPIO on IXP2000 systems */
#define I2C_HW_B_IXP4XX 0x010017 /* GPIO on IXP4XX systems */
#define I2C_HW_B_S3VIA 0x010018 /* S3Via ProSavage adapter */
#define I2C_HW_B_ZR36067 0x010019 /* Zoran-36057/36067 based boards */
#define I2C_HW_B_PCILYNX 0x01001a /* TI PCILynx I2C adapter */
#define I2C_HW_B_CX2388x 0x01001b /* connexant 2388x based tv cards */
#define I2C_HW_B_NVIDIA 0x01001c /* nvidia framebuffer driver */
#define I2C_HW_B_SAVAGE 0x01001d /* savage framebuffer driver */
#define I2C_HW_B_RADEON 0x01001e /* radeon framebuffer driver */
#define I2C_HW_B_EM28XX 0x01001f /* em28xx video capture cards */
#define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */
/* --- PCF 8584 based algorithms */
#define I2C_HW_P_LP 0x020000 /* Parallel port interface */
#define I2C_HW_P_ISA 0x020001 /* generic ISA Bus interface card */
#define I2C_HW_P_ELEK 0x020002 /* Elektor ISA Bus interface card */
/* --- PCA 9564 based algorithms */
#define I2C_HW_A_ISA 0x1a0000 /* generic ISA Bus interface card */
/* --- ACPI Embedded controller algorithms */
#define I2C_HW_ACPI_EC 0x1f0000
/* --- MPC824x PowerPC adapters */
#define I2C_HW_MPC824X 0x100001 /* Motorola 8240 / 8245 */
/* --- MPC8xx PowerPC adapters */
#define I2C_HW_MPC8XX_EPON 0x110000 /* Eponymous MPC8xx I2C adapter */
/* --- ITE based algorithms */
#define I2C_HW_I_IIC 0x080000 /* controller on the ITE */
/* --- PowerPC on-chip adapters */
#define I2C_HW_OCP 0x120000 /* IBM on-chip I2C adapter */
/* --- Broadcom SiByte adapters */
#define I2C_HW_SIBYTE 0x150000
/* --- SGI adapters */
#define I2C_HW_SGI_VINO 0x160000
#define I2C_HW_SGI_MACE 0x160001
/* --- XSCALE on-chip adapters */
#define I2C_HW_IOP3XX 0x140000
/* --- Au1550 PSC adapters */
#define I2C_HW_AU1550_PSC 0x1b0000
/* --- SMBus only adapters */
#define I2C_HW_SMBUS_PIIX4 0x040000
#define I2C_HW_SMBUS_ALI15X3 0x040001
#define I2C_HW_SMBUS_VIA2 0x040002
#define I2C_HW_SMBUS_VOODOO3 0x040003
#define I2C_HW_SMBUS_I801 0x040004
#define I2C_HW_SMBUS_AMD756 0x040005
#define I2C_HW_SMBUS_SIS5595 0x040006
#define I2C_HW_SMBUS_ALI1535 0x040007
#define I2C_HW_SMBUS_SIS630 0x040008
#define I2C_HW_SMBUS_SIS96X 0x040009
#define I2C_HW_SMBUS_AMD8111 0x04000a
#define I2C_HW_SMBUS_SCX200 0x04000b
#define I2C_HW_SMBUS_NFORCE2 0x04000c
#define I2C_HW_SMBUS_W9968CF 0x04000d
#define I2C_HW_SMBUS_OV511 0x04000e /* OV511(+) USB 1.1 webcam ICs */
#define I2C_HW_SMBUS_OV518 0x04000f /* OV518(+) USB 1.1 webcam ICs */
#define I2C_HW_SMBUS_OV519 0x040010 /* OV519 USB 1.1 webcam IC */
#define I2C_HW_SMBUS_OVFX2 0x040011 /* Cypress/OmniVision FX2 webcam */
/* --- ISA pseudo-adapter */
#define I2C_HW_ISA 0x050000
/* --- IPMI pseudo-adapter */
#define I2C_HW_IPMI 0x0b0000
/* --- IPMB adapter */
#define I2C_HW_IPMB 0x0c0000
/* --- MCP107 adapter */
#define I2C_HW_MPC107 0x0d0000
/* --- Marvell mv64xxx i2c adapter */
#define I2C_HW_MV64XXX 0x190000
/* --- Miscellaneous adapters */
#define I2C_HW_SAA7146 0x060000 /* SAA7146 video decoder bus */
#define I2C_HW_SAA7134 0x090000 /* SAA7134 video decoder bus */
#endif /* LINUX_I2C_ID_H */
| binhqnguyen/lena | nsc/linux-2.6.18/include/linux/i2c-id.h | C | gpl-2.0 | 12,507 |
#pragma once
#include <QDialog>
#include <QLineEdit>
#include <QTextEdit>
#include <QComboBox>
#include <QVBoxLayout>
#include <QPushButton>
#include <QSpinBox>
#include <QGroupBox>
#include <QFontDatabase>
#include <QLabel>
#include <QFont>
#include <QScrollBar>
#include <QPainter>
#include <QMouseEvent>
// Debugger dialog that shows a hex/ASCII dump of emulated memory.
// Declaration only -- method bodies live in the corresponding .cpp file.
class memory_viewer_panel : public QDialog
{
	u32 m_addr;      // address currently being viewed
	u32 m_colcount;  // presumably bytes per row -- confirm in .cpp
	u32 m_rowcount;  // presumably rows per page -- confirm in .cpp
	QLineEdit* m_addr_line;     // user-entered address
	QLabel* m_mem_addr;         // address column of the dump
	QLabel* m_mem_hex;          // hex column of the dump
	QLabel* m_mem_ascii;        // ASCII column of the dump
	QFontMetrics* m_fontMetrics;
public:
	bool exit;
	memory_viewer_panel(QWidget* parent);
	~memory_viewer_panel();
	virtual void wheelEvent(QWheelEvent *event);
	virtual void ShowMemory();
	void SetPC(const uint pc);
	//Static methods
	static void ShowImage(QWidget* parent, u32 addr, int mode, u32 sizex, u32 sizey, bool flipv);
};
| SakataGintokiYT/rpcs3 | rpcs3/rpcs3qt/memory_viewer_panel.h | C | gpl-2.0 | 828 |
/* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
/* PLIP: A parallel port "network" driver for Linux. */
/* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
/*
* Authors: Donald Becker <becker@scyld.com>
* Tommy Thorn <thorn@daimi.aau.dk>
* Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
* Alan Cox <gw4pts@gw4pts.ampr.org>
* Peter Bauer <100136.3530@compuserve.com>
* Niibe Yutaka <gniibe@mri.co.jp>
* Nimrod Zimerman <zimerman@mailandnews.com>
*
* Enhancements:
* Modularization and ifreq/ifmap support by Alan Cox.
* Rewritten by Niibe Yutaka.
* parport-sharing awareness code by Philip Blundell.
* SMP locking by Niibe Yutaka.
* Support for parallel ports with no IRQ (poll mode),
* Modifications to use the parallel port API
* by Nimrod Zimerman.
*
* Fixes:
* Niibe Yutaka
* - Module initialization.
* - MTU fix.
* - Make sure other end is OK, before sending a packet.
* - Fix immediate timer problem.
*
* Al Viro
* - Changed {enable,disable}_irq handling to make it work
* with new ("stack") semantics.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
* Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
* inspired by Russ Nelson's parallel port packet driver.
*
* NOTE:
* Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
* Because of the necessity to communicate to DOS machines with the
* Crynwr packet driver, Peter Bauer changed the protocol again
* back to original protocol.
*
* This version follows original PLIP protocol.
* So, this PLIP can't communicate the PLIP of Linux v1.0.
*/
/*
* To use with DOS box, please do (Turn on ARP switch):
* # ifconfig plip[0-2] arp
*/
static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
/*
Sources:
Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
"parallel.asm" parallel port packet driver.
The "Crynwr" parallel port standard specifies the following protocol:
Trigger by sending nibble '0x8' (this causes interrupt on other end)
count-low octet
count-high octet
... data octets
checksum octet
Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
<wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
The packet is encapsulated as if it were ethernet.
The cable used is a de facto standard parallel null cable -- sold as
a "LapLink" cable by various places. You'll need a 12-conductor cable to
make one yourself. The wiring is:
SLCTIN 17 - 17
GROUND 25 - 25
D0->ERROR 2 - 15 15 - 2
D1->SLCT 3 - 13 13 - 3
D2->PAPOUT 4 - 12 12 - 4
D3->ACK 5 - 10 10 - 5
D4->BUSY 6 - 11 11 - 6
Do not connect the other pins. They are
D5,D6,D7 are 7,8,9
STROBE is 1, FEED is 14, INIT is 16
extra grounds are 18,19,20,21,22,23,24
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/if_plip.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/parport.h>
#include <linux/bitops.h>
#include <net/neighbour.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/semaphore.h>
/* Maximum number of devices to support. */
#define PLIP_MAX 8
/* Use 0 for production, 1 for verification, >2 for debug */
#ifndef NET_DEBUG
#define NET_DEBUG 1
#endif
static const unsigned int net_debug = NET_DEBUG;
#define ENABLE(irq) if (irq != -1) enable_irq(irq)
#define DISABLE(irq) if (irq != -1) disable_irq(irq)
/* In micro second */
#define PLIP_DELAY_UNIT 1
/* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_TRIGGER_WAIT 500
/* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_NIBBLE_WAIT 3000
/* Bottom halves */
static void plip_kick_bh(struct net_device *dev);
static void plip_bh(struct net_device *dev);
static void plip_timer_bh(struct net_device *dev);
/* Interrupt handler */
static void plip_interrupt(int irq, void *dev_id, struct pt_regs *regs);
/* Functions for DEV methods */
static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, void *daddr,
void *saddr, unsigned len);
static int plip_hard_header_cache(struct neighbour *neigh,
struct hh_cache *hh);
static int plip_open(struct net_device *dev);
static int plip_close(struct net_device *dev);
static struct net_device_stats *plip_get_stats(struct net_device *dev);
static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int plip_preempt(void *handle);
static void plip_wakeup(void *handle);
/* Per-device connection state machine (net_local::connection). */
enum plip_connection_state {
	PLIP_CN_NONE=0,		/* line idle */
	PLIP_CN_RECEIVE,	/* receiving a packet */
	PLIP_CN_SEND,		/* transmitting a packet */
	PLIP_CN_CLOSING,	/* transfer finished, closing down */
	PLIP_CN_ERROR		/* waiting for the other end to settle */
};

/* Progress of a single packet transfer (plip_local::state). */
enum plip_packet_state {
	PLIP_PK_DONE=0,
	PLIP_PK_TRIGGER,	/* handshake / trigger phase */
	PLIP_PK_LENGTH_LSB,	/* low byte of the length field */
	PLIP_PK_LENGTH_MSB,	/* high byte of the length field */
	PLIP_PK_DATA,		/* payload octets */
	PLIP_PK_CHECKSUM	/* trailing 8-bit checksum octet */
};

/* Which half (nibble) of the current octet is in flight. */
enum plip_nibble_state {
	PLIP_NB_BEGIN,
	PLIP_NB_1,
	PLIP_NB_2,
};
/* State of one transfer direction; net_local holds one instance for
   send and one for receive. */
struct plip_local {
	enum plip_packet_state state;	/* which packet field is in flight */
	enum plip_nibble_state nibble;	/* which half of the current octet */
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;			/* packet length, byte- or word-accessed */
	unsigned short byte;		/* index of the octet being transferred */
	unsigned char  checksum;	/* running 8-bit sum over the payload */
	unsigned char  data;		/* scratch octet (e.g. received checksum) */
	struct sk_buff *skb;		/* the packet being sent/received */
};
/* Per-device private data, reached via dev->priv / netdev_priv(). */
struct net_local {
	struct net_device_stats enet_stats;	/* interface statistics */
	struct work_struct immediate;		/* runs plip_bh */
	struct work_struct deferred;		/* runs plip_kick_bh (delayed retry) */
	struct work_struct timer;		/* runs plip_timer_bh (IRQ-less poll mode) */
	struct plip_local snd_data;		/* transmit-side transfer state */
	struct plip_local rcv_data;		/* receive-side transfer state */
	struct pardevice *pardev;		/* our parport registration */
	unsigned long	trigger;		/* trigger timeout, in PLIP_DELAY_UNITs */
	unsigned long	nibble;			/* nibble timeout, in PLIP_DELAY_UNITs */
	enum plip_connection_state connection;	/* current connection state */
	unsigned short timeout_count;		/* consecutive timeouts observed */
	int is_deferred;			/* non-zero if a retry is pending on `deferred' */
	int port_owner;				/* non-zero while we hold the parport */
	int should_relinquish;			/* release the port once the transfer closes */
	int (*orig_hard_header)(struct sk_buff *skb, struct net_device *dev,
	                        unsigned short type, void *daddr,
	                        void *saddr, unsigned len);
	int (*orig_hard_header_cache)(struct neighbour *neigh,
	                              struct hh_cache *hh);
	spinlock_t lock;			/* protects connection/transfer state */
	atomic_t kill_timer;			/* asks the poll "timer" work to stop */
	struct semaphore killed_timer_sem;	/* upped once the poll work has stopped */
};
static inline void enable_parport_interrupts (struct net_device *dev)
{
if (dev->irq != -1)
{
struct parport *port =
((struct net_local *)dev->priv)->pardev->port;
port->ops->enable_irq (port);
}
}
static inline void disable_parport_interrupts (struct net_device *dev)
{
if (dev->irq != -1)
{
struct parport *port =
((struct net_local *)dev->priv)->pardev->port;
port->ops->disable_irq (port);
}
}
static inline void write_data (struct net_device *dev, unsigned char data)
{
struct parport *port =
((struct net_local *)dev->priv)->pardev->port;
port->ops->write_data (port, data);
}
static inline unsigned char read_status (struct net_device *dev)
{
struct parport *port =
((struct net_local *)dev->priv)->pardev->port;
return port->ops->read_status (port);
}
/* Entry point of PLIP driver.
Probe the hardware, and register/initialize the driver.
PLIP is rather weird, because of the way it interacts with the parport
system. It is _not_ initialised from Space.c. Instead, plip_init()
is called, and that function makes up a "struct net_device" for each port, and
then calls us here.
*/
/* Install the net_device callbacks and initialize the PLIP-specific
   private state for one port.  Called from plip_init() per device. */
static void
plip_init_netdev(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	/* Then, override parts of it */
	dev->hard_start_xmit	 = plip_tx_packet;
	dev->open		 = plip_open;
	dev->stop		 = plip_close;
	dev->get_stats		 = plip_get_stats;
	dev->do_ioctl		 = plip_ioctl;
	dev->header_cache_update = NULL;
	dev->tx_queue_len	 = 10;
	dev->flags		 = IFF_POINTOPOINT|IFF_NOARP;
	memset(dev->dev_addr, 0xfc, ETH_ALEN);

	/* Set the private structure: keep the original header ops around
	   so plip_hard_header()/plip_hard_header_cache() can chain to them. */
	nl->orig_hard_header = dev->hard_header;
	dev->hard_header = plip_hard_header;

	nl->orig_hard_header_cache = dev->hard_header_cache;
	dev->hard_header_cache = plip_hard_header_cache;

	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger = PLIP_TRIGGER_WAIT;
	nl->nibble  = PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev);
	INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev);

	/* No IRQ line available: fall back to timer-driven polling. */
	if (dev->irq == -1)
		INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev);

	spin_lock_init(&nl->lock);
}
/* Bottom half handler for the delayed request.
This routine is kicked by do_timer().
Request `plip_bh' to be invoked. */
/* Deferred-work handler: re-run the main bottom half, but only if a
   retry was actually requested (is_deferred set by plip_bh & friends). */
static void
plip_kick_bh(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	if (nl->is_deferred)
		schedule_work(&nl->immediate);
}
/* Forward declarations of internal routines */
static int plip_none(struct net_device *, struct net_local *,
struct plip_local *, struct plip_local *);
static int plip_receive_packet(struct net_device *, struct net_local *,
struct plip_local *, struct plip_local *);
static int plip_send_packet(struct net_device *, struct net_local *,
struct plip_local *, struct plip_local *);
static int plip_connection_close(struct net_device *, struct net_local *,
struct plip_local *, struct plip_local *);
static int plip_error(struct net_device *, struct net_local *,
struct plip_local *, struct plip_local *);
static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
struct plip_local *snd,
struct plip_local *rcv,
int error);
#define OK 0
#define TIMEOUT 1
#define ERROR 2
#define HS_TIMEOUT 3
typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
struct plip_local *snd, struct plip_local *rcv);
/* Dispatch table for the connection state machine; indexed by
   enum plip_connection_state (NONE, RECEIVE, SEND, CLOSING, ERROR). */
static const plip_func connection_state_table[] =
{
	plip_none,
	plip_receive_packet,
	plip_send_packet,
	plip_connection_close,
	plip_error
};
/* Bottom half handler of PLIP. */
/* Bottom half handler of PLIP.
   Dispatches to the handler for the current connection state; if the
   handler (or the error path) still reports trouble, schedules a retry
   through the deferred work item one tick later. */
static void
plip_bh(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;
	plip_func f;
	int r;

	nl->is_deferred = 0;
	f = connection_state_table[nl->connection];
	if ((r = (*f)(dev, nl, snd, rcv)) != OK
	    && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}
}
/* Poll-mode substitute for the IRQ: invoke the interrupt handler from a
   self-rescheduling work item until kill_timer is set, then up the
   semaphore so the code waiting for the poller to die can proceed. */
static void
plip_timer_bh(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	if (!(atomic_read (&nl->kill_timer))) {
		plip_interrupt (-1, dev, NULL);

		schedule_delayed_work(&nl->timer, 1);
	}
	else {
		up (&nl->killed_timer_sem);
	}
}
/* Handle an error or timeout reported by a state handler.
   Returns TIMEOUT to ask plip_bh() to retry later, or OK when the
   condition has been absorbed (e.g. our send lost to an incoming
   packet).  On a hard failure, drops both in-flight skbs and parks the
   device in PLIP_CN_ERROR. */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;
	/*
	 * This is tricky. If we got here from the beginning of send (either
	 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
	 * already disabled. With the old variant of {enable,disable}_irq()
	 * extra disable_irq() was a no-op. Now it became mortal - it's
	 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
	 * that is). So we have to treat HS_TIMEOUT and ERROR from send
	 * in a special way.
	 */
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			/* Handshake timeouts get more retries than mid-packet ones. */
			if ((error == HS_TIMEOUT
			     && nl->timeout_count <= 10)
			    || nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			error = HS_TIMEOUT;
		nl->enet_stats.tx_errors++;
		nl->enet_stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		nl->enet_stats.rx_dropped++;
	}
	/* Give up on both directions and free any half-transferred skbs. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		/* Came from the start of send: IRQ is still enabled here,
		   so balance it (see the comment above). */
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}
/* State handler for PLIP_CN_NONE: the line is idle, nothing to do. */
static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}
/* PLIP_RECEIVE --- receive a byte(two nibbles)
Returns OK on success, TIMEOUT on timeout */
/* Receive one octet as two nibbles, resuming at *ns_p (the switch cases
   fall through intentionally so a TIMEOUT return can be retried from
   the exact nibble that stalled).  Returns OK or TIMEOUT. */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		cx = nibble_timeout;
		while (1) {
			/* Wait for status bit 7 low; read twice to make sure
			   the lines are stable before latching. */
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p = (c0 >> 3) & 0x0f;	/* low nibble from status bits 3-6 */
		write_data (dev, 0x10); /* send ACK */
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		cx = nibble_timeout;
		while (1) {
			/* Wait for status bit 7 high (high nibble presented). */
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;	/* high nibble from status bits 3-6 */
		write_data (dev, 0x00); /* send ACK */
		*ns_p = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_NB_2:
		break;
	}
	return OK;
}
/*
* Determine the packet's protocol ID. The rule here is that we
* assume 802.3 if the type field is short enough to be a length.
* This is normal practice and works for any 'now in use' protocol.
*
 * PLIP is ethernet-ish, but the daddr might not be valid if unicast.
 * PLIP fortunately has no bus architecture (it's point-to-point).
*
* We can't fix the daddr thing as that quirk (more bug) is embedded
* in far too many old systems not all even running Linux.
*/
/* Strip the ethernet-style header from a received skb, set pkt_type,
   and return the protocol ID (see the block comment above). */
static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb->mac.raw=skb->data;
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	if(*eth->h_dest&1)
	{
		/* Group-address bit set: broadcast or multicast. */
		if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 *	This ALLMULTI check should be redundant by 1.4
	 *	so don't forget to remove it.
	 */

	/* Values >= 1536 are EtherType protocol IDs, not 802.3 lengths. */
	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 *	This is a magic hack to spot IPX packets. Older Novell breaks
	 *	the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 *	layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 *	won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/*
	 *	Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}
/* PLIP_RECEIVE_PACKET --- receive a packet */
/* State handler for PLIP_CN_RECEIVE: drive reception of one packet.
   The switch cases fall through intentionally so a TIMEOUT return lets
   plip_bh() resume from the exact field that stalled. */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01); /* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* We also have a packet to send: use the shorter
			   trigger timeout so a collision resolves quickly
			   in favour of our pending transmit. */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision, here dev->tbusy == 1 */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		/* Sanity-check the announced length against the MTU. */
		if (rcv->length.h > dev->mtu + dev->hard_header_len
		    || rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		while (++rcv->byte < rcv->length.h);
		/* Compute the 8-bit sum over the received payload. */
		do
			rcv->checksum += lbuf[--rcv->byte];
		while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			nl->enet_stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Inform the upper layer for the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx(rcv->skb);
		dev->last_rx = jiffies;
		nl->enet_stats.rx_bytes += rcv->length.h;
		nl->enet_stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			/* A transmit is queued: switch straight to sending. */
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
/* PLIP_SEND --- send a byte (two nibbles)
Returns OK on success, TIMEOUT when timeout */
/* Send one octet as two nibbles, resuming at *ns_p (the switch cases
   fall through intentionally so a TIMEOUT return can be retried from
   the exact nibble that stalled).  Returns OK or TIMEOUT. */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		/* Present the low nibble on the data lines, bit 4 low. */
		write_data (dev, data & 0x0f);
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Raise bit 4, then wait for the peer's ACK (status bit 7 low). */
		write_data (dev, 0x10 | (data & 0x0f));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		/* Present the high nibble, bit 4 still high. */
		write_data (dev, 0x10 | (data >> 4));
		*ns_p = PLIP_NB_2;
		/* fall through */

	case PLIP_NB_2:
		/* Drop bit 4, then wait for the ACK (status bit 7 high). */
		write_data (dev, (data >> 4));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
/* PLIP_SEND_PACKET --- send a packet */
/* State handler for PLIP_CN_SEND: drive transmission of one packet.
   The switch cases fall through intentionally so a TIMEOUT return lets
   plip_bh() resume from the exact field that stalled. */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		/* Other end must be idle (status 0x80) before we trigger. */
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger remote rx interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted. */
				nl->enet_stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {
				/* Peer acknowledged the trigger. */
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Interrupted.
					   We don't need to enable irq,
					   as it is soon disabled. */
					/* Yes, we do. New variant of
					   {enable,disable}_irq *counts*
					   them.  -- AV  */
					ENABLE(dev->irq);
					nl->enet_stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		do
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		while (++snd->byte < snd->length.h);
		/* Compute the 8-bit sum over the payload just sent. */
		do
			snd->checksum += lbuf[--snd->byte];
		while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		nl->enet_stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		nl->enet_stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Close the connection */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}
/* State handler for PLIP_CN_CLOSING: return to the idle state, restart
   the transmit queue, and hand the parport back if someone else has
   asked for it (should_relinquish). */
static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv)
{
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_CLOSING) {
		nl->connection = PLIP_CN_NONE;
		netif_wake_queue (dev);
	}
	spin_unlock_irq(&nl->lock);
	if (nl->should_relinquish) {
		nl->should_relinquish = nl->port_owner = 0;
		parport_release(nl->pardev);
	}
	return OK;
}
/* PLIP_ERROR --- wait till other end settled */
/* State handler for PLIP_CN_ERROR: wait until the other end has settled
   (status lines back to the idle 0x80 pattern), then reset the
   interface; otherwise poll again on the next tick. */
static int
plip_error(struct net_device *dev, struct net_local *nl,
	   struct plip_local *snd, struct plip_local *rcv)
{
	unsigned char status;

	status = read_status(dev);
	if ((status & 0xf8) == 0x80) {
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
		nl->connection = PLIP_CN_NONE;
		nl->should_relinquish = 0;
		netif_start_queue (dev);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		netif_wake_queue (dev);
	} else {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}

	return OK;
}
/* Handle the parallel port interrupts. */
static void
plip_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
    struct net_device *dev = dev_id;
    struct net_local *nl;
    struct plip_local *rcv;
    unsigned char c0;

    if (dev == NULL) {
        printk(KERN_DEBUG "plip_interrupt: irq %d for unknown device.\n", irq);
        return;
    }
    nl = netdev_priv(dev);
    rcv = &nl->rcv_data;

    spin_lock_irq (&nl->lock);

    c0 = read_status(dev);
    /* (c0 & 0xf8) == 0xc0 is presumably the peer's transmit-trigger
     * pattern (TODO: confirm against the PLIP protocol description);
     * anything else is a spurious interrupt. */
    if ((c0 & 0xf8) != 0xc0) {
        if ((dev->irq != -1) && (net_debug > 1))
            printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
        spin_unlock_irq (&nl->lock);
        return;
    }

    if (net_debug > 3)
        printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

    switch (nl->connection) {
    case PLIP_CN_CLOSING:
        netif_wake_queue (dev);
        /* deliberate fall through: treat the trigger as the start of
         * a new receive */
    case PLIP_CN_NONE:
    case PLIP_CN_SEND:
        /* Start the receive state machine and run it from process
         * context via the immediate work queue. */
        rcv->state = PLIP_PK_TRIGGER;
        nl->connection = PLIP_CN_RECEIVE;
        nl->timeout_count = 0;
        schedule_work(&nl->immediate);
        break;

    case PLIP_CN_RECEIVE:
        /* May occur because there is race condition
           around test and set of dev->interrupt.
           Ignore this interrupt. */
        break;

    case PLIP_CN_ERROR:
        printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
        break;
    }

    spin_unlock_irq(&nl->lock);
}
/*
 * hard_start_xmit handler: queue one skb for transmission.
 *
 * Claims the parallel port if we do not currently own it, stops the
 * TX queue (PLIP handles one packet at a time), hands the skb to the
 * send state machine and kicks the immediate work queue.  Returns 0
 * on success, 1 to make the network stack requeue the packet.
 */
static int
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
    struct net_local *nl = netdev_priv(dev);
    struct plip_local *snd = &nl->snd_data;

    if (netif_queue_stopped(dev))
        return 1;

    /* We may need to grab the bus */
    if (!nl->port_owner) {
        if (parport_claim(nl->pardev))
            return 1;
        nl->port_owner = 1;
    }

    netif_stop_queue (dev);

    if (skb->len > dev->mtu + dev->hard_header_len) {
        printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
        netif_start_queue (dev);
        return 1;
    }

    if (net_debug > 2)
        printk(KERN_DEBUG "%s: send request\n", dev->name);

    spin_lock_irq(&nl->lock);
    dev->trans_start = jiffies;
    snd->skb = skb;
    snd->length.h = skb->len;
    snd->state = PLIP_PK_TRIGGER;
    /* Only open a new connection if the line is idle; a receive in
     * progress keeps its state and the send is picked up later. */
    if (nl->connection == PLIP_CN_NONE) {
        nl->connection = PLIP_CN_SEND;
        nl->timeout_count = 0;
    }
    schedule_work(&nl->immediate);
    spin_unlock_irq(&nl->lock);

    return 0;
}
/*
 * Rewrite the Ethernet header of an outgoing frame: the source
 * becomes our own device address and the destination becomes
 * 0xfc 0xfc followed by the interface's first ifa_address
 * (presumably the point-to-point peer address — confirm against
 * plip_open(), which builds dev_addr from ifa_local the same way).
 * If the device has no IP address yet the header is left untouched.
 */
static void
plip_rewrite_address(struct net_device *dev, struct ethhdr *eth)
{
    struct in_device *in_dev;

    if ((in_dev=dev->ip_ptr) != NULL) {
        /* Any address will do - we take the first */
        struct in_ifaddr *ifa=in_dev->ifa_list;
        if (ifa != NULL) {
            memcpy(eth->h_source, dev->dev_addr, 6);
            memset(eth->h_dest, 0xfc, 2);
            memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
        }
    }
}
/*
 * hard_header hook: build the Ethernet header with the original
 * (eth_header-style) routine saved in nl->orig_hard_header, then
 * patch the source/destination to PLIP's fc:fc:<ip> scheme.
 */
static int
plip_hard_header(struct sk_buff *skb, struct net_device *dev,
                 unsigned short type, void *daddr,
                 void *saddr, unsigned len)
{
    struct net_local *nl = netdev_priv(dev);
    int ret;

    if ((ret = nl->orig_hard_header(skb, dev, type, daddr, saddr, len)) >= 0)
        plip_rewrite_address (dev, (struct ethhdr *)skb->data);

    return ret;
}
/*
 * hard_header_cache hook: let the original routine fill the hh_cache
 * entry, then patch the cached Ethernet header with PLIP's rewritten
 * addresses, exactly as plip_hard_header() does for uncached headers.
 */
int plip_hard_header_cache(struct neighbour *neigh,
                           struct hh_cache *hh)
{
    /* Use the netdev_priv() accessor like the rest of this driver
     * instead of reaching into neigh->dev->priv directly. */
    struct net_local *nl = netdev_priv(neigh->dev);
    int ret;

    if ((ret = nl->orig_hard_header_cache(neigh, hh)) == 0)
    {
        struct ethhdr *eth;

        /* The cached header is stored right-aligned in hh_data. */
        eth = (struct ethhdr*)(((u8*)hh->hh_data) +
                               HH_DATA_OFF(sizeof(*eth)));
        plip_rewrite_address (neigh->dev, eth);
    }

    return ret;
}
/* Open/initialize the board. This is called (in the current kernel)
sometime after booting when the 'ifconfig' program is run.
This routine gets exclusive access to the parallel port by allocating
its IRQ line.
*/
/* Open/initialize the board.  This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.

   This routine gets exclusive access to the parallel port by allocating
   its IRQ line.

   Returns 0 on success, -EAGAIN if the parallel port cannot be
   claimed.  In IRQ-less mode (dev->irq == -1) a polling timer is
   started instead of relying on interrupts.
 */
static int
plip_open(struct net_device *dev)
{
    struct net_local *nl = netdev_priv(dev);
    struct in_device *in_dev;

    /* Grab the port */
    if (!nl->port_owner) {
        if (parport_claim(nl->pardev)) return -EAGAIN;
        nl->port_owner = 1;
    }

    nl->should_relinquish = 0;

    /* Clear the data port. */
    write_data (dev, 0x00);

    /* Enable rx interrupt. */
    enable_parport_interrupts (dev);
    if (dev->irq == -1)
    {
        /* No IRQ line: poll via the delayed-work timer instead. */
        atomic_set (&nl->kill_timer, 0);
        schedule_delayed_work(&nl->timer, 1);
    }

    /* Initialize the state machine. */
    nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
    nl->rcv_data.skb = nl->snd_data.skb = NULL;
    nl->connection = PLIP_CN_NONE;
    nl->is_deferred = 0;

    /* Fill in the MAC-level header.
       We used to abuse dev->broadcast to store the point-to-point
       MAC address, but we no longer do it. Instead, we fetch the
       interface address whenever it is needed, which is cheap enough
       because we use the hh_cache. Actually, abusing dev->broadcast
       didn't work, because when using plip_open the point-to-point
       address isn't yet known.
       PLIP doesn't have a real MAC address, but we need it to be
       DOS compatible, and to properly support taps (otherwise,
       when the device address isn't identical to the address of a
       received frame, the kernel incorrectly drops it). */

    if ((in_dev=dev->ip_ptr) != NULL) {
        /* Any address will do - we take the first. We already
           have the first two bytes filled with 0xfc, from
           plip_init_dev(). */
        struct in_ifaddr *ifa=in_dev->ifa_list;
        if (ifa != NULL) {
            memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
        }
    }

    netif_start_queue (dev);

    return 0;
}
/* The inverse routine to plip_open ().

   Stops the queue, disables the IRQ (or, in IRQ-less mode, kills the
   polling timer and waits for it to acknowledge via the semaphore),
   resets both state machines, frees any half-processed skbs and
   releases the parallel port.  Always returns 0.
 */
static int
plip_close(struct net_device *dev)
{
    struct net_local *nl = netdev_priv(dev);
    struct plip_local *snd = &nl->snd_data;
    struct plip_local *rcv = &nl->rcv_data;

    netif_stop_queue (dev);
    DISABLE(dev->irq);
    synchronize_irq(dev->irq);

    if (dev->irq == -1)
    {
        /* IRQ-less mode: signal the polling timer to stop and block
         * until it confirms by releasing the semaphore. */
        init_MUTEX_LOCKED (&nl->killed_timer_sem);
        atomic_set (&nl->kill_timer, 1);
        down (&nl->killed_timer_sem);
    }

#ifdef NOTDEF
    outb(0x00, PAR_DATA(dev));
#endif
    nl->is_deferred = 0;
    nl->connection = PLIP_CN_NONE;
    if (nl->port_owner) {
        parport_release(nl->pardev);
        nl->port_owner = 0;
    }

    snd->state = PLIP_PK_DONE;
    if (snd->skb) {
        dev_kfree_skb(snd->skb);
        snd->skb = NULL;
    }
    rcv->state = PLIP_PK_DONE;
    if (rcv->skb) {
        kfree_skb(rcv->skb);
        rcv->skb = NULL;
    }

#ifdef NOTDEF
    /* Reset. */
    outb(0x00, PAR_CONTROL(dev));
#endif
    return 0;
}
/*
 * parport preemption callback: another driver wants the port.
 * Returns 0 (and forgets ownership) when we are idle, or 1 to refuse
 * while a datagram is on the wire — in that case the release is
 * deferred to plip_connection_close() via should_relinquish.
 */
static int
plip_preempt(void *handle)
{
    struct net_local *nl = netdev_priv((struct net_device *)handle);

    if (nl->connection == PLIP_CN_NONE) {
        /* Idle: let the port go immediately. */
        nl->port_owner = 0; /* Remember that we released the bus */
        return 0;
    }

    /* Stand our ground if a datagram is on the wire */
    nl->should_relinquish = 1;
    return 1;
}
/*
 * parport wakeup callback: the port has become available again.
 * Re-claims the port (only while the interface is up) and clears the
 * data lines.  Also sanity-checks the odd case of being woken while
 * we still think we own the port.
 */
static void
plip_wakeup(void *handle)
{
    struct net_device *dev = handle;
    struct net_local *nl = netdev_priv(dev);

    if (nl->port_owner) {
        /* Why are we being woken up? */
        printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
        if (parport_claim(nl->pardev))
            return;
        /* bus_owner is already set (but why?) */
        printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
    }

    if (!(dev->flags & IFF_UP))
        /* Don't need the port when the interface is down */
        return;

    if (parport_claim(nl->pardev))
        return;

    nl->port_owner = 1;
    /* Clear the data port. */
    write_data (dev, 0x00);
}
/* Return the per-device statistics kept in the private area. */
static struct net_device_stats *
plip_get_stats(struct net_device *dev)
{
    struct net_local *nl = netdev_priv(dev);

    return &nl->enet_stats;
}
/*
 * SIOCDEVPLIP ioctl handler: get or set the trigger/nibble timeouts.
 * PLIP_SET_TIMEOUT requires CAP_NET_ADMIN.  Returns 0 on success,
 * -EOPNOTSUPP for unknown commands, -EPERM without the capability.
 */
static int
plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct net_local *nl = netdev_priv(dev);
    struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;

    if (cmd != SIOCDEVPLIP)
        return -EOPNOTSUPP;

    if (pc->pcmd == PLIP_GET_TIMEOUT) {
        pc->trigger = nl->trigger;
        pc->nibble = nl->nibble;
    } else if (pc->pcmd == PLIP_SET_TIMEOUT) {
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        nl->trigger = pc->trigger;
        nl->nibble = pc->nibble;
    } else {
        return -EOPNOTSUPP;
    }

    return 0;
}
/* Module parameters: "parport" lists the parallel port numbers to
 * bind to (-1 marks an unused slot; -2, set by plip_setup(), disables
 * the driver entirely).  "timid" makes plip skip ports that already
 * have other devices registered. */
static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
static int timid;

module_param_array(parport, int, NULL, 0);
module_param(timid, int, 0);
MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");

/* Net devices created by plip_attach(), indexed by unit number. */
static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
/* Return 1 if port number `a` appears in `list` (terminated by -1 or
 * PLIP_MAX entries), 0 otherwise. */
static inline int
plip_searchfor(int list[], int a)
{
    int idx = 0;

    while (idx < PLIP_MAX && list[idx] != -1) {
        if (list[idx] == a)
            return 1;
        idx++;
    }
    return 0;
}
/* plip_attach() is called (by the parport code) when a port is
 * available to use.
 *
 * Creates a plip%d net device on the port when either no explicit
 * port list was given (optionally honouring "timid") or the port is
 * in the requested list.  Fixes vs. the previous version: removed an
 * unreachable `return;` left after a goto, and restored the missing
 * space in the IRQ-less warning message. */
static void plip_attach (struct parport *port)
{
    static int unit;
    struct net_device *dev;
    struct net_local *nl;
    char name[IFNAMSIZ];

    if ((parport[0] == -1 && (!timid || !port->devices)) ||
        plip_searchfor(parport, port->number)) {
        if (unit == PLIP_MAX) {
            printk(KERN_ERR "plip: too many devices\n");
            return;
        }

        sprintf(name, "plip%d", unit);
        dev = alloc_etherdev(sizeof(struct net_local));
        if (!dev) {
            printk(KERN_ERR "plip: memory squeeze\n");
            return;
        }

        strcpy(dev->name, name);

        SET_MODULE_OWNER(dev);
        dev->irq = port->irq;
        dev->base_addr = port->base;
        if (port->irq == -1) {
            printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode, "
                   "which is fairly inefficient!\n", port->name);
        }

        nl = netdev_priv(dev);
        nl->pardev = parport_register_device(port, name, plip_preempt,
                                             plip_wakeup, plip_interrupt,
                                             0, dev);
        if (!nl->pardev) {
            printk(KERN_ERR "%s: parport_register failed\n", name);
            goto err_free_dev;
        }

        plip_init_netdev(dev);

        if (register_netdev(dev)) {
            printk(KERN_ERR "%s: network register failed\n", name);
            goto err_parport_unregister;
        }

        printk(KERN_INFO "%s", version);
        if (dev->irq != -1)
            printk(KERN_INFO "%s: Parallel port at %#3lx, "
                   "using IRQ %d.\n",
                   dev->name, dev->base_addr, dev->irq);
        else
            printk(KERN_INFO "%s: Parallel port at %#3lx, "
                   "not using IRQ.\n",
                   dev->name, dev->base_addr);
        dev_plip[unit++] = dev;
    }
    return;

err_parport_unregister:
    parport_unregister_device(nl->pardev);
err_free_dev:
    free_netdev(dev);
    return;
}
/* plip_detach() is called (by the parport code) when a port is
 * no longer available to use. */
static void plip_detach (struct parport *port)
{
    /* Nothing to do; devices are torn down in plip_cleanup_module(). */
}

/* Callbacks registered with the parport subsystem: attach() is
 * invoked for every known port, detach() on port removal. */
static struct parport_driver plip_driver = {
    .name   = "plip",
    .attach = plip_attach,
    .detach = plip_detach
};
/* Module unload: unregister from parport first (so no new attach()
 * callbacks arrive), then unregister and free every device we
 * created, releasing the port if we still own it. */
static void __exit plip_cleanup_module (void)
{
    struct net_device *dev;
    int i;

    parport_unregister_driver (&plip_driver);

    for (i=0; i < PLIP_MAX; i++) {
        if ((dev = dev_plip[i])) {
            struct net_local *nl = netdev_priv(dev);
            unregister_netdev(dev);
            if (nl->port_owner)
                parport_release(nl->pardev);
            parport_unregister_device(nl->pardev);
            free_netdev(dev);
            dev_plip[i] = NULL;
        }
    }
}
#ifndef MODULE
/* Next free slot in parport[] while parsing "plip=" options. */
static int parport_ptr;

/*
 * Parse the "plip=" kernel command-line option.  Accepted forms:
 *   plip=parportN  -- bind to parallel port N (may be repeated)
 *   plip=timid     -- don't grab ports that already have devices
 *   plip= / plip=0 -- disable the driver (parport[0] = -2)
 * Any other numeric value is ignored with a warning.
 */
static int __init plip_setup(char *str)
{
    int ints[4];

    str = get_options(str, ARRAY_SIZE(ints), ints);

    /* Ugh. */
    if (!strncmp(str, "parport", 7)) {
        int n = simple_strtoul(str+7, NULL, 10);
        if (parport_ptr < PLIP_MAX)
            parport[parport_ptr++] = n;
        else
            printk(KERN_INFO "plip: too many ports, %s ignored.\n",
                   str);
    } else if (!strcmp(str, "timid")) {
        timid = 1;
    } else {
        if (ints[0] == 0 || ints[1] == 0) {
            /* disable driver on "plip=" or "plip=0" */
            parport[0] = -2;
        } else {
            printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
                   ints[1]);
        }
    }
    return 1;
}

__setup("plip=", plip_setup);

#endif /* !MODULE */
/*
 * Module load: bail out if "plip=0" disabled the driver, drop the
 * "timid" flag when explicit ports were listed (the two conflict),
 * then register with the parport subsystem — the actual net devices
 * are created from plip_attach() callbacks.
 */
static int __init plip_init (void)
{
    if (parport[0] == -2)
        return 0;

    if (parport[0] != -1 && timid) {
        printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
        timid = 0;
    }

    if (parport_register_driver (&plip_driver)) {
        printk (KERN_WARNING "plip: couldn't register driver\n");
        return 1;
    }

    return 0;
}

module_init(plip_init);
module_exit(plip_cleanup_module);
MODULE_LICENSE("GPL");
/*
* Local variables:
* compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"
* End:
*/
| zhoupeng/spice4xen | linux-2.6.18-xen.hg/drivers/net/plip.c | C | gpl-2.0 | 35,956 |
<?php
/**
 * MVC controller for NextGEN admin pages.  One instance is kept per
 * page context; the context (string or array) also supplies the
 * page's name.
 */
class C_NextGen_Admin_Page_Controller extends C_MVC_Controller
{
    // Per-context singleton instances, keyed by context.
    static $_instances = array();

    /**
     * Returns the (lazily created) controller instance for a context.
     *
     * @param string|array|bool $context
     * @return C_NextGen_Admin_Page_Controller
     */
    static function &get_instance($context=FALSE)
    {
        if (!isset(self::$_instances[$context])) {
            $klass = get_class();
            self::$_instances[$context] = new $klass($context);
        }
        return self::$_instances[$context];
    }

    /**
     * Derives the page name from the context and wires up the
     * instance-methods mixin and the page interface.
     *
     * @param string|array|bool $context
     */
    function define($context=FALSE)
    {
        // An array context uses its first element as the page name.
        if (is_array($context)) $this->name = $context[0];
        else $this->name = $context;

        parent::define($context);
        $this->add_mixin('Mixin_NextGen_Admin_Page_Instance_Methods');
        $this->implement('I_NextGen_Admin_Page');
    }

    /**
     * Registers a pre-hook so backend scripts/styles are enqueued
     * before index_action() renders the page.
     */
    function initialize()
    {
        parent::initialize();
        $this->add_pre_hook(
            'index_action',
            'Enqueue Backend Resources',
            'Hook_NextGen_Admin_Page_Resources',
            'enqueue_backend_resources'
        );
    }
}
/**
 * Pre-hook adapter: delegates to the page object's
 * enqueue_backend_resources() before the index action runs.
 */
class Hook_NextGen_Admin_Page_Resources extends Hook
{
    function enqueue_backend_resources()
    {
        $this->object->enqueue_backend_resources();
    }
}
/**
 * Instance methods mixed into NextGEN admin page controllers:
 * authorization, static-resource enqueueing and the accordion-based
 * index action shared by the settings pages.
 */
class Mixin_NextGen_Admin_Page_Instance_Methods extends Mixin
{
    /**
     * Authorizes the request
     *
     * @param string|null $privilege permission to check; defaults to
     *        get_required_permission()
     * @return object|bool the request token when authorized, FALSE
     *         when the actor lacks the privilege or the nonce check
     *         fails on a POST
     */
    function is_authorized_request($privilege=NULL)
    {
        if (!$privilege) $privilege = $this->object->get_required_permission();
        $security = $this->get_registry()->get_utility('I_Security_Manager');
        // $retval starts out as the token object and is downgraded to
        // FALSE by any failed check below.
        $retval = $sec_token = $security->get_request_token(str_replace(array(' ', "\n", "\t"), '_', $privilege));
        $sec_actor = $security->get_current_actor();

        // Ensure that the user has permission to access this page
        if (!$sec_actor->is_allowed($privilege))
            $retval = FALSE;

        // Ensure that nonce is valid
        if ($this->object->is_post_request() && !$sec_token->check_current_request()) {
            $retval = FALSE;
        }

        return $retval;
    }

    /**
     * Returns the permission required to access this page
     * (by default the page's own name).
     *
     * @return string
     */
    function get_required_permission()
    {
        return $this->object->name;
    }

    /**
     * Enqueues resources required by a NextGEN Admin page:
     * jQuery + UI widgets, the placeholder shim, WP's color picker
     * (registered manually against the wp-admin copies), the page's
     * own script/style and select2.
     */
    function enqueue_backend_resources()
    {
        wp_enqueue_script('jquery');
        $this->object->enqueue_jquery_ui_theme();
        wp_enqueue_script('jquery-ui-accordion');
        wp_enqueue_script(
            'nextgen_display_settings_page_placeholder_stub',
            $this->get_static_url('photocrati-nextgen_admin#jquery.placeholder.min.js'),
            array('jquery'),
            '2.0.7',
            TRUE
        );
        // Register iris/wp-color-picker by hand so their wp-admin URLs
        // resolve on the frontend as well.
        wp_register_script('iris', $this->get_router()->get_url('/wp-admin/js/iris.min.js', FALSE, TRUE), array('jquery-ui-draggable', 'jquery-ui-slider', 'jquery-touch-punch'));
        wp_register_script('wp-color-picker', $this->get_router()->get_url('/wp-admin/js/color-picker.js', FALSE, TRUE), array('iris'));
        wp_localize_script('wp-color-picker', 'wpColorPickerL10n', array(
            'clear'         => __( 'Clear' ),
            'defaultString' => __( 'Default' ),
            'pick'          => __( 'Select Color' ),
            'current'       => __( 'Current Color' ),
        ));
        wp_enqueue_script(
            'nextgen_admin_page',
            $this->get_static_url('photocrati-nextgen_admin#nextgen_admin_page.js'),
            array('wp-color-picker')
        );
        wp_enqueue_style(
            'nextgen_admin_page',
            $this->get_static_url('photocrati-nextgen_admin#nextgen_admin_page.css'),
            array('wp-color-picker')
        );

        // Ensure select2
        wp_enqueue_style('select2');
        wp_enqueue_script('select2');
    }

    /**
     * Enqueues the configured jQuery UI theme stylesheet, switching
     * its URL to https when the request is SSL.
     */
    function enqueue_jquery_ui_theme()
    {
        $settings = C_NextGen_Global_Settings::get_instance();
        wp_enqueue_style(
            $settings->jquery_ui_theme,
            is_ssl() ?
                str_replace('http:', 'https:', $settings->jquery_ui_theme_url) :
                $settings->jquery_ui_theme_url,
            NULL,
            $settings->jquery_ui_theme_version
        );
    }

    /**
     * Returns the page title
     * @return string
     */
    function get_page_title()
    {
        return $this->object->name;
    }

    /**
     * Returns the page heading
     * @return string
     */
    function get_page_heading()
    {
        return $this->object->get_page_title();
    }

    /**
     * Returns the type of forms to render on this page
     * (the first element of an array context, or the context itself).
     * @return string
     */
    function get_form_type()
    {
        return is_array($this->object->context) ?
            $this->object->context[0] : $this->object->context;
    }

    /**
     * Message shown after a successful POST.
     * @return string
     */
    function get_success_message()
    {
        return "Saved successfully";
    }

    /**
     * Returns an accordion tab, encapsulating the form
     *
     * @param I_Form $form
     * @return string rendered tab HTML
     */
    function to_accordion_tab($form)
    {
        return $this->object->render_partial('photocrati-nextgen_admin#accordion_tab', array(
            'id'      => $form->get_id(),
            'title'   => $form->get_title(),
            'content' => $form->render(TRUE)
        ), TRUE);
    }

    /**
     * Returns the form objects registered for this page's form type.
     *
     * @return array of I_Form instances
     */
    function get_forms()
    {
        $forms = array();
        $form_manager = C_Form_Manager::get_instance();
        foreach ($form_manager->get_forms($this->object->get_form_type()) as $form) {
            $forms[] = $this->get_registry()->get_utility('I_Form', $form);
        }
        return $forms;
    }

    /**
     * Gets the action to be executed: the request's "action"
     * parameter, lower-cased with non-word runs collapsed to single
     * underscores and an "_action" suffix appended.
     *
     * @return string
     */
    function _get_action()
    {
        $retval = preg_quote($this->object->param('action'), '/');
        $retval = strtolower(preg_replace(
            "/[^\w]/",
            '_',
            $retval
        ));
        return preg_replace("/_{2,}/", "_", $retval).'_action';
    }

    /**
     * Returns the template to be rendered for the index action
     * @return string
     */
    function index_template()
    {
        return 'photocrati-nextgen_admin#nextgen_admin_page';
    }

    /**
     * Whether the rendered page includes a save button.
     * @return bool
     */
    function show_save_button()
    {
        return TRUE;
    }

    /**
     * Renders a NextGEN Admin Page using jQuery Accordions
     *
     * On POST, dispatches the sanitized action to each form that
     * implements it, collects validation errors, and renders all
     * forms as accordion tabs.  Unauthorized requests get the
     * "not authorized" view instead.
     */
    function index_action()
    {
        if (($token = $this->object->is_authorized_request())) {
            // Get each form. Validate it and save any changes if this is a post
            // request
            $tabs = array();
            $errors = array();
            $success = $this->object->is_post_request() ?
                $this->object->get_success_message() : '';
            foreach ($this->object->get_forms() as $form) {
                $form->enqueue_static_resources();
                if ($this->object->is_post_request()) {
                    $action = $this->object->_get_action();
                    if ($form->has_method($action)) {
                        // Pass the form's own slice of the request data.
                        $form->$action($this->object->param($form->context));
                    }
                }
                $tabs[] = $this->object->to_accordion_tab($form);
                if ($form->has_method('get_model') && $form->get_model()) {
                    if ($form->get_model()->is_invalid()) {
                        if (($form_errors = $this->object->show_errors_for($form->get_model(), TRUE))) {
                            $errors[] = $form_errors;
                        }
                        $form->get_model()->clear_errors();
                    }
                }
            }

            // Render the view
            $this->render_partial($this->object->index_template(), array(
                'page_heading'     => $this->object->get_page_heading(),
                'tabs'             => $tabs,
                'errors'           => $errors,
                'success'          => $success,
                'form_header'      => $token->get_form_html(),
                'show_save_button' => $this->object->show_save_button()
            ));
        }

        // The user is not authorized to view this page
        else {
            $this->render_view('photocrati-nextgen_admin#not_authorized', array(
                'name'  => $this->object->name,
                'title' => $this->object->get_page_title()
            ));
        }
    }
}
| pixelthing/restless-dev-wordpress | wp-content/plugins/nextgen-gallery/products/photocrati_nextgen/modules/nextgen_admin/class.nextgen_admin_page_controller.php | PHP | gpl-2.0 | 7,081 |
/* Tasks_Periodic
*
* This routine serves as a test task for the CBS scheduler
* implementation.
*
* Input parameters:
* argument - task argument
*
* Output parameters: NONE
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.org/license/LICENSE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "system.h"
/*
 * Test task body: when argument == 1, first exercises the whole CBS
 * (Constant Bandwidth Server) API surface — create/attach/detach/
 * destroy, parameter get/set, budget queries — printing "ERROR: ..."
 * on any mismatch; then (for any argument) runs under a rate
 * monotonic period for four periods, busy-looping for `Execution`
 * ticks each period and cross-checking the CBS budget accounting.
 */
rtems_task Task_Periodic(
    rtems_task_argument argument
)
{
    rtems_id rmid;
    rtems_status_code status;
    time_t approved_budget, exec_time, abs_time, remaining_budget;
    int start, stop, now;

    rtems_cbs_server_id server_id = 0, tsid;
    rtems_cbs_parameters params, tparams;

    params.deadline = Period;
    params.budget = Execution+1;

    /* Task 1 will be attached to a server, task 2 not. */
    if ( argument == 1 ) {
        printf( "Periodic task: Create server and Attach thread\n" );
        if ( rtems_cbs_create_server( &params, NULL, &server_id ) )
            printf( "ERROR: CREATE SERVER FAILED\n" );
        if ( rtems_cbs_attach_thread( server_id, Task_id ) )
            printf( "ERROR: ATTACH THREAD FAILED\n" );

        printf( "Periodic task: ID and Get parameters\n" );
        if ( rtems_cbs_get_server_id( Task_id, &tsid ) )
            printf( "ERROR: GET SERVER ID FAILED\n" );
        if ( tsid != server_id )
            printf( "ERROR: SERVER ID MISMATCH\n" );
        if ( rtems_cbs_get_parameters( server_id, &tparams ) )
            printf( "ERROR: GET PARAMETERS FAILED\n" );
        if ( params.deadline != tparams.deadline ||
             params.budget != tparams.budget )
            printf( "ERROR: PARAMETERS MISMATCH\n" );

        printf( "Periodic task: Detach thread and Destroy server\n" );
        if ( rtems_cbs_detach_thread( server_id, Task_id ) )
            printf( "ERROR: DETACH THREAD FAILED\n" );
        if ( rtems_cbs_destroy_server( server_id ) )
            printf( "ERROR: DESTROY SERVER FAILED\n" );

        /* Re-create the server for the periodic phase below. */
        if ( rtems_cbs_create_server( &params, NULL, &server_id ) )
            printf( "ERROR: CREATE SERVER FAILED\n" );

        printf( "Periodic task: Remaining budget and Execution time\n" );
        if ( rtems_cbs_get_remaining_budget( server_id, &remaining_budget ) )
            printf( "ERROR: GET REMAINING BUDGET FAILED\n" );
        /* Nothing has run on the fresh server yet, so the full budget
           must still be available. */
        if ( remaining_budget != params.budget )
            printf( "ERROR: REMAINING BUDGET MISMATCH\n" );
        if ( rtems_cbs_get_execution_time( server_id, &exec_time, &abs_time ) )
            printf( "ERROR: GET EXECUTION TIME FAILED\n" );

        printf( "Periodic task: Set parameters\n" );
        if ( rtems_cbs_attach_thread( server_id, Task_id ) )
            printf( "ERROR: ATTACH THREAD FAILED\n" );
        /* Round-trip a different parameter set, then restore. */
        params.deadline = Period * 2;
        params.budget = Execution * 2 +1;
        if ( rtems_cbs_set_parameters( server_id, &params ) )
            printf( "ERROR: SET PARAMS FAILED\n" );
        if ( rtems_cbs_get_parameters( server_id, &tparams ) )
            printf( "ERROR: GET PARAMS FAILED\n" );
        if ( params.deadline != tparams.deadline ||
             params.budget != tparams.budget )
            printf( "ERROR: PARAMS MISMATCH\n" );

        params.deadline = Period;
        params.budget = Execution+1;
        if ( rtems_cbs_set_parameters( server_id, &params ) )
            printf( "ERROR: SET PARAMS FAILED\n" );
        if ( rtems_cbs_get_approved_budget( server_id, &approved_budget ) )
            printf( "ERROR: GET APPROVED BUDGET FAILED\n" );

        printf( "Periodic task: Approved budget\n" );
        if ( approved_budget != params.budget )
            printf( "ERROR: APPROVED BUDGET MISMATCH\n" );
    }

    status = rtems_rate_monotonic_create( argument, &rmid );
    directive_failed( status, "rtems_rate_monotonic_create" );

    /* Starting periodic behavior of the task */
    printf( "Periodic task: Starting periodic behavior\n" );
    status = rtems_task_wake_after( 1 + Phase );
    directive_failed( status, "rtems_task_wake_after" );

    while ( FOREVER ) {
        if ( rtems_rate_monotonic_period(rmid, Period) == RTEMS_TIMEOUT )
            printf( "P%" PRIdPTR " - Deadline miss\n", argument );

        rtems_clock_get( RTEMS_CLOCK_GET_TICKS_SINCE_BOOT, &start );
        printf( "P%" PRIdPTR "-S ticks:%d\n", argument, start );
        /* Stop after roughly four periods. */
        if ( start > 4*Period+Phase ) break; /* stop */

        /* active computing: busy-wait for Execution ticks, checking
           the CBS accounting invariant along the way. */
        while(FOREVER) {
            rtems_clock_get( RTEMS_CLOCK_GET_TICKS_SINCE_BOOT, &now );
            if ( now >= start + Execution ) break;

            if ( server_id != 0 ) {
                if ( rtems_cbs_get_execution_time( server_id, &exec_time, &abs_time ) )
                    printf( "ERROR: GET EXECUTION TIME FAILED\n" );
                if ( rtems_cbs_get_remaining_budget( server_id, &remaining_budget) )
                    printf( "ERROR: GET REMAINING BUDGET FAILED\n" );
                /* Consumed + remaining must never exceed the budget. */
                if ( (remaining_budget + exec_time) > (Execution + 1) ) {
                    printf( "ERROR: REMAINING BUDGET AND EXECUTION TIME MISMATCH\n" );
                    rtems_test_exit( 0 );
                }
            }
        }
        rtems_clock_get( RTEMS_CLOCK_GET_TICKS_SINCE_BOOT, &stop );
        printf( "P%" PRIdPTR "-F ticks:%d\n", argument, stop );
    }

    /* delete period and SELF */
    status = rtems_rate_monotonic_delete( rmid );
    if ( status != RTEMS_SUCCESSFUL ) {
        printf("rtems_rate_monotonic_delete failed with status of %d.\n", status);
        rtems_test_exit( 0 );
    }
    printf( "Periodic task: Deleting self\n" );
    status = rtems_task_delete( RTEMS_SELF );
    directive_failed( status, "rtems_task_delete of RTEMS_SELF" );
}
| atgreen/RTEMS | testsuites/sptests/spcbssched02/task_periodic.c | C | gpl-2.0 | 5,301 |
<?php
/**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/osl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@magentocommerce.com so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magentocommerce.com for more information.
*
* @category Mage
* @package Mage_Core
* @copyright Copyright (c) 2012 Magento Inc. (http://www.magentocommerce.com)
* @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
*/
/**
 * Session abstract class
*
* @category Mage
* @package Mage_Core
* @author Magento Core Team <core@magentocommerce.com>
*/
abstract class Mage_Core_Model_Session_Abstract_Zend extends Varien_Object
{
    /**
     * Session namespace object
     *
     * @var Zend_Session_Namespace
     */
    protected $_namespace;

    /**
     * Returns the Zend session namespace wrapped by this model
     * (NULL until init() has been called).
     *
     * @return Zend_Session_Namespace
     */
    public function getNamespace()
    {
        return $this->_namespace;
    }

    /**
     * Configures Zend_Session (save path, optional cookie settings)
     * and starts the PHP session.
     *
     * @return Mage_Core_Model_Session_Abstract_Zend
     */
    public function start()
    {
        Varien_Profiler::start(__METHOD__.'/setOptions');
        $options = array(
            'save_path'=>Mage::getBaseDir('session'),
            'use_only_cookies'=>'off',
            'throw_startup_exceptions' => E_ALL ^ E_NOTICE,
        );
        // Cookie options are only forwarded when the concrete session
        // model supplies values for them.
        if ($this->getCookieDomain()) {
            $options['cookie_domain'] = $this->getCookieDomain();
        }
        if ($this->getCookiePath()) {
            $options['cookie_path'] = $this->getCookiePath();
        }
        if ($this->getCookieLifetime()) {
            $options['cookie_lifetime'] = $this->getCookieLifetime();
        }
        Zend_Session::setOptions($options);
        Varien_Profiler::stop(__METHOD__.'/setOptions');

        /*
        Varien_Profiler::start(__METHOD__.'/setHandler');
        $sessionResource = Mage::getResourceSingleton('core/session');
        if ($sessionResource->hasConnection()) {
            Zend_Session::setSaveHandler($sessionResource);
        }
        Varien_Profiler::stop(__METHOD__.'/setHandler');
        */

        Varien_Profiler::start(__METHOD__.'/start');
        Zend_Session::start();
        Varien_Profiler::stop(__METHOD__.'/start');

        return $this;
    }

    /**
     * Initialization session namespace: starts the session if needed
     * and binds this model to a single-instance namespace.
     *
     * @param string $namespace
     * @return Mage_Core_Model_Session_Abstract_Zend
     */
    public function init($namespace)
    {
        if (!Zend_Session::sessionExists()) {
            $this->start();
        }

        Varien_Profiler::start(__METHOD__.'/init');
        $this->_namespace = new Zend_Session_Namespace($namespace, Zend_Session_Namespace::SINGLE_INSTANCE);
        Varien_Profiler::stop(__METHOD__.'/init');
        return $this;
    }

    /**
     * Redeclaration object setter: data is stored inside a
     * Varien_Object kept in the namespace, created on first use.
     *
     * @param string $key
     * @param mixed $value
     * @param bool $isChanged
     * @return Mage_Core_Model_Session_Abstract
     */
    public function setData($key, $value='', $isChanged = false)
    {
        if (!$this->_namespace->data) {
            $this->_namespace->data = new Varien_Object();
        }
        $this->_namespace->data->setData($key, $value, $isChanged);
        return $this;
    }

    /**
     * Redeclaration object getter
     *
     * @param string $var key to fetch, or null for all data
     * @param bool $clear unset the key after reading it
     * @return mixed
     */
    public function getData($var=null, $clear=false)
    {
        if (!$this->_namespace->data) {
            $this->_namespace->data = new Varien_Object();
        }

        $data = $this->_namespace->data->getData($var);

        if ($clear) {
            $this->_namespace->data->unsetData($var);
        }

        return $data;
    }

    /**
     * Clear all session data in the namespace
     *
     * @return Mage_Core_Model_Session_Abstract
     */
    public function unsetAll()
    {
        $this->_namespace->unsetAll();
        return $this;
    }

    /**
     * Retrieve current session identifier
     *
     * @return string
     */
    public function getSessionId()
    {
        return Zend_Session::getId();
    }

    /**
     * Set the session identifier (no-op when $id is null).
     *
     * @param string|null $id
     * @return Mage_Core_Model_Session_Abstract_Zend
     */
    public function setSessionId($id=null)
    {
        if (!is_null($id)) {
            Zend_Session::setId($id);
        }
        return $this;
    }

    /**
     * Regenerate session Id
     *
     * @return Mage_Core_Model_Session_Abstract_Zend
     */
    public function regenerateSessionId()
    {
        Zend_Session::regenerateId();
        return $this;
    }
}
| keegan2149/magento | sites/default/app/code/core/Mage/Core/Model/Session/Abstract/Zend.php | PHP | gpl-2.0 | 4,726 |
/* <!-- copyright */
/*
* aria2 - The high speed download utility
*
* Copyright (C) 2012 Tatsuhiro Tsujikawa
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* In addition, as a special exception, the copyright holders give
* permission to link the code of portions of this program with the
* OpenSSL library under certain conditions as described in each
* individual source file, and distribute linked combinations
* including the two.
* You must obey the GNU General Public License in all respects
* for all of the code used other than OpenSSL. If you modify
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you
* do not wish to do so, delete this exception statement from your
* version. If you delete this exception statement from all source
* files in the program, then also delete it here.
*/
/* copyright --> */
#ifndef D_NOTIFIER_H
#define D_NOTIFIER_H

#include "common.h"

#include <vector>
#include <memory>

#include <aria2/aria2.h>

namespace aria2 {

class RequestGroup;

// Observer interface for download lifecycle events (DownloadEvent is
// declared in aria2/aria2.h).
struct DownloadEventListener {
  virtual ~DownloadEventListener() {}
  virtual void onEvent(DownloadEvent event, const RequestGroup* group) = 0;
};

// Fan-out dispatcher: holds raw (non-owning) listener pointers and
// forwards each download event to every registered listener.
class Notifier {
public:
  Notifier();
  ~Notifier();
  // Registers a listener; the caller retains ownership and must keep
  // it alive for the Notifier's lifetime.
  void addDownloadEventListener(DownloadEventListener* listener);
  // Notifies the download event to all listeners.
  void notifyDownloadEvent(DownloadEvent event, const RequestGroup* group);

  // Convenience overload unwrapping a shared_ptr group.
  void notifyDownloadEvent(DownloadEvent event,
                           const std::shared_ptr<RequestGroup>& group)
  {
    notifyDownloadEvent(event, group.get());
  }

private:
  std::vector<DownloadEventListener*> listeners_;
};

} // namespace aria2

#endif // D_NOTIFIER_H
| bright-sparks/aria2 | src/Notifier.h | C | gpl-2.0 | 2,440 |
#include <sys/cdefs.h>
/* __FBSDID("$FreeBSD: src/lib/msun/src/s_llroundf.c,v 1.2 2005/04/08 00:52:27 das Exp $"); */
/*
 * llroundf() is generated from the shared implementation in
 * "s_lround.c" by defining, ahead of the include: the argument type
 * (float), the rounding helper to call (roundf), the destination
 * integer type (long long) and its range limits, and the function
 * name to emit.
 */
#define type float
#define roundit roundf
#define dtype long long
#define DTYPE_MIN LONGLONG_MIN
#define DTYPE_MAX LONGLONG_MAX
#define fn llroundf

#include "s_lround.c"
| rex-xxx/mt6572_x201 | bionic/libm/src/s_llroundf.c | C | gpl-2.0 | 294 |
# Perl hooks into the routines in vms.c for interconversion
# of VMS and Unix file specification syntax.
#
# Version: see $VERSION below
# Author: Charles Bailey bailey@newman.upenn.edu
# Revised: 08-Mar-1995
=head1 NAME
VMS::Filespec - convert between VMS and Unix file specification syntax
=head1 SYNOPSIS
use VMS::Filespec;
$fullspec = rmsexpand('[.VMS]file.specification'[, 'default:[file.spec]']);
$vmsspec = vmsify('/my/Unix/file/specification');
$unixspec = unixify('my:[VMS]file.specification');
$path = pathify('my:[VMS.or.Unix.directory]specification.dir');
$dirfile = fileify('my:[VMS.or.Unix.directory.specification]');
$vmsdir = vmspath('my/VMS/or/Unix/directory/specification.dir');
$unixdir = unixpath('my:[VMS.or.Unix.directory]specification.dir');
candelete('my:[VMS.or.Unix]file.specification');
=head1 DESCRIPTION
This package provides routines to simplify conversion between VMS and
Unix syntax when processing file specifications. This is useful when
porting scripts designed to run under either OS, and also allows you
to take advantage of conveniences provided by either syntax (I<e.g.>
ability to easily concatenate Unix-style specifications). In
addition, it provides an additional file test routine, C<candelete>,
which determines whether you have delete access to a file.
If you're running under VMS, the routines in this package are special,
in that they're automatically made available to any Perl script,
whether you're running F<miniperl> or the full F<perl>. The C<use
VMS::Filespec> or C<require VMS::Filespec; import VMS::Filespec ...>
statement can be used to import the function names into the current
package, but they're always available if you use the fully qualified
name, whether or not you've mentioned the F<.pm> file in your script.
If you're running under another OS and have installed this package, it
behaves like a normal Perl extension (in fact, you're using Perl
substitutes to emulate the necessary VMS system calls).
Each of these routines accepts a file specification in either VMS or
Unix syntax, and returns the converted file specification, or C<undef>
if an error occurs. The conversions are, for the most part, simply
string manipulations; the routines do not check the details of syntax
(e.g. that only legal characters are used). There is one exception:
when running under VMS, conversions from VMS syntax use the $PARSE
service to expand specifications, so illegal syntax, or a relative
directory specification which extends above the top of the current
directory path (e.g. [---.foo] when in dev:[dir.sub]) will cause
errors. In general, any legal file specification will be converted
properly, but garbage input tends to produce garbage output.
Each of these routines is prototyped as taking a single scalar
argument, so you can use them as unary operators in complex
expressions (as long as you don't use the C<&> form of
subroutine call, which bypasses prototype checking).
The routines provided are:
=head2 rmsexpand
Uses the RMS $PARSE and $SEARCH services to expand the input
specification to its fully qualified form, except that a null type
or version is not added unless it was present in either the original
file specification or the default specification passed to C<rmsexpand>.
(If the file does not exist, the input specification is expanded as much
as possible.) If an error occurs, returns C<undef> and sets C<$!>
and C<$^E>.
=head2 vmsify
Converts a file specification to VMS syntax.
=head2 unixify
Converts a file specification to Unix syntax.
=head2 pathify
Converts a directory specification to a path - that is, a string you
can prepend to a file name to form a valid file specification. If the
input file specification uses VMS syntax, the returned path does, too;
likewise for Unix syntax (Unix paths are guaranteed to end with '/').
Note that this routine will insist that the input be a legal directory
file specification; the file type and version, if specified, must be
F<.DIR;1>. For compatibility with Unix usage, the type and version
may also be omitted.
=head2 fileify
Converts a directory specification to the file specification of the
directory file - that is, a string you can pass to functions like
C<stat> or C<rmdir> to manipulate the directory file. If the
input directory specification uses VMS syntax, the returned file
specification does, too; likewise for Unix syntax. As with
C<pathify>, the input file specification must have a type and
version of F<.DIR;1>, or the type and version must be omitted.
=head2 vmspath
Acts like C<pathify>, but ensures the returned path uses VMS syntax.
=head2 unixpath
Acts like C<pathify>, but ensures the returned path uses Unix syntax.
=head2 candelete
Determines whether you have delete access to a file. If you do, C<candelete>
returns true. If you don't, or its argument isn't a legal file specification,
C<candelete> returns FALSE. Unlike other file tests, the argument to
C<candelete> must be a file name (not a FileHandle), and, since it's an XSUB,
it's a list operator, so you need to be careful about parentheses. Both of
these restrictions may be removed in the future if the functionality of
C<candelete> becomes part of the Perl core.
=head1 REVISION
This document was last revised 22-Feb-1996, for Perl 5.002.
=cut
package VMS::Filespec;

require 5.002;

# Module version (see the POD above for revision history).
our $VERSION = '1.11';

# If you want to use this package on a non-VMS system,
# uncomment the following line.
# use AutoLoader;

require Exporter;
@ISA = qw( Exporter );
# All conversion routines are exported by default.  On VMS these names are
# XSUBs built into perl; the pure-Perl fallbacks live after __END__.
@EXPORT = qw( &vmsify &unixify &pathify &fileify
              &vmspath &unixpath &candelete &rmsexpand );

1;
__END__
# The autosplit routines here are provided for use by non-VMS systems
# They are not guaranteed to function identically to the XSUBs of the
# same name, since they do not have access to the RMS system routine
# sys$parse() (in particular, no real provision is made for handling
# of complex DECnet node specifications). However, these routines
# should be adequate for most purposes.
# A sort-of sys$parse() replacement
# Emulate RMS $PARSE: expand $fspec toward a full VMS file specification,
# filling any missing node/device/directory/name/type/version fields first
# from $defaults (a string or array ref of specs) and then from
# $ENV{'DEFAULT'}.  Returns undef for an empty input.
sub rmsexpand ($;$) {
    my($fspec,$defaults) = @_;
    if (!$fspec) { return undef }
    my($node,$dev,$dir,$name,$type,$ver,$dnode,$ddev,$ddir,$dname,$dtype,$dver);

    $fspec =~ s/:$//;
    # Normalize $defaults to an array ref so we can iterate uniformly below.
    $defaults = [] unless $defaults;
    $defaults = [ $defaults ] unless ref($defaults) && ref($defaults) eq 'ARRAY';
    # Iteratively translate a bare logical name (no device/dir punctuation).
    while ($fspec !~ m#[:>\]]# && $ENV{$fspec}) { $fspec = $ENV{$fspec} }
    if ($fspec =~ /:/) {
        # Translate the device portion through the logical name table.  A
        # translation ending in ".]"/".>" is a rooted (concealed) device,
        # in which case the original name is kept with a ':' appended.
        my($dev,$devtrn,$base);
        ($dev,$base) = split(/:/,$fspec);
        $devtrn = $dev;
        while ($devtrn = $ENV{$devtrn}) {
            if ($devtrn =~ /(.)([:>\]])$/) {
                $dev .= ':', last if $1 eq '.';
                $dev = $devtrn, last;
            }
        }
        $fspec = $dev . $base;
    }
    # Split the spec into its six RMS fields (any of which may be empty).
    ($node,$dev,$dir,$name,$type,$ver) = $fspec =~
        /([^:]*::)?([^:]*:)?([^>\]]*[>\]])?([^.;]*)(\.?[^.;]*)([.;]?\d*)/;
    # Fill still-missing fields from each default spec in turn, ending with
    # the process-default directory.
    foreach ((@$defaults,$ENV{'DEFAULT'})) {
        next unless defined;
        last if $node && $ver && $type && $dev && $dir && $name;
        ($dnode,$ddev,$ddir,$dname,$dtype,$dver) =
            /([^:]*::)?([^:]*:)?([^>\]]*[>\]])?([^.;]*)(\.?[^.;]*)([.;]?\d*)/;
        $node = $dnode if $dnode && !$node;
        $dev = $ddev if $ddev && !$dev;
        $dir = $ddir if $ddir && !$dir;
        $name = $dname if $dname && !$name;
        $type = $dtype if $dtype && !$type;
        $ver = $dver if $dver && !$ver;
    }
    # do this the long way to keep -w happy
    $fspec = '';
    $fspec .= $node if $node;
    $fspec .= $dev if $dev;
    $fspec .= $dir if $dir;
    $fspec .= $name if $name;
    $fspec .= $type if $type;
    $fspec .= $ver if $ver;
    $fspec;
}
# Convert a Unix-syntax file specification to VMS syntax.  Specs containing
# no '/' are returned unchanged.
sub vmsify ($) {
    my($fspec) = @_;
    my($hasdev,$dev,$defdirs,$dir,$base,@dirs,@realdirs);

    # '.' and '..' (with optional trailing slash) map to [] and [-].
    if ($fspec =~ m#^\.(\.?)/?$#) { return $1 ? '[-]' : '[]'; }
    return $fspec if $fspec !~ m#/#;
    ($hasdev,$dir,$base) = $fspec =~ m#(/?)(.*)/(.*)#;
    @dirs = split(m#/#,$dir);
    if ($base eq '.') { $base = ''; }
    elsif ($base eq '..') {
        push @dirs,$base;
        $base = '';
    }
    # Drop '.' components and turn '..' into the VMS up-level marker '-',
    # cancelling a '..' against a preceding real directory when possible.
    foreach (@dirs) {
        next unless $_;  # protect against // in input
        next if $_ eq '.';
        if ($_ eq '..') {
            if (@realdirs && $realdirs[$#realdirs] ne '-') { pop @realdirs }
            else { push @realdirs, '-' }
        }
        else { push @realdirs, $_; }
    }
    if ($hasdev) {
        # Absolute path: the first component is the device; an empty
        # directory list means the master file directory [000000].
        $dev = shift @realdirs;
        @realdirs = ('000000') unless @realdirs;
        $base = '' unless $base;  # keep -w happy
        $dev . ':[' . join('.',@realdirs) . "]$base";
    }
    else {
        # Relative path: each real dir gets a leading '.', '-' stands alone.
        '[' . join('',map($_ eq '-' ? $_ : ".$_",@realdirs)) . "]$base";
    }
}
# Convert a VMS-syntax file specification to Unix syntax.  Specs without
# VMS punctuation are returned unchanged.
sub unixify ($) {
    my($fspec) = @_;

    return $fspec if $fspec !~ m#[:>\]]#;
    return '.' if ($fspec eq '[]' || $fspec eq '<>');
    if ($fspec =~ m#^[<\[](\.|-+)(.*)# ) {
        # Relative directory spec: '[.' means the current directory and is
        # dropped; a leading run of '-' climbs one level per minus sign.
        $fspec = ($1 eq '.' ? '' : "$1.") . $2;
        my($dir,$base) = split(/[\]>]/,$fspec);
        my(@dirs) = grep($_,split(m#\.#,$dir));
        if ($dirs[0] =~ /^-/) {
            my($steps) = shift @dirs;
            for (1..length($steps)) { unshift @dirs, '..'; }
        }
        join('/',@dirs) . "/$base";
    }
    else {
        # Absolute spec: let rmsexpand fill in missing pieces against a
        # recognizable sentinel default, then strip whatever the sentinel
        # contributed before splitting into device/dirs/base.
        $fspec = rmsexpand($fspec,'_N_O_T_:[_R_E_A_L_]');
        $fspec =~ s/.*_N_O_T_:(?:\[_R_E_A_L_\])?//;
        my($dev,$dir,$base) = $fspec =~ m#([^:<\[]*):?[<\[](.*)[>\]](.*)#;
        my(@dirs) = split(m#\.#,$dir);
        if ($dirs[0] && $dirs[0] =~ /^-/) {
            my($steps) = shift @dirs;
            for (1..length($steps)) { unshift @dirs, '..'; }
        }
        "/$dev/" . join('/',@dirs) . "/$base";
    }
}
# Convert a directory spec into the file spec of the directory file itself
# (something stat() or rmdir() can operate on).  Returns undef if the input
# carries a file type other than .DIR (optionally ;1).
sub fileify ($) {
    my($path) = @_;

    if (!$path) { return undef }
    # The Unix root maps to the master file directory of the default disk.
    if ($path eq '/') { return 'sys$disk:[000000]'; }
    # Strip a trailing type, insisting it be "dir" (optionally ";1").
    if ($path =~ /(.+)\.([^:>\]]*)$/) {
        $path = $1;
        if ($2 !~ /^dir(?:;1)?$/i) { return undef }
    }
    if ($path !~ m#[/>\]]#) {
        # Bare name: translate logical names until real syntax appears.
        $path =~ s/:$//;
        while ($ENV{$path}) {
            ($path = $ENV{$path}) =~ s/:$//;
            last if $path =~ m#[/>\]]#;
        }
    }
    if ($path =~ m#[>\]]#) {
        my($dir,$sep,$base) = $path =~ /(.*)([>\]])(.*)/;
        $sep =~ tr/<[/>]/;
        if ($base) {
            "$dir$sep$base.dir;1";
        }
        else {
            # Directory-only spec: hoist the last directory component out
            # of the brackets so it becomes the file name.
            if ($dir !~ /\./) { $dir =~ s/([<\[])/${1}000000./; }
            $dir =~ s#\.(\w+)$#$sep$1#;
            $dir =~ s/^.$sep//;
            "$dir.dir;1";
        }
    }
    else {
        $path =~ s#/$##;
        "$path.dir;1";
    }
}
# Convert a directory spec into a path string that can be prepended to a
# file name.  VMS input yields a VMS path, Unix input a Unix path ending in
# '/'.  Returns undef if the input carries a type other than .DIR;1.
sub pathify ($) {
    my($fspec) = @_;

    if (!$fspec) { return undef }
    # Already a path (ends in a separator): return as-is.
    if ($fspec =~ m#[/>\]]$#) { return $fspec; }
    # Strip a trailing type, insisting it be "dir" (optionally ";1").
    if ($fspec =~ m#(.+)\.([^/>\]]*)$# && $2 && $2 ne '.') {
        $fspec = $1;
        if ($2 !~ /^dir(?:;1)?$/i) { return undef }
    }
    if ($fspec !~ m#[/>\]]#) {
        # Bare name: translate logical names until real syntax appears.
        $fspec =~ s/:$//;
        while ($ENV{$fspec}) {
            if ($ENV{$fspec} =~ m#[>\]]$#) { return $ENV{$fspec} }
            # BUGFIX: the original wrote
            #     $fspec = $ENV{$fspec} =~ s/:$//
            # which performs the substitution on %ENV itself and assigns
            # the s/// success count to $fspec.  Copy first, then strip the
            # trailing colon (same idiom as fileify()).
            else { ($fspec = $ENV{$fspec}) =~ s/:$//; }
        }
    }
    if ($fspec !~ m#[>\]]#) { "$fspec/"; }
    else {
        # Turn dev:[a.b]c into dev:[a.b.c] so a file name can follow.
        if ($fspec =~ /([^>\]]+)([>\]])(.+)/) { "$1.$3$2"; }
        else { $fspec; }
    }
}
# Force a VMS-syntax path: normalize the spec to VMS first, then pathify.
sub vmspath ($) {
    my($spec) = @_;
    return pathify(vmsify($spec));
}
# Force a Unix-syntax path: normalize the spec to Unix first, then pathify.
sub unixpath ($) {
    my($spec) = @_;
    return pathify(unixify($spec));
}
# Return true if the file is deletable: the file itself must be writable
# and so must its parent directory (deletion modifies the directory file).
sub candelete ($) {
    my($fspec) = @_;
    my($parent);

    return '' unless -w $fspec;
    $fspec =~ s#/$##;
    if ($fspec =~ m#/#) {
        # Unix syntax: test the containing directory.
        ($parent = $fspec) =~ s#/[^/]+$##;
        return (-w $parent);
    }
    elsif ($parent = fileify($fspec)) { # fileify() here to expand lnms
        # VMS syntax: drop the last directory level and test that
        # directory's own file.
        $parent =~ s/[>\]][^>\]]+//;
        return (-w fileify($parent));
    }
    else { return (-w '[-]'); }
}
| rhuitl/uClinux | user/perl/vms/ext/Filespec.pm | Perl | gpl-2.0 | 11,218 |
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
/*
 * Per-CPU reader/writer semaphore: readers increment a per-CPU counter
 * (no shared cacheline in the fast path); writers set 'locked', wait for
 * the counters to drain, and serialize on 'mtx'.
 */
struct percpu_rw_semaphore {
	unsigned __percpu *counters;	/* per-CPU count of active readers */
	bool locked;		/* writer present: readers must take mtx */
	struct mutex mtx;	/* serializes writers and slow-path readers */
};
/*
 * Reader lock.  Fast path: under RCU, bump this CPU's counter.  If a
 * writer is active ('locked' set), fall back to taking the mutex so the
 * reader queues behind the writer before counting itself.
 */
static inline void percpu_down_read(struct percpu_rw_semaphore *p)
{
	rcu_read_lock();
	if (unlikely(p->locked)) {
		/* Slow path: serialize against the writer via the mutex. */
		rcu_read_unlock();
		mutex_lock(&p->mtx);
		this_cpu_inc(*p->counters);
		mutex_unlock(&p->mtx);
		return;
	}
	this_cpu_inc(*p->counters);
	rcu_read_unlock();
}
/* Reader unlock: drop this CPU's counter with release ordering. */
static inline void percpu_up_read(struct percpu_rw_semaphore *p)
{
	/*
	 * On X86, write operation in this_cpu_dec serves as a memory unlock
	 * barrier (i.e. memory accesses may be moved before the write, but
	 * no memory accesses are moved past the write).
	 * On other architectures this may not be the case, so we need smp_mb()
	 * there.
	 */
#if defined(CONFIG_X86) && (!defined(CONFIG_X86_PPRO_FENCE) && !defined(CONFIG_X86_OOSTORE))
	barrier();
#else
	smp_mb();
#endif
	this_cpu_dec(*p->counters);
}
/* Sum the per-CPU reader counters (only exact once new readers divert). */
static inline unsigned __percpu_count(unsigned __percpu *counters)
{
	unsigned total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += ACCESS_ONCE(*per_cpu_ptr(counters, cpu));

	return total;
}
/*
 * Writer lock: take the mutex, set 'locked' so new readers divert to the
 * slow path, wait one RCU grace period so every fast-path reader is
 * accounted for, then sleep-poll until the reader count drains to zero.
 */
static inline void percpu_down_write(struct percpu_rw_semaphore *p)
{
	mutex_lock(&p->mtx);
	p->locked = true;
	synchronize_rcu();
	while (__percpu_count(p->counters))
		msleep(1);
	smp_rmb(); /* paired with smp_mb() in percpu_sem_up_read() */
}
/* Writer unlock: clear the reader-divert flag and release the mutex. */
static inline void percpu_up_write(struct percpu_rw_semaphore *p)
{
	p->locked = false;
	mutex_unlock(&p->mtx);
}
/* Initialize the semaphore; returns -ENOMEM if the counters can't be allocated. */
static inline int percpu_init_rwsem(struct percpu_rw_semaphore *p)
{
	p->counters = alloc_percpu(unsigned);
	if (unlikely(!p->counters))
		return -ENOMEM;
	p->locked = false;
	mutex_init(&p->mtx);
	return 0;
}
/* Release the per-CPU counters. */
static inline void percpu_free_rwsem(struct percpu_rw_semaphore *p)
{
	free_percpu(p->counters);
	p->counters = NULL; /* catch use after free bugs */
}
#endif
| ricardon/omap-audio | include/linux/percpu-rwsem.h | C | gpl-2.0 | 1,985 |
/*
* MD218A voice coil motor driver
*
*
*/
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <asm/atomic.h>
#include "OV8827AF.h"
#include "../camera/kd_camera_hw.h"
#define LENS_I2C_BUSNUM 1	/* I2C bus the lens VCM sits on */

/* Board info used to announce the lens device on the I2C bus at init. */
static struct i2c_board_info __initdata kd_lens_dev={ I2C_BOARD_INFO("OV8827AF", 0x6e)};

#define OV8827AF_DRVNAME "OV8827AF"
#define OV8827AF_VCM_WRITE_ID 0x6c	/* I2C slave id used for register access */

#define OV8827AF_DEBUG
#ifdef OV8827AF_DEBUG
#define OV8827AFDB printk
#else
#define OV8827AFDB(x,...)
#endif

/* Protects the module-wide motor state declared below. */
static spinlock_t g_OV8827AF_SpinLock;

/* Low-level register accessors provided by the camera HW layer. */
extern int iReadReg(u16 a_u2Addr , u8 * a_puBuff , u16 i2cId);
extern int iWriteReg(u16 a_u2Addr , u32 a_u4Data , u32 a_u4Bytes , u16 i2cId);
#define OV8827AF_write_cmos_sensor(addr, para) iWriteReg((u16) addr , (u32) para , 1, OV8827AF_VCM_WRITE_ID)
/*
 * Read one byte from sensor register 'addr'.  iReadReg's return value is
 * ignored; get_byte keeps its 0 initializer if nothing is read.
 */
kal_uint16 OV8827AF_read_cmos_sensor(kal_uint32 addr)
{
	kal_uint16 get_byte=0;
	iReadReg((u16) addr ,(u8*)&get_byte,OV8827AF_VCM_WRITE_ID);
	return get_byte;
}
static struct i2c_client * g_pstOV8827AF_I2Cclient = NULL;	/* bound at i2c probe */

/* Character-device bookkeeping. */
static dev_t g_OV8827AF_devno;
static struct cdev * g_pOV8827AF_CharDrv = NULL;
static struct class *actuator_class = NULL;

/* Motor state, guarded by g_OV8827AF_SpinLock. */
static int g_s4OV8827AF_Opened = 0;	/* 0=closed, 1=opened, 2=position seeded */
static long g_i4MotorStatus = 0;	/* 1 is reported as "moving"; -1 = I2C error */
static long g_i4Dir = 0;		/* last commanded direction: +1 / -1 */
static unsigned long g_u4OV8827AF_INF = 0;	/* soft travel limits checked by move */
static unsigned long g_u4OV8827AF_MACRO = 1023;
static unsigned long g_u4TargetPosition = 0;
static unsigned long g_u4CurrPosition = 0;
static int g_sr = 3;	/* slew-rate tuning value; written here but not read by the HW path */

/* GPIO helpers from the platform layer (unused in this file). */
extern s32 mt_set_gpio_mode(u32 u4Pin, u32 u4Mode);
extern s32 mt_set_gpio_out(u32 u4Pin, u32 u4PinOut);
extern s32 mt_set_gpio_dir(u32 u4Pin, u32 u4Dir);
/*
 * Read back the current VCM position: low byte in 0x3618, high byte in
 * 0x3619; the >>4 undoes the <<4 encoding used by s4OV8827AF_WriteReg.
 * Always returns 0.
 */
static int s4OV8827AF_ReadReg(unsigned short * a_pu2Result)
{
	int temp = 0;
	//char pBuff[2];
	temp = (OV8827AF_read_cmos_sensor(0x3618)+ (OV8827AF_read_cmos_sensor(0x3619)<<8))>>4;
	*a_pu2Result = temp;
	OV8827AFDB("s4OV8827AF_ReadReg = %d \n", temp);
	return 0;
}
/*
 * Program a new VCM target position.  The position is shifted left by 4
 * and split across 0x3619 (high) / 0x3618 (low); the low 4 bits carry a
 * slew-rate field, left at 0 here.  Note the debug text mentioning "+8"
 * does not match the code, which adds 0.  Always returns 0.
 */
static int s4OV8827AF_WriteReg(u16 a_u2Data)
{
	u16 temp,SlewRate=0;
	OV8827AFDB("s4OV8827AF_WriteReg = %d \n", a_u2Data);
	temp=(a_u2Data<<4)+0+SlewRate;
	OV8827AFDB("-----stemp=(a_u2Data<<4)+8+SlewRate = %d----- \n", temp);
	OV8827AF_write_cmos_sensor(0x3619,(temp>>8)&0xff);
	OV8827AF_write_cmos_sensor(0x3618,temp&0xff);
	return 0;
}
/*
 * Copy a snapshot of the motor state (travel limits, current position,
 * moving/open flags) into the user-space structure.  Always returns 0; a
 * failed copy_to_user is only logged.
 */
inline static int getOV8827AFInfo(__user stOV8827AF_MotorInfo * pstMotorInfo)
{
	stOV8827AF_MotorInfo info;

	info.u4MacroPosition   = g_u4OV8827AF_MACRO;
	info.u4InfPosition     = g_u4OV8827AF_INF;
	info.u4CurrentPosition = g_u4CurrPosition;
	info.bIsMotorMoving    = (g_i4MotorStatus == 1) ? 1 : 0;
	info.bIsMotorOpen      = (g_s4OV8827AF_Opened >= 1) ? 1 : 0;

	if (copy_to_user(pstMotorInfo, &info, sizeof(stOV8827AF_MotorInfo)))
	{
		OV8827AFDB("[OV8827AF] copy to user failed when getting motor information \n");
	}
	return 0;
}
/*
 * Move the VCM to a_u4Position, which must lie within [INF, MACRO].  On
 * the first move after open, the cached position is seeded from hardware.
 * The register write is fire-and-forget: on success the cached position
 * is updated, otherwise g_i4MotorStatus is set to -1.  Returns -EINVAL
 * for an out-of-range target, 0 otherwise.
 */
inline static int moveOV8827AF(unsigned long a_u4Position)
{
	int ret = 0;

	/* Reject targets outside the soft travel limits. */
	if((a_u4Position > g_u4OV8827AF_MACRO) || (a_u4Position < g_u4OV8827AF_INF))
	{
		OV8827AFDB("[OV8827AF] out of range \n");
		return -EINVAL;
	}

	if (g_s4OV8827AF_Opened == 1)
	{
		/* First move since open: seed the cached position from HW. */
		unsigned short InitPos;
		ret = s4OV8827AF_ReadReg(&InitPos);
		spin_lock(&g_OV8827AF_SpinLock);
		if(ret == 0)
		{
			OV8827AFDB("[OV8827AF] Init Pos %6d \n", InitPos);
			g_u4CurrPosition = (unsigned long)InitPos;
		}
		else
		{
			g_u4CurrPosition = 0;
		}
		g_s4OV8827AF_Opened = 2;
		spin_unlock(&g_OV8827AF_SpinLock);
	}

	/* Record the direction of travel (g_i4Dir is not read in this file). */
	if (g_u4CurrPosition < a_u4Position)
	{
		spin_lock(&g_OV8827AF_SpinLock);
		g_i4Dir = 1;
		spin_unlock(&g_OV8827AF_SpinLock);
	}
	else if (g_u4CurrPosition > a_u4Position)
	{
		spin_lock(&g_OV8827AF_SpinLock);
		g_i4Dir = -1;
		spin_unlock(&g_OV8827AF_SpinLock);
	}
	else {return 0;}	/* already at the target */

	spin_lock(&g_OV8827AF_SpinLock);
	g_u4TargetPosition = a_u4Position;
	spin_unlock(&g_OV8827AF_SpinLock);

	OV8827AFDB("[OV8827AF] move [curr] %d [target] %d\n", g_u4CurrPosition, g_u4TargetPosition);

	spin_lock(&g_OV8827AF_SpinLock);
	g_sr = 3;
	g_i4MotorStatus = 0;
	spin_unlock(&g_OV8827AF_SpinLock);

	if(s4OV8827AF_WriteReg((unsigned short)g_u4TargetPosition) == 0)
	{
		spin_lock(&g_OV8827AF_SpinLock);
		g_u4CurrPosition = (unsigned long)g_u4TargetPosition;
		spin_unlock(&g_OV8827AF_SpinLock);
	}
	else
	{
		OV8827AFDB("[OV8827AF] set I2C failed when moving the motor \n");
		spin_lock(&g_OV8827AF_SpinLock);
		g_i4MotorStatus = -1;
		spin_unlock(&g_OV8827AF_SpinLock);
	}

	return 0;
}
/* Set the infinity-end soft limit used by moveOV8827AF() range checks. */
inline static int setOV8827AFInf(unsigned long a_u4Position)
{
	spin_lock(&g_OV8827AF_SpinLock);
	g_u4OV8827AF_INF = a_u4Position;
	spin_unlock(&g_OV8827AF_SpinLock);
	return 0;
}
/* Set the macro-end soft limit used by moveOV8827AF() range checks. */
inline static int setOV8827AFMacro(unsigned long a_u4Position)
{
	spin_lock(&g_OV8827AF_SpinLock);
	g_u4OV8827AF_MACRO = a_u4Position;
	spin_unlock(&g_OV8827AF_SpinLock);
	return 0;
}
////////////////////////////////////////////////////////////////
/*
 * ioctl dispatcher: query motor info, move to a position, or set the soft
 * travel limits.  Unknown commands return -EPERM.
 */
static long OV8827AF_Ioctl(
	struct file * a_pstFile,
	unsigned int a_u4Command,
	unsigned long a_u4Param)
{
	long i4RetValue = 0;

	switch(a_u4Command)
	{
	case OV8827AFIOC_G_MOTORINFO :
		i4RetValue = getOV8827AFInfo((__user stOV8827AF_MotorInfo *)(a_u4Param));
		break;
	case OV8827AFIOC_T_MOVETO :
		i4RetValue = moveOV8827AF(a_u4Param);
		break;
	case OV8827AFIOC_T_SETINFPOS :
		i4RetValue = setOV8827AFInf(a_u4Param);
		break;
	case OV8827AFIOC_T_SETMACROPOS :
		i4RetValue = setOV8827AFMacro(a_u4Param);
		break;
	default :
		OV8827AFDB("[OV8827AF] No CMD \n");
		i4RetValue = -EPERM;
		break;
	}
	return i4RetValue;
}
//Main jobs:
// 1.check for device-specified errors, device not ready.
// 2.Initialize the device if it is opened for the first time.
// 3.Update f_op pointer.
// 4.Fill data structures into private_data
//CAM_RESET
/* Single-open device: return -EBUSY if already open, else mark it opened. */
static int OV8827AF_Open(struct inode * a_pstInode, struct file * a_pstFile)
{
	spin_lock(&g_OV8827AF_SpinLock);
	if(g_s4OV8827AF_Opened)
	{
		spin_unlock(&g_OV8827AF_SpinLock);
		OV8827AFDB("[OV8827AF] the device is opened \n");
		return -EBUSY;
	}
	g_s4OV8827AF_Opened = 1;
	spin_unlock(&g_OV8827AF_SpinLock);
	return 0;
}
//Main jobs:
// 1.Deallocate anything that "open" allocated in private_data.
// 2.Shut down the device on last close.
// 3.Only called once on last time.
// Q1 : Try release multiple times.
static int OV8827AF_Release(struct inode * a_pstInode, struct file * a_pstFile)
{
if (g_s4OV8827AF_Opened)
{
OV8827AFDB("[OV8827AF] feee \n");
g_sr = 5;
if (g_u4CurrPosition > 700) {
s4OV8827AF_WriteReg(700);
msleep(3);
}
if (g_u4CurrPosition > 600) {
s4OV8827AF_WriteReg(600);
msleep(3);
}
if (g_u4CurrPosition > 500) {
s4OV8827AF_WriteReg(500);
msleep(3);
}
if (g_u4CurrPosition > 400) {
s4OV8827AF_WriteReg(400);
msleep(3);
}
if (g_u4CurrPosition > 300) {
s4OV8827AF_WriteReg(300);
msleep(3);
}
if (g_u4CurrPosition > 200) {
s4OV8827AF_WriteReg(200);
msleep(3);
}
if (g_u4CurrPosition > 100) {
s4OV8827AF_WriteReg(100);
msleep(3);
}
spin_lock(&g_OV8827AF_SpinLock);
g_s4OV8827AF_Opened = 0;
spin_unlock(&g_OV8827AF_SpinLock);
}
return 0;
}
/* Char-device entry points; ioctl is unlocked (driver does its own locking). */
static const struct file_operations g_stOV8827AF_fops =
{
	.owner = THIS_MODULE,
	.open = OV8827AF_Open,
	.release = OV8827AF_Release,
	.unlocked_ioctl = OV8827AF_Ioctl
};
/*
 * Register the character device: allocate a dev_t, set up the cdev, and
 * create the "actuatordrv3" class plus the OV8827AF device node.
 *
 * Fixes over the original:
 *  - device_create() returns an ERR_PTR (never NULL) on failure, so test
 *    it with IS_ERR() instead of comparing against NULL;
 *  - every error path now unwinds what was set up before it (cdev, device
 *    region, class), instead of leaking them.
 */
inline static int Register_OV8827AF_CharDrv(void)
{
	struct device* vcm_device = NULL;

	/* Allocate char driver no. */
	if( alloc_chrdev_region(&g_OV8827AF_devno, 0, 1,OV8827AF_DRVNAME) )
	{
		OV8827AFDB("[OV8827AF] Allocate device no failed\n");
		return -EAGAIN;
	}

	/* Allocate driver */
	g_pOV8827AF_CharDrv = cdev_alloc();
	if(NULL == g_pOV8827AF_CharDrv)
	{
		unregister_chrdev_region(g_OV8827AF_devno, 1);
		OV8827AFDB("[OV8827AF] Allocate mem for kobject failed\n");
		return -ENOMEM;
	}

	/* Attach file operations and add the device to the system. */
	cdev_init(g_pOV8827AF_CharDrv, &g_stOV8827AF_fops);
	g_pOV8827AF_CharDrv->owner = THIS_MODULE;

	if(cdev_add(g_pOV8827AF_CharDrv, g_OV8827AF_devno, 1))
	{
		OV8827AFDB("[OV8827AF] Attatch file operation failed\n");
		cdev_del(g_pOV8827AF_CharDrv);
		unregister_chrdev_region(g_OV8827AF_devno, 1);
		return -EAGAIN;
	}

	actuator_class = class_create(THIS_MODULE, "actuatordrv3");
	if (IS_ERR(actuator_class)) {
		int ret = PTR_ERR(actuator_class);
		OV8827AFDB("Unable to create class, err = %d\n", ret);
		cdev_del(g_pOV8827AF_CharDrv);
		unregister_chrdev_region(g_OV8827AF_devno, 1);
		return ret;
	}

	vcm_device = device_create(actuator_class, NULL, g_OV8827AF_devno, NULL, OV8827AF_DRVNAME);
	if (IS_ERR(vcm_device))
	{
		class_destroy(actuator_class);
		cdev_del(g_pOV8827AF_CharDrv);
		unregister_chrdev_region(g_OV8827AF_devno, 1);
		return -EIO;
	}

	return 0;
}
/*
 * Tear down the char device.  NOTE(review): the device node and class are
 * destroyed after cdev_del()/unregister_chrdev_region(); the conventional
 * order is the reverse -- confirm intent before reordering.
 */
inline static void Unregister_OV8827AF_CharDrv(void)
{
	//Release char driver
	cdev_del(g_pOV8827AF_CharDrv);
	unregister_chrdev_region(g_OV8827AF_devno, 1);
	device_destroy(actuator_class, g_OV8827AF_devno);
	class_destroy(actuator_class);
}
//////////////////////////////////////////////////////////////////////
/* I2C driver plumbing: forward declarations, id table and driver descriptor. */
static int OV8827AF_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id);
static int OV8827AF_i2c_remove(struct i2c_client *client);
static const struct i2c_device_id OV8827AF_i2c_id[] = {{OV8827AF_DRVNAME,0},{}};
struct i2c_driver OV8827AF_i2c_driver = {
	.probe = OV8827AF_i2c_probe,
	.remove = OV8827AF_i2c_remove,
	.driver.name = OV8827AF_DRVNAME,
	.id_table = OV8827AF_i2c_id,
};
#if 0
static int OV8827AF_i2c_detect(struct i2c_client *client, int kind, struct i2c_board_info *info) {
strcpy(info->type, OV8827AF_DRVNAME);
return 0;
}
#endif
/* No per-client resources to release on i2c remove. */
static int OV8827AF_i2c_remove(struct i2c_client *client) {
	return 0;
}
/* Kirby: add new-style driver {*/
/*
 * I2C probe: remember the client, register the character device, and
 * initialize the state spinlock.
 */
static int OV8827AF_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	int i4RetValue = 0;

	OV8827AFDB("[OV8827AF] Attach I2C \n");

	g_pstOV8827AF_I2Cclient = client;
	//g_pstOV8827AF_I2Cclient->addr = g_pstOV8827AF_I2Cclient->addr >> 1;

	//Register char driver
	i4RetValue = Register_OV8827AF_CharDrv();
	if(i4RetValue){
		OV8827AFDB("[OV8827AF] register char device failed!\n");
		return i4RetValue;
	}

	spin_lock_init(&g_OV8827AF_SpinLock);

	OV8827AFDB("[OV8827AF] Attached!! \n");
	return 0;
}
/* Platform probe: register the companion I2C driver. */
static int OV8827AF_probe(struct platform_device *pdev)
{
	return i2c_add_driver(&OV8827AF_i2c_driver);
}
/* Platform remove: unregister the companion I2C driver. */
static int OV8827AF_remove(struct platform_device *pdev)
{
	i2c_del_driver(&OV8827AF_i2c_driver);
	return 0;
}
/* Suspend: nothing to save; the VCM is reprogrammed on the next move. */
static int OV8827AF_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	return 0;
}
/* Resume: nothing to restore. */
static int OV8827AF_resume(struct platform_device *pdev)
{
	return 0;
}
// platform structure
/* Platform driver descriptor; bound to the "lens_actuator3" device below. */
static struct platform_driver g_stOV8827AF_Driver = {
	.probe		= OV8827AF_probe,
	.remove	= OV8827AF_remove,
	.suspend	= OV8827AF_suspend,
	.resume	= OV8827AF_resume,
	.driver		= {
		.name	= "lens_actuator3",
		.owner	= THIS_MODULE,
	}
};
/* Platform device registered at module init so the driver above binds. */
static struct platform_device actuator_dev3 = {
	.name = "lens_actuator3",
	.id = -1,
};
/*
 * Module init: announce the lens I2C board info, then register the
 * platform device and driver pair.
 *
 * Fix: on platform_driver_register() failure the previously registered
 * platform device is now unregistered, so a failed load does not leak it.
 */
static int __init OV8827AF_i2C_init(void)
{
	i2c_register_board_info(LENS_I2C_BUSNUM, &kd_lens_dev, 1);

	platform_device_register(&actuator_dev3);

	if(platform_driver_register(&g_stOV8827AF_Driver)){
		OV8827AFDB("failed to register OV8827AF driver\n");
		platform_device_unregister(&actuator_dev3);
		return -ENODEV;
	}

	return 0;
}
/*
 * Module exit: unregister the platform driver and -- fixing a leak in the
 * original -- also the platform device registered in init.
 */
static void __exit OV8827AF_i2C_exit(void)
{
	platform_driver_unregister(&g_stOV8827AF_Driver);
	platform_device_unregister(&actuator_dev3);
}
module_init(OV8827AF_i2C_init);
module_exit(OV8827AF_i2C_exit);
MODULE_DESCRIPTION("OV8827AF lens module driver");
MODULE_AUTHOR("KY Chen <ky.chen@Mediatek.com>");
MODULE_LICENSE("GPL");
| GuneetAtwal/Blaze.Kernel-MT6589 | mediatek/custom/wiko1/kernel/lens/ov8827af/OV8827AF.c | C | gpl-2.0 | 13,067 |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "gfxhub_v1_0.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "gc/gc_9_0_default.h"
#include "vega10_enum.h"
#include "soc15_common.h"
/* Return the MC framebuffer offset in bytes (the register holds 16MB units). */
u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24;
}
/* Program the page-directory base address registers for the given VMID. */
void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
				  uint64_t page_table_base)
{
	/* two registers distance between mmVM_CONTEXT0_* to mmVM_CONTEXT1_* */
	int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
			- mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;

	WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    offset * vmid, lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    offset * vmid, upper_32_bits(page_table_base));
}
/*
 * Point VM context 0 (the GART context) at the GART page table and program
 * its address range.  Addresses are split into a >>12 low half and a >>44
 * high half.
 */
static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}
/*
 * Program the AGP aperture, the system aperture window (covering FB and
 * AGP), the default page used for scratch, and the protection-fault
 * default page.
 */
static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;

	/* Program the AGP BAR */
	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
	WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

	/* Program the system aperture low logical page number. */
	WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			 min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

	if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				 max((adev->gmc.fb_end >> 18) + 0x1,
				     adev->gmc.agp_end >> 18));
	else
		WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

	/* Set default page address. */
	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
		+ adev->vm_manager.vram_base_offset;
	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault". */
	WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
		       ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
}
/*
 * Configure the L1 TLB: enable it, select system access mode 3 and the
 * advanced driver model, and set MTYPE/ATC for system accesses.
 */
static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC);/* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

	WREG32_SOC15_RLC(GC, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
}
/*
 * Configure the VM L2 cache and invalidate it.  Bank select and big-K
 * fragment size depend on adev->gmc.translate_further.
 */
static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	/* XXX for emulation, Refer to closed source code.*/
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
			    0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL2, tmp);

	tmp = mmVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL3, tmp);

	tmp = mmVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL4, tmp);
}
/* Enable VM context 0 (system domain) with page-table depth 0 and no retry. */
static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp);
}
/*
 * Disable the context-1 identity aperture by programming an empty range
 * (low bound above high bound) and a zero physical offset.
 */
static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0XFFFFFFFF);
	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
		     0);
	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
		     0);

	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
	WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
}
/*
 * Program VM contexts 1..15 (the per-process GPUVM contexts): page-table
 * depth and block size, every protection-fault default enabled, and the
 * full [0, max_pfn) address range for each context.
 */
static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
	unsigned num_level, block_size;
	uint32_t tmp;
	int i;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    block_size);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !amdgpu_noretry);
		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i, tmp);
		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i*2,
			lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i*2,
			upper_32_bits(adev->vm_manager.max_pfn - 1));
	}
}
/*
 * gfxhub_v1_0_program_invalidation - set up the VM invalidation engines
 * @adev: amdgpu device pointer
 *
 * Writes the widest possible address range (all-ones LO32, 0x1f HI32) into
 * the range registers of all 18 invalidation engines, so an invalidation
 * request flushes the whole address space.  LO32/HI32 registers are
 * interleaved per engine, hence the stride of 2.
 */
static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0 ; i < 18; ++i) {
		WREG32_SOC15_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    2 * i, 0xffffffff);
		WREG32_SOC15_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    2 * i, 0x1f);
	}
}
/*
 * gfxhub_v1_0_gart_enable - enable GART translation on the GFX hub
 * @adev: amdgpu device pointer
 *
 * Programs the framebuffer location (SRIOV VF only), then the GART and
 * system apertures, TLB, L2 cache, system domain, identity aperture,
 * per-VMID contexts and invalidation engines, in that order.
 *
 * Returns 0 (there are no failure paths at this level).
 */
int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
		 * VF copy registers so the vbios post doesn't program them;
		 * for SRIOV the driver needs to program them itself.
		 */
		WREG32_SOC15_RLC(GC, 0, mmMC_VM_FB_LOCATION_BASE,
			     adev->gmc.vram_start >> 24);
		WREG32_SOC15_RLC(GC, 0, mmMC_VM_FB_LOCATION_TOP,
			     adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
	gfxhub_v1_0_init_gart_aperture_regs(adev);
	gfxhub_v1_0_init_system_aperture_regs(adev);
	gfxhub_v1_0_init_tlb_regs(adev);
	gfxhub_v1_0_init_cache_regs(adev);

	gfxhub_v1_0_enable_system_domain(adev);
	gfxhub_v1_0_disable_identity_aperture(adev);
	gfxhub_v1_0_setup_vmid_config(adev);
	gfxhub_v1_0_program_invalidation(adev);

	return 0;
}
/*
 * gfxhub_v1_0_gart_disable - disable GART translation on the GFX hub
 * @adev: amdgpu device pointer
 *
 * Turns off all 16 VM contexts, the L1 TLB and the L2 cache, undoing the
 * programming done by gfxhub_v1_0_gart_enable().
 */
void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;
	u32 i;

	/* Disable all tables: mmVM_CONTEXT0_CNTL + i covers contexts 0..15. */
	for (i = 0; i < 16; i++)
		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL, i, 0);

	/* Setup TLB control: switch off the L1 TLB and the advanced model. */
	tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp,
			    MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL,
			    0);
	WREG32_SOC15_RLC(GC, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);

	/* Setup L2 cache: disable it and clear the L2 cache config. */
	WREG32_FIELD15(GC, 0, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, 0);
}
/**
 * gfxhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 *
 * Sets (or clears) the "redirect to default page" bit for every fault class
 * in VM_L2_PROTECTION_FAULT_CNTL.  When @value is false the CRASH_ON_*
 * bits are additionally set — presumably so faults halt the hardware for
 * debugging instead of being silently absorbed; confirm against the
 * register specification.
 */
void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
					  bool value)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
/*
 * gfxhub_v1_0_init - cache GFX-hub register offsets
 * @adev: amdgpu device pointer
 *
 * Fills the per-hub bookkeeping structure (adev->vmhub[AMDGPU_GFXHUB_0])
 * with the SOC15 offsets of the registers the generic VM code touches:
 * context-0 page-table base, invalidation engine 0, context control and
 * the L2 protection-fault status/control pair.
 */
void gfxhub_v1_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
}
| Pingmin/linux | drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | C | gpl-2.0 | 13,857 |
<?php
/**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/osl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@magentocommerce.com so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magentocommerce.com for more information.
*
* @category Mage
* @package Mage_Bundle
* @copyright Copyright (c) 2012 Magento Inc. (http://www.magentocommerce.com)
* @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
*/
/**
 * Bundle Stock Status Indexer Resource Model
 *
 * Computes the stock status of bundle products from the stock status of
 * their selections: a bundle is in stock only if every *required* option
 * has at least one in-stock selection, and any non-required option with
 * selections is tolerated regardless.
 *
 * @category    Mage
 * @package     Mage_Bundle
 * @author      Magento Core Team <core@magentocommerce.com>
 */
class Mage_Bundle_Model_Resource_Indexer_Stock extends Mage_CatalogInventory_Model_Resource_Indexer_Stock_Default
{
    /**
     * Reindex temporary (price result data) for defined product(s)
     *
     * @param int|array $entityIds
     * @return Mage_Bundle_Model_Resource_Indexer_Stock
     */
    public function reindexEntity($entityIds)
    {
        $this->_updateIndex($entityIds);
        return $this;
    }

    /**
     * Retrieve table name for temporary bundle option stock index
     *
     * @return string
     */
    protected function _getBundleOptionTable()
    {
        return $this->getTable('bundle/stock_index');
    }

    /**
     * Prepare stock status per Bundle options, website and stock
     *
     * Fills the temporary bundle option stock index with one row per
     * (bundle, website, stock, option) carrying the option's aggregated
     * stock status.
     *
     * @param int|array $entityIds
     * @param bool $usePrimaryTable use primary or temporary index table
     * @return Mage_Bundle_Model_Resource_Indexer_Stock
     */
    protected function _prepareBundleOptionStockData($entityIds = null, $usePrimaryTable = false)
    {
        $this->_cleanBundleOptionStockData();

        $idxTable = $usePrimaryTable ? $this->getMainTable() : $this->getIdxTable();
        $adapter  = $this->_getWriteAdapter();
        $select   = $adapter->select()
            ->from(array('bo' => $this->getTable('bundle/option')), array('parent_id'));
        $this->_addWebsiteJoinToSelect($select, false);
        // Option status: 1 if any of its selections is in stock (products
        // with required options are treated as out of stock here).
        $status = new Zend_Db_Expr('MAX(' .
            $adapter->getCheckSql('e.required_options = 0', 'i.stock_status', '0') . ')');
        $select->columns('website_id', 'cw')
            ->join(
                array('cis' => $this->getTable('cataloginventory/stock')),
                '',
                array('stock_id')
            )
            ->joinLeft(
                array('bs' => $this->getTable('bundle/selection')),
                'bs.option_id = bo.option_id',
                array()
            )
            ->joinLeft(
                array('i' => $idxTable),
                'i.product_id = bs.product_id AND i.website_id = cw.website_id AND i.stock_id = cis.stock_id',
                array()
            )
            ->joinLeft(
                array('e' => $this->getTable('catalog/product')),
                'e.entity_id = bs.product_id',
                array()
            )
            ->where('cw.website_id != 0')
            ->group(array('bo.parent_id', 'cw.website_id', 'cis.stock_id', 'bo.option_id'))
            ->columns(array(
                'option_id' => 'bo.option_id',
                'status'    => $status
            ));

        if (!is_null($entityIds)) {
            $select->where('bo.parent_id IN(?)', $entityIds);
        }

        // clone select for bundle product without required bundle options
        $selectNonRequired = clone $select;

        // Required options always contribute; non-required options only
        // contribute rows when at least one selection is in stock (HAVING).
        $select->where('bo.required = ?', 1);
        $selectNonRequired->where('bo.required = ?', 0)
            ->having($status . ' = 1');
        $query = $select->insertFromSelect($this->_getBundleOptionTable());
        $adapter->query($query);

        $query = $selectNonRequired->insertFromSelect($this->_getBundleOptionTable());
        $adapter->query($query);

        return $this;
    }

    /**
     * Get the select object for get stock status by product ids
     *
     * Joins the per-option temporary index prepared above against the
     * bundle products and takes the minimum of the option statuses and
     * the bundle's own manage-stock status.
     *
     * @param int|array $entityIds
     * @param bool $usePrimaryTable use primary or temporary index table
     * @return Varien_Db_Select
     */
    protected function _getStockStatusSelect($entityIds = null, $usePrimaryTable = false)
    {
        $this->_prepareBundleOptionStockData($entityIds, $usePrimaryTable);

        $adapter = $this->_getWriteAdapter();
        $select  = $adapter->select()
            ->from(array('e' => $this->getTable('catalog/product')), array('entity_id'));
        $this->_addWebsiteJoinToSelect($select, true);
        $this->_addProductWebsiteJoinToSelect($select, 'cw.website_id', 'e.entity_id');
        $select->columns('cw.website_id')
            ->join(
                array('cis' => $this->getTable('cataloginventory/stock')),
                '',
                array('stock_id')
            )
            ->joinLeft(
                array('cisi' => $this->getTable('cataloginventory/stock_item')),
                'cisi.stock_id = cis.stock_id AND cisi.product_id = e.entity_id',
                array()
            )
            ->joinLeft(
                array('o' => $this->_getBundleOptionTable()),
                'o.entity_id = e.entity_id AND o.website_id = cw.website_id AND o.stock_id = cis.stock_id',
                array()
            )
            ->columns(array('qty' => new Zend_Db_Expr('0')))
            ->where('cw.website_id != 0')
            ->where('e.type_id = ?', $this->getTypeId())
            ->group(array('e.entity_id', 'cw.website_id', 'cis.stock_id'));

        // add limitation of status
        $condition = $adapter->quoteInto('=?', Mage_Catalog_Model_Product_Status::STATUS_ENABLED);
        $this->_addAttributeToSelect($select, 'status', 'e.entity_id', 'cs.store_id', $condition);

        // Stock status of the bundle itself, honoring the (global or
        // per-item) manage-stock setting.
        if ($this->_isManageStock()) {
            $statusExpr = $adapter->getCheckSql(
                'cisi.use_config_manage_stock = 0 AND cisi.manage_stock = 0',
                '1',
                'cisi.is_in_stock'
            );
        } else {
            $statusExpr = $adapter->getCheckSql(
                'cisi.use_config_manage_stock = 0 AND cisi.manage_stock = 1',
                'cisi.is_in_stock',
                '1'
            );
        }

        // Final status: min of all option statuses and the bundle's own
        // status — out of stock if any required option is out of stock.
        $select->columns(array('status' => $adapter->getLeastSql(array(
            new Zend_Db_Expr('MIN(' . $adapter->getCheckSql('o.stock_status IS NOT NULL','o.stock_status', '0') .')'),
            new Zend_Db_Expr('MIN(' . $statusExpr . ')'),
        ))));

        if (!is_null($entityIds)) {
            $select->where('e.entity_id IN(?)', $entityIds);
        }

        return $select;
    }

    /**
     * Prepare stock status data in temporary index table
     *
     * @param int|array $entityIds  the product limitation
     * @return Mage_Bundle_Model_Resource_Indexer_Stock
     */
    protected function _prepareIndexTable($entityIds = null)
    {
        parent::_prepareIndexTable($entityIds);
        $this->_cleanBundleOptionStockData();

        return $this;
    }

    /**
     * Update Stock status index by product ids
     *
     * @param array|int $entityIds
     * @return Mage_Bundle_Model_Resource_Indexer_Stock
     */
    protected function _updateIndex($entityIds)
    {
        parent::_updateIndex($entityIds);
        $this->_cleanBundleOptionStockData();

        return $this;
    }

    /**
     * Clean temporary bundle options stock data
     *
     * @return Mage_Bundle_Model_Resource_Indexer_Stock
     */
    protected function _cleanBundleOptionStockData()
    {
        $this->_getWriteAdapter()->delete($this->_getBundleOptionTable());
        return $this;
    }
}
| keegan2149/magento | sites/default/app/code/core/Mage/Bundle/Model/Resource/Indexer/Stock.php | PHP | gpl-2.0 | 8,038 |
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include "alloc-util.h"
#include "build.h"
#include "env-file.h"
#include "env-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "hostname-util.h"
#include "log.h"
#include "macro.h"
#include "parse-util.h"
#include "stat-util.h"
#include "string-util.h"
#include "util.h"
#include "virt.h"
/* argc/argv the program was invoked with, stashed for later re-use. */
int saved_argc = 0;
char **saved_argv = NULL;

/* Tri-state cache for in_initrd(): -1 means "not determined yet". */
static int saved_in_initrd = -1;
bool kexec_loaded(void) {
_cleanup_free_ char *s = NULL;
if (read_one_line_file("/sys/kernel/kexec_loaded", &s) < 0)
return false;
return s[0] == '1';
}
/* Map an open(2) access mode (the O_ACCMODE part of @flags) to the matching
 * mmap(2) protection bits. Returns -EINVAL for any other access mode. */
int prot_from_flags(int flags) {
        int accmode = flags & O_ACCMODE;

        if (accmode == O_RDONLY)
                return PROT_READ;
        if (accmode == O_WRONLY)
                return PROT_WRITE;
        if (accmode == O_RDWR)
                return PROT_READ|PROT_WRITE;

        return -EINVAL;
}
/* Returns whether we are currently running inside the initrd. The result is
 * computed once and cached in saved_in_initrd for later calls. */
bool in_initrd(void) {
        int r;
        const char *e;
        bool lenient = false;

        if (saved_in_initrd >= 0)
                return saved_in_initrd;

        /* We have two checks here:
         *
         * 1. the flag file /etc/initrd-release must exist
         * 2. the root file system must be a memory file system
         *
         * The second check is extra paranoia, since misdetecting an
         * initrd can have bad consequences due to the initrd
         * emptying when transitioning to the main systemd.
         *
         * If env var $SYSTEMD_IN_INITRD is not set or set to "auto",
         * both checks are used. If it's set to "lenient", only check
         * 1 is used. If set to a boolean value, then the boolean
         * value is returned.
         */

        e = secure_getenv("SYSTEMD_IN_INITRD");
        if (e) {
                if (streq(e, "lenient"))
                        lenient = true;
                else if (!streq(e, "auto")) {
                        r = parse_boolean(e);
                        if (r >= 0) {
                                /* Explicit boolean override: cache and return. */
                                saved_in_initrd = r > 0;
                                return saved_in_initrd;
                        }
                        log_debug_errno(r, "Failed to parse $SYSTEMD_IN_INITRD, ignoring: %m");
                }
        }

        if (!lenient) {
                r = path_is_temporary_fs("/");
                if (r < 0)
                        log_debug_errno(r, "Couldn't determine if / is a temporary file system: %m");

                saved_in_initrd = r > 0;
        }

        r = access("/etc/initrd-release", F_OK);
        if (r >= 0) {
                /* Flag file exists; in strict mode only accept it if the
                 * tmpfs check above agreed. */
                if (saved_in_initrd == 0)
                        log_debug("/etc/initrd-release exists, but it's not an initrd.");
                else
                        saved_in_initrd = 1;
        } else {
                if (errno != ENOENT)
                        log_debug_errno(errno, "Failed to test if /etc/initrd-release exists: %m");
                saved_in_initrd = 0;
        }

        return saved_in_initrd;
}
/* Override the cached in_initrd() result, bypassing autodetection. The
 * cache is an int tri-state, so normalize the bool to exactly 0/1. */
void in_initrd_force(bool value) {
        saved_in_initrd = value ? 1 : 0;
}
/* Resolves a machined machine name to the PID of the container's leader
 * process, via the /run/systemd/machines/<machine> state file.
 *
 * Returns 0 on success with *pid set; ".host" maps to PID 1. Errors:
 * -EINVAL for an invalid machine name, -EHOSTDOWN if the machine state
 * file does not exist, -EIO if the state file lacks a usable LEADER or
 * does not describe a container. */
int container_get_leader(const char *machine, pid_t *pid) {
        _cleanup_free_ char *s = NULL, *class = NULL;
        const char *p;
        pid_t leader;
        int r;

        assert(machine);
        assert(pid);

        if (streq(machine, ".host")) {
                *pid = 1;
                return 0;
        }

        if (!hostname_is_valid(machine, 0))
                return -EINVAL;

        p = strjoina("/run/systemd/machines/", machine);
        r = parse_env_file(NULL, p,
                           "LEADER", &s,
                           "CLASS", &class);
        if (r == -ENOENT)
                return -EHOSTDOWN;
        if (r < 0)
                return r;
        if (!s)
                return -EIO;

        if (!streq_ptr(class, "container"))
                return -EIO;

        r = parse_pid(s, &leader);
        if (r < 0)
                return r;
        /* PID 1 (or below) can never be a container leader. */
        if (leader <= 1)
                return -EIO;

        *pid = leader;
        return 0;
}
/* Prints the package version, git revision and compiled-in feature string.
 * Always returns 0, so it can be returned directly from a verb handler. */
int version(void) {
        printf("systemd " STRINGIFY(PROJECT_VERSION) " (" GIT_VERSION ")\n%s\n",
               systemd_features);
        return 0;
}
/* Turn off core dumps but only if we're running outside of a container.
 * NOTE(review): core_pattern is a system-wide sysctl; presumably the
 * container check avoids clobbering the host's setting — confirm. */
void disable_coredumps(void) {
        int r;

        if (detect_container() > 0)
                return;

        /* Piping cores to /bin/false effectively discards them. Failure to
         * write the sysctl is non-fatal and only logged at debug level. */
        r = write_string_file("/proc/sys/kernel/core_pattern", "|/bin/false", WRITE_STRING_FILE_DISABLE_BUFFER);
        if (r < 0)
                log_debug_errno(r, "Failed to turn off coredumps, ignoring: %m");
}
| ssahani/systemd | src/basic/util.c | C | gpl-2.0 | 4,710 |
/*
* Copyright (c) 2010,
* Gavriloaie Eugen-Andrei (shiretu@gmail.com)
*
* This file is part of crtmpserver.
* crtmpserver is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* crtmpserver is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with crtmpserver. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef HAS_PROTOCOL_RTP
#ifndef _BASERTPAPPPROTOCOLHANDLER_H
#define _BASERTPAPPPROTOCOLHANDLER_H
#include "application/baseappprotocolhandler.h"
/*
 * Base application-level handler for the RTP protocol family. Applications
 * derive from this to be notified when RTP protocol instances are bound to
 * (RegisterProtocol) or released from (UnRegisterProtocol) the application.
 * Implementations live in the corresponding .cpp.
 */
class DLLEXP BaseRTPAppProtocolHandler
: public BaseAppProtocolHandler {
public:
	// configuration: application settings blob passed down by the framework
	BaseRTPAppProtocolHandler(Variant &configuration);
	virtual ~BaseRTPAppProtocolHandler();

	virtual void RegisterProtocol(BaseProtocol *pProtocol);
	virtual void UnRegisterProtocol(BaseProtocol *pProtocol);
};
#endif /* _BASERTPAPPPROTOCOLHANDLER_H */
#endif /* HAS_PROTOCOL_RTP */
| OpenQCam/qcam | trunk/sources/thelib/include/protocols/rtp/basertpappprotocolhandler.h | C | gpl-3.0 | 1,296 |
/* Creation of autonomous subprocesses.
Copyright (C) 2001-2003, 2008-2016 Free Software Foundation, Inc.
Written by Bruno Haible <haible@clisp.cons.org>, 2001.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#ifndef _EXECUTE_H
#define _EXECUTE_H
#include <stdbool.h>
/* Execute a command, optionally redirecting any of the three standard file
   descriptors to /dev/null.  Return its exit code.
   If it didn't terminate correctly, exit if exit_on_error is true, otherwise
   return 127.
   If ignore_sigpipe is true, consider a subprocess termination due to SIGPIPE
   as equivalent to a success.  This is suitable for processes whose only
   purpose is to write to standard output.
   If slave_process is true, the child process will be terminated when its
   creator receives a catchable fatal signal.
   If termsigp is not NULL, *termsigp will be set to the signal that terminated
   the subprocess (if supported by the platform: not on native Windows
   platforms), otherwise 0.
   It is recommended that no signal is blocked or ignored while execute()
   is called.  See pipe.h for the reason. */
extern int execute (const char *progname,
                    const char *prog_path, char **prog_argv,
                    bool ignore_sigpipe,
                    bool null_stdin, bool null_stdout, bool null_stderr,
                    bool slave_process, bool exit_on_error,
                    int *termsigp);
#endif /* _EXECUTE_H */
| FabianKnapp/nexmon | utilities/gettext/gettext-tools/gnulib-lib/execute.h | C | gpl-3.0 | 2,050 |
/*
* This file is part of the LibreOffice project.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
* This file incorporates work covered by the following license notice:
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed
* with this work for additional information regarding copyright
* ownership. The ASF licenses this file to you under the Apache
* License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.apache.org/licenses/LICENSE-2.0 .
*/
package ifc.sheet;
import lib.MultiPropertyTest;
/**
 * Testing <code>com.sun.star.sheet.TableValidation</code>
 * service properties :
 * <ul>
 *  <li><code> Type</code></li>
 *  <li><code> ShowInputMessage</code></li>
 *  <li><code> InputTitle</code></li>
 *  <li><code> InputMessage</code></li>
 *  <li><code> ShowErrorMessage</code></li>
 *  <li><code> ErrorTitle</code></li>
 *  <li><code> ErrorMessage</code></li>
 *  <li><code> IgnoreBlankCells</code></li>
 *  <li><code> ErrorAlertStyle</code></li>
 * </ul> <p>
 * Properties testing is automated by <code>lib.MultiPropertyTest</code>.
 * @see com.sun.star.sheet.TableValidation
 */
public class _TableValidation extends MultiPropertyTest {
    // Intentionally empty: MultiPropertyTest derives a generic set/get test
    // for every property of the service, so no members are needed here.
} // finish class _TableValidation
| jvanz/core | qadevOOo/tests/java/ifc/sheet/_TableValidation.java | Java | gpl-3.0 | 1,541 |
--
-- Name: de_metabolite_sub_pway_metab; Type: TABLE; Schema: deapp; Owner: -
--
-- Join table for the many-to-many relation between metabolite annotations
-- and metabolite sub-pathways. Both columns are mandatory; referential
-- integrity is enforced by the two foreign keys below.

CREATE TABLE de_metabolite_sub_pway_metab (
    metabolite_id bigint NOT NULL,
    sub_pathway_id bigint NOT NULL
);

--
-- Name: de_met_sub_pw_met_met_id_fk; Type: FK CONSTRAINT; Schema: deapp; Owner: -
--

ALTER TABLE ONLY de_metabolite_sub_pway_metab
    ADD CONSTRAINT de_met_sub_pw_met_met_id_fk FOREIGN KEY (metabolite_id) REFERENCES de_metabolite_annotation(id);

--
-- Name: de_met_sub_pw_met_sub_pw_id_fk; Type: FK CONSTRAINT; Schema: deapp; Owner: -
--

ALTER TABLE ONLY de_metabolite_sub_pway_metab
    ADD CONSTRAINT de_met_sub_pw_met_sub_pw_id_fk FOREIGN KEY (sub_pathway_id) REFERENCES de_metabolite_sub_pathways(id);
'use strict';
var latex = require('../../utils/latex');
var operators = require('../operators');
function factory (type, config, load, typed) {
  var Node = load(require('./Node'));

  /**
   * A lazy evaluating conditional operator: 'condition ? trueExpr : falseExpr'
   *
   * @param {Node} condition   Condition, must result in a boolean
   * @param {Node} trueExpr    Expression evaluated when condition is true
   * @param {Node} falseExpr   Expression evaluated when condition is false
   *
   * @constructor ConditionalNode
   * @extends {Node}
   */
  function ConditionalNode(condition, trueExpr, falseExpr) {
    if (!(this instanceof ConditionalNode)) {
      throw new SyntaxError('Constructor must be called with the new operator');
    }
    if (!(condition && condition.isNode)) throw new TypeError('Parameter condition must be a Node');
    if (!(trueExpr && trueExpr.isNode)) throw new TypeError('Parameter trueExpr must be a Node');
    if (!(falseExpr && falseExpr.isNode)) throw new TypeError('Parameter falseExpr must be a Node');

    this.condition = condition;
    this.trueExpr = trueExpr;
    this.falseExpr = falseExpr;
  }

  ConditionalNode.prototype = new Node();

  ConditionalNode.prototype.type = 'ConditionalNode';

  ConditionalNode.prototype.isConditionalNode = true;

  /**
   * Compile the node to javascript code.
   * Only one branch of the generated ternary is evaluated at run time,
   * which is what makes this operator lazy.
   * @param {Object} defs     Object which can be used to define functions
   *                          or constants globally available for the compiled
   *                          expression
   * @return {string} js
   * @private
   */
  ConditionalNode.prototype._compile = function (defs) {
    /**
     * Test whether a condition is met. Installed on `defs` so the generated
     * code can call it by name.
     * @param {*} condition
     * @returns {boolean} true if condition is true or non-zero, else false
     */
    defs.testCondition = function (condition) {
      if (typeof condition === 'number'
          || typeof condition === 'boolean'
          || typeof condition === 'string') {
        return condition ? true : false;
      }

      if (condition) {
        // math.js value types: BigNumber, Complex, Unit
        if (condition.isBigNumber === true) {
          return condition.isZero() ? false : true;
        }

        if (condition.isComplex === true) {
          return (condition.re || condition.im) ? true : false;
        }

        if (condition.isUnit === true) {
          return condition.value ? true : false;
        }
      }

      if (condition === null || condition === undefined) {
        return false;
      }

      throw new TypeError('Unsupported type of condition "' + defs.math['typeof'](condition) + '"');
    };

    return (
      'testCondition(' + this.condition._compile(defs) + ') ? ' +
      '( ' + this.trueExpr._compile(defs) + ') : ' +
      '( ' + this.falseExpr._compile(defs) + ')'
    );
  };

  /**
   * Execute a callback for each of the child nodes of this node
   * @param {function(child: Node, path: string, parent: Node)} callback
   */
  ConditionalNode.prototype.forEach = function (callback) {
    callback(this.condition, 'condition', this);
    callback(this.trueExpr, 'trueExpr', this);
    callback(this.falseExpr, 'falseExpr', this);
  };

  /**
   * Create a new ConditionalNode having it's childs be the results of calling
   * the provided callback function for each of the childs of the original node.
   * @param {function(child: Node, path: string, parent: Node): Node} callback
   * @returns {ConditionalNode} Returns a transformed copy of the node
   */
  ConditionalNode.prototype.map = function (callback) {
    return new ConditionalNode(
        this._ifNode(callback(this.condition, 'condition', this)),
        this._ifNode(callback(this.trueExpr, 'trueExpr', this)),
        this._ifNode(callback(this.falseExpr, 'falseExpr', this))
    );
  };

  /**
   * Create a clone of this node, a shallow copy
   * @return {ConditionalNode}
   */
  ConditionalNode.prototype.clone = function () {
    return new ConditionalNode(this.condition, this.trueExpr, this.falseExpr);
  };

  /**
   * Get string representation
   * @param {Object} options
   * @return {string} str
   */
  ConditionalNode.prototype._toString = function (options) {
    var parenthesis = (options && options.parenthesis) ? options.parenthesis : 'keep';
    var precedence = operators.getPrecedence(this, parenthesis);

    //Enclose Arguments in parentheses if they are an OperatorNode
    //or have lower or equal precedence
    //NOTE: enclosing all OperatorNodes in parentheses is a decision
    //purely based on aesthetics and readability
    var condition = this.condition.toString(options);
    var conditionPrecedence = operators.getPrecedence(this.condition, parenthesis);
    if ((parenthesis === 'all')
        || (this.condition.type === 'OperatorNode')
        || ((conditionPrecedence !== null) && (conditionPrecedence <= precedence))) {
      condition = '(' + condition + ')';
    }

    var trueExpr = this.trueExpr.toString(options);
    var truePrecedence = operators.getPrecedence(this.trueExpr, parenthesis);
    if ((parenthesis === 'all')
        || (this.trueExpr.type === 'OperatorNode')
        || ((truePrecedence !== null) && (truePrecedence <= precedence))) {
      trueExpr = '(' + trueExpr + ')';
    }

    var falseExpr = this.falseExpr.toString(options);
    var falsePrecedence = operators.getPrecedence(this.falseExpr, parenthesis);
    if ((parenthesis === 'all')
        || (this.falseExpr.type === 'OperatorNode')
        || ((falsePrecedence !== null) && (falsePrecedence <= precedence))) {
      falseExpr = '(' + falseExpr + ')';
    }
    return condition + ' ? ' + trueExpr + ' : ' + falseExpr;
  };

  /**
   * Get LaTeX representation (rendered as a two-row cases environment)
   * @param {Object} options
   * @return {string} str
   */
  ConditionalNode.prototype._toTex = function (options) {
    return '\\left\\{\\begin{array}{l l}{'
        + this.trueExpr.toTex(options) + '}, &\\quad{\\text{if}\\;'
        + this.condition.toTex(options)
        + '}\\\\{' + this.falseExpr.toTex(options)
        + '}, &\\quad{\\text{otherwise}}\\end{array}\\right.';
  };

  return ConditionalNode;
}
// Registered by math.js under math.expression.node.ConditionalNode.
exports.name = 'ConditionalNode';
exports.path = 'expression.node';
exports.factory = factory;
| SunriseCoder/update-checker | src/main/resources/public/bower_components/mathjs/lib/expression/node/ConditionalNode.js | JavaScript | gpl-3.0 | 6,182 |
/* jconfig.vc --- jconfig.h for Microsoft Visual C++ on Windows 95 or NT. */
/* see jconfig.doc for explanations */

// disable all the warnings under MSVC
#ifdef _MSC_VER
 #pragma warning (disable: 4996 4267 4100 4127 4702 4244)
#endif

#ifdef __BORLANDC__
 #pragma warn -8057
 #pragma warn -8019
 #pragma warn -8004
 #pragma warn -8008
#endif

/* Compiler capability flags used by the libjpeg sources. */
#define HAVE_PROTOTYPES
#define HAVE_UNSIGNED_CHAR
#define HAVE_UNSIGNED_SHORT
/* #define void char */
/* #define const */
#undef CHAR_IS_UNSIGNED
#define HAVE_STDDEF_H
#ifndef HAVE_STDLIB_H
 #define HAVE_STDLIB_H
#endif
#undef NEED_BSD_STRINGS
#undef NEED_SYS_TYPES_H
#undef NEED_FAR_POINTERS	/* we presume a 32-bit flat memory model */
#undef NEED_SHORT_EXTERNAL_NAMES
#undef INCOMPLETE_TYPES_BROKEN

/* Define "boolean" as unsigned char, not int, per Windows custom */
#ifndef __RPCNDR_H__		/* don't conflict if rpcndr.h already read */
typedef unsigned char boolean;
#endif
#define HAVE_BOOLEAN		/* prevent jmorecfg.h from redefining it */

#ifdef JPEG_INTERNALS

#undef RIGHT_SHIFT_IS_UNSIGNED

#endif /* JPEG_INTERNALS */

#ifdef JPEG_CJPEG_DJPEG

/* Image formats supported by the cjpeg/djpeg command-line utilities. */
#define BMP_SUPPORTED		/* BMP image file format */
#define GIF_SUPPORTED		/* GIF image file format */
#define PPM_SUPPORTED		/* PBMPLUS PPM/PGM image file format */
#undef RLE_SUPPORTED		/* Utah RLE image file format */
#define TARGA_SUPPORTED		/* Targa image file format */

#define TWO_FILE_COMMANDLINE	/* optional */
#define USE_SETMODE		/* Microsoft has setmode() */
#undef NEED_SIGNAL_CATCHER
#undef DONT_USE_B_MODE
#undef PROGRESS_REPORT		/* optional */

#endif /* JPEG_CJPEG_DJPEG */
| COx2/JUCE_JAPAN_DEMO | vol2/JUCE/modules/juce_graphics/image_formats/jpglib/jconfig.h | C | gpl-3.0 | 1,587 |
<?php
namespace React\Socket;
use Evenement\EventEmitter;
use React\EventLoop\LoopInterface;
/**
* The `Server` class implements the `ServerInterface` and
* is responsible for accepting plaintext TCP/IP connections.
*
* Whenever a client connects, it will emit a `connection` event with a connection
* instance implementing `ConnectionInterface`:
*
* ```php
* $server->on('connection', function (ConnectionInterface $connection) {
* echo 'Plaintext connection from ' . $connection->getRemoteAddress() . PHP_EOL;
* $connection->write('hello there!' . PHP_EOL);
* …
* });
* ```
*
* See also the `ServerInterface` for more details.
*
* Note that the `Server` class is a concrete implementation for TCP/IP sockets.
* If you want to typehint in your higher-level protocol implementation, you SHOULD
* use the generic `ServerInterface` instead.
*
* @see ServerInterface
* @see ConnectionInterface
*/
class Server extends EventEmitter implements ServerInterface
{
    /** @var resource|null the listening stream socket */
    public $master;
    /** @var LoopInterface event loop used to watch the listening socket */
    private $loop;
    /** @var array socket context options passed to stream_socket_server() */
    private $context;

    /**
     * Creates a plaintext TCP/IP server.
     *
     * ```php
     * $server = new Server($loop);
     *
     * $server->listen(8080);
     * ```
     *
     * Optionally, you can specify [socket context options](http://php.net/manual/en/context.socket.php)
     * for the underlying stream socket resource like this:
     *
     * ```php
     * $server = new Server($loop, array(
     *     'backlog' => 200,
     *     'so_reuseport' => true,
     *     'ipv6_v6only' => true
     * ));
     *
     * $server->listen(8080, '::1');
     * ```
     *
     * Note that available [socket context options](http://php.net/manual/en/context.socket.php),
     * their defaults and effects of changing these may vary depending on your system
     * and/or PHP version.
     * Passing unknown context options has no effect.
     *
     * @param LoopInterface $loop
     * @param array $context
     */
    public function __construct(LoopInterface $loop, array $context = array())
    {
        $this->loop = $loop;
        $this->context = $context;
    }

    /**
     * Starts listening on the given port/host and begins accepting clients.
     *
     * @param int    $port
     * @param string $host IPv4 or IPv6 address to bind to
     * @throws ConnectionException if the socket cannot be bound
     */
    public function listen($port, $host = '127.0.0.1')
    {
        if (strpos($host, ':') !== false) {
            // enclose IPv6 addresses in square brackets before appending port
            $host = '[' . $host . ']';
        }

        // '@' suppresses the PHP warning; failure is reported via $errno/$errstr
        $this->master = @stream_socket_server(
            "tcp://$host:$port",
            $errno,
            $errstr,
            STREAM_SERVER_BIND | STREAM_SERVER_LISTEN,
            stream_context_create(array('socket' => $this->context))
        );
        if (false === $this->master) {
            $message = "Could not bind to tcp://$host:$port: $errstr";
            throw new ConnectionException($message, $errno);
        }
        // non-blocking so accept() in the loop callback never stalls
        stream_set_blocking($this->master, 0);

        $that = $this;

        $this->loop->addReadStream($this->master, function ($master) use ($that) {
            $newSocket = @stream_socket_accept($master);
            if (false === $newSocket) {
                $that->emit('error', array(new \RuntimeException('Error accepting new connection')));

                return;
            }
            $that->handleConnection($newSocket);
        });
    }

    /**
     * Wraps an accepted stream socket and emits the 'connection' event.
     *
     * @param resource $socket
     */
    public function handleConnection($socket)
    {
        stream_set_blocking($socket, 0);

        $client = $this->createConnection($socket);

        $this->emit('connection', array($client));
    }

    /**
     * Returns the local port the server is bound to.
     *
     * @return int
     */
    public function getPort()
    {
        $name = stream_socket_get_name($this->master, false);

        // port is everything after the last ':' (works for IPv6 too)
        return (int) substr(strrchr($name, ':'), 1);
    }

    /**
     * Stops accepting connections and closes the listening socket.
     */
    public function shutdown()
    {
        $this->loop->removeStream($this->master);

        fclose($this->master);

        $this->removeAllListeners();
    }

    /**
     * Creates the connection object for an accepted socket.
     *
     * @param resource $socket
     * @return Connection
     */
    public function createConnection($socket)
    {
        return new Connection($socket, $this->loop);
    }
}
| OkoWsc/Accessin-Command | vendor/react/socket/src/Server.php | PHP | gpl-3.0 | 3,889 |
/*
Copyright (c) 2003-2014, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
// Greek ("el") localization strings for the "newpage" plugin toolbar button.
CKEDITOR.plugins.setLang( 'newpage', 'el', {
	toolbar: 'Νέα Σελίδα'
} );
| gmuro/dolibarr | htdocs/includes/ckeditor/ckeditor/_source/plugins/newpage/lang/el.js | JavaScript | gpl-3.0 | 225 |
// ReSharper disable All
using System.Collections.Generic;
using System.Dynamic;
using PetaPoco;
namespace MixERP.Net.Schemas.Core.Data
{
public interface IStateSalesTaxScrudViewRepository
{
/// <summary>
/// Performs count on IStateSalesTaxScrudViewRepository.
/// </summary>
/// <returns>Returns the number of IStateSalesTaxScrudViewRepository.</returns>
long Count();
/// <summary>
/// Return all instances of the "StateSalesTaxScrudView" class from IStateSalesTaxScrudViewRepository.
/// </summary>
/// <returns>Returns a non-live, non-mapped instances of "StateSalesTaxScrudView" class.</returns>
IEnumerable<MixERP.Net.Entities.Core.StateSalesTaxScrudView> Get();
/// <summary>
/// Displayfields provide a minimal name/value context for data binding IStateSalesTaxScrudViewRepository.
/// </summary>
/// <returns>Returns an enumerable name and value collection for IStateSalesTaxScrudViewRepository.</returns>
IEnumerable<DisplayField> GetDisplayFields();
/// <summary>
/// Produces a paginated result of 10 items from IStateSalesTaxScrudViewRepository.
/// </summary>
/// <returns>Returns the first page of collection of "StateSalesTaxScrudView" class.</returns>
IEnumerable<MixERP.Net.Entities.Core.StateSalesTaxScrudView> GetPaginatedResult();
/// <summary>
/// Produces a paginated result of 10 items from IStateSalesTaxScrudViewRepository.
/// </summary>
/// <param name="pageNumber">Enter the page number to produce the paginated result.</param>
/// <returns>Returns collection of "StateSalesTaxScrudView" class.</returns>
IEnumerable<MixERP.Net.Entities.Core.StateSalesTaxScrudView> GetPaginatedResult(long pageNumber);
List<EntityParser.Filter> GetFilters(string catalog, string filterName);
/// <summary>
/// Performs a filtered count on IStateSalesTaxScrudViewRepository.
/// </summary>
/// <param name="filters">The list of filter conditions.</param>
/// <returns>Returns number of rows of "StateSalesTaxScrudView" class using the filter.</returns>
long CountWhere(List<EntityParser.Filter> filters);
/// <summary>
/// Produces a paginated result of 10 items using the supplied filters from IStateSalesTaxScrudViewRepository.
/// </summary>
/// <param name="pageNumber">Enter the page number to produce the paginated result. If you provide a negative number, the result will not be paginated.</param>
/// <param name="filters">The list of filter conditions.</param>
/// <returns>Returns collection of "StateSalesTaxScrudView" class.</returns>
IEnumerable<MixERP.Net.Entities.Core.StateSalesTaxScrudView> GetWhere(long pageNumber, List<EntityParser.Filter> filters);
/// <summary>
/// Performs a filtered count on IStateSalesTaxScrudViewRepository.
/// </summary>
/// <param name="filterName">The named filter.</param>
/// <returns>Returns number of rows of "StateSalesTaxScrudView" class using the filter.</returns>
long CountFiltered(string filterName);
/// <summary>
/// Produces a paginated result of 10 items using the supplied filter name from IStateSalesTaxScrudViewRepository.
/// </summary>
/// <param name="pageNumber">Enter the page number to produce the paginated result. If you provide a negative number, the result will not be paginated.</param>
/// <param name="filterName">The named filter.</param>
/// <returns>Returns collection of "StateSalesTaxScrudView" class.</returns>
IEnumerable<MixERP.Net.Entities.Core.StateSalesTaxScrudView> GetFiltered(long pageNumber, string filterName);
}
} | mixerp/mixerp | src/Libraries/DAL/Core/IStateSalesTaxScrudViewRepository.cs | C# | gpl-3.0 | 3,870 |
//
// ip/address_v6_range.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Oliver Kowalke (oliver dot kowalke at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_ASIO_IP_ADDRESS_V6_RANGE_HPP
#define BOOST_ASIO_IP_ADDRESS_V6_RANGE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/detail/config.hpp>
#include <boost/asio/ip/address_v6_iterator.hpp>
#include <boost/asio/detail/push_options.hpp>
namespace boost {
namespace asio {
namespace ip {
template <typename> class basic_address_range;
/// Represents a range of IPv6 addresses.
/**
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
*/
template <> class basic_address_range<address_v6>
{
public:
/// The type of an iterator that points into the range.
typedef basic_address_iterator<address_v6> iterator;
/// Construct an empty range.
basic_address_range() BOOST_ASIO_NOEXCEPT
: begin_(address_v6()),
end_(address_v6())
{
}
/// Construct an range that represents the given range of addresses.
explicit basic_address_range(const iterator& first,
const iterator& last) BOOST_ASIO_NOEXCEPT
: begin_(first),
end_(last)
{
}
/// Copy constructor.
basic_address_range(const basic_address_range& other) BOOST_ASIO_NOEXCEPT
: begin_(other.begin_),
end_(other.end_)
{
}
#if defined(BOOST_ASIO_HAS_MOVE)
/// Move constructor.
basic_address_range(basic_address_range&& other) BOOST_ASIO_NOEXCEPT
: begin_(BOOST_ASIO_MOVE_CAST(iterator)(other.begin_)),
end_(BOOST_ASIO_MOVE_CAST(iterator)(other.end_))
{
}
#endif // defined(BOOST_ASIO_HAS_MOVE)
/// Assignment operator.
basic_address_range& operator=(
const basic_address_range& other) BOOST_ASIO_NOEXCEPT
{
begin_ = other.begin_;
end_ = other.end_;
return *this;
}
#if defined(BOOST_ASIO_HAS_MOVE)
/// Move assignment operator.
basic_address_range& operator=(
basic_address_range&& other) BOOST_ASIO_NOEXCEPT
{
begin_ = BOOST_ASIO_MOVE_CAST(iterator)(other.begin_);
end_ = BOOST_ASIO_MOVE_CAST(iterator)(other.end_);
return *this;
}
#endif // defined(BOOST_ASIO_HAS_MOVE)
/// Obtain an iterator that points to the start of the range.
iterator begin() const BOOST_ASIO_NOEXCEPT
{
return begin_;
}
/// Obtain an iterator that points to the end of the range.
iterator end() const BOOST_ASIO_NOEXCEPT
{
return end_;
}
/// Determine whether the range is empty.
bool empty() const BOOST_ASIO_NOEXCEPT
{
return begin_ == end_;
}
/// Find an address in the range.
iterator find(const address_v6& addr) const BOOST_ASIO_NOEXCEPT
{
return addr >= *begin_ && addr < *end_ ? iterator(addr) : end_;
}
private:
iterator begin_;
iterator end_;
};
/// Represents a range of IPv6 addresses.
typedef basic_address_range<address_v6> address_v6_range;
} // namespace ip
} // namespace asio
} // namespace boost
#include <boost/asio/detail/pop_options.hpp>
#endif // BOOST_ASIO_IP_ADDRESS_V6_RANGE_HPP
| BeGe78/esood | vendor/bundle/ruby/3.0.0/gems/passenger-6.0.10/src/cxx_supportlib/vendor-modified/boost/asio/ip/address_v6_range.hpp | C++ | gpl-3.0 | 3,335 |
<?php
// Text
$_['text_title'] = '银行转帐';
$_['text_instruction'] = '银行转帐指令.';
$_['text_description'] = '请将总额转账到以下银行账户.';
$_['text_payment'] = '收到付款后我们将按订单发货给您.';
?> | atpshxc/shcoyee | mobile/catalog/language/zh-cn/payment/bank_transfer.php | PHP | gpl-3.0 | 249 |
/*****************************************************************************
*
* PROJECT: Multi Theft Auto v1.0
* LICENSE: See LICENSE in the top level directory
* FILE: game_sa/CPadSA.cpp
* PURPOSE: Controller pad input logic
* DEVELOPERS: Ed Lyons <eai@opencoding.net>
* Christian Myhre Lundheim <>
* Jax <>
*
* Multi Theft Auto is available from http://www.multitheftauto.com/
*
*****************************************************************************/
#include <main.h>
CControllerState * CPadSA::GetCurrentControllerState(CControllerState * ControllerState)
{
DEBUG_TRACE("CControllerState * CPadSA::GetCurrentControllerState(CControllerState * ControllerState)");
memcpy(ControllerState, &this->internalInterface->NewState, sizeof(CControllerState));
return ControllerState;
}
CControllerState * CPadSA::GetLastControllerState(CControllerState * ControllerState)
{
DEBUG_TRACE("CControllerState * CPadSA::GetLastControllerState(CControllerState * ControllerState)");
memcpy(ControllerState, &this->internalInterface->OldState, sizeof(CControllerState));
return ControllerState;
}
VOID CPadSA::SetCurrentControllerState(CControllerState * ControllerState)
{
DEBUG_TRACE("VOID CPadSA::SetCurrentControllerState(CControllerState * ControllerState)");
memcpy(&this->internalInterface->NewState, ControllerState, sizeof(CControllerState));
}
VOID CPadSA::SetLastControllerState(CControllerState * ControllerState)
{
DEBUG_TRACE("VOID CPadSA::SetLastControllerState(CControllerState * ControllerState)");
memcpy(&this->internalInterface->OldState, ControllerState, sizeof(CControllerState));
}
VOID CPadSA::Store()
{
DEBUG_TRACE("VOID CPadSA::Store()");
memcpy(&this->StoredPad, this->internalInterface, sizeof(CPadSAInterface));
}
VOID CPadSA::Restore()
{
DEBUG_TRACE("VOID CPadSA::Restore()");
memcpy(this->internalInterface, &this->StoredPad, sizeof(CPadSAInterface));
}
bool CPadSA::IsEnabled ( void )
{
bool bEnabled = *(BYTE *)FUNC_CPad_UpdatePads == 0x56;
return bEnabled;
}
VOID CPadSA::Disable( bool bDisable )
{
if ( bDisable )
*(BYTE *)FUNC_CPad_UpdatePads = 0xC3;
else
*(BYTE *)FUNC_CPad_UpdatePads = 0x56;
//this->internalInterface->DisablePlayerControls = bDisable;
}
VOID CPadSA::Clear ( void )
{
CControllerState cs; // create a null controller (class is inited to null)
SetCurrentControllerState ( &cs );
SetLastControllerState ( &cs );
}
VOID CPadSA::SetHornHistoryValue( bool value )
{
internalInterface->iCurrHornHistory++;
if ( internalInterface->iCurrHornHistory >= MAX_HORN_HISTORY )
internalInterface->iCurrHornHistory = 0;
internalInterface->bHornHistory[internalInterface->iCurrHornHistory] = value;
}
long CPadSA::GetAverageWeapon ( void )
{
return internalInterface->AverageWeapon;
}
void CPadSA::SetLastTimeTouched ( DWORD dwTime )
{
internalInterface->LastTimeTouched = dwTime;
} | irukese1/m0d-s0beit-sa | src/game_sa/CPadSA.cpp | C++ | gpl-3.0 | 3,037 |
//------------------------------------------------------------------------------
// <auto-generated>
// This code was generated by a tool.
// Runtime Version:2.0.50727.3082
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated.
// </auto-generated>
//------------------------------------------------------------------------------
namespace ClearCanvas.Dicom.TestTools.TestScp.Properties {
using System;
/// <summary>
/// A strongly-typed resource class, for looking up localized strings, etc.
/// </summary>
// This class was auto-generated by the StronglyTypedResourceBuilder
// class via a tool like ResGen or Visual Studio.
// To add or remove a member, edit your .ResX file then rerun ResGen
// with the /str option, or rebuild your VS project.
[global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "2.0.0.0")]
[global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
[global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
internal class Resources {
private static global::System.Resources.ResourceManager resourceMan;
private static global::System.Globalization.CultureInfo resourceCulture;
[global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")]
internal Resources() {
}
/// <summary>
/// Returns the cached ResourceManager instance used by this class.
/// </summary>
[global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
internal static global::System.Resources.ResourceManager ResourceManager {
get {
if (object.ReferenceEquals(resourceMan, null)) {
global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("ClearCanvas.Dicom.TestTools.TestScp.Properties.Resources", typeof(Resources).Assembly);
resourceMan = temp;
}
return resourceMan;
}
}
/// <summary>
/// Overrides the current thread's CurrentUICulture property for all
/// resource lookups using this strongly typed resource class.
/// </summary>
[global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
internal static global::System.Globalization.CultureInfo Culture {
get {
return resourceCulture;
}
set {
resourceCulture = value;
}
}
}
}
| chinapacs/ImageViewer | Dicom/TestTools/TestScp/Properties/Resources.Designer.cs | C# | gpl-3.0 | 2,897 |
package org.zarroboogs.weibo.dialogfragment;
import org.zarroboogs.weibo.R;
import android.app.AlertDialog;
import android.app.Dialog;
import android.app.DialogFragment;
import android.content.DialogInterface;
import android.os.Bundle;
public class SelectPictureDialog extends DialogFragment {
public static SelectPictureDialog newInstance() {
return new SelectPictureDialog();
}
@Override
public Dialog onCreateDialog(Bundle savedInstanceState) {
String[] items = {
getString(R.string.get_the_last_picture), getString(R.string.take_camera), getString(R.string.select_pic)
};
AlertDialog.Builder builder = new AlertDialog.Builder(getActivity()).setTitle(getString(R.string.select)).setItems(
items,
(DialogInterface.OnClickListener) getActivity());
return builder.create();
}
}
| tsdl2013/iBeebo | app/src/main/java/org/zarroboogs/weibo/dialogfragment/SelectPictureDialog.java | Java | gpl-3.0 | 894 |
using System;
using System.Diagnostics;
using System.Drawing;
using System.Windows.Forms;
using MissionPlanner.Controls;
using MissionPlanner.HIL;
namespace MissionPlanner.GCSViews.ConfigurationView
{
public partial class ConfigMotorTest : UserControl, IActivate
{
public ConfigMotorTest()
{
InitializeComponent();
}
/*
#if (FRAME_CONFIG == QUAD_FRAME)
MAV_TYPE_QUADROTOR,
#elif (FRAME_CONFIG == TRI_FRAME)
MAV_TYPE_TRICOPTER,
#elif (FRAME_CONFIG == HEXA_FRAME || FRAME_CONFIG == Y6_FRAME)
MAV_TYPE_HEXAROTOR,
#elif (FRAME_CONFIG == OCTA_FRAME || FRAME_CONFIG == OCTA_QUAD_FRAME)
MAV_TYPE_OCTOROTOR,
#elif (FRAME_CONFIG == HELI_FRAME)
MAV_TYPE_HELICOPTER,
#elif (FRAME_CONFIG == SINGLE_FRAME) //because mavlink did not define a singlecopter, we use a rocket
MAV_TYPE_ROCKET,
#elif (FRAME_CONFIG == COAX_FRAME) //because mavlink did not define a singlecopter, we use a rocket
MAV_TYPE_ROCKET,
#else
#error Unrecognised frame type
#endif*/
public void Activate()
{
var x = 20;
var y = 40;
var motormax = 8;
if (!MainV2.comPort.MAV.param.ContainsKey("FRAME"))
{
Enabled = false;
return;
}
var motors = new Motor[0];
if (MainV2.comPort.MAV.aptype == MAVLink.MAV_TYPE.TRICOPTER)
{
motormax = 4;
motors = Motor.build_motors(MAVLink.MAV_TYPE.TRICOPTER, (int) (float) MainV2.comPort.MAV.param["FRAME"]);
}
else if (MainV2.comPort.MAV.aptype == MAVLink.MAV_TYPE.QUADROTOR)
{
motormax = 4;
motors = Motor.build_motors(MAVLink.MAV_TYPE.QUADROTOR, (int) (float) MainV2.comPort.MAV.param["FRAME"]);
}
else if (MainV2.comPort.MAV.aptype == MAVLink.MAV_TYPE.HEXAROTOR)
{
motormax = 6;
motors = Motor.build_motors(MAVLink.MAV_TYPE.HEXAROTOR, (int) (float) MainV2.comPort.MAV.param["FRAME"]);
}
else if (MainV2.comPort.MAV.aptype == MAVLink.MAV_TYPE.OCTOROTOR)
{
motormax = 8;
motors = Motor.build_motors(MAVLink.MAV_TYPE.OCTOROTOR, (int) (float) MainV2.comPort.MAV.param["FRAME"]);
}
else if (MainV2.comPort.MAV.aptype == MAVLink.MAV_TYPE.HELICOPTER)
{
motormax = 0;
}
for (var a = 1; a <= motormax; a++)
{
var but = new MyButton();
but.Text = "Test motor " + (char) ((a - 1) + 'A');
but.Location = new Point(x, y);
but.Click += but_Click;
but.Tag = a;
Controls.Add(but);
y += 25;
}
}
private void but_Click(object sender, EventArgs e)
{
try
{
var motor = (int) ((MyButton) sender).Tag;
if (MainV2.comPort.doMotorTest(motor, MAVLink.MOTOR_TEST_THROTTLE_TYPE.MOTOR_TEST_THROTTLE_PERCENT,
(int) NUM_thr_percent.Value, (int) NUM_duration.Value))
{
}
else
{
CustomMessageBox.Show("Command was denied by the autopilot");
}
}
catch (Exception ex)
{
CustomMessageBox.Show("Failed to test motor\n" + ex);
}
}
private void linkLabel1_LinkClicked(object sender, LinkLabelLinkClickedEventArgs e)
{
try
{
Process.Start("http://copter.ardupilot.com/wiki/motor-setup/");
}
catch
{
CustomMessageBox.Show("Bad default system association", Strings.ERROR);
}
}
}
} | tcheehow/MissionPlanner | GCSViews/ConfigurationView/ConfigMotorTest.cs | C# | gpl-3.0 | 3,970 |
require 'spec_helper'
describe 'foreman::plugin::abrt' do
let(:facts) do
on_supported_os['redhat-7-x86_64']
end
it { should contain_foreman__plugin('abrt') }
end
| lazyfrosch/puppet-foreman | spec/classes/foreman_plugin_abrt_spec.rb | Ruby | gpl-3.0 | 174 |
/*******************************************************************************
* Copyright 2011 See libgdx AUTHORS file.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.badlogic.gdx.graphics;
import java.nio.Buffer;
import java.nio.IntBuffer;
import java.util.HashMap;
import java.util.Map;
import com.badlogic.gdx.backends.gwt.GwtFileHandle;
import com.badlogic.gdx.files.FileHandle;
import com.badlogic.gdx.utils.BufferUtils;
import com.badlogic.gdx.utils.Disposable;
import com.badlogic.gdx.utils.GdxRuntimeException;
import com.google.gwt.canvas.client.Canvas;
import com.google.gwt.canvas.dom.client.CanvasPixelArray;
import com.google.gwt.canvas.dom.client.Context2d;
import com.google.gwt.canvas.dom.client.Context2d.Composite;
import com.google.gwt.dom.client.CanvasElement;
import com.google.gwt.dom.client.ImageElement;
public class Pixmap implements Disposable {
public static Map<Integer, Pixmap> pixmaps = new HashMap<Integer, Pixmap>();
static int nextId = 0;
/** Different pixel formats.
*
* @author mzechner */
public enum Format {
Alpha, Intensity, LuminanceAlpha, RGB565, RGBA4444, RGB888, RGBA8888;
}
/** Blending functions to be set with {@link Pixmap#setBlending}.
* @author mzechner */
public enum Blending {
None, SourceOver
}
/** Filters to be used with {@link Pixmap#drawPixmap(Pixmap, int, int, int, int, int, int, int, int)}.
*
* @author mzechner */
public enum Filter {
NearestNeighbour, BiLinear
}
int width;
int height;
Format format;
Canvas canvas;
Context2d context;
int id;
IntBuffer buffer;
int r = 255, g = 255, b = 255;
float a;
String color = make(r, g, b, a);
static Blending blending;
CanvasPixelArray pixels;
public Context2d getContext(){
return context;
}
public Pixmap (FileHandle file) {
GwtFileHandle gwtFile = (GwtFileHandle)file;
ImageElement img = gwtFile.preloader.images.get(file.path());
if (img == null) throw new GdxRuntimeException("Couldn't load image '" + file.path() + "', file does not exist");
create(img.getWidth(), img.getHeight(), Format.RGBA8888);
context.setGlobalCompositeOperation(Composite.COPY);
context.drawImage(img, 0, 0);
context.setGlobalCompositeOperation(getComposite());
}
private static Composite getComposite () {
return blending == Blending.None ? Composite.COPY : Composite.SOURCE_OVER;
}
public Pixmap (ImageElement img) {
create(img.getWidth(), img.getHeight(), Format.RGBA8888);
context.drawImage(img, 0, 0);
}
public Pixmap (int width, int height, Format format) {
create(width, height, format);
}
private void create (int width, int height, Format format2) {
this.width = width;
this.height = height;
this.format = Format.RGBA8888;
canvas = Canvas.createIfSupported();
canvas.getCanvasElement().setWidth(width);
canvas.getCanvasElement().setHeight(height);
context = canvas.getContext2d();
context.setGlobalCompositeOperation(getComposite());
buffer = BufferUtils.newIntBuffer(1);
id = nextId++;
buffer.put(0, id);
pixmaps.put(id, this);
}
public static String make (int r2, int g2, int b2, float a2) {
return "rgba(" + r2 + "," + g2 + "," + b2 + "," + a2 + ")";
}
/** Sets the type of {@link Blending} to be used for all operations. Default is {@link Blending#SourceOver}.
* @param blending the blending type */
public static void setBlending (Blending blending) {
Pixmap.blending = blending;
Composite composite = getComposite();
for (Pixmap pixmap : pixmaps.values()) {
pixmap.context.setGlobalCompositeOperation(composite);
}
}
/** @return the currently set {@link Blending} */
public static Blending getBlending () {
return blending;
}
/** Sets the type of interpolation {@link Filter} to be used in conjunction with
* {@link Pixmap#drawPixmap(Pixmap, int, int, int, int, int, int, int, int)}.
* @param filter the filter. */
public static void setFilter (Filter filter) {
}
public Format getFormat () {
return format;
}
public int getGLInternalFormat () {
return GL20.GL_RGBA;
}
public int getGLFormat () {
return GL20.GL_RGBA;
}
public int getGLType () {
return GL20.GL_UNSIGNED_BYTE;
}
public int getWidth () {
return width;
}
public int getHeight () {
return height;
}
public Buffer getPixels () {
return buffer;
}
@Override
public void dispose () {
pixmaps.remove(id);
}
public CanvasElement getCanvasElement () {
return canvas.getCanvasElement();
}
/** Sets the color for the following drawing operations
* @param color the color, encoded as RGBA8888 */
public void setColor (int color) {
r = (color >>> 24) & 0xff;
g = (color >>> 16) & 0xff;
b = (color >>> 8) & 0xff;
a = (color & 0xff) / 255f;
this.color = make(r, g, b, a);
context.setFillStyle(this.color);
context.setStrokeStyle(this.color);
}
/** Sets the color for the following drawing operations.
*
* @param r The red component.
* @param g The green component.
* @param b The blue component.
* @param a The alpha component. */
public void setColor (float r, float g, float b, float a) {
this.r = (int)(r * 255);
this.g = (int)(g * 255);
this.b = (int)(b * 255);
this.a = a;
color = make(this.r, this.g, this.b, this.a);
context.setFillStyle(color);
context.setStrokeStyle(this.color);
}
/** Sets the color for the following drawing operations.
* @param color The color. */
public void setColor (Color color) {
setColor(color.r, color.g, color.b, color.a);
}
/** Fills the complete bitmap with the currently set color. */
public void fill () {
context.fillRect(0, 0, getWidth(), getHeight());
}
// /**
// * Sets the width in pixels of strokes.
// *
// * @param width The stroke width in pixels.
// */
// public void setStrokeWidth (int width);
/** Draws a line between the given coordinates using the currently set color.
*
* @param x The x-coodinate of the first point
* @param y The y-coordinate of the first point
* @param x2 The x-coordinate of the first point
* @param y2 The y-coordinate of the first point */
public void drawLine (int x, int y, int x2, int y2) {
context.beginPath();
context.moveTo(x, y);
context.lineTo(x2, y2);
context.stroke();
context.closePath();
}
/** Draws a rectangle outline starting at x, y extending by width to the right and by height downwards (y-axis points downwards)
* using the current color.
*
* @param x The x coordinate
* @param y The y coordinate
* @param width The width in pixels
* @param height The height in pixels */
public void drawRectangle (int x, int y, int width, int height) {
context.beginPath();
context.rect(x, y, width, height);
context.stroke();
context.closePath();
}
/** Draws an area form another Pixmap to this Pixmap.
*
* @param pixmap The other Pixmap
* @param x The target x-coordinate (top left corner)
* @param y The target y-coordinate (top left corner) */
public void drawPixmap (Pixmap pixmap, int x, int y) {
context.drawImage(pixmap.getCanvasElement(), x, y);
}
/** Draws an area form another Pixmap to this Pixmap.
*
* @param pixmap The other Pixmap
* @param x The target x-coordinate (top left corner)
* @param y The target y-coordinate (top left corner)
* @param srcx The source x-coordinate (top left corner)
* @param srcy The source y-coordinate (top left corner);
* @param srcWidth The width of the area form the other Pixmap in pixels
* @param srcHeight The height of the area form the other Pixmap in pixles */
public void drawPixmap (Pixmap pixmap, int x, int y, int srcx, int srcy, int srcWidth, int srcHeight) {
context.drawImage(pixmap.getCanvasElement(), srcx, srcy, srcWidth, srcHeight, x, y, srcWidth, srcHeight);
}
/** Draws an area form another Pixmap to this Pixmap. This will automatically scale and stretch the source image to the
* specified target rectangle. Use {@link Pixmap#setFilter(Filter)} to specify the type of filtering to be used (nearest
* neighbour or bilinear).
*
* @param pixmap The other Pixmap
* @param srcx The source x-coordinate (top left corner)
* @param srcy The source y-coordinate (top left corner);
* @param srcWidth The width of the area form the other Pixmap in pixels
* @param srcHeight The height of the area form the other Pixmap in pixles
* @param dstx The target x-coordinate (top left corner)
* @param dsty The target y-coordinate (top left corner)
* @param dstWidth The target width
* @param dstHeight the target height */
public void drawPixmap (Pixmap pixmap, int srcx, int srcy, int srcWidth, int srcHeight, int dstx, int dsty, int dstWidth,
int dstHeight) {
context.drawImage(pixmap.getCanvasElement(), srcx, srcy, srcWidth, srcHeight, dstx, dsty, dstWidth, dstHeight);
}
/** Fills a rectangle starting at x, y extending by width to the right and by height downwards (y-axis points downwards) using
* the current color.
*
* @param x The x coordinate
* @param y The y coordinate
* @param width The width in pixels
* @param height The height in pixels */
public void fillRectangle (int x, int y, int width, int height) {
context.fillRect(x, y, width, height);
}
/** Draws a circle outline with the center at x,y and a radius using the current color and stroke width.
*
* @param x The x-coordinate of the center
* @param y The y-coordinate of the center
* @param radius The radius in pixels */
public void drawCircle (int x, int y, int radius) {
context.beginPath();
context.arc(x, y, radius, 0, 2 * Math.PI, false);
context.stroke();
context.closePath();
}
/** Fills a circle with the center at x,y and a radius using the current color.
*
* @param x The x-coordinate of the center
* @param y The y-coordinate of the center
* @param radius The radius in pixels */
public void fillCircle (int x, int y, int radius) {
context.beginPath();
context.arc(x, y, radius, 0, 2 * Math.PI, false);
context.fill();
context.closePath();
}
/** Fills a triangle with vertices at x1,y1 and x2,y2 and x3,y3 using the current color.
*
* @param x1 The x-coordinate of vertex 1
* @param y1 The y-coordinate of vertex 1
* @param x2 The x-coordinate of vertex 2
* @param y2 The y-coordinate of vertex 2
* @param x3 The x-coordinate of vertex 3
* @param y3 The y-coordinate of vertex 3 */
public void fillTriangle (int x1, int y1, int x2, int y2, int x3, int y3) {
context.beginPath();
context.moveTo(x1,y1);
context.lineTo(x2,y2);
context.lineTo(x3,y3);
context.lineTo(x1,y1);
context.fill();
context.closePath();
}
/** Returns the 32-bit RGBA8888 value of the pixel at x, y. For Alpha formats the RGB components will be one.
*
* @param x The x-coordinate
* @param y The y-coordinate
* @return The pixel color in RGBA8888 format. */
public int getPixel (int x, int y) {
if (pixels == null) pixels = context.getImageData(0, 0, width, height).getData();
int i = x * 4 + y * width * 4;
int r = pixels.get(i + 0) & 0xff;
int g = pixels.get(i + 1) & 0xff;
int b = pixels.get(i + 2) & 0xff;
int a = pixels.get(i + 3) & 0xff;
return (r << 24) | (g << 16) | (b << 8) | (a);
}
/** Draws a pixel at the given location with the current color.
*
* @param x the x-coordinate
* @param y the y-coordinate */
public void drawPixel (int x, int y) {
context.fillRect(x, y, 1, 1);
}
/** Draws a pixel at the given location with the given color.
*
* @param x the x-coordinate
* @param y the y-coordinate
* @param color the color in RGBA8888 format. */
public void drawPixel (int x, int y, int color) {
setColor(color);
drawPixel(x, y);
}
}
| opensciencemap/vtm | vtm-web/src/org/oscim/gdx/emu/com/badlogic/gdx/graphics/Pixmap.java | Java | gpl-3.0 | 12,582 |
url: http://sanskrit.uohyd.ac.in/cgi-bin/scl/sandhi_splitter/sandhi_splitter.cgi?encoding=Unicode&sandhi_type=s&word=कविस्तु<div id='finalout' style='border-style:solid; border-width:1px;padding:10px;color:blue;font-size:14px;height:200px'>कविस्तु = <a title = "कवि पुं 1 एक/कवि स्त्री 1 एक">कविः</a>+<a title = "तु अव्य">तु</a>/<script type="text/javascript">
function toggleMe(a){
var e=document.getElementById(a);
if(!e)return true;
if(e.style.display=="none"){
e.style.display="block";document.getElementById("more").style.display="none"; document.getElementById("less").style.display="block";
}
else{
e.style.display="none";document.getElementById("less").style.display="none"; document.getElementById("more").style.display="block";
}
return true;
}
</script>
<input type="button" onclick="return toggleMe('para1')" value="More" id="more"> <input type="button" onclick="return toggleMe('para1')" value="Less" id="less" style="display:none;" > <div id="para1" style="display:none; height:15px; border-style:none;border-width:1px;">
<a title = "कवि पुं 1 एक/कवि स्त्री 1 एक">कविः</a>+<a title = "तु अव्य">तु</a>/<a title = "कवि नपुं 1 एक/कवि नपुं 2 एक/कवि नपुं 8 एक">कवि</a>+<a title = "स्तु नपुं 1 एक/स्तु नपुं 2 एक/स्तु नपुं 8 एक">स्तु</a>/<a title = "क पुं 8 एक/क नपुं 8 एक">क</a>+<a title = "वि पुं 1 एक">विः</a>+<a title = "तु अव्य">तु</a>/<a title = "कु स्त्री 8 एक">को</a>+<a title = "इ पुं 1 एक">इः</a>+<a title = "तु अव्य">तु</a>/<a title = "क पुं 8 एक/क नपुं 8 एक">क</a>+<a title = "वि अव्य/ऊ स्त्री 7 एक/वि नपुं 1 एक/वि नपुं 2 एक/वि नपुं 8 एक/वी पुं 8 एक/वी स्त्री 8 एक/वी नपुं 1 एक/वी नपुं 2 एक/वी नपुं 8 एक/न्सुलू नपुं 7 एक">वि</a>+<a title = "स्तु नपुं 1 एक/स्तु नपुं 2 एक/स्तु नपुं 8 एक">स्तु</a>/</div><br /> | sanskritiitd/sanskrit | uohCorpus.fil/uoh/uoh.filteredcorpus.txt_output/Sanskritkathakunj_ext.txt.out.dict_10266_sam.html | HTML | gpl-3.0 | 2,435 |
/*
* Copyright (C) 2013 jonas.oreland@gmail.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.runnerup.export;
import android.annotation.TargetApi;
import android.content.ContentValues;
import android.database.sqlite.SQLiteDatabase;
import android.os.Build;
import android.util.Base64;
import android.util.Log;
import org.json.JSONException;
import org.json.JSONObject;
import org.runnerup.common.util.Constants.DB;
import org.runnerup.export.format.GPX;
import org.runnerup.util.KXmlSerializer;
import org.w3c.dom.DOMException;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import java.io.BufferedInputStream;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
@TargetApi(Build.VERSION_CODES.FROYO)
/**
 * Synchronizer that uploads activities to jogg.se via its SOAP-over-HTTP
 * iphoneservice endpoint. Login is probed by sending an empty SaveGpx call
 * and inspecting the ResponseCode of the reply.
 */
public class JoggSESynchronizer extends DefaultSynchronizer {

    public static final String NAME = "jogg.se";
    // Service-wide credentials, loaded once per process from the app config.
    private static String MASTER_USER = null;
    private static String MASTER_KEY = null;
    public static final String BASE_URL = "http://jogg.se/iphoneservice/iphoneservice.asmx";

    long id = 0;
    private String username = null;
    private String password = null;
    private boolean isConnected = false;

    JoggSESynchronizer(final SyncManager syncManager) {
        if (MASTER_USER == null || MASTER_KEY == null) {
            try {
                final JSONObject tmp = new JSONObject(syncManager.loadData(this));
                MASTER_USER = tmp.getString("MASTER_USER");
                MASTER_KEY = tmp.getString("MASTER_KEY");
            } catch (final Exception ex) {
                ex.printStackTrace();
            }
        }
    }

    @Override
    public long getId() {
        return id;
    }

    @Override
    public String getName() {
        return NAME;
    }

    /** Loads the database id and the stored username/password for this account row. */
    @Override
    public void init(final ContentValues config) {
        id = config.getAsLong("_id");
        final String authToken = config.getAsString(DB.ACCOUNT.AUTH_CONFIG);
        if (authToken != null) {
            try {
                final JSONObject tmp = new JSONObject(authToken);
                username = tmp.optString("username", null);
                password = tmp.optString("password", null);
            } catch (final JSONException e) {
                e.printStackTrace();
            }
        }
    }

    @Override
    public boolean isConfigured() {
        // Simplified from an if/return-true/return-false chain.
        return username != null && password != null;
    }

    /** Serializes the credentials back into the JSON form consumed by init(). */
    @Override
    public String getAuthConfig() {
        final JSONObject tmp = new JSONObject();
        try {
            tmp.put("username", username);
            tmp.put("password", password);
        } catch (final JSONException e) {
            e.printStackTrace();
        }
        return tmp.toString();
    }

    @Override
    public void reset() {
        username = null;
        password = null;
        isConnected = false;
    }

    /**
     * Verifies the credentials against the service.
     *
     * @return Status.OK on success, NEED_AUTH when credentials are missing or
     *         rejected, ERROR (with {@code s.ex} set) on transport failures.
     */
    @Override
    public Status connect() {
        if (isConnected) {
            return Status.OK;
        }

        Status s = Status.NEED_AUTH;
        s.authMethod = Synchronizer.AuthMethod.USER_PASS;
        if (username == null || password == null) {
            return s;
        }

        Exception ex = null;
        HttpURLConnection conn = null;
        try {
            /**
             * Login by making an empty save-gpx call and see what error message
             * you get Invalid/"Invalid Userdetails" => wrong user/pass
             * NOK/"Root element is missing" => OK
             */
            final String LOGIN_OK = "NOK";

            conn = (HttpURLConnection) new URL(BASE_URL).openConnection();
            conn.setDoOutput(true);
            conn.setRequestMethod(RequestMethod.POST.name());
            conn.addRequestProperty("Host", "jogg.se");
            conn.addRequestProperty("Content-Type", "text/xml");

            final BufferedWriter wr = new BufferedWriter(new PrintWriter(conn.getOutputStream()));
            saveGPX(wr, "");
            wr.flush();
            wr.close();

            final InputStream in = new BufferedInputStream(conn.getInputStream());
            final DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
            final DocumentBuilder db = dbf.newDocumentBuilder();
            final InputSource is = new InputSource();
            is.setByteStream(in);
            final Document doc = db.parse(is);
            in.close(); // BUGFIX: close the response stream before disconnecting
            conn.disconnect();
            conn = null;

            final String[] path = {
                    "soap:Envelope", "soap:Body", "SaveGpxResponse", "SaveGpxResult",
                    "ResponseStatus", "ResponseCode"
            };
            final Node e = navigate(doc, path);
            // BUGFIX: only dereference the reply node after the null checks; the
            // previous code logged e.getTextContent() unconditionally and could
            // throw a NullPointerException on an unexpected server reply.
            if (e != null && e.getTextContent() != null) {
                Log.e(getName(), "reply: " + e.getTextContent());
                if (LOGIN_OK.contentEquals(e.getTextContent())) {
                    isConnected = true;
                    return Synchronizer.Status.OK;
                }
            }
            return s;
        } catch (final MalformedURLException e) {
            ex = e;
        } catch (final IOException e) {
            ex = e;
        } catch (final ParserConfigurationException e) {
            ex = e;
        } catch (final SAXException e) {
            ex = e;
        }

        if (conn != null)
            conn.disconnect();

        s = Synchronizer.Status.ERROR;
        s.ex = ex;
        if (ex != null) {
            ex.printStackTrace();
        }
        return s;
    }

    /**
     * Returns the first element whose tag matches the LAST component of
     * {@code path}, or null when absent.
     *
     * NOTE(review): the full SOAP path is not validated, only the final tag
     * name is looked up (see TODO).
     */
    private static Node navigate(final Document doc, final String[] path) {
        // TODO improve...
        final NodeList list = doc.getElementsByTagName(path[path.length - 1]);
        return list.item(0);
    }

    /**
     * Writes a complete SaveGpx SOAP 1.2 envelope to {@code wr}, embedding the
     * GPX document base64-encoded plus the user and master credentials.
     */
    private void saveGPX(final Writer wr, final String gpx) throws IllegalArgumentException,
            IllegalStateException, IOException {
        final KXmlSerializer mXML = new KXmlSerializer();
        mXML.setFeature(
                "http://xmlpull.org/v1/doc/features.html#indent-output",
                true);
        mXML.setOutput(wr);
        mXML.startDocument("UTF-8", true);
        mXML.startTag("", "soap12:Envelope");
        mXML.attribute("", "xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance");
        mXML.attribute("", "xmlns:xsd", "http://www.w3.org/2001/XMLSchema");
        mXML.attribute("", "xmlns:soap12", "http://www.w3.org/2003/05/soap-envelope");
        mXML.startTag("", "soap12:Body");
        mXML.startTag("", "SaveGpx");
        mXML.attribute("", "xmlns", "http://jogg.se/IphoneService");
        mXML.startTag("", "gpx");
        mXML.text(android.util.Base64.encodeToString(gpx.getBytes(), Base64.NO_WRAP));
        mXML.endTag("", "gpx");
        mXML.startTag("", "user");
        mXML.startTag("", "Email");
        mXML.text(username);
        mXML.endTag("", "Email");
        mXML.startTag("", "Password");
        mXML.text(password);
        mXML.endTag("", "Password");
        mXML.endTag("", "user");
        mXML.startTag("", "credentials");
        mXML.startTag("", "MasterUser");
        mXML.text(MASTER_USER);
        mXML.endTag("", "MasterUser");
        mXML.startTag("", "MasterKey");
        mXML.text(MASTER_KEY);
        mXML.endTag("", "MasterKey");
        mXML.endTag("", "credentials");
        mXML.endTag("", "SaveGpx");
        mXML.endTag("", "soap12:Body");
        mXML.endTag("", "soap12:Envelope");
        mXML.endDocument();
        mXML.flush();
    }

    /**
     * Exports activity {@code mID} from the local database as GPX and uploads
     * it via SaveGpx. Returns OK with {@code s.activityId} set on success,
     * ERROR otherwise.
     */
    @Override
    public Status upload(final SQLiteDatabase db, final long mID) {
        Status s;
        if ((s = connect()) != Status.OK) {
            return s;
        }

        Exception ex = null;
        HttpURLConnection conn = null;
        final GPX gpx = new GPX(db);
        try {
            final StringWriter gpxString = new StringWriter();
            gpx.export(mID, gpxString);

            conn = (HttpURLConnection) new URL(BASE_URL).openConnection();
            conn.setDoOutput(true);
            conn.setRequestMethod(RequestMethod.POST.name());
            conn.addRequestProperty("Host", "jogg.se");
            conn.addRequestProperty("Content-Type", "text/xml; charset=utf-8");

            final BufferedWriter wr = new BufferedWriter(new PrintWriter(
                    conn.getOutputStream()));
            saveGPX(wr, gpxString.toString());
            wr.flush();
            wr.close();

            final InputStream in = new BufferedInputStream(conn.getInputStream());
            final DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
            final DocumentBuilder dob = dbf.newDocumentBuilder();
            final InputSource is = new InputSource();
            is.setByteStream(in);
            final Document doc = dob.parse(is);
            in.close(); // BUGFIX: close the response stream before disconnecting
            conn.disconnect();
            conn = null;

            final String[] path = {
                    "soap:Envelope", "soap:Body",
                    "SaveGpxResponse", "SaveGpxResult", "ResponseStatus",
                    "ResponseCode"
            };
            final Node e = navigate(doc, path);
            // BUGFIX: dereference the reply node only after the null checks,
            // mirroring the fix in connect().
            if (e != null && e.getTextContent() != null) {
                Log.e(getName(), "reply: " + e.getTextContent());
                if ("OK".contentEquals(e.getTextContent())) {
                    s = Status.OK;
                    s.activityId = mID;
                    return s;
                }
                // Non-OK reply: surface the server's response code as the error.
                throw new Exception(e.getTextContent());
            }
            throw new Exception("Unexpected reply from " + BASE_URL);
        } catch (final MalformedURLException e) {
            ex = e;
        } catch (final IOException e) {
            ex = e;
        } catch (final ParserConfigurationException e) {
            ex = e;
        } catch (final SAXException e) {
            ex = e;
        } catch (final DOMException e) {
            ex = e;
            e.printStackTrace();
        } catch (final Exception e) {
            ex = e;
        }

        if (conn != null)
            conn.disconnect();

        s = Synchronizer.Status.ERROR;
        s.ex = ex;
        s.activityId = mID;
        if (ex != null) {
            ex.printStackTrace();
        }
        return s;
    }

    @Override
    public boolean checkSupport(Synchronizer.Feature f) {
        switch (f) {
            case UPLOAD:
                return true;
            default:
                return false;
        }
    }
}
| feilaoda/runnerup | app/src/org/runnerup/export/JoggSESynchronizer.java | Java | gpl-3.0 | 11,318 |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! DOM bindings for `CharacterData`.
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::CharacterDataBinding::CharacterDataMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeBinding::NodeMethods;
use dom::bindings::codegen::Bindings::ProcessingInstructionBinding::ProcessingInstructionMethods;
use dom::bindings::codegen::InheritTypes::{CharacterDataTypeId, NodeTypeId};
use dom::bindings::codegen::UnionTypes::NodeOrString;
use dom::bindings::error::{Error, ErrorResult, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{LayoutJS, Root};
use dom::bindings::str::DOMString;
use dom::comment::Comment;
use dom::document::Document;
use dom::element::Element;
use dom::node::{ChildrenMutation, Node, NodeDamage};
use dom::processinginstruction::ProcessingInstruction;
use dom::text::Text;
use dom::virtualmethods::vtable_for;
use dom_struct::dom_struct;
use servo_config::opts;
use std::cell::Ref;
// https://dom.spec.whatwg.org/#characterdata
#[dom_struct]
pub struct CharacterData {
    /// The base DOM node state this abstract interface inherits from.
    node: Node,
    /// The text payload; interior-mutable so DOM methods taking `&self`
    /// can replace or append to it.
    data: DOMRefCell<DOMString>,
}
impl CharacterData {
    /// Builds the inherited part of a `CharacterData` for a subclass
    /// constructor; the node belongs to `document`.
    pub fn new_inherited(data: DOMString, document: &Document) -> CharacterData {
        CharacterData {
            node: Node::new_inherited(document),
            data: DOMRefCell::new(data),
        }
    }

    /// Creates a new node of the same concrete type (Comment, Text or
    /// ProcessingInstruction) carrying `data`, owned by `document`.
    pub fn clone_with_data(&self, data: DOMString, document: &Document) -> Root<Node> {
        match self.upcast::<Node>().type_id() {
            NodeTypeId::CharacterData(CharacterDataTypeId::Comment) => {
                Root::upcast(Comment::new(data, &document))
            }
            NodeTypeId::CharacterData(CharacterDataTypeId::ProcessingInstruction) => {
                // A PI keeps its original target alongside the new data.
                let pi = self.downcast::<ProcessingInstruction>().unwrap();
                Root::upcast(ProcessingInstruction::new(pi.Target(), data, &document))
            },
            NodeTypeId::CharacterData(CharacterDataTypeId::Text) => {
                Root::upcast(Text::new(data, &document))
            },
            // CharacterData is abstract: only the three variants above exist.
            _ => unreachable!(),
        }
    }

    /// Borrows the current text content without copying.
    #[inline]
    pub fn data(&self) -> Ref<DOMString> {
        self.data.borrow()
    }

    /// Appends `data` to the existing content and marks the node dirty.
    #[inline]
    pub fn append_data(&self, data: &str) {
        self.data.borrow_mut().push_str(data);
        self.content_changed();
    }

    /// Marks this node as damaged so the new text is picked up downstream.
    fn content_changed(&self) {
        let node = self.upcast::<Node>();
        node.dirty(NodeDamage::OtherNodeDamage);
    }
}
impl CharacterDataMethods for CharacterData {
    // https://dom.spec.whatwg.org/#dom-characterdata-data
    fn Data(&self) -> DOMString {
        self.data.borrow().clone()
    }

    // https://dom.spec.whatwg.org/#dom-characterdata-data
    // Replaces the whole content and fixes up any Ranges referencing it.
    fn SetData(&self, data: DOMString) {
        // Per spec, lengths are measured in UTF-16 code units.
        let old_length = self.Length();
        let new_length = data.encode_utf16().count() as u32;
        *self.data.borrow_mut() = data;
        self.content_changed();
        let node = self.upcast::<Node>();
        node.ranges().replace_code_units(node, 0, old_length, new_length);

        // If this is a Text node, we might need to re-parse (say, if our parent
        // is a <style> element.) We don't need to if this is a Comment or
        // ProcessingInstruction.
        if self.is::<Text>() {
            if let Some(parent_node) = node.GetParentNode() {
                let mutation = ChildrenMutation::ChangeText;
                vtable_for(&parent_node).children_changed(&mutation);
            }
        }
    }

    // https://dom.spec.whatwg.org/#dom-characterdata-length
    // Length is in UTF-16 code units, not bytes or chars.
    fn Length(&self) -> u32 {
        self.data.borrow().encode_utf16().count() as u32
    }

    // https://dom.spec.whatwg.org/#dom-characterdata-substringdata
    fn SubstringData(&self, offset: u32, count: u32) -> Fallible<DOMString> {
        let data = self.data.borrow();
        // Step 1.
        let mut substring = String::new();
        let remaining;
        match split_at_utf16_code_unit_offset(&data, offset) {
            Ok((_, astral, s)) => {
                // As if we had split the UTF-16 surrogate pair in half
                // and then transcoded that to UTF-8 lossily,
                // since our DOMString is currently strict UTF-8.
                if astral.is_some() {
                    substring = substring + "\u{FFFD}";
                }
                remaining = s;
            }
            // Step 2.
            Err(()) => return Err(Error::IndexSize),
        }
        match split_at_utf16_code_unit_offset(remaining, count) {
            // Steps 3.
            Err(()) => substring = substring + remaining,
            // Steps 4.
            Ok((s, astral, _)) => {
                substring = substring + s;
                // As if we had split the UTF-16 surrogate pair in half
                // and then transcoded that to UTF-8 lossily,
                // since our DOMString is currently strict UTF-8.
                if astral.is_some() {
                    substring = substring + "\u{FFFD}";
                }
            }
        };
        Ok(DOMString::from(substring))
    }

    // https://dom.spec.whatwg.org/#dom-characterdata-appenddatadata
    fn AppendData(&self, data: DOMString) {
        // FIXME(ajeffrey): Efficient append on DOMStrings?
        self.append_data(&*data);
    }

    // https://dom.spec.whatwg.org/#dom-characterdata-insertdataoffset-data
    // Insert is replace with a zero-length deletion.
    fn InsertData(&self, offset: u32, arg: DOMString) -> ErrorResult {
        self.ReplaceData(offset, 0, arg)
    }

    // https://dom.spec.whatwg.org/#dom-characterdata-deletedataoffset-count
    // Delete is replace with an empty replacement string.
    fn DeleteData(&self, offset: u32, count: u32) -> ErrorResult {
        self.ReplaceData(offset, count, DOMString::new())
    }

    // https://dom.spec.whatwg.org/#dom-characterdata-replacedata
    fn ReplaceData(&self, offset: u32, count: u32, arg: DOMString) -> ErrorResult {
        let mut new_data;
        {
            // Inner scope so the immutable borrow of `data` ends before the
            // mutable borrow below.
            let data = self.data.borrow();
            let prefix;
            let replacement_before;
            let remaining;
            match split_at_utf16_code_unit_offset(&data, offset) {
                Ok((p, astral, r)) => {
                    prefix = p;
                    // As if we had split the UTF-16 surrogate pair in half
                    // and then transcoded that to UTF-8 lossily,
                    // since our DOMString is currently strict UTF-8.
                    replacement_before = if astral.is_some() { "\u{FFFD}" } else { "" };
                    remaining = r;
                }
                // Step 2.
                Err(()) => return Err(Error::IndexSize),
            };
            let replacement_after;
            let suffix;
            match split_at_utf16_code_unit_offset(remaining, count) {
                // Steps 3.
                Err(()) => {
                    replacement_after = "";
                    suffix = "";
                }
                Ok((_, astral, s)) => {
                    // As if we had split the UTF-16 surrogate pair in half
                    // and then transcoded that to UTF-8 lossily,
                    // since our DOMString is currently strict UTF-8.
                    replacement_after = if astral.is_some() { "\u{FFFD}" } else { "" };
                    suffix = s;
                }
            };
            // Step 4: Mutation observers.
            // Step 5 to 7.
            new_data = String::with_capacity(
                prefix.len() +
                replacement_before.len() +
                arg.len() +
                replacement_after.len() +
                suffix.len());
            new_data.push_str(prefix);
            new_data.push_str(replacement_before);
            new_data.push_str(&arg);
            new_data.push_str(replacement_after);
            new_data.push_str(suffix);
        }
        *self.data.borrow_mut() = DOMString::from(new_data);
        self.content_changed();
        // Steps 8-11: adjust any Ranges pointing into the replaced span.
        let node = self.upcast::<Node>();
        node.ranges().replace_code_units(
            node, offset, count, arg.encode_utf16().count() as u32);
        Ok(())
    }

    // https://dom.spec.whatwg.org/#dom-childnode-before
    fn Before(&self, nodes: Vec<NodeOrString>) -> ErrorResult {
        self.upcast::<Node>().before(nodes)
    }

    // https://dom.spec.whatwg.org/#dom-childnode-after
    fn After(&self, nodes: Vec<NodeOrString>) -> ErrorResult {
        self.upcast::<Node>().after(nodes)
    }

    // https://dom.spec.whatwg.org/#dom-childnode-replacewith
    fn ReplaceWith(&self, nodes: Vec<NodeOrString>) -> ErrorResult {
        self.upcast::<Node>().replace_with(nodes)
    }

    // https://dom.spec.whatwg.org/#dom-childnode-remove
    fn Remove(&self) {
        let node = self.upcast::<Node>();
        node.remove_self();
    }

    // https://dom.spec.whatwg.org/#dom-nondocumenttypechildnode-previouselementsibling
    fn GetPreviousElementSibling(&self) -> Option<Root<Element>> {
        // filter_map(Root::downcast) skips non-Element siblings.
        self.upcast::<Node>().preceding_siblings().filter_map(Root::downcast).next()
    }

    // https://dom.spec.whatwg.org/#dom-nondocumenttypechildnode-nextelementsibling
    fn GetNextElementSibling(&self) -> Option<Root<Element>> {
        self.upcast::<Node>().following_siblings().filter_map(Root::downcast).next()
    }
}
#[allow(unsafe_code)]
/// Accessors used by the layout thread to read character data.
pub trait LayoutCharacterDataHelpers {
    /// Returns the node's text without the usual script-thread borrow
    /// checks; `unsafe` because the caller must guarantee no concurrent
    /// mutation (see the impl below).
    unsafe fn data_for_layout(&self) -> &str;
}
#[allow(unsafe_code)]
impl LayoutCharacterDataHelpers for LayoutJS<CharacterData> {
    #[inline]
    unsafe fn data_for_layout(&self) -> &str {
        // `borrow_for_layout` bypasses the DOMRefCell borrow flags.
        // NOTE(review): soundness relies on layout having exclusive access
        // to the DOM while this runs — confirm against DOMRefCell's contract.
        &(*self.unsafe_get()).data.borrow_for_layout()
    }
}
/// Split `s` at `offset`, where `offset` counts UTF-16 code units from the
/// start of the string.
///
/// * `Err(())` — `offset` lies past the end of the string.
/// * `Ok((before, None, after))` — `offset` falls on a code-point boundary:
///   `before` is the UTF-8 text covering the first `offset` UTF-16 code
///   units and `after` is the rest.
/// * `Ok((before, Some(ch), after))` — `offset` lands between the two
///   halves of a surrogate pair; `ch` is the astral code point that pair
///   encodes, and `before`/`after` exclude it entirely.
///
/// # Panics
///
/// The third variant is only produced when the `-Z replace-surrogates`
/// command-line option is enabled. Without it, hitting the middle of a
/// surrogate pair panics instead of returning.
fn split_at_utf16_code_unit_offset(s: &str, offset: u32) -> Result<(&str, Option<char>, &str), ()> {
    let mut units_seen = 0;
    for (byte_pos, ch) in s.char_indices() {
        if units_seen == offset {
            // The requested offset sits exactly on this code-point boundary.
            return Ok((&s[..byte_pos], None, &s[byte_pos..]));
        }
        units_seen += 1;
        if ch > '\u{FFFF}' {
            // Astral code point: occupies two UTF-16 code units.
            if units_seen == offset {
                if opts::get().replace_surrogates {
                    debug_assert!(ch.len_utf8() == 4);
                    return Ok((&s[..byte_pos], Some(ch), &s[byte_pos + ch.len_utf8()..]))
                }
                panic!("\n\n\
                    Would split a surrogate pair in CharacterData API.\n\
                    If you see this in real content, please comment with the URL\n\
                    on https://github.com/servo/servo/issues/6873\n\
                \n");
            }
            units_seen += 1;
        }
    }
    // Ran off the end: valid only when the offset equals the total length.
    if units_seen == offset {
        Ok((s, None, ""))
    } else {
        Err(())
    }
}
| MortimerGoro/servo | components/script/dom/characterdata.rs | Rust | mpl-2.0 | 11,823 |
package egoscale
// ServiceOffering corresponds to the Compute Offerings
//
// A service offering correspond to some hardware features (CPU, RAM).
//
// Each field maps one-to-one onto the JSON payload of the CloudStack
// listServiceOfferings response; see the `json`/`doc` struct tags.
//
// See: http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/service_offerings.html
type ServiceOffering struct {
	Authorized                bool              `json:"authorized,omitempty" doc:"is the account/domain authorized to use this service offering"`
	CPUNumber                 int               `json:"cpunumber,omitempty" doc:"the number of CPU"`
	CPUSpeed                  int               `json:"cpuspeed,omitempty" doc:"the clock rate CPU speed in Mhz"`
	Created                   string            `json:"created,omitempty" doc:"the date this service offering was created"`
	DefaultUse                bool              `json:"defaultuse,omitempty" doc:"is this a default system vm offering"`
	DeploymentPlanner         string            `json:"deploymentplanner,omitempty" doc:"deployment strategy used to deploy VM."`
	DiskBytesReadRate         int64             `json:"diskBytesReadRate,omitempty" doc:"bytes read rate of the service offering"`
	DiskBytesWriteRate        int64             `json:"diskBytesWriteRate,omitempty" doc:"bytes write rate of the service offering"`
	DiskIopsReadRate          int64             `json:"diskIopsReadRate,omitempty" doc:"io requests read rate of the service offering"`
	DiskIopsWriteRate         int64             `json:"diskIopsWriteRate,omitempty" doc:"io requests write rate of the service offering"`
	Displaytext               string            `json:"displaytext,omitempty" doc:"an alternate display text of the service offering."`
	HostTags                  string            `json:"hosttags,omitempty" doc:"the host tag for the service offering"`
	HypervisorSnapshotReserve int               `json:"hypervisorsnapshotreserve,omitempty" doc:"Hypervisor snapshot reserve space as a percent of a volume (for managed storage using Xen or VMware)"`
	ID                        *UUID             `json:"id" doc:"the id of the service offering"`
	IsCustomized              bool              `json:"iscustomized,omitempty" doc:"is true if the offering is customized"`
	IsCustomizedIops          bool              `json:"iscustomizediops,omitempty" doc:"true if disk offering uses custom iops, false otherwise"`
	IsSystem                  bool              `json:"issystem,omitempty" doc:"is this a system vm offering"`
	IsVolatile                bool              `json:"isvolatile,omitempty" doc:"true if the vm needs to be volatile, i.e., on every reboot of vm from API root disk is discarded and creates a new root disk"`
	LimitCPUUse               bool              `json:"limitcpuuse,omitempty" doc:"restrict the CPU usage to committed service offering"`
	MaxIops                   int64             `json:"maxiops,omitempty" doc:"the max iops of the disk offering"`
	Memory                    int               `json:"memory,omitempty" doc:"the memory in MB"`
	MinIops                   int64             `json:"miniops,omitempty" doc:"the min iops of the disk offering"`
	Name                      string            `json:"name,omitempty" doc:"the name of the service offering"`
	NetworkRate               int               `json:"networkrate,omitempty" doc:"data transfer rate in megabits per second allowed."`
	OfferHA                   bool              `json:"offerha,omitempty" doc:"the ha support in the service offering"`
	Restricted                bool              `json:"restricted,omitempty" doc:"is this offering restricted"`
	ServiceOfferingDetails    map[string]string `json:"serviceofferingdetails,omitempty" doc:"additional key/value details tied with this service offering"`
	StorageType               string            `json:"storagetype,omitempty" doc:"the storage type for this service offering"`
	SystemVMType              string            `json:"systemvmtype,omitempty" doc:"is this a the systemvm type for system vm offering"`
	Tags                      string            `json:"tags,omitempty" doc:"the tags for the service offering"`
}
// ListRequest builds the ListServiceOfferings request matching this offering.
func (so ServiceOffering) ListRequest() (ListCommand, error) {
	// Restricted is deliberately not forwarded: it is effectively
	// tri-state (unset / true / false) and cannot be expressed here.
	var isSystem *bool
	if so.IsSystem {
		isSystem = &so.IsSystem
	}

	return &ListServiceOfferings{
		ID:           so.ID,
		IsSystem:     isSystem,
		Name:         so.Name,
		SystemVMType: so.SystemVMType,
	}, nil
}
//go:generate go run generate/main.go -interface=Listable ListServiceOfferings

// ListServiceOfferings represents a query for service offerings
//
// Pointer fields (IsSystem, Restricted, ...) distinguish "unset" (nil) from
// an explicit true/false filter. The trailing blank field only carries the
// API command name and description via its struct tags.
type ListServiceOfferings struct {
	ID               *UUID  `json:"id,omitempty" doc:"ID of the service offering"`
	IsSystem         *bool  `json:"issystem,omitempty" doc:"is this a system vm offering"`
	Keyword          string `json:"keyword,omitempty" doc:"List by keyword"`
	Name             string `json:"name,omitempty" doc:"name of the service offering"`
	Page             int    `json:"page,omitempty"`
	PageSize         int    `json:"pagesize,omitempty"`
	Restricted       *bool  `json:"restricted,omitempty" doc:"filter by the restriction flag: true to list only the restricted service offerings, false to list non-restricted service offerings, or nothing for all."`
	SystemVMType     string `json:"systemvmtype,omitempty" doc:"the system VM type. Possible types are \"consoleproxy\", \"secondarystoragevm\" or \"domainrouter\"."`
	VirtualMachineID *UUID  `json:"virtualmachineid,omitempty" doc:"the ID of the virtual machine. Pass this in if you want to see the available service offering that a virtual machine can be changed to."`
	_                bool   `name:"listServiceOfferings" description:"Lists all available service offerings."`
}
// ListServiceOfferingsResponse represents a list of service offerings
// as returned by the listServiceOfferings API call.
type ListServiceOfferingsResponse struct {
	// Count is the total number of matching offerings reported by the API.
	Count           int               `json:"count"`
	ServiceOffering []ServiceOffering `json:"serviceoffering"`
}
| dave2/packer | vendor/github.com/exoscale/egoscale/service_offerings.go | GO | mpl-2.0 | 6,002 |
/*
* JBoss, Home of Professional Open Source.
* Copyright (c) 2016, Red Hat, Inc., and individual contributors
* as indicated by the @author tags. See the copyright.txt file in the
* distribution for a full listing of individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.jboss.as.test.integration.jca.statistics.xa;
import static org.jboss.as.controller.descriptions.ModelDescriptionConstants.NAME;
import static org.jboss.as.controller.descriptions.ModelDescriptionConstants.OP;
import static org.jboss.as.controller.descriptions.ModelDescriptionConstants.OP_ADDR;
import static org.jboss.as.controller.descriptions.ModelDescriptionConstants.READ_ATTRIBUTE_OPERATION;
import static org.jboss.as.controller.descriptions.ModelDescriptionConstants.SUBSYSTEM;
import static org.junit.Assert.assertEquals;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.container.test.api.RunAsClient;
import org.jboss.arquillian.junit.Arquillian;
import org.jboss.as.arquillian.api.ContainerResource;
import org.jboss.as.arquillian.api.ServerSetup;
import org.jboss.as.arquillian.container.ManagementClient;
import org.jboss.as.controller.PathAddress;
import org.jboss.as.controller.PathElement;
import org.jboss.as.test.integration.management.ManagementOperations;
import org.jboss.as.test.integration.transactions.TxTestUtil;
import org.jboss.as.test.shared.TimeoutUtil;
import org.jboss.dmr.ModelNode;
import org.jboss.ejb.client.EJBClient;
import org.jboss.ejb.client.StatelessEJBLocator;
import org.jboss.shrinkwrap.api.Archive;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.asset.StringAsset;
import org.jboss.shrinkwrap.api.spec.EnterpriseArchive;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
/**
 * XA data source pool statistics test case.
 *
 * Verifies that the XACommitCount, XARollbackCount and XAStartCount
 * statistical attributes of the XA datasource's pool statistics resource
 * increase as transactions are committed / rolled back through an EJB.
 *
 * @author dsimko@redhat.com
 */
@RunWith(Arquillian.class)
@RunAsClient
@ServerSetup(XaDataSourceSetupStep.class)
public class XaDataSourcePoolStatisticsTestCase {

    private static final String ARCHIVE_NAME = "xa_transactions";
    private static final String APP_NAME = "xa-datasource-pool-statistics-test";
    // Names of the statistics attributes read from the management model.
    private static final String ATTRIBUTE_XA_COMMIT_COUNT = "XACommitCount";
    private static final String ATTRIBUTE_XA_ROLLBACK_COUNT = "XARollbackCount";
    private static final String ATTRIBUTE_XA_START_COUNT = "XAStartCount";
    // Number of transactions driven per test.
    private static final int COUNT = 10;

    @ContainerResource
    private ManagementClient managementClient;

    /** Builds the EAR under test: one EJB jar plus a persistence unit backed by H2. */
    @Deployment
    public static Archive<?> deploy() {
        final EnterpriseArchive ear = ShrinkWrap.create(EnterpriseArchive.class, APP_NAME + ".ear");
        JavaArchive jar = ShrinkWrap.create(JavaArchive.class, ARCHIVE_NAME + ".jar");
        jar.addClasses(TestEntity.class, SLSB1.class, SLSB.class, TimeoutUtil.class);
        jar.addPackage(TxTestUtil.class.getPackage());
        jar.addAsManifestResource(XaDataSourcePoolStatisticsTestCase.class.getPackage(), "persistence.xml", "persistence.xml");
        ear.addAsModule(jar);
        ear.addAsManifestResource(new StringAsset("Dependencies: com.h2database.h2\n"), "MANIFEST.MF");
        return ear;
    }

    @Before
    public void beforeTest() throws Exception {
        // TODO Elytron: Determine how this should be adapted once the transaction client changes are in
        //final EJBClientTransactionContext localUserTxContext = EJBClientTransactionContext.createLocal();
        //EJBClientTransactionContext.setGlobalContext(localUserTxContext);
    }

    /**
     * Tests increasing XACommitCount, XACommitAverageTime and XAStartCount
     * statistical attributes.
     */
    @Test
    public void testXACommit() throws Exception {
        int xaStartCountBefore = readStatisticalAttribute(ATTRIBUTE_XA_START_COUNT);
        int xaCommitCount = readStatisticalAttribute(ATTRIBUTE_XA_COMMIT_COUNT);
        // Statistics are expected to start from a clean slate for commits.
        assertEquals(ATTRIBUTE_XA_COMMIT_COUNT + " is " + xaCommitCount + " but should be 0", 0, xaCommitCount);
        SLSB slsb = getBean();
        for (int i = 0; i < COUNT; i++) {
            slsb.commit();
        }
        xaCommitCount = readStatisticalAttribute(ATTRIBUTE_XA_COMMIT_COUNT);
        int xaStartCountAfter = readStatisticalAttribute(ATTRIBUTE_XA_START_COUNT);
        // Every committed transaction also counts as one XA start.
        int total = xaStartCountBefore + COUNT;
        assertEquals(ATTRIBUTE_XA_COMMIT_COUNT + " is " + xaCommitCount + " but should be " + COUNT, COUNT, xaCommitCount);
        assertEquals(ATTRIBUTE_XA_START_COUNT + " is " + xaStartCountAfter + " but should be " + total, total, xaStartCountAfter);
    }

    /**
     * Tests increasing XARollbackCount statistical attribute.
     */
    @Test
    public void testXARollback() throws Exception {
        int xaRollbackCount = readStatisticalAttribute(ATTRIBUTE_XA_ROLLBACK_COUNT);
        assertEquals(ATTRIBUTE_XA_ROLLBACK_COUNT + " is " + xaRollbackCount + " but should be 0", 0, xaRollbackCount);
        SLSB slsb = getBean();
        for (int i = 0; i < COUNT; i++) {
            slsb.rollback();
        }
        xaRollbackCount = readStatisticalAttribute(ATTRIBUTE_XA_ROLLBACK_COUNT);
        assertEquals(ATTRIBUTE_XA_ROLLBACK_COUNT + " is " + xaRollbackCount + " but should be " + COUNT, COUNT, xaRollbackCount);
    }

    /** Looks up the deployed stateless bean via the EJB client API. */
    private SLSB getBean() {
        final StatelessEJBLocator<SLSB> locator = new StatelessEJBLocator<SLSB>(SLSB.class, APP_NAME, ARCHIVE_NAME, SLSB1.class.getSimpleName(), "");
        return EJBClient.createProxy(locator);
    }

    /** Address of /subsystem=datasources/xa-data-source=.../statistics=pool. */
    private ModelNode getStaticticsAddress() {
        return PathAddress.pathAddress(PathElement.pathElement(SUBSYSTEM, "datasources"),
                PathElement.pathElement("xa-data-source", XaDataSourceSetupStep.XA_DATASOURCE_NAME), PathElement.pathElement("statistics", "pool")).toModelNode();
    }

    /** Reads one integer statistics attribute from the pool statistics resource. */
    private int readStatisticalAttribute(String attributeName) throws Exception {
        return readAttribute(getStaticticsAddress(), attributeName).asInt();
    }

    /** Executes a read-attribute management operation against the server. */
    private ModelNode readAttribute(ModelNode address, String attributeName) throws Exception {
        ModelNode op = new ModelNode();
        op.get(OP).set(READ_ATTRIBUTE_OPERATION);
        op.get(NAME).set(attributeName);
        op.get(OP_ADDR).set(address);
        return ManagementOperations.executeOperation(managementClient.getControllerClient(), op);
    }
} | jamezp/wildfly-core | testsuite/integration/basic/src/test/java/org/jboss/as/test/integration/jca/statistics/xa/XaDataSourcePoolStatisticsTestCase.java | Java | lgpl-2.1 | 7,146 |
/*
* JBoss, Home of Professional Open Source.
* Copyright 2015, Red Hat, Inc., and individual contributors
* as indicated by the @author tags. See the copyright.txt file in the
* distribution for a full listing of individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.jboss.as.controller.access;
import org.jboss.as.controller.PathAddress;
import org.jboss.as.controller.logging.ControllerLogger;
import org.jboss.as.controller.registry.Resource;
/**
 * {@link org.jboss.as.controller.registry.Resource.NoSuchResourceException} variant
 * to throw when a resource should not be visible due to the caller lacking
 * permissions to perform {@link org.jboss.as.controller.access.Action.ActionEffect#ADDRESS}.
 *
 * @author Brian Stansberry (c) 2015 Red Hat Inc.
 */
public final class ResourceNotAddressableException extends Resource.NoSuchResourceException {

    /**
     * @param pathAddress address of the resource the caller may not see
     */
    public ResourceNotAddressableException(PathAddress pathAddress) {
        // Critical -- use the same message as the normal NoSuchResourceException so we don't leak data
        // to external users that this was an RBAC failure
        //noinspection ThrowableResultOfMethodCallIgnored
        super(ControllerLogger.ROOT_LOGGER.managementResourceNotFound(pathAddress).getMessage());
    }
}
| jamezp/wildfly-core | controller/src/main/java/org/jboss/as/controller/access/ResourceNotAddressableException.java | Java | lgpl-2.1 | 2,025 |
/*
* Copyright (C) 2016 Leon George
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/
/**
* @ingroup cpu_cc26x0_cc13x0_definitions
* @{
*
* @file
* @brief CC26x0/CC13x0 PRCM register definitions
*/
#ifndef CC26X0_CC13X0_PRCM_H
#define CC26X0_CC13X0_PRCM_H
#include <cc26xx_cc13xx.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief DDI_0_OSC registers
 *
 * Sixteen consecutive 32-bit registers; the declaration order defines the
 * MMIO layout mapped at DDI0_OSC_BASE, so fields must not be reordered.
 */
typedef struct {
    reg32_t CTL0; /**< control 0 */
    reg32_t CTL1; /**< control 1 */
    reg32_t RADCEXTCFG; /**< RADC external config */
    reg32_t AMPCOMPCTL; /**< amplitude compensation control */
    reg32_t AMPCOMPTH1; /**< amplitude compensation threshold 1 */
    reg32_t AMPCOMPTH2; /**< amplitude compensation threshold 2 */
    reg32_t ANABYPASSVAL1; /**< analog bypass values 1 */
    reg32_t ANABYPASSVAL2; /**< analog bypass values 2 */
    reg32_t ATESTCTL; /**< analog test control */
    reg32_t ADCDOUBLERNANOAMPCTL; /**< ADC doubler nanoamp control */
    reg32_t XOSCHFCTL; /**< XOSCHF control */
    reg32_t LFOSCCTL; /**< low frequency oscillator control */
    reg32_t RCOSCHFCTL; /**< RCOSCHF control */
    reg32_t STAT0; /**< status 0 */
    reg32_t STAT1; /**< status 1 */
    reg32_t STAT2; /**< status 2 */
} ddi0_osc_regs_t;
/**
 * @brief DDI_0_OSC register values
 * @{
 */
/*
 * NOTE(review): SCLK_LF_SRC_SEL_mask is 0x6 (bits 1-2), but the *_LF_RCOSC
 * (0x8) and *_LF_XOSC (0xC) values do not fit inside that mask (they suggest
 * a field at bits 2-3, i.e. mask 0xC) — verify against the CC26x0 TRM.
 */
#define DDI_0_OSC_CTL0_SCLK_LF_SRC_SEL_mask 0x6
#define DDI_0_OSC_CTL0_SCLK_LF_SRC_SEL_HF_RCOSC 0x0
#define DDI_0_OSC_CTL0_SCLK_LF_SRC_SEL_HF_XOSC 0x4
#define DDI_0_OSC_CTL0_SCLK_LF_SRC_SEL_LF_RCOSC 0x8
#define DDI_0_OSC_CTL0_SCLK_LF_SRC_SEL_LF_XOSC 0xC
#define DDI_0_OSC_CTL0_ACLK_REF_SRC_SEL_RCOSC_mask 0x60
#define DDI_0_OSC_CTL0_ACLK_REF_SRC_SEL_RCOSC_HF 0x00 /* 31.25kHz */
#define DDI_0_OSC_CTL0_ACLK_REF_SRC_SEL_XOSC_HF 0x20 /* 31.25kHz */
#define DDI_0_OSC_CTL0_ACLK_REF_SRC_SEL_RCOSC_LF 0x40 /* 32kHz */
#define DDI_0_OSC_CTL0_ACLK_REF_SRC_SEL_XOSC_LF 0x60 /* 32.768kHz */
#define DDI_0_OSC_CTL0_ACLK_TDC_SRC_SEL_RCOSC_mask 0x180
#define DDI_0_OSC_CTL0_ACLK_TDC_SRC_SEL_RCOSC_HF 0x000 /* 48MHz */
#define DDI_0_OSC_CTL0_ACLK_TDC_SRC_SEL_RCOSC_LF 0x080 /* 48MHz */
#define DDI_0_OSC_CTL0_ACLK_TDC_SRC_SEL_XOSC_HF 0x100 /* 24MHz */
#define DDI_0_OSC_CTL0_DOUBLER_START_DURATION_mask 0x6000000
#define DDI_0_OSC_CTL0_BYPASS_RCOSC_LF_CLK_QUAL 0x10000000
#define DDI_0_OSC_CTL0_BYPASS_XOSC_LF_CLK_QUAL 0x20000000
#define DDI_0_OSC_CTL0_XTAL_IS_24M 0x80000000
/** @} */

/**
 * @ingroup cpu_specific_peripheral_memory_map
 * @{
 */
#define DDI0_OSC_BASE 0x400CA000 /**< DDI0_OSC base address */
/** @} */

/**
 * @brief DDI_0_OSC register bank
 */
#define DDI_0_OSC ((ddi0_osc_regs_t *) (DDI0_OSC_BASE))
/**
 * @brief AON_SYSCTL registers
 */
/* Always-on system control block: member order must match the hardware. */
typedef struct {
    reg32_t PWRCTL; /**< power management */
    reg32_t RESETCTL; /**< reset management */
    reg32_t SLEEPCTL; /**< sleep mode */
} aon_sysctl_regs_t;
/**
 * @ingroup cpu_specific_peripheral_memory_map
 * @{
 */
#define AON_SYSCTL_BASE 0x40090000 /**< AON_SYSCTL base address */
/** @} */
#define AON_SYSCTL ((aon_sysctl_regs_t *) (AON_SYSCTL_BASE)) /**< AON_SYSCTL register bank */
/**
 * @brief AON_WUC registers
 */
/* Always-on wake-up controller: member order and the __reserved gaps must
 * match the hardware register map exactly. */
typedef struct {
    reg32_t MCUCLK; /**< MCU clock management */
    reg32_t AUXCLK; /**< AUX clock management */
    reg32_t MCUCFG; /**< MCU config */
    reg32_t AUXCFG; /**< AUX config */
    reg32_t AUXCTL; /**< AUX control */
    reg32_t PWRSTAT; /**< power status */
    reg32_t __reserved1; /**< Reserved */
    reg32_t SHUTDOWN; /**< shutdown control */
    reg32_t CTL0; /**< control 0 */
    reg32_t CTL1; /**< control 1 */
    reg32_t __reserved2[2]; /**< Reserved */
    reg32_t RECHARGECFG; /**< recharge controller config */
    reg32_t RECHARGESTAT; /**< recharge controller status */
    reg32_t __reserved3; /**< Reserved */
    reg32_t OSCCFG; /**< oscillator config */
    reg32_t JTAGCFG; /**< JTAG config */
    reg32_t JTAGUSERCODE; /**< JTAG USERCODE */
} aon_wuc_regs_t;
/**
 * @brief AON_WUC register values
 * @{
 */
#define MCUCLK_PWR_DWN_SRC 0x1 /* SCLK_LF in powerdown (no clock elsewise) */
#define MCUCLK_PWR_DWN_SRC_mask 0x3
#define MCUCLK_RCOSC_HF_CAL_DONE 0x4 /* set by MCU bootcode. RCOSC_HF is calibrated to 48 MHz, allowing FLASH to power up */
#define AUXCLK_SRC_HF 0x1 /* SCLK for AUX */
#define AUXCLK_SRC_LF 0x4
#define AUXCLK_SRC_mask 0x7 /* guaranteed to be glitchless */
#define AUXCLK_SCLK_HF_DIV_pos 8 /* don't set while SCLK_HF active for AUX */
#define AUXCLK_SCLK_HF_DIV_mask 0x700 /* divisor will be 2^(value+1) */
#define AUXCLK_PWR_DWN_SRC_pos 11 /* SCLK_LF in powerdown when SCLK_HF is source (no clock elsewise?!) */
#define AUXCLK_PWR_DWN_SRC_mask 0x1800 /* datasheet is confusing.. */
#define MCUCFG_SRAM_RET_OFF 0x0 /* no retention for any SRAM-bank */
#define MCUCFG_SRAM_RET_B0 0x1
#define MCUCFG_SRAM_RET_B01 0x3
#define MCUCFG_SRAM_RET_B012 0x7
/* NOTE(review): despite the "B0124" name, 0xF enables retention for banks
 * 0, 1, 2 and 3 (one bit per bank) — the macro name is kept for
 * backwards compatibility with existing callers. */
#define MCUCFG_SRAM_RET_B0124 0xF /* retention for banks 0, 1, 2, and 3 */
#define MCUCFG_SRAM_FIXED_WU_EN 0x100
#define MCUCFG_SRAM_VIRT_OFF 0x200
#define AUXCFG_RAM_RET_EN 0x1 /* retention for AUX_RAM bank 0. is off when otherwise in retention mode */
#define AUXCTL_AUX_FORCE_ON 0x1
#define AUXCTL_SWEV 0x2
/* SCE_RUN_EN is bit 2 of AUXCTL (per the TI CC26x0 TRM); the previous value
 * 0x3 erroneously combined AUX_FORCE_ON | SWEV. */
#define AUXCTL_SCE_RUN_EN 0x4
#define AUXCTL_RESET_REQ 0x80000000
#define PWRSTAT_AUX_RESET_DONE 0x2
#define PWRSTAT_AUX_BUS_CONNECTED 0x4
#define PWRSTAT_MCU_PD_ON 0x10
#define PWRSTAT_AUX_PD_ON 0x20
#define PWRSTAT_JTAG_PD_ON 0x40
#define PWRSTAT_AUX_PWR_DNW 0x200
#define SHUTDOWN_EN 0x1 /* register/cancel shutdown request */
#define AONWUC_CTL0_MCU_SRAM_ERASE 0x4
#define AONWUC_CTL0_AUX_SRAM_ERASE 0x8
#define AONWUC_CTL0_PWR_DWN_DIS 0x10 /* disable powerdown on request */
#define AONWUC_CTL1_MCU_WARM_RESET 0x1 /* last MCU reset was a warm reset */
#define AONWUC_CTL1_MCU_RESET_SRC 0x2 /* JTAG was source of last reset (MCU SW elsewise) */
#define RECHARGECFG_PER_E_mask 0x00000007 /* number of 32KHz clocks between activation of recharge controller: */
#define RECHARGECFG_PER_M_mask 0x000000F8 /* computed as follows: PERIOD = (PER_M*16+15) * 2^(PER_E) */
#define RECHARGECFG_MAX_PER_E_mask 0x00000700 /* maximum period the recharge algorithm can take */
#define RECHARGECFG_MAX_PER_M_mask 0x0000F800 /* computed as follows: MAXCYCLES = (MAX_PER_M*16+15) * 2^(MAX_PER_E) */
#define RECHARGECFG_C1_mask 0x000F0000 /* comparator threshold 1, bits 19:16 */
/* C2 occupies bits 23:20 (per the TI CC26x0 TRM); the previous value
 * duplicated the C1 mask. */
#define RECHARGECFG_C2_mask 0x00F00000 /* comparator threshold 2, bits 23:20 */
#define RECHARGECFG_ADAPTIVE_EN 0x80000000
#define RECHARGESTAT_MAX_USED_PER_mask 0x0FFFF
#define RECHARGESTAT_VDDR_SMPLS_mask 0xF0000
#define OSCCFG_PER_E_mask 0x07 /* number of 32KHz clocks between oscillator amplitude callibrations */
#define OSCCFG_PER_M_mask 0xF8 /* computed as follows: PERIOD = (PER_M*16+15) * 2^(PER_E) */
#define JTAGCFG_JTAG_PD_FORCE_ON 0x10
/** @} */
/**
 * @ingroup cpu_specific_peripheral_memory_map
 * @{
 */
#define AON_WUC_BASE 0x40091000 /**< AON_WUC base address */
/** @} */
#define AON_WUC ((aon_wuc_regs_t *) (AON_WUC_BASE)) /**< AON_WUC register bank */
/**
 * @brief AON_RTC registers
 */
/* Always-on real-time clock: member order must match the hardware. */
typedef struct {
    reg32_t CTL; /**< Control */
    reg32_t EVFLAGS; /**< Event Flags, RTC Status */
    reg32_t SEC; /**< Second Counter Value, Integer Part */
    reg32_t SUBSEC; /**< Second Counter Value, Fractional Part */
    reg32_t SUBSECINC; /**< Subseconds Increment */
    reg32_t CHCTL; /**< Channel Configuration */
    reg32_t CH0CMP; /**< Channel 0 Compare Value */
    reg32_t CH1CMP; /**< Channel 1 Compare Value */
    reg32_t CH2CMP; /**< Channel 2 Compare Value */
    reg32_t CH2CMPINC; /**< Channel 2 Compare Value Auto-increment */
    reg32_t CH1CAPT; /**< Channel 1 Capture Value */
    reg32_t SYNC; /**< AON Synchronization */
} aon_rtc_regs_t;
/**
 * @brief RTC_UPD is a 16 kHz signal used to sync up the radio timer. The
 *        16 kHz is SCLK_LF divided by 2
 * @details 0h = RTC_UPD signal is forced to 0
 *          1h = RTC_UPD signal is toggling @16 kHz
 */
#define AON_RTC_CTL_RTC_UPD_EN 0x00000002
/**
 * @ingroup cpu_specific_peripheral_memory_map
 * @{
 */
#define AON_RTC_BASE (PERIPH_BASE + 0x92000) /**< AON_RTC base address */
/** @} */
#define AON_RTC ((aon_rtc_regs_t *) (AON_RTC_BASE)) /**< AON_RTC register bank */
/**
 * @brief PRCM registers
 */
/* Power, reset and clock management module: member order and all __reserved
 * padding arrays must match the hardware register map exactly. */
typedef struct {
    reg32_t INFRCLKDIVR; /**< infrastructure clock division factor for run mode */
    reg32_t INFRCLKDIVS; /**< infrastructure clock division factor for sleep mode */
    reg32_t INFRCLKDIVDS; /**< infrastructure clock division factor for deep sleep mode */
    reg32_t VDCTL; /**< MCU voltage domain control */
    reg32_t __reserved1[6]; /**< Reserved */
    reg32_t CLKLOADCTL; /**< clock load control */
    reg32_t RFCCLKG; /**< RFC clock gate */
    reg32_t VIMSCLKG; /**< VIMS clock gate */
    reg32_t __reserved2[2]; /**< Reserved */
    reg32_t SECDMACLKGR; /**< TRNG, CRYPTO, and UDMA clock gate for run mode */
    reg32_t SECDMACLKGS; /**< TRNG, CRYPTO, and UDMA clock gate for sleep mode */
    reg32_t SECDMACLKGDS; /**< TRNG, CRYPTO, and UDMA clock gate for deep sleep mode */
    reg32_t GPIOCLKGR; /**< GPIO clock gate for run mode */
    reg32_t GPIOCLKGS; /**< GPIO clock gate for sleep mode */
    reg32_t GPIOCLKGDS; /**< GPIO clock gate for deep sleep mode */
    reg32_t GPTCLKGR; /**< GPT clock gate for run mode */
    reg32_t GPTCLKGS; /**< GPT clock gate for sleep mode */
    reg32_t GPTCLKGDS; /**< GPT clock gate for deep sleep mode */
    reg32_t I2CCLKGR; /**< I2C clock gate for run mode */
    reg32_t I2CCLKGS; /**< I2C clock gate for sleep mode */
    reg32_t I2CCLKGDS; /**< I2C clock gate for deep sleep mode */
    reg32_t UARTCLKGR; /**< UART clock gate for run mode */
    reg32_t UARTCLKGS; /**< UART clock gate for sleep mode */
    reg32_t UARTCLKGDS; /**< UART clock gate for deep sleep mode */
    reg32_t SSICLKGR; /**< SSI clock gate for run mode */
    reg32_t SSICLKGS; /**< SSI clock gate for sleep mode */
    reg32_t SSICLKGDS; /**< SSI clock gate for deep sleep mode */
    reg32_t I2SCLKGR; /**< I2S clock gate for run mode */
    reg32_t I2SCLKGS; /**< I2S clock gate for sleep mode */
    reg32_t I2SCLKGDS; /**< I2S clock gate for deep sleep mode */
    reg32_t __reserved3[10]; /**< Reserved */
    reg32_t CPUCLKDIV; /**< CPU clock division factor */
    reg32_t __reserved4[3]; /**< Reserved */
    reg32_t I2SBCLKSEL; /**< I2S clock select */
    reg32_t GPTCLKDIV; /**< GPT scalar */
    reg32_t I2SCLKCTL; /**< I2S clock control */
    reg32_t I2SMCLKDIV; /**< MCLK division ratio */
    reg32_t I2SBCLKDIV; /**< BCLK division ratio */
    reg32_t I2SWCLKDIV; /**< WCLK division ratio */
    reg32_t __reserved5[11]; /**< Reserved */
    reg32_t SWRESET; /**< SW initiated resets */
    reg32_t WARMRESET; /**< WARM reset control and status */
    reg32_t __reserved6[6]; /**< Reserved */
    reg32_t PDCTL0; /**< power domain control */
    reg32_t PDCTL0RFC; /**< RFC power domain control */
    reg32_t PDCTL0SERIAL; /**< SERIAL power domain control */
    reg32_t PDCTL0PERIPH; /**< PERIPH power domain control */
    reg32_t __reserved7; /**< Reserved */
    reg32_t PDSTAT0; /**< power domain status */
    reg32_t PDSTAT0RFC; /**< RFC power domain status */
    reg32_t PDSTAT0SERIAL; /**< SERIAL power domain status */
    reg32_t PDSTAT0PERIPH; /**< PERIPH power domain status */
    reg32_t __reserved8[11]; /**< Reserved */
    reg32_t PDCTL1; /**< power domain control */
    reg32_t __reserved9; /**< power domain control */
    reg32_t PDCTL1CPU; /**< CPU power domain control */
    reg32_t PDCTL1RFC; /**< RFC power domain control */
    reg32_t PDCTL1VIMS; /**< VIMS power domain control */
    reg32_t __reserved10; /**< Reserved */
    reg32_t PDSTAT1; /**< power domain status */
    reg32_t PDSTAT1BUS; /**< BUS power domain status */
    reg32_t PDSTAT1RFC; /**< RFC power domain status */
    reg32_t PDSTAT1CPU; /**< CPU power domain status */
    reg32_t PDSTAT1VIMS; /**< VIMS power domain status */
    reg32_t __reserved11[10]; /**< Reserved */
    reg32_t RFCMODESEL; /**< selected RFC mode */
    reg32_t __reserved12[20]; /**< Reserved */
    reg32_t RAMRETEN; /**< memory retention control */
    reg32_t __reserved13; /**< Reserved */
    reg32_t PDRETEN; /**< power domain retention (undocumented) */
    reg32_t __reserved14[8]; /**< Reserved */
    reg32_t RAMHWOPT; /**< undocumented */
} prcm_regs_t;
/**
 * @brief PRCM register values
 * @{
 */
/* Writing CLKLOADCTL_LOAD commits pending clock settings; LOADDONE signals
 * completion. */
#define CLKLOADCTL_LOAD 0x1
#define CLKLOADCTL_LOADDONE 0x2
#define PDCTL0_RFC_ON 0x1
#define PDCTL0_SERIAL_ON 0x2
#define PDCTL0_PERIPH_ON 0x4
#define PDSTAT0_RFC_ON 0x1
#define PDSTAT0_SERIAL_ON 0x2
#define PDSTAT0_PERIPH_ON 0x4
#define PDCTL1_CPU_ON 0x2
#define PDCTL1_RFC_ON 0x4
#define PDCTL1_VIMS_ON 0x8
#define PDSTAT1_CPU_ON 0x2
#define PDSTAT1_RFC_ON 0x4
#define PDSTAT1_VIMS_ON 0x8
#define GPIOCLKGR_CLK_EN 0x1
#define I2CCLKGR_CLK_EN 0x1
#define UARTCLKGR_CLK_EN_UART0 0x1
#define GPIOCLKGS_CLK_EN 0x1
#define I2CCLKGS_CLK_EN 0x1
#define UARTCLKGS_CLK_EN_UART0 0x1
#define GPIOCLKGDS_CLK_EN 0x1
#define I2CCLKGDS_CLK_EN 0x1
#define UARTCLKGDS_CLK_EN_UART0 0x1
/** @} */
/**
 * @ingroup cpu_specific_peripheral_memory_map
 * @{
 */
#define PRCM_BASE (PERIPH_BASE + 0x82000) /**< PRCM base address */
#define PRCM_BASE_NONBUF (PERIPH_BASE_NONBUF + 0x82000) /**< PRCM base address (nonbuf) */
/** @} */
#define PRCM ((prcm_regs_t *) (PRCM_BASE)) /**< PRCM register bank */
#define PRCM_NONBUF ((prcm_regs_t *) (PRCM_BASE_NONBUF)) /**< PRCM register bank (nonbuf) */
#ifdef __cplusplus
} /* end extern "C" */
#endif
#endif /* CC26X0_CC13X0_PRCM_H */
/** @} */
| aabadie/RIOT | cpu/cc26x0_cc13x0/include/cc26x0_cc13x0_prcm.h | C | lgpl-2.1 | 14,169 |
//===--------------- subtf3_test.c - Test __subtf3 ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file tests __subtf3 for the compiler_rt library.
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
#if __LDBL_MANT_DIG__ == 113
#include "fp_test.h"
// Returns: a - b
COMPILER_RT_ABI long double __subtf3(long double a, long double b);

/* Runs one subtraction case: computes a - b via __subtf3 and checks the
 * result against the expected 128-bit representation. Prints a diagnostic
 * and returns non-zero on mismatch, zero on success. */
int test__subtf3(long double a, long double b,
                 uint64_t expectedHi, uint64_t expectedLo)
{
    const long double difference = __subtf3(a, b);
    const int mismatch = compareResultLD(difference, expectedHi, expectedLo);
    if (mismatch) {
        printf("error in test__subtf3(%.20Lf, %.20Lf) = %.20Lf, "
               "expected %.20Lf\n", a, b, difference,
               fromRep128(expectedHi, expectedLo));
    }
    return mismatch;
}
/* Compile-time check: the array size collapses to 0 (an error) unless
 * long double is exactly 128 bits wide. */
char assumption_1[sizeof(long double) * CHAR_BIT == 128] = {0};
#endif

/* Exercises __subtf3 with NaN, infinity and finite operands; returns 1 on
 * the first failing case, 0 when all pass. Skipped entirely on targets
 * whose long double is not IEEE binary128 (mantissa != 113 bits). */
int main()
{
#if __LDBL_MANT_DIG__ == 113
    // qNaN - any = qNaN
    if (test__subtf3(makeQNaN128(),
                     0x1.23456789abcdefp+5L,
                     UINT64_C(0x7fff800000000000),
                     UINT64_C(0x0)))
        return 1;
    // NaN - any = NaN
    if (test__subtf3(makeNaN128(UINT64_C(0x800030000000)),
                     0x1.23456789abcdefp+5L,
                     UINT64_C(0x7fff800000000000),
                     UINT64_C(0x0)))
        return 1;
    // inf - any = inf
    if (test__subtf3(makeInf128(),
                     0x1.23456789abcdefp+5L,
                     UINT64_C(0x7fff000000000000),
                     UINT64_C(0x0)))
        return 1;
    // any - any
    if (test__subtf3(0x1.234567829a3bcdef5678ade36734p+5L,
                     0x1.ee9d7c52354a6936ab8d7654321fp-1L,
                     UINT64_C(0x40041b8af1915166),
                     UINT64_C(0xa44a7bca780a166c)))
        return 1;
#else
    printf("skipped\n");
#endif
    return 0;
}
| cd80/UtilizedLLVM | projects/compiler-rt/test/builtins/Unit/subtf3_test.c | C | unlicense | 2,146 |
/**
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MR4C_RANDOM_ACCESS_FILE_H__
#define __MR4C_RANDOM_ACCESS_FILE_H__
namespace MR4C {

/**
  * File that allows arbitrary movement of the file pointer. Implementations will most likely be local disk files
*/
/* Pure abstract interface: every member is pure virtual, so concrete
 * subclasses must implement the full set. All offsets are in bytes. */
class RandomAccessFile {

    public:

        /**
          * Read up to the next num bytes into buf, returns the number of bytes read
        */
        virtual size_t read(char* buf, size_t num) =0;

        /**
          * Returns the absolute location of the file pointer, in bytes
        */
        virtual size_t getLocation() =0;

        /**
          * Set absolute location from file start
        */
        virtual void setLocation(size_t loc) =0;

        /**
          * Set location in bytes back from end of file
        */
        virtual void setLocationFromEnd(size_t loc) =0;

        /**
          * Skip num bytes forward from current location
        */
        virtual void skipForward(size_t num) =0;

        /**
          * Skip num bytes backward from current location
        */
        virtual void skipBackward(size_t num) =0;

        /** Total size of the file in bytes. */
        virtual size_t getFileSize() =0;

        /** Release the underlying resource; isClosed() reports whether this has happened. */
        virtual void close() =0;

        virtual bool isClosed() const =0;

};

}
| sysalexis/mr4c | native/src/cpp/api/dataset/RandomAccessFile.h | C | apache-2.0 | 1,681 |
# Copyright 2011-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
require 'aws/core'
require 'aws/ec2/config'
module AWS
# Provides an expressive, object-oriented interface to Amazon EC2.
#
# ## Credentials
#
# You can setup default credentials for all AWS services via
# AWS.config:
#
# AWS.config(
# :access_key_id => 'YOUR_ACCESS_KEY_ID',
# :secret_access_key => 'YOUR_SECRET_ACCESS_KEY')
#
# Or you can set them directly on the EC2 interface:
#
# ec2 = AWS::EC2.new(
# :access_key_id => 'YOUR_ACCESS_KEY_ID',
# :secret_access_key => 'YOUR_SECRET_ACCESS_KEY')
#
# ## Instances
#
# EC2 uses instances to run your software.
#
# To run an instance:
#
# ec2.instances.create(:image_id => "ami-8c1fece5")
#
# To get an instance by ID:
#
# i = ec2.instances["i-12345678"]
# i.exists?
#
# To get a list of instances:
#
# ec2.instances.inject({}) { |m, i| m[i.id] = i.status; m }
# # => { "i-12345678" => :running, "i-87654321" => :shutting_down }
#
# ## Security Groups
#
# A security group is a named collection of access rules. These access
# rules specify which ingress (i.e., incoming) network traffic should be
# delivered to your instance. All other ingress traffic will be discarded.
#
# To create a security group:
#
# websvr = ec2.security_groups.create('webservers')
#
# Then you can add ingress authorizations. In the following example
# we add a rule that allows web traffic from the entire internet.
#
# # web traffic
# websvr.authorize_ingress(:tcp, 80)
#
# You can also specify a port range. Here we are opening FTP traffic:
#
# # ftp traffic
# websvr.authorize_ingress(:tcp, 20..21)
#
# If you want to limit an authorization to a particular CIDR IP address or
# list of address, just add them to the #authorize_ingress call.
#
# # ssh access
  #     websvr.authorize_ingress(:tcp, 22, '1.1.1.1/0', '2.2.2.2/0')
#
# You can also provide another security group instead of CIDR IP addresses.
# This allows incoming traffic from EC2 instances in the given security
# group(s).
#
# # get two existing security groups
# dbsvrs = ec2.security_groups.filter('group-name', 'db-servers').first
# websvrs = ec2.security_groups.filter('group-name', 'web-servers').first
#
# # allow instances in the 'web-servers' security group to connect
# # to instances in the 'db-servers' security group over tcp port 3306
# dbsvrs.authorize_ingress(:tcp, 3306, websvrs)
#
# There are a few handy shortcuts for allowing pings:
#
  #     websvrs.allow_ping
#
# Just like with authorize_ingress you can pass a security group or a list
# of CIDR IP addresses to allow ping to limit where you can ping from.
#
# You can also use the same parameters from the examples above to
# {SecurityGroup#revoke_ingress} and {SecurityGroup#disallow_ping}.
#
# You can specify other protocols than `:tcp`, like :udp and :icmp.
#
# ## Elastic IPs
#
# You can allocate up to 5 elastic IP addresses for each account.
# You can associate those elastic IP addresses with EC2 instances:
#
# instance = ec2.instances['i-12345678']
# ip = ec2.elastic_ips.allocate
#
# instance.ip_address # 1.1.1.1
# ip.ip_address # 2.2.2.2
#
# instance.associate_elastic_ip(ip)
# instance.ip_address # 2.2.2.2
#
# instance.disassociate_elastic_ip
# instance.ip_address # 1.1.1.1
#
# When you are done with an elastic IP address you should release it.
# In the following example we release all elastic IP addresses that are
# not currently associated with an instance:
#
# ec2.elastic_ips.select{|ip| !ip.associated? }.each(&:release)
#
# ## Key Pairs
#
# Public Amazon Machine Image (AMI) instances have no password, and you need a
# public/private key pair to log in to them. The public key half
# of this pair is embedded in your instance, allowing you to use
# the private key to log in securely without a password.
#
# You can generate a key pair yourself and then send the public
# part to EC2 using {KeyPairCollection#import}. For example:
#
# key_pair = ec2.key_pairs.import("mykey", File.read("~/.ssh/identity.pub"))
#
# You can also ask EC2 to generate a key pair for you. For
# example:
#
# key_pair = ec2.key_pairs.create("mykey")
# File.open("~/.ssh/ec2", "wb") do |f|
# f.write(key_pair.private_key)
# end
#
# ## Filtering and Tagging
#
# Any of the collections in the interface may be filtered by a
# number of different parameters. For example, to get all the
# windows images owned by amazon where the description includes
# the string "linux", you can do this:
#
# ec2.images.with_owner("amazon").
# filter("platform", "windows").
# filter("description", "*linux*")
#
# Similarly, you can tag images, instances, security groups,
# snapshots, and volumes with free-form key-value metadata and
# filter on that metadata. For example:
#
# ec2.images["ami-123"].tags << "myapp"
# ec2.images.tagged("myapp") # will include ami-123
#
# ## Regions
#
# Amazon has data centers in different areas of the world (e.g.,
# North America, Europe, Asia, etc.). Correspondingly, EC2 is
# available to use in different Regions. By launching instances in
# separate Regions, you can design your application to be closer
# to specific customers or to meet legal or other
# requirements. Prices for Amazon EC2 usage vary by Region (for
# more information about pricing by Region, go to the
# [Amazon EC2 Pricing page](http://aws.amazon.com/ec2/pricing)).
# You can use the Ruby SDK to see which regions are available for your
# account:
#
# ec2.regions.map(&:name) # => ["us-east-1", ...]
#
# The default region is `us-east-1`; you can access other regions
# like this:
#
# ec2 = AWS::EC2.new(:region => "us-west-1")
# ec2.instances.create(:image_id => 'ami-3bc9997e')
#
# ## Availability Zones
#
# Each Region contains multiple distinct locations called
# Availability Zones. Each Availability Zone is engineered to be
# isolated from failures in other Availability zones and to
# provide inexpensive, low-latency network connectivity to other
# zones in the same Region. By launching instances in separate
# Availability Zones, you can protect your applications from the
# failure of a single location.
#
# You can use the {#availability_zones} collection to get information
# about the available zones available to your account. For
# example:
#
# ec2.availability_zones.map(&:name) # => ["us-east-1a", ...]
#
# ## Images
#
# An Amazon Machine Image (AMI) contains all information necessary
# to boot instances of your software. For example, an AMI might
# contain all the software to act as a web server (e.g., Linux,
# Apache, and your web site) or it might contain all the software
# to act as a Hadoop node (e.g., Linux, Hadoop, and a custom
# application).
#
# You can use the {#images} collection to get information about
# the images available to your account. For example:
#
# ec2.images.with_owner("amazon").map(&:name)
#
# You can also use the images collection to create new images:
#
# ec2.images.create(
# :image_location => "mybucket/manifest.xml",
# :name => "my-image")
#
# @!attribute [r] client
# @return [Client] the low-level EC2 client object
class EC2
autoload :Attachment, 'aws/ec2/attachment'
autoload :AttachmentCollection, 'aws/ec2/attachment_collection'
autoload :AvailabilityZone, 'aws/ec2/availability_zone'
autoload :AvailabilityZoneCollection, 'aws/ec2/availability_zone_collection'
autoload :BlockDeviceMappings, 'aws/ec2/block_device_mappings'
autoload :Client, 'aws/ec2/client'
autoload :Collection, 'aws/ec2/collection'
autoload :CustomerGateway, 'aws/ec2/customer_gateway'
autoload :CustomerGatewayCollection, 'aws/ec2/customer_gateway_collection'
autoload :DHCPOptions, 'aws/ec2/dhcp_options'
autoload :DHCPOptionsCollection, 'aws/ec2/dhcp_options_collection'
autoload :ElasticIp, 'aws/ec2/elastic_ip'
autoload :ElasticIpCollection, 'aws/ec2/elastic_ip_collection'
autoload :Errors, 'aws/ec2/errors'
autoload :ExportTask, 'aws/ec2/export_task'
autoload :ExportTaskCollection, 'aws/ec2/export_task_collection'
autoload :FilteredCollection, 'aws/ec2/filtered_collection'
autoload :HasPermissions, 'aws/ec2/has_permissions'
autoload :Image, 'aws/ec2/image'
autoload :ImageCollection, 'aws/ec2/image_collection'
autoload :Instance, 'aws/ec2/instance'
autoload :InstanceCollection, 'aws/ec2/instance_collection'
autoload :InternetGateway, 'aws/ec2/internet_gateway'
autoload :InternetGatewayCollection, 'aws/ec2/internet_gateway_collection'
autoload :KeyPair, 'aws/ec2/key_pair'
autoload :KeyPairCollection, 'aws/ec2/key_pair_collection'
autoload :NetworkACL, 'aws/ec2/network_acl'
autoload :NetworkACLCollection, 'aws/ec2/network_acl_collection'
autoload :NetworkInterface, 'aws/ec2/network_interface'
autoload :NetworkInterfaceCollection, 'aws/ec2/network_interface_collection'
autoload :PermissionCollection, 'aws/ec2/permission_collection'
autoload :Region, 'aws/ec2/region'
autoload :RegionCollection, 'aws/ec2/region_collection'
autoload :ReservedInstances, 'aws/ec2/reserved_instances'
autoload :ReservedInstancesCollection, 'aws/ec2/reserved_instances_collection'
autoload :ReservedInstancesOffering, 'aws/ec2/reserved_instances_offering'
autoload :ReservedInstancesOfferingCollection, 'aws/ec2/reserved_instances_offering_collection'
autoload :Resource, 'aws/ec2/resource'
autoload :ResourceObject, 'aws/ec2/tag_collection'
autoload :ResourceTagCollection, 'aws/ec2/resource_tag_collection'
autoload :RouteTable, 'aws/ec2/route_table'
autoload :RouteTableCollection, 'aws/ec2/route_table_collection'
autoload :SecurityGroup, 'aws/ec2/security_group'
autoload :SecurityGroupCollection, 'aws/ec2/security_group_collection'
autoload :Snapshot, 'aws/ec2/snapshot'
autoload :SnapshotCollection, 'aws/ec2/snapshot_collection'
autoload :Subnet, 'aws/ec2/subnet'
autoload :SubnetCollection, 'aws/ec2/subnet_collection'
autoload :Tag, 'aws/ec2/tag'
autoload :TagCollection, 'aws/ec2/tag_collection'
autoload :TaggedCollection, 'aws/ec2/tagged_collection'
autoload :TaggedItem, 'aws/ec2/tagged_item'
autoload :Volume, 'aws/ec2/volume'
autoload :VolumeCollection, 'aws/ec2/volume_collection'
autoload :VPC, 'aws/ec2/vpc'
autoload :VPCCollection, 'aws/ec2/vpc_collection'
autoload :VPNConnection, 'aws/ec2/vpn_connection'
autoload :VPNConnectionCollection, 'aws/ec2/vpn_connection_collection'
autoload :VPNGateway, 'aws/ec2/vpn_gateway'
autoload :VPNGatewayCollection, 'aws/ec2/vpn_gateway_collection'
include Core::ServiceInterface
endpoint_prefix 'ec2'
# @return [InstanceCollection] A collection representing all instances
def instances
InstanceCollection.new(:config => config)
end
# @return [SecurityGroupCollection] A collection representing all security
# groups.
def security_groups
SecurityGroupCollection.new(:config => config)
end
# @return [ElasticIpCollection] A collection representing all
# elastic IP addresses for this account.
def elastic_ips
ElasticIpCollection.new(:config => config)
end
# @return [KeyPairCollection] A collection representing all key pairs.
def key_pairs
KeyPairCollection.new(:config => config)
end
# @return [TagCollection] A collection representing all EC2 tags for
# all resource types.
def tags
TagCollection.new(:config => config)
end
# @return [RegionCollection] A collection representing all EC2
# regions.
def regions
RegionCollection.new(:config => config)
end
# @return [AvailabilityZoneCollection] A collection representing
# all EC2 availability zones.
def availability_zones
AvailabilityZoneCollection.new(:config => config)
end
# @return [ImageCollection] A collection representing
# all Amazon Machine Images available to your account.
def images
ImageCollection.new(:config => config)
end
# @return [VolumeCollection] A collection representing
# all EBS volumes available to your account.
def volumes
VolumeCollection.new(:config => config)
end
# @return [ReservedInstancesCollection] A collection representing all
# purchased reserved instance offerings.
def reserved_instances
ReservedInstancesCollection.new(:config => config)
end
# @return [ReservedInstancesOfferingCollection] A collection representing all
# reserved instance offerings that may be purchased.
def reserved_instances_offerings
ReservedInstancesOfferingCollection.new(:config => config)
end
# @return [SnapshotCollection] A collection representing
# all EBS snapshots available to your account.
def snapshots
SnapshotCollection.new(:config => config)
end
# @return [VPCCollection] A collection representing
# all VPCs in your account.
def vpcs
VPCCollection.new(:config => config)
end
# @return [SubnetCollection] Returns a collection that represents all
# of the subnets associated with this account (across all VPCs).
def subnets
SubnetCollection.new(:config => config)
end
# @return [NetworkACLCollection] Returns a collection that represents
# all of the network ACLs for this account.
def network_acls
NetworkACLCollection.new(:config => config)
end
# @return [RouteTableCollection] Returns a collection that represents
# all of the route tables for this account.
def route_tables
RouteTableCollection.new(:config => config)
end
# @return [NetworkInterfaceCollection] Returns a collection that
# represents all of the network interfaces for this account.
def network_interfaces
NetworkInterfaceCollection.new(:config => config)
end
# @return [InternetGatewayCollection] Returns a collection that
# represents all of the internet gateways for this account.
def internet_gateways
InternetGatewayCollection.new(:config => config)
end
# @return [CustomerGatewayCollection] Returns a collection that
# represents all of the customer gateways for this account.
def customer_gateways
CustomerGatewayCollection.new(:config => config)
end
# @return [VPNGatewayCollection] Returns a collection that
# represents all of the vpn gateways for this account.
def vpn_gateways
VPNGatewayCollection.new(:config => config)
end
# @return [DHCPOptionsCollection] Returns a collection that
# represents all of the dhcp options for this account.
def dhcp_options
DHCPOptionsCollection.new(:config => config)
end
# @return [VPNConnections] Returns a collection that
# represents all of vpn connections for this account.
def vpn_connections
VPNConnectionCollection.new(:config => config)
end
# @return [ExportTaskCollection]
def export_tasks
ExportTaskCollection.new(:config => config)
end
end
end
| Coolnesss/Coordinates | vendor/cache/ruby/2.2.0/gems/aws-sdk-v1-1.66.0/lib/aws/ec2.rb | Ruby | apache-2.0 | 16,224 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action;
import org.elasticsearch.Version;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.action.support.WriteResponse;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.StatusToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.Locale;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
/**
* A base class for the response of a write operation that involves a single doc
*/
public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContentObject {
    // JSON field names used when serializing the response to XContent
    // (and expected back when parsing a response).
    private static final String _SHARDS = "_shards";
    private static final String _INDEX = "_index";
    private static final String _TYPE = "_type";
    private static final String _ID = "_id";
    private static final String _VERSION = "_version";
    private static final String _SEQ_NO = "_seq_no";
    private static final String _PRIMARY_TERM = "_primary_term";
    private static final String RESULT = "result";
    private static final String FORCED_REFRESH = "forced_refresh";
/**
* An enum that represents the results of CRUD operations, primarily used to communicate the type of
* operation that occurred.
*/
public enum Result implements Writeable {
CREATED(0),
UPDATED(1),
DELETED(2),
NOT_FOUND(3),
NOOP(4);
private final byte op;
private final String lowercase;
Result(int op) {
this.op = (byte) op;
this.lowercase = this.name().toLowerCase(Locale.ROOT);
}
public byte getOp() {
return op;
}
public String getLowercase() {
return lowercase;
}
public static Result readFrom(StreamInput in) throws IOException{
Byte opcode = in.readByte();
switch(opcode){
case 0:
return CREATED;
case 1:
return UPDATED;
case 2:
return DELETED;
case 3:
return NOT_FOUND;
case 4:
return NOOP;
default:
throw new IllegalArgumentException("Unknown result code: " + opcode);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeByte(op);
}
}
private ShardId shardId;
private String id;
private String type;
private long version;
private long seqNo;
private long primaryTerm;
private boolean forcedRefresh;
protected Result result;
public DocWriteResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, Result result) {
this.shardId = shardId;
this.type = type;
this.id = id;
this.seqNo = seqNo;
this.primaryTerm = primaryTerm;
this.version = version;
this.result = result;
}
// needed for deserialization
protected DocWriteResponse() {
}
/**
* The change that occurred to the document.
*/
public Result getResult() {
return result;
}
/**
* The index the document was changed in.
*/
public String getIndex() {
return this.shardId.getIndexName();
}
/**
* The exact shard the document was changed in.
*/
public ShardId getShardId() {
return this.shardId;
}
/**
* The type of the document changed.
*/
public String getType() {
return this.type;
}
/**
* The id of the document changed.
*/
public String getId() {
return this.id;
}
/**
* Returns the current version of the doc.
*/
public long getVersion() {
return this.version;
}
/**
* Returns the sequence number assigned for this change. Returns {@link SequenceNumbers#UNASSIGNED_SEQ_NO} if the operation
* wasn't performed (i.e., an update operation that resulted in a NOOP).
*/
public long getSeqNo() {
return seqNo;
}
/**
* The primary term for this change.
*
* @return the primary term
*/
public long getPrimaryTerm() {
return primaryTerm;
}
/**
* Did this request force a refresh? Requests that set {@link WriteRequest#setRefreshPolicy(RefreshPolicy)} to
* {@link RefreshPolicy#IMMEDIATE} will always return true for this. Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will
* only return true here if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}).
*/
public boolean forcedRefresh() {
return forcedRefresh;
}
@Override
public void setForcedRefresh(boolean forcedRefresh) {
this.forcedRefresh = forcedRefresh;
}
/** returns the rest status for this response (based on {@link ShardInfo#status()} */
@Override
public RestStatus status() {
return getShardInfo().status();
}
/**
* Return the relative URI for the location of the document suitable for use in the {@code Location} header. The use of relative URIs is
* permitted as of HTTP/1.1 (cf. https://tools.ietf.org/html/rfc7231#section-7.1.2).
*
* @param routing custom routing or {@code null} if custom routing is not used
* @return the relative URI for the location of the document
*/
public String getLocation(@Nullable String routing) {
final String encodedIndex;
final String encodedType;
final String encodedId;
final String encodedRouting;
try {
// encode the path components separately otherwise the path separators will be encoded
encodedIndex = URLEncoder.encode(getIndex(), "UTF-8");
encodedType = URLEncoder.encode(getType(), "UTF-8");
encodedId = URLEncoder.encode(getId(), "UTF-8");
encodedRouting = routing == null ? null : URLEncoder.encode(routing, "UTF-8");
} catch (final UnsupportedEncodingException e) {
throw new AssertionError(e);
}
final String routingStart = "?routing=";
final int bufferSizeExcludingRouting = 3 + encodedIndex.length() + encodedType.length() + encodedId.length();
final int bufferSize;
if (encodedRouting == null) {
bufferSize = bufferSizeExcludingRouting;
} else {
bufferSize = bufferSizeExcludingRouting + routingStart.length() + encodedRouting.length();
}
final StringBuilder location = new StringBuilder(bufferSize);
location.append('/').append(encodedIndex);
location.append('/').append(encodedType);
location.append('/').append(encodedId);
if (encodedRouting != null) {
location.append(routingStart).append(encodedRouting);
}
return location.toString();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardId = ShardId.readShardId(in);
type = in.readString();
id = in.readString();
version = in.readZLong();
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
seqNo = in.readZLong();
primaryTerm = in.readVLong();
} else {
seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
primaryTerm = 0;
}
forcedRefresh = in.readBoolean();
result = Result.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardId.writeTo(out);
out.writeString(type);
out.writeString(id);
out.writeZLong(version);
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
out.writeZLong(seqNo);
out.writeVLong(primaryTerm);
}
out.writeBoolean(forcedRefresh);
result.writeTo(out);
}
@Override
public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
innerToXContent(builder, params);
builder.endObject();
return builder;
}
public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
ReplicationResponse.ShardInfo shardInfo = getShardInfo();
builder.field(_INDEX, shardId.getIndexName());
if (params.paramAsBoolean("include_type_name", true)) {
builder.field(_TYPE, type);
}
builder.field(_ID, id)
.field(_VERSION, version)
.field(RESULT, getResult().getLowercase());
if (forcedRefresh) {
builder.field(FORCED_REFRESH, true);
}
builder.field(_SHARDS, shardInfo);
if (getSeqNo() >= 0) {
builder.field(_SEQ_NO, getSeqNo());
builder.field(_PRIMARY_TERM, getPrimaryTerm());
}
return builder;
}
/**
* Parse the output of the {@link #innerToXContent(XContentBuilder, Params)} method.
*
* This method is intended to be called by subclasses and must be called multiple times to parse all the information concerning
* {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly
* if needed and then immediately returns.
*/
protected static void parseInnerToXContent(XContentParser parser, Builder context) throws IOException {
XContentParser.Token token = parser.currentToken();
ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
String currentFieldName = parser.currentName();
token = parser.nextToken();
if (token.isValue()) {
if (_INDEX.equals(currentFieldName)) {
// index uuid and shard id are unknown and can't be parsed back for now.
context.setShardId(new ShardId(new Index(parser.text(), IndexMetaData.INDEX_UUID_NA_VALUE), -1));
} else if (_TYPE.equals(currentFieldName)) {
context.setType(parser.text());
} else if (_ID.equals(currentFieldName)) {
context.setId(parser.text());
} else if (_VERSION.equals(currentFieldName)) {
context.setVersion(parser.longValue());
} else if (RESULT.equals(currentFieldName)) {
String result = parser.text();
for (Result r : Result.values()) {
if (r.getLowercase().equals(result)) {
context.setResult(r);
break;
}
}
} else if (FORCED_REFRESH.equals(currentFieldName)) {
context.setForcedRefresh(parser.booleanValue());
} else if (_SEQ_NO.equals(currentFieldName)) {
context.setSeqNo(parser.longValue());
} else if (_PRIMARY_TERM.equals(currentFieldName)) {
context.setPrimaryTerm(parser.longValue());
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (_SHARDS.equals(currentFieldName)) {
context.setShardInfo(ShardInfo.fromXContent(parser));
} else {
parser.skipChildren(); // skip potential inner objects for forward compatibility
}
} else if (token == XContentParser.Token.START_ARRAY) {
parser.skipChildren(); // skip potential inner arrays for forward compatibility
}
}
/**
* Base class of all {@link DocWriteResponse} builders. These {@link DocWriteResponse.Builder} are used during
* xcontent parsing to temporarily store the parsed values, then the {@link Builder#build()} method is called to
* instantiate the appropriate {@link DocWriteResponse} with the parsed values.
*/
public abstract static class Builder {
protected ShardId shardId = null;
protected String type = null;
protected String id = null;
protected Long version = null;
protected Result result = null;
protected boolean forcedRefresh;
protected ShardInfo shardInfo = null;
protected Long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
protected Long primaryTerm = 0L;
public ShardId getShardId() {
return shardId;
}
public void setShardId(ShardId shardId) {
this.shardId = shardId;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public void setVersion(Long version) {
this.version = version;
}
public void setResult(Result result) {
this.result = result;
}
public void setForcedRefresh(boolean forcedRefresh) {
this.forcedRefresh = forcedRefresh;
}
public void setShardInfo(ShardInfo shardInfo) {
this.shardInfo = shardInfo;
}
public void setSeqNo(Long seqNo) {
this.seqNo = seqNo;
}
public void setPrimaryTerm(Long primaryTerm) {
this.primaryTerm = primaryTerm;
}
public abstract DocWriteResponse build();
}
}
| gfyoung/elasticsearch | server/src/main/java/org/elasticsearch/action/DocWriteResponse.java | Java | apache-2.0 | 15,164 |
// ----------------------------------------------------------------------------------
//
// Copyright Microsoft Corporation
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------------
using Microsoft.Azure.Batch;
using System.Collections.Generic;
namespace Microsoft.Azure.Commands.Batch.Models
{
/// <summary>
/// Parameters describing a query for the files of a specific Task.
/// </summary>
public class ListTaskFileOptions : TaskOperationParameters
{
    public ListTaskFileOptions(BatchAccountContext context, string workItemName, string jobName, string taskName, PSCloudTask task,
        IEnumerable<BatchClientBehavior> additionalBehaviors = null) : base(context, workItemName, jobName, taskName, task, additionalBehaviors)
    { }

    /// <summary>
    /// The OData filter clause applied when querying for Task files
    /// </summary>
    public string Filter { get; set; }

    /// <summary>
    /// The upper bound on the number of Task files returned
    /// </summary>
    public int MaxCount { get; set; }

    /// <summary>
    /// When true, lists every file of the task recursively; when false, lists only the files at the task directory root
    /// </summary>
    public bool Recursive { get; set; }

    /// <summary>
    /// When specified, only the single Task file with this name is returned
    /// </summary>
    public string TaskFileName { get; set; }
}
}
| kagamsft/azure-powershell | src/ResourceManager/Batch/Commands.Batch/Models/ListTaskFileOptions.cs | C# | apache-2.0 | 1,943 |
/*
* Copyright (c) 2005 - 2014, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package org.wso2.carbon.event.receiver.ui;
/**
 * Constants shared by the event receiver UI components.
 * <p>
 * This is a pure constant holder: it cannot be instantiated and only exposes
 * {@code static final} values.
 */
public final class EventReceiverUIConstants {
    /** Separator between a stream name and its version. */
    public static final String STREAM_VERSION_DELIMITER = ":";
    /** Literal value used by the UI to mark a feature as enabled. */
    public static final String STRING_LITERAL_ENABLE = "enable";
    /** Prefix identifying meta attributes. */
    public static final String PROPERTY_META_PREFIX = "meta_";
    /** Prefix identifying correlation attributes. */
    public static final String PROPERTY_CORRELATION_PREFIX = "correlation_";

    /** Utility class: suppress instantiation. */
    private EventReceiverUIConstants() {
    }
}
| keizer619/carbon-analytics-common | components/event-receiver/org.wso2.carbon.event.receiver.ui/src/main/java/org/wso2/carbon/event/receiver/ui/EventReceiverUIConstants.java | Java | apache-2.0 | 1,054 |
/* mbed Microcontroller Library
*******************************************************************************
* Copyright (c) 2018, STMicroelectronics
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
*/
#include "PeripheralPins.h"
#include "mbed_toolchain.h"
//==============================================================================
// Notes
//
// - The pins mentioned Px_y_ALTz are alternative possibilities which use other
// HW peripheral instances. You can use them the same way as any other "normal"
// pin (i.e. PwmOut pwm(PA_7_ALT0);). These pins are not displayed on the board
// pinout image on mbed.org.
//
// - The pins which are connected to other components present on the board have
// the comment "Connected to xxx". The pin function may not work properly in this
// case. These pins may not be displayed on the board pinout image on mbed.org.
// Please read the board reference manual and schematic for more information.
//
// - Warning: pins connected to the default STDIO_UART_TX and STDIO_UART_RX pins are commented
// See https://os.mbed.com/teams/ST/wiki/STDIO for more information.
//
//==============================================================================
//*** ADC ***
// Analog-input pin maps: each entry ties a pin to an ADC instance plus extended
// pin data (the trailing comment names the ADC channel). Every table ends with
// an {NC, NC, 0} sentinel entry that terminates the lookup.
MBED_WEAK const PinMap PinMap_ADC[] = {
    {PA_0, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 0, 0)}, // ADC_IN0
    {PA_1, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 1, 0)}, // ADC_IN1
//  {PA_2, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 2, 0)}, // ADC_IN2 // Connected to STDIO_UART_TX
//  {PA_3, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 3, 0)}, // ADC_IN3 // Connected to STDIO_UART_RX
    {PA_4, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 4, 0)}, // ADC_IN4
    {PA_5, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 5, 0)}, // ADC_IN5 // Connected to LD2 [Green Led]
    {PA_6, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 6, 0)}, // ADC_IN6
    {PA_7, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 7, 0)}, // ADC_IN7
    {PB_0, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 8, 0)}, // ADC_IN8
    {PB_1, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 9, 0)}, // ADC_IN9
    {PB_2, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 0, 0)}, // ADC_IN0b
    {PB_12, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 18, 0)}, // ADC_IN18
    {PB_13, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 19, 0)}, // ADC_IN19
    {PB_14, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 20, 0)}, // ADC_IN20
    {PB_15, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 21, 0)}, // ADC_IN21
    {PC_0, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 10, 0)}, // ADC_IN10
    {PC_1, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 11, 0)}, // ADC_IN11
    {PC_2, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 12, 0)}, // ADC_IN12
    {PC_3, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 13, 0)}, // ADC_IN13
    {PC_4, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 14, 0)}, // ADC_IN14
    {PC_5, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 15, 0)}, // ADC_IN15
    {NC, NC, 0}
};

// Internal (non-pin) ADC channels: temperature sensor and voltage reference.
MBED_WEAK const PinMap PinMap_ADC_Internal[] = {
    {ADC_TEMP, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 16, 0)},
    {ADC_VREF, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 17, 0)},
    {NC, NC, 0}
};
//*** DAC ***
// Digital-to-analog output channels. Terminated by the {NC, NC, 0} sentinel.
MBED_WEAK const PinMap PinMap_DAC[] = {
    {PA_4, DAC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 1, 0)}, // DAC_OUT1
    {PA_5, DAC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 2, 0)}, // DAC_OUT2 // Connected to LD2 [Green Led]
    {NC, NC, 0}
};

//*** I2C ***
// I2C pins are configured in open-drain alternate-function mode (STM_MODE_AF_OD),
// as required by the I2C bus.
MBED_WEAK const PinMap PinMap_I2C_SDA[] = {
    {PB_7, I2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C1)},
    {PB_9, I2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C1)},
    {PB_11, I2C_2, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C2)},
    {NC, NC, 0}
};

MBED_WEAK const PinMap PinMap_I2C_SCL[] = {
    {PB_6, I2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C1)},
    {PB_8, I2C_1, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C1)},
    {PB_10, I2C_2, STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, GPIO_AF4_I2C2)},
    {NC, NC, 0}
};
//*** PWM ***
// TIM5 cannot be used because already used by the us_ticker
// Pin-to-timer-channel map; the extended pin data carries the channel number and
// an "inverted output" flag. _ALTx pin names select an alternative timer for the
// same physical pin. Terminated by the {NC, NC, 0} sentinel.
MBED_WEAK const PinMap PinMap_PWM[] = {
    {PA_0, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF1_TIM2, 1, 0)}, // TIM2_CH1
//  {PA_0, PWM_5, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM5, 1, 0)}, // TIM5_CH1
    {PA_1, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF1_TIM2, 2, 0)}, // TIM2_CH2
//  {PA_1, PWM_5, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM5, 2, 0)}, // TIM5_CH2
//  {PA_2, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF1_TIM2, 3, 0)}, // TIM2_CH3 // Connected to STDIO_UART_TX
//  {PA_2, PWM_5, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM5, 3, 0)}, // TIM5_CH3 // Connected to STDIO_UART_TX
//  {PA_2, PWM_9, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF3_TIM9, 1, 0)}, // TIM9_CH1 // Connected to STDIO_UART_TX
//  {PA_3, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF1_TIM2, 4, 0)}, // TIM2_CH4 // Connected to STDIO_UART_RX
//  {PA_3, PWM_5, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM5, 4, 0)}, // TIM5_CH4 // Connected to STDIO_UART_RX
//  {PA_3, PWM_9, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF3_TIM9, 2, 0)}, // TIM9_CH2 // Connected to STDIO_UART_RX
    {PA_5, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF1_TIM2, 1, 0)}, // TIM2_CH1 // Connected to LD2 [Green Led]
    {PA_6, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM3, 1, 0)}, // TIM3_CH1
    {PA_6_ALT0, PWM_10, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF3_TIM10, 1, 0)}, // TIM10_CH1
    {PA_7, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM3, 2, 0)}, // TIM3_CH2
    {PA_7_ALT0, PWM_11, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF3_TIM11, 1, 0)}, // TIM11_CH1
    {PA_15, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF1_TIM2, 1, 0)}, // TIM2_CH1
    {PB_0, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM3, 3, 0)}, // TIM3_CH3
    {PB_1, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM3, 4, 0)}, // TIM3_CH4
    {PB_3, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF1_TIM2, 2, 0)}, // TIM2_CH2 // Connected to SWO
    {PB_4, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM3, 1, 0)}, // TIM3_CH1
    {PB_5, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM3, 2, 0)}, // TIM3_CH2
    {PB_6, PWM_4, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM4, 1, 0)}, // TIM4_CH1
    {PB_7, PWM_4, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM4, 2, 0)}, // TIM4_CH2
    {PB_8, PWM_4, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM4, 3, 0)}, // TIM4_CH3
    {PB_8_ALT0, PWM_10, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF3_TIM10, 1, 0)}, // TIM10_CH1
    {PB_9, PWM_4, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM4, 4, 0)}, // TIM4_CH4
    {PB_9_ALT0, PWM_11, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF3_TIM11, 1, 0)}, // TIM11_CH1
    {PB_10, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF1_TIM2, 3, 0)}, // TIM2_CH3
    {PB_11, PWM_2, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF1_TIM2, 4, 0)}, // TIM2_CH4
    {PB_12, PWM_10, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF3_TIM10, 1, 0)}, // TIM10_CH1
    {PB_13, PWM_9, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF3_TIM9, 1, 0)}, // TIM9_CH1
    {PB_14, PWM_9, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF3_TIM9, 2, 0)}, // TIM9_CH2
    {PB_15, PWM_11, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF3_TIM11, 1, 0)}, // TIM11_CH1
    {PC_6, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM3, 1, 0)}, // TIM3_CH1
    {PC_7, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM3, 2, 0)}, // TIM3_CH2
    {PC_8, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM3, 3, 0)}, // TIM3_CH3
    {PC_9, PWM_3, STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF2_TIM3, 4, 0)}, // TIM3_CH4
    {NC, NC, 0}
};
//*** SERIAL ***
// UART pin maps. PA_2/PA_3 carry the default STDIO console (see the note at the
// top of this file). Each table is terminated by the {NC, NC, 0} sentinel.
MBED_WEAK const PinMap PinMap_UART_TX[] = {
    {PA_2, UART_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART2)}, // Connected to STDIO_UART_TX
    {PA_9, UART_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART1)},
    {PB_6, UART_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART1)},
    {PB_10, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
    {PC_10, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
    {PC_10_ALT0, UART_4, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_UART4)},
    {PC_12, UART_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_UART5)},
    {NC, NC, 0}
};

MBED_WEAK const PinMap PinMap_UART_RX[] = {
    {PA_3, UART_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART2)}, // Connected to STDIO_UART_RX
    {PA_10, UART_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART1)},
    {PB_7, UART_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART1)},
    {PB_11, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
    {PC_11, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
    {PC_11_ALT0, UART_4, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_UART4)},
    {PD_2, UART_5, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF8_UART5)},
    {NC, NC, 0}
};

// Hardware flow-control pins (only USART1/2/3 expose RTS/CTS here).
MBED_WEAK const PinMap PinMap_UART_RTS[] = {
    {PA_1, UART_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART2)},
    {PA_12, UART_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART1)},
    {PB_14, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
    {NC, NC, 0}
};

MBED_WEAK const PinMap PinMap_UART_CTS[] = {
    {PA_0, UART_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART2)},
    {PA_11, UART_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART1)},
    {PB_13, UART_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, GPIO_AF7_USART3)},
    {NC, NC, 0}
};
//*** SPI ***
// SPI pin maps; _ALTx names select SPI3 as an alternative instance on the same
// physical pin. Each table is terminated by the {NC, NC, 0} sentinel.
MBED_WEAK const PinMap PinMap_SPI_MOSI[] = {
    {PA_7, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
    {PA_12, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
    {PB_5, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
    {PB_5_ALT0, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
    {PB_15, SPI_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI2)},
    {PC_12, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
    {NC, NC, 0}
};

MBED_WEAK const PinMap PinMap_SPI_MISO[] = {
    {PA_6, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
    {PA_11, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
    {PB_4, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
    {PB_4_ALT0, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
    {PB_14, SPI_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI2)},
    {PC_11, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
    {NC, NC, 0}
};

MBED_WEAK const PinMap PinMap_SPI_SCLK[] = {
    {PA_5, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)}, // Connected to LD2 [Green Led]
    {PB_3, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)}, // Connected to SWO
    {PB_3_ALT0, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)}, // Connected to SWO
    {PB_13, SPI_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI2)},
    {PC_10, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
    {NC, NC, 0}
};

MBED_WEAK const PinMap PinMap_SPI_SSEL[] = {
    {PA_4, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
    {PA_4_ALT0, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
    {PA_15, SPI_1, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI1)},
    {PA_15_ALT0, SPI_3, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF6_SPI3)},
    {PB_12, SPI_2, STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, GPIO_AF5_SPI2)},
    {NC, NC, 0}
};
//*** USBDEVICE ***
// USB full-speed data pins; no alternate function is applied (GPIO_AF_NONE).
MBED_WEAK const PinMap PinMap_USB_FS[] = {
    {PA_11, USB_FS, STM_PIN_DATA(STM_MODE_INPUT, GPIO_NOPULL, GPIO_AF_NONE)}, // USB_DM
    {PA_12, USB_FS, STM_PIN_DATA(STM_MODE_INPUT, GPIO_NOPULL, GPIO_AF_NONE)}, // USB_DP
    {NC, NC, 0}
};
| mbedmicro/mbed | targets/TARGET_STM/TARGET_STM32L1/TARGET_NUCLEO_L152RE/PeripheralPins.c | C | apache-2.0 | 14,912 |
# Print
Adds a button to the toolbar for printing the table in a predefined configurable format.
## Usage
```html
<script src="extensions/print/bootstrap-table-print.js"></script>
```
## Options
### showPrint
* type: Boolean
* description: Set true to show the Print button on the toolbar.
* default: `false`
### printSortColumn
* type: String
* description: Set the column field name that the printed table is sorted by.
* default: `undefined`
### printSortOrder
* type: String
* description: Valid values: 'asc', 'desc'. Relevant only if printSortColumn is set
* default: `'asc'`
### printPageBuilder
* type: Function
* description: Receives the HTML `<table>` element as a string parameter and returns an HTML string for printing. Used for styling and for adding a header or footer.
* default: `function(table){return printPageBuilderDefault(table)}`
## Column options
### printFilter
* type: String
* description: set value to filter the printed data by this column.
* default: `undefined`
### printIgnore
* type: Boolean
* description: set true to hide this column in the printed page.
* default: `false`
## Icons
* print: `'glyphicon-print icon-share'`
| epam-debrecen-rft-2015/atsy | web/src/main/webapp/resources/thirdparty/bootstrap-table/extensions/print/README.md | Markdown | apache-2.0 | 1,155 |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<link rel="stylesheet" href="../includes/main.css" type="text/css">
<link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
<title>Apache CloudStack | The Power Behind Your Cloud</title>
</head>
<body>
<div id="insidetopbg">
<div id="inside_wrapper">
<div class="uppermenu_panel">
<div class="uppermenu_box"></div>
</div>
<div id="main_master">
<div id="inside_header">
<div class="header_top">
<a class="cloud_logo" href="http://cloudstack.org"></a>
<div class="mainemenu_panel"></div>
</div>
</div>
<div id="main_content">
<div class="inside_apileftpanel">
<div class="inside_contentpanel" style="width:930px;">
<div class="api_titlebox">
<div class="api_titlebox_left">
<span>
Apache CloudStack v4.5.0 Root Admin API Reference
</span>
<p></p>
<h1>registerSSHKeyPair</h1>
<p>Register a public key in a keypair under a certain name</p>
</div>
<div class="api_titlebox_right">
<a class="api_backbutton" href="../TOC_Root_Admin.html"></a>
</div>
</div>
<div class="api_tablepanel">
<h2>Request parameters</h2>
<table class="apitable">
<tr class="hed">
<td style="width:200px;"><strong>Parameter Name</strong></td><td style="width:500px;">Description</td><td style="width:180px;">Required</td>
</tr>
<tr>
<td style="width:200px;"><strong>name</strong></td><td style="width:500px;"><strong>Name of the keypair</strong></td><td style="width:180px;"><strong>true</strong></td>
</tr>
<tr>
<td style="width:200px;"><strong>publickey</strong></td><td style="width:500px;"><strong>Public key material of the keypair</strong></td><td style="width:180px;"><strong>true</strong></td>
</tr>
<tr>
<td style="width:200px;"><i>account</i></td><td style="width:500px;"><i>an optional account for the ssh key. Must be used with domainId.</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>domainid</i></td><td style="width:500px;"><i>an optional domainId for the ssh key. If the account parameter is used, domainId must also be used.</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>projectid</i></td><td style="width:500px;"><i>an optional project for the ssh key</i></td><td style="width:180px;"><i>false</i></td>
</tr>
</table>
</div>
<div class="api_tablepanel">
<h2>Response Tags</h2>
<table class="apitable">
<tr class="hed">
<td style="width:200px;"><strong>Response Name</strong></td><td style="width:500px;">Description</td>
</tr>
<tr>
<td style="width:200px;"><strong>fingerprint</strong></td><td style="width:500px;">Fingerprint of the public key</td>
</tr>
<tr>
<td style="width:200px;"><strong>name</strong></td><td style="width:500px;">Name of the keypair</td>
</tr>
</table>
</div>
</div>
</div>
</div>
</div>
<div id="footer">
<div id="comments_thread">
<script type="text/javascript" src="https://comments.apache.org/show_comments.lua?site=test" async="true"></script>
<noscript>
<iframe width="930" height="500" src="https://comments.apache.org/iframe.lua?site=test&page=4.2.0/rootadmin"></iframe>
</noscript>
</div>
<div id="footer_mainmaster">
<p>Copyright © 2015 The Apache Software Foundation, Licensed under the
<a href="http://www.apache.org/licenses/LICENSE-2.0">Apache License, Version 2.0.</a>
<br>
Apache, CloudStack, Apache CloudStack, the Apache CloudStack logo, the CloudMonkey logo and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
</div>
</div>
</div>
</div>
</body>
</html>
| resmo/cloudstack-www | source/api/apidocs-4.5/root_admin/registerSSHKeyPair.html | HTML | apache-2.0 | 3,727 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.encryption;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.internal.IgniteEx;
import org.jetbrains.annotations.NotNull;
import org.junit.Test;
/**
 * Restart test for encrypted caches that are preconfigured statically in the
 * node configuration rather than created dynamically, optionally using a
 * differently named cache on every node.
 */
public class EncryptedCachePreconfiguredRestartTest extends EncryptedCacheRestartTest {
    /** When {@code true}, each node preconfigures its own, uniquely named, encrypted cache. */
    private boolean differentCachesOnNodes;

    /** {@inheritDoc} */
    @Override protected void afterTestsStopped() throws Exception {
        stopAllGrids();
        cleanPersistenceDir();
    }

    /** {@inheritDoc} */
    @Override protected void beforeTestsStarted() throws Exception {
        cleanPersistenceDir();
    }

    /** @throws Exception If failed. */
    @Test
    public void testDifferentPreconfiguredCachesOnNodes() throws Exception {
        differentCachesOnNodes = true;
        super.testCreateEncryptedCache();
    }

    /** {@inheritDoc} */
    @Test
    @Override public void testCreateEncryptedCache() throws Exception {
        differentCachesOnNodes = false;
        super.testCreateEncryptedCache();
    }

    /** {@inheritDoc} */
    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
        IgniteConfiguration nodeCfg = super.getConfiguration(igniteInstanceName);

        // Per-node cache name when caches differ across nodes, a single shared name otherwise.
        String preconfiguredName = ENCRYPTED_CACHE + (differentCachesOnNodes ? "." + igniteInstanceName : "");

        CacheConfiguration cacheCfg = new CacheConfiguration(preconfiguredName)
            .setEncryptionEnabled(true);

        nodeCfg.setCacheConfiguration(cacheCfg);

        return nodeCfg;
    }

    /**
     * @return Cache name.
     */
    @NotNull @Override protected String cacheName() {
        return ENCRYPTED_CACHE + (differentCachesOnNodes ? "." + GRID_1 : "");
    }

    /**
     * Populates the encrypted cache (the cache itself is preconfigured via
     * {@link #getConfiguration}, so only data is inserted here).
     */
    @Override protected void createEncryptedCache(IgniteEx grid0, IgniteEx grid1, String cacheName, String groupName) {
        IgniteCache<Long, String> cache = grid0.cache(cacheName());

        for (long key = 0; key < 100; key++)
            cache.put(key, "" + key);
    }
}
| samaitra/ignite | modules/core/src/test/java/org/apache/ignite/internal/encryption/EncryptedCachePreconfiguredRestartTest.java | Java | apache-2.0 | 2,966 |
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::CommentStyle::*;
use ast;
use codemap::{BytePos, CharPos, CodeMap, Pos};
use diagnostic;
use parse::lexer::is_block_doc_comment;
use parse::lexer::{StringReader, TokenAndSpan};
use parse::lexer::{is_whitespace, Reader};
use parse::lexer;
use print::pprust;
use str::char_at;
use std::io::Read;
use std::usize;
/// How a comment sits relative to the surrounding code; consumed by the
/// pretty-printer when re-emitting comments (see `gather_comments_and_literals`).
#[derive(Clone, Copy, PartialEq)]
pub enum CommentStyle {
    /// No code on either side of each line of the comment
    Isolated,
    /// Code exists to the left of the comment
    Trailing,
    /// Code before /* foo */ and after the comment
    Mixed,
    /// Just a manual blank line "\n\n", for layout
    BlankLine,
}
/// A comment harvested from source text, with its placement style and position.
#[derive(Clone)]
pub struct Comment {
    /// Placement of the comment relative to neighbouring code.
    pub style: CommentStyle,
    /// The comment's text, one entry per source line.
    pub lines: Vec<String>,
    /// Position of the start of the comment in the source map.
    pub pos: BytePos,
}
/// Returns true if `s` is the text of a doc comment in any of the four
/// recognized forms: `///`, `//!`, `/**`, or `/*!`.
pub fn is_doc_comment(s: &str) -> bool {
    // The prefixes are mutually exclusive, so each case can return directly.
    if s.starts_with("///") {
        return super::is_doc_comment(s);
    }
    if s.starts_with("/**") {
        return is_block_doc_comment(s);
    }
    s.starts_with("//!") || s.starts_with("/*!")
}
/// Classifies a doc comment as an inner (`//!`, `/*!`) or outer (`///`,
/// `/**`) attribute. Panics if `comment` is not a doc comment.
pub fn doc_comment_style(comment: &str) -> ast::AttrStyle {
    assert!(is_doc_comment(comment));
    let is_inner = comment.starts_with("//!") || comment.starts_with("/*!");
    if is_inner {
        ast::AttrInner
    } else {
        ast::AttrOuter
    }
}
/// Strips the doc-comment markers (`///`, `//!`, `/** ... */`, `/*! ... */`)
/// and any common leading `*` gutter, returning just the comment's text.
/// Panics if `comment` is not a doc comment.
pub fn strip_doc_comment_decoration(comment: &str) -> String {
    /// remove whitespace-only lines from the start/end of lines
    fn vertical_trim(lines: Vec<String>) -> Vec<String> {
        // `i..j` is the window of lines that survive the trim.
        let mut i = 0;
        let mut j = lines.len();
        // first line of all-stars should be omitted
        if !lines.is_empty() &&
            lines[0].chars().all(|c| c == '*') {
            i += 1;
        }
        while i < j && lines[i].trim().is_empty() {
            i += 1;
        }
        // like the first, a last line of all stars should be omitted
        // (first char skipped: it may be whitespace before the closing `*/`)
        if j > i && lines[j - 1]
                        .chars()
                        .skip(1)
                        .all(|c| c == '*') {
            j -= 1;
        }
        while j > i && lines[j - 1].trim().is_empty() {
            j -= 1;
        }
        lines[i..j].iter().cloned().collect()
    }
    /// remove a "[ \t]*\*" block from each line, if possible
    fn horizontal_trim(lines: Vec<String> ) -> Vec<String> {
        // `i` is the shared column of the `*` gutter; MAX until the first
        // line establishes it.
        let mut i = usize::MAX;
        let mut can_trim = true;
        let mut first = true;
        for line in &lines {
            for (j, c) in line.chars().enumerate() {
                // A non-gutter character before the `*`, or anything past
                // the established column, defeats the trim.
                if j > i || !"* \t".contains(c) {
                    can_trim = false;
                    break;
                }
                if c == '*' {
                    if first {
                        // First line fixes the gutter column for the rest.
                        i = j;
                        first = false;
                    } else if i != j {
                        can_trim = false;
                    }
                    break;
                }
            }
            if i > line.len() {
                can_trim = false;
            }
            if !can_trim {
                break;
            }
        }
        if can_trim {
            // Drop everything up to and including the gutter `*`.
            lines.iter().map(|line| {
                (&line[i + 1..line.len()]).to_string()
            }).collect()
        } else {
            lines
        }
    }
    // one-line comments lose their prefix
    const ONELINERS: &'static [&'static str] = &["///!", "///", "//!", "//"];
    for prefix in ONELINERS {
        if comment.starts_with(*prefix) {
            return (&comment[prefix.len()..]).to_string();
        }
    }
    if comment.starts_with("/*") {
        // Strip the three-char opener (`/**` or `/*!`) and the `*/` closer,
        // then trim blank edge lines and the `*` gutter.
        let lines = comment[3..comment.len() - 2]
            .lines_any()
            .map(|s| s.to_string())
            .collect::<Vec<String> >();
        let lines = vertical_trim(lines);
        let lines = horizontal_trim(lines);
        return lines.join("\n");
    }
    panic!("not a doc-comment: {}", comment);
}
/// Records a blank line (as a `BlankLine`-style comment) at the reader's
/// current position, so the pretty-printer can reproduce the spacing.
fn push_blank_line_comment(rdr: &StringReader, comments: &mut Vec<Comment>) {
    debug!(">>> blank-line comment");
    let blank = Comment {
        style: BlankLine,
        lines: Vec::new(),
        pos: rdr.last_pos,
    };
    comments.push(blank);
}
/// Skips whitespace, recording every fully blank line encountered as a
/// `BlankLine` comment.
fn consume_whitespace_counting_blank_lines(rdr: &mut StringReader,
                                           comments: &mut Vec<Comment>) {
    loop {
        if rdr.is_eof() || !is_whitespace(rdr.curr) {
            break;
        }
        // A newline in column zero means the whole line was blank.
        if rdr.col == CharPos(0) && rdr.curr_is('\n') {
            push_blank_line_comment(rdr, &mut *comments);
        }
        rdr.bump();
    }
}
/// Reads a `#!` (shebang) line and records it as a single-line comment.
fn read_shebang_comment(rdr: &mut StringReader, code_to_the_left: bool,
                        comments: &mut Vec<Comment>) {
    debug!(">>> shebang comment");
    let start = rdr.last_pos;
    debug!("<<< shebang comment");
    let style = if code_to_the_left { Trailing } else { Isolated };
    comments.push(Comment {
        style: style,
        lines: vec!(rdr.read_one_line_comment()),
        pos: start
    });
}
/// Reads a run of consecutive `//` comment lines and records them as one
/// multi-line `Comment`. Stops early at a doc comment, which is an
/// attribute rather than a plain comment.
fn read_line_comments(rdr: &mut StringReader, code_to_the_left: bool,
                      comments: &mut Vec<Comment>) {
    debug!(">>> line comments");
    let start = rdr.last_pos;
    let mut collected: Vec<String> = Vec::new();
    while rdr.curr_is('/') && rdr.nextch_is('/') {
        let line = rdr.read_one_line_comment();
        debug!("{}", line);
        // Doc comments are not put in comments.
        if is_doc_comment(&line[..]) {
            break;
        }
        collected.push(line);
        rdr.consume_non_eol_whitespace();
    }
    debug!("<<< line comments");
    if collected.is_empty() {
        return;
    }
    comments.push(Comment {
        style: if code_to_the_left { Trailing } else { Isolated },
        lines: collected,
        pos: start
    });
}
/// Returns `None` if any of the first `col` chars of `s` is non-whitespace.
/// Otherwise returns `Some(k)`, the byte offset just past that leading
/// whitespace; note `k` may equal `s.len()` when `s` is shorter than `col`.
fn all_whitespace(s: &str, col: CharPos) -> Option<usize> {
    let limit = s.len();
    let mut remaining = col.to_usize();
    let mut cursor: usize = 0;
    while remaining > 0 && cursor < limit {
        let ch = char_at(s, cursor);
        if !ch.is_whitespace() {
            return None;
        }
        // Advance by the char's encoded width to stay on a char boundary.
        cursor += ch.len_utf8();
        remaining -= 1;
    }
    Some(cursor)
}
/// Strips up to `col` columns of leading whitespace from `s` (leaving it
/// untouched if non-whitespace appears earlier) and appends it to `lines`.
fn trim_whitespace_prefix_and_push_line(lines: &mut Vec<String> ,
                                        s: String, col: CharPos) {
    let len = s.len();
    let trimmed = match all_whitespace(&s[..], col) {
        // Non-whitespace inside the prefix: keep the line as-is.
        None => s,
        Some(idx) => {
            if idx < len {
                (&s[idx..len]).to_string()
            } else {
                "".to_string()
            }
        }
    };
    debug!("pushing line: {}", trimmed);
    lines.push(trimmed);
}
/// Reads a `/* ... */` comment (which may nest) and records it, stripping
/// the common indentation (`col`) from continuation lines. Block doc
/// comments (`/** */`, `/*! */`) are skipped: they are attributes.
fn read_block_comment(rdr: &mut StringReader,
                      code_to_the_left: bool,
                      comments: &mut Vec<Comment> ) {
    debug!(">>> block comment");
    let p = rdr.last_pos;
    let mut lines: Vec<String> = Vec::new();
    // Column where the comment opens; used to trim continuation lines.
    let col = rdr.col;
    // Consume the opening "/*".
    rdr.bump();
    rdr.bump();
    let mut curr_line = String::from("/*");
    // doc-comments are not really comments, they are attributes
    if (rdr.curr_is('*') && !rdr.nextch_is('*')) || rdr.curr_is('!') {
        // Looks like "/**" or "/*!": slurp through the closing "*/".
        while !(rdr.curr_is('*') && rdr.nextch_is('/')) && !rdr.is_eof() {
            curr_line.push(rdr.curr.unwrap());
            rdr.bump();
        }
        if !rdr.is_eof() {
            curr_line.push_str("*/");
            rdr.bump();
            rdr.bump();
        }
        // Genuine block doc comments are not recorded here.
        if is_block_doc_comment(&curr_line[..]) {
            return
        }
        assert!(!curr_line.contains('\n'));
        lines.push(curr_line);
    } else {
        // Ordinary block comment: track nesting depth, since the lexer
        // treats "/*" inside a block comment as opening a nested one.
        let mut level: isize = 1;
        while level > 0 {
            debug!("=== block comment level {}", level);
            if rdr.is_eof() {
                rdr.fatal("unterminated block comment");
            }
            if rdr.curr_is('\n') {
                // End of line: flush the accumulated line, trimmed to `col`.
                trim_whitespace_prefix_and_push_line(&mut lines,
                                                     curr_line,
                                                     col);
                curr_line = String::new();
                rdr.bump();
            } else {
                curr_line.push(rdr.curr.unwrap());
                if rdr.curr_is('/') && rdr.nextch_is('*') {
                    rdr.bump();
                    rdr.bump();
                    curr_line.push('*');
                    level += 1;
                } else {
                    if rdr.curr_is('*') && rdr.nextch_is('/') {
                        rdr.bump();
                        rdr.bump();
                        curr_line.push('/');
                        level -= 1;
                    } else { rdr.bump(); }
                }
            }
        }
        // Flush the final line (not terminated by a newline).
        if !curr_line.is_empty() {
            trim_whitespace_prefix_and_push_line(&mut lines,
                                                 curr_line,
                                                 col);
        }
    }
    // A one-line block comment with code following on the same line is Mixed.
    let mut style = if code_to_the_left { Trailing } else { Isolated };
    rdr.consume_non_eol_whitespace();
    if !rdr.is_eof() && !rdr.curr_is('\n') && lines.len() == 1 {
        style = Mixed;
    }
    debug!("<<< block comment");
    comments.push(Comment {style: style, lines: lines, pos: p});
}
/// Dispatches on the comment opener under the cursor (`//`, `/*`, or `#!`)
/// and consumes it. Panics if the cursor is not at a comment.
fn consume_comment(rdr: &mut StringReader,
                   code_to_the_left: bool,
                   comments: &mut Vec<Comment> ) {
    debug!(">>> consume comment");
    if rdr.curr_is('/') {
        if rdr.nextch_is('/') {
            read_line_comments(rdr, code_to_the_left, comments);
        } else if rdr.nextch_is('*') {
            read_block_comment(rdr, code_to_the_left, comments);
        } else {
            panic!();
        }
    } else if rdr.curr_is('#') && rdr.nextch_is('!') {
        read_shebang_comment(rdr, code_to_the_left, comments);
    } else {
        panic!();
    }
    debug!("<<< consume comment");
}
/// A literal token captured verbatim from the source, with its position.
#[derive(Clone)]
pub struct Literal {
    /// The literal's source text, exactly as written.
    pub lit: String,
    /// Position of the literal in the source map.
    pub pos: BytePos,
}
// it appears this function is called only from pprust... that's
// probably not a good thing.
/// Lexes the entire source read from `srdr`, collecting every comment and
/// every literal token (with positions) so the pretty-printer can re-emit
/// them faithfully.
pub fn gather_comments_and_literals(span_diagnostic: &diagnostic::SpanHandler,
                                    path: String,
                                    srdr: &mut Read)
                                 -> (Vec<Comment>, Vec<Literal>) {
    let mut src = Vec::new();
    srdr.read_to_end(&mut src).unwrap();
    let src = String::from_utf8(src).unwrap();
    let cm = CodeMap::new();
    let filemap = cm.new_filemap(path, src);
    let mut rdr = lexer::StringReader::new_raw(span_diagnostic, filemap);
    let mut comments: Vec<Comment> = Vec::new();
    let mut literals: Vec<Literal> = Vec::new();
    let mut first_read: bool = true;
    while !rdr.is_eof() {
        loop {
            // A token earlier on the same line makes a comment "trailing";
            // the very first read has no code to its left.
            let mut code_to_the_left = !first_read;
            rdr.consume_non_eol_whitespace();
            if rdr.curr_is('\n') {
                code_to_the_left = false;
                consume_whitespace_counting_blank_lines(&mut rdr, &mut comments);
            }
            // Consume every comment (and intervening blank lines) before
            // the next token.
            while rdr.peeking_at_comment() {
                consume_comment(&mut rdr, code_to_the_left, &mut comments);
                consume_whitespace_counting_blank_lines(&mut rdr, &mut comments);
            }
            break;
        }
        let bstart = rdr.last_pos;
        rdr.next_token();
        //discard, and look ahead; we're working with internal state
        let TokenAndSpan { tok, sp } = rdr.peek();
        if tok.is_lit() {
            // Record the literal's exact source text and start position.
            rdr.with_str_from(bstart, |s| {
                debug!("tok lit: {}", s);
                literals.push(Literal {lit: s.to_string(), pos: sp.lo});
            })
        } else {
            debug!("tok: {}", pprust::token_to_string(&tok));
        }
        first_read = false;
    }
    (comments, literals)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Asserts that stripping `raw`'s doc-comment decoration yields `expected`.
    fn check(raw: &str, expected: &str) {
        let stripped = strip_doc_comment_decoration(raw);
        assert_eq!(stripped, expected);
    }

    #[test] fn test_block_doc_comment_1() {
        check("/**\n * Test \n ** Test\n * Test\n*/",
              " Test \n* Test\n Test");
    }

    #[test] fn test_block_doc_comment_2() {
        check("/**\n * Test\n * Test\n*/",
              " Test\n Test");
    }

    #[test] fn test_block_doc_comment_3() {
        check("/**\n let a: *i32;\n *a = 5;\n*/",
              " let a: *i32;\n *a = 5;");
    }

    #[test] fn test_block_doc_comment_4() {
        check("/*******************\n test\n *********************/",
              " test");
    }

    #[test] fn test_line_doc_comment() {
        check("/// test", " test");
        check("///! test", " test");
        check("// test", " test");
        check("// test", " test");
        check("///test", "test");
        check("///!test", "test");
        check("//test", "test");
    }
}
| vhbit/rust | src/libsyntax/parse/lexer/comments.rs | Rust | apache-2.0 | 13,857 |
//// [parser509698.ts]
/// <style requireSemi="on" />
/// <reference no-default-lib="true"/>
declare function foo(): void;
declare function bar(): void;
//// [parser509698.js]
| RReverser/TSX | tests/baselines/reference/parser509698.js | JavaScript | apache-2.0 | 182 |
package daemon
import (
derr "github.com/docker/docker/api/errors"
)
// ContainerUnpause unpauses the container identified by name. It returns an
// error when the container cannot be found or cannot be unpaused.
func (daemon *Daemon) ContainerUnpause(name string) error {
	container, err := daemon.Get(name)
	if err != nil {
		return err
	}

	// Wrap the low-level failure in the API-level "can't unpause" error code.
	if unpauseErr := container.unpause(); unpauseErr != nil {
		return derr.ErrorCodeCantUnpause.WithArgs(name, unpauseErr)
	}

	return nil
}
| dongjiaqiang/docker | daemon/unpause.go | GO | apache-2.0 | 361 |
/*
* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package org.openjdk.jmh.samples
import org.openjdk.jmh.annotations._
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger
@State(Scope.Group)
@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.NANOSECONDS)
class JMHSample_15_Asymmetric {
  /*
   * So far all the tests were symmetric: the same code was executed in all the threads.
   * At times, you need the asymmetric test. JMH provides this with the notion of @Group,
   * which can bind several methods together, and all the threads are distributed among
   * the test methods.
   *
   * Each execution group contains of one or more threads. Each thread within a particular
   * execution group executes one of @Group-annotated @Benchmark methods. Multiple execution
   * groups may participate in the run. The total thread count in the run is rounded to the
   * execution group size, which will only allow the full execution groups.
   *
   * Note that two state scopes: Scope.Benchmark and Scope.Thread are not covering all
   * the use cases here -- you either share everything in the state, or share nothing.
   * To break this, we have the middle ground Scope.Group, which marks the state to be
   * shared within the execution group, but not among the execution groups.
   *
   * Putting this all together, the example below means:
   *  a) define the execution group "g", with 3 threads executing inc(), and 1 thread
   *     executing get(), 4 threads per group in total;
   *  b) if we run this test case with 4 threads, then we will have a single execution
   *     group. Generally, running with 4*N threads will create N execution groups, etc.;
   *  c) each execution group has one @State instance to share: that is, execution groups
   *     share the counter within the group, but not across the groups.
   */
  // Shared within one execution group (Scope.Group): the three incrementing
  // threads and the one reading thread of a group see the same counter.
  private var counter: AtomicInteger = _
  // JMH setup hook: initializes the shared counter before measurement.
  @Setup
  def up {
    counter = new AtomicInteger
  }
  // Writer side of the group: three threads per group increment the counter.
  @Benchmark
  @Group("g")
  @GroupThreads(3)
  def inc: Int = counter.incrementAndGet
  // Reader side of the group: one thread per group reads the counter.
  @Benchmark
  @Group("g")
  @GroupThreads(1)
  def get: Int = counter.get
}
| udl/sbt-jmh | src/sbt-test/sbt-jmh/run/src/main/scala/org/openjdk/jmh/samples/JMHSample_15_Asymmetric.scala | Scala | apache-2.0 | 3,311 |
// Compiled-from-CoffeeScript AMD module. Defines two Backbone views:
// a per-item row (AccessibleViewItemView) and a list container
// (AccessibleViewsView) bound to #accessible-views; the module's return
// value is the container view class.
(function() {
  define(function() {
    var AccessibleViewItemView, AccessibleViewsView;
    // Row for one accessible view model; renders a link showing the view's
    // class and accessibility label, and relays mouse interaction.
    AccessibleViewItemView = Backbone.View.extend({
      tagName: 'div',
      // DOM events delegated by Backbone to the handlers below.
      events: {
        "click": "clicked",
        "mouseover": "mousedover",
        "mouseout": "mousedout"
      },
      // Rebuilds this row's markup from the model's class and label.
      render: function() {
        this.$el.empty().append("<a href=\"#\" title=\"" + (this.model.getShelleySelector()) + "\">\n  <span class=\"viewClass\">" + (this.model.get('class')) + "</span>\n  with label\n  \"<span class=\"viewLabel\">" + (this.model.get('accessibilityLabel')) + "</span>\"\n</a>");
        return this;
      },
      // Hovering toggles the model's "active" state.
      mousedover: function() {
        return this.model.setActive();
      },
      mousedout: function() {
        return this.model.unsetActive();
      },
      // Clicking announces this model as the selected accessible view.
      clicked: function() {
        return this.model.trigger('accessible-selected', this.model);
      }
    });
    // Container view: owns a collection and re-renders on 'reset'.
    AccessibleViewsView = Backbone.View.extend({
      el: $('#accessible-views'),
      initialize: function() {
        this.collection = new Backbone.Collection;
        return this.collection.on('reset', _.bind(this.render, this));
      },
      // Empties the container and appends one rendered row per model.
      render: function() {
        var _this = this;
        this.$el.empty();
        this.collection.each(function(viewModel) {
          return _this.$el.append(new AccessibleViewItemView({
            model: viewModel
          }).render().el);
        });
        return this;
      }
    });
    return AccessibleViewsView;
  });
}).call(this);
| Weeman360/frank-cucumber-pitchin | gem/Frank/frank_static_resources.bundle/js/accessible_views_view.js | JavaScript | apache-2.0 | 1,491 |
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<link rel="stylesheet" href="../includes/main.css" type="text/css">
<link rel="shortcut icon" href="../favicon.ico" type="image/x-icon">
<title>Apache CloudStack | The Power Behind Your Cloud</title>
</head>
<body>
<div id="insidetopbg">
<div id="inside_wrapper">
<div class="uppermenu_panel">
<div class="uppermenu_box"></div>
</div>
<div id="main_master">
<div id="inside_header">
<div class="header_top">
<a class="cloud_logo" href="http://cloudstack.org"></a>
<div class="mainemenu_panel"></div>
</div>
</div>
<div id="main_content">
<div class="inside_apileftpanel">
<div class="inside_contentpanel" style="width:930px;">
<div class="api_titlebox">
<div class="api_titlebox_left">
<span>
Apache CloudStack v4.4.1 User API Reference
</span>
<p></p>
<h1>createInstanceGroup</h1>
<p>Creates a vm group</p>
</div>
<div class="api_titlebox_right">
<a class="api_backbutton" href="../TOC_User.html"></a>
</div>
</div>
<div class="api_tablepanel">
<h2>Request parameters</h2>
<table class="apitable">
<tr class="hed">
<td style="width:200px;"><strong>Parameter Name</strong></td><td style="width:500px;">Description</td><td style="width:180px;">Required</td>
</tr>
<tr>
<td style="width:200px;"><strong>name</strong></td><td style="width:500px;"><strong>the name of the instance group</strong></td><td style="width:180px;"><strong>true</strong></td>
</tr>
<tr>
<td style="width:200px;"><i>account</i></td><td style="width:500px;"><i>the account of the instance group. The account parameter must be used with the domainId parameter.</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>domainid</i></td><td style="width:500px;"><i>the domain ID of account owning the instance group</i></td><td style="width:180px;"><i>false</i></td>
</tr>
<tr>
<td style="width:200px;"><i>projectid</i></td><td style="width:500px;"><i>The project of the instance group</i></td><td style="width:180px;"><i>false</i></td>
</tr>
</table>
</div>
<div class="api_tablepanel">
<h2>Response Tags</h2>
<table class="apitable">
<tr class="hed">
<td style="width:200px;"><strong>Response Name</strong></td><td style="width:500px;">Description</td>
</tr>
<tr>
<td style="width:200px;"><strong>id</strong></td><td style="width:500px;">the id of the instance group</td>
</tr>
<tr>
<td style="width:200px;"><strong>account</strong></td><td style="width:500px;">the account owning the instance group</td>
</tr>
<tr>
<td style="width:200px;"><strong>created</strong></td><td style="width:500px;">time and date the instance group was created</td>
</tr>
<tr>
<td style="width:200px;"><strong>domain</strong></td><td style="width:500px;">the domain name of the instance group</td>
</tr>
<tr>
<td style="width:200px;"><strong>domainid</strong></td><td style="width:500px;">the domain ID of the instance group</td>
</tr>
<tr>
<td style="width:200px;"><strong>name</strong></td><td style="width:500px;">the name of the instance group</td>
</tr>
<tr>
<td style="width:200px;"><strong>project</strong></td><td style="width:500px;">the project name of the group</td>
</tr>
<tr>
<td style="width:200px;"><strong>projectid</strong></td><td style="width:500px;">the project id of the group</td>
</tr>
</table>
</div>
</div>
</div>
</div>
</div>
<div id="footer">
<div id="footer_mainmaster">
<p>Copyright © 2014 The Apache Software Foundation, Licensed under the
<a href="http://www.apache.org/licenses/LICENSE-2.0">Apache License, Version 2.0.</a>
<br>
Apache, CloudStack, Apache CloudStack, the Apache CloudStack logo, the CloudMonkey logo and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
</div>
</div>
</div>
</div>
</body>
</html>
| resmo/cloudstack-www | source/api/apidocs-4.4/user/createInstanceGroup.html | HTML | apache-2.0 | 3,965 |
/**
* ueditor完整配置项
* 可以在这里配置整个编辑器的特性
*/
/**************************提示********************************
* 所有被注释的配置项均为UEditor默认值。
* 修改默认配置请首先确保已经完全明确该参数的真实用途。
* 主要有两种修改方案,一种是取消此处注释,然后修改成对应参数;另一种是在实例化编辑器时传入对应参数。
* 当升级编辑器时,可直接使用旧版配置文件替换新版配置文件,不用担心旧版配置文件中因缺少新功能所需的参数而导致脚本报错。
**************************提示********************************/
(function () {
/**
* 编辑器资源文件根路径。它所表示的含义是:以编辑器实例化页面为当前路径,指向编辑器资源文件(即dialog等文件夹)的路径。
* 鉴于很多同学在使用编辑器的时候出现的种种路径问题,此处强烈建议大家使用"相对于网站根目录的相对路径"进行配置。
* "相对于网站根目录的相对路径"也就是以斜杠开头的形如"/myProject/ueditor/"这样的路径。
* 如果站点中有多个不在同一层级的页面需要实例化编辑器,且引用了同一UEditor的时候,此处的URL可能不适用于每个页面的编辑器。
* 因此,UEditor提供了针对不同页面的编辑器可单独配置的根路径,具体来说,在需要实例化编辑器的页面最顶部写上如下代码即可。当然,需要令此处的URL等于对应的配置。
* window.UEDITOR_HOME_URL = "/xxxx/xxxx/";
*/
//var URL = window.UEDITOR_HOME_URL || getUEBasePath();
var getRootPath = function (){
//获取当前网址
var curWwwPath=window.document.location.href;
//获取主机地址之后的目录
var pathName=window.document.location.pathname;
var pos=curWwwPath.indexOf(pathName);
//获取主机地址
var localhostPaht=curWwwPath.substring(0,pos);
//获取带"/"的项目名,如:/uimcardprj
var projectName=pathName.substring(0,pathName.substr(1).indexOf('/')+1);
return(localhostPaht+projectName);
}
//获取路径
var applicationPath = getRootPath();
var URL = window.UEDITOR_HOME_URL || getUEBasePath();
var serverURL = applicationPath;
/**
* 配置项主体。注意,此处所有涉及到路径的配置别遗漏URL变量。
*/
window.UEDITOR_CONFIG = {
//为编辑器实例添加一个路径,这个不能被注释
UEDITOR_HOME_URL: URL
// 服务器统一请求接口路径
//, serverUrl: URL + "jsp/controller.jsp"
, serverUrl: serverURL + "ueditor/dispatch"
//工具栏上的所有的功能按钮和下拉框,可以在new编辑器的实例时选择自己需要的从新定义
, toolbars: [[
'fullscreen', 'source', '|', 'undo', 'redo', '|',
'bold', 'italic', 'underline', 'fontborder', 'strikethrough', 'superscript', 'subscript', 'removeformat', 'formatmatch', 'autotypeset', 'blockquote', 'pasteplain', '|', 'forecolor', 'backcolor', 'insertorderedlist', 'insertunorderedlist', 'selectall', 'cleardoc', '|',
'rowspacingtop', 'rowspacingbottom', 'lineheight', '|',
'customstyle', 'paragraph', 'fontfamily', 'fontsize', '|',
'directionalityltr', 'directionalityrtl', 'indent', '|',
'justifyleft', 'justifycenter', 'justifyright', 'justifyjustify', '|', 'touppercase', 'tolowercase', '|',
'link', 'unlink', 'anchor', '|', 'imagenone', 'imageleft', 'imageright', 'imagecenter', '|',
'simpleupload', 'insertimage', 'emotion', 'scrawl', 'insertvideo', 'music', 'attachment', 'map', 'gmap', 'insertframe', 'insertcode', 'webapp', 'pagebreak', 'template', 'background', '|',
'horizontal', 'date', 'time', 'spechars', 'snapscreen', 'wordimage', '|',
'inserttable', 'deletetable', 'insertparagraphbeforetable', 'insertrow', 'deleterow', 'insertcol', 'deletecol', 'mergecells', 'mergeright', 'mergedown', 'splittocells', 'splittorows', 'splittocols', 'charts', '|',
'print', 'preview', 'searchreplace', 'help', 'drafts'
]]
//当鼠标放在工具栏上时显示的tooltip提示,留空支持自动多语言配置,否则以配置值为准
//,labelMap:{
// 'anchor':'', 'undo':''
//}
//语言配置项,默认是zh-cn。有需要的话也可以使用如下这样的方式来自动多语言切换,当然,前提条件是lang文件夹下存在对应的语言文件:
//lang值也可以通过自动获取 (navigator.language||navigator.browserLanguage ||navigator.userLanguage).toLowerCase()
//,lang:"zh-cn"
//,langPath:URL +"lang/"
//主题配置项,默认是default。有需要的话也可以使用如下这样的方式来自动多主题切换,当然,前提条件是themes文件夹下存在对应的主题文件:
//现有如下皮肤:default
//,theme:'default'
//,themePath:URL +"themes/"
//,zIndex : 900 //编辑器层级的基数,默认是900
//针对getAllHtml方法,会在对应的head标签中增加该编码设置。
//,charset:"utf-8"
//若实例化编辑器的页面手动修改的domain,此处需要设置为true
//,customDomain:false
//常用配置项目
//,isShow : true //默认显示编辑器
//,textarea:'editorValue' // 提交表单时,服务器获取编辑器提交内容的所用的参数,多实例时可以给容器name属性,会将name给定的值最为每个实例的键值,不用每次实例化的时候都设置这个值
//,initialContent:'欢迎使用ueditor!' //初始化编辑器的内容,也可以通过textarea/script给值,看官网例子
//,autoClearinitialContent:true //是否自动清除编辑器初始内容,注意:如果focus属性设置为true,这个也为真,那么编辑器一上来就会触发导致初始化的内容看不到了
//,focus:false //初始化时,是否让编辑器获得焦点true或false
//如果自定义,最好给p标签如下的行高,要不输入中文时,会有跳动感
//,initialStyle:'p{line-height:1em}'//编辑器层级的基数,可以用来改变字体等
//,iframeCssUrl: URL + '/themes/iframe.css' //给编辑器内部引入一个css文件
//indentValue
//首行缩进距离,默认是2em
//,indentValue:'2em'
//,initialFrameWidth:1000 //初始化编辑器宽度,默认1000
//,initialFrameHeight:320 //初始化编辑器高度,默认320
//,readonly : false //编辑器初始化结束后,编辑区域是否是只读的,默认是false
//,autoClearEmptyNode : true //getContent时,是否删除空的inlineElement节点(包括嵌套的情况)
//启用自动保存
//,enableAutoSave: true
//自动保存间隔时间, 单位ms
//,saveInterval: 500
//,fullscreen : false //是否开启初始化时即全屏,默认关闭
//,imagePopup:true //图片操作的浮层开关,默认打开
//,autoSyncData:true //自动同步编辑器要提交的数据
//,emotionLocalization:false //是否开启表情本地化,默认关闭。若要开启请确保emotion文件夹下包含官网提供的images表情文件夹
//粘贴只保留标签,去除标签所有属性
//,retainOnlyLabelPasted: false
//,pasteplain:false //是否默认为纯文本粘贴。false为不使用纯文本粘贴,true为使用纯文本粘贴
//纯文本粘贴模式下的过滤规则
//'filterTxtRules' : function(){
// function transP(node){
// node.tagName = 'p';
// node.setStyle();
// }
// return {
// //直接删除及其字节点内容
// '-' : 'script style object iframe embed input select',
// 'p': {$:{}},
// 'br':{$:{}},
// 'div':{'$':{}},
// 'li':{'$':{}},
// 'caption':transP,
// 'th':transP,
// 'tr':transP,
// 'h1':transP,'h2':transP,'h3':transP,'h4':transP,'h5':transP,'h6':transP,
// 'td':function(node){
// //没有内容的td直接删掉
// var txt = !!node.innerText();
// if(txt){
// node.parentNode.insertAfter(UE.uNode.createText(' '),node);
// }
// node.parentNode.removeChild(node,node.innerText())
// }
// }
//}()
//,allHtmlEnabled:false //提交到后台的数据是否包含整个html字符串
//insertorderedlist
//有序列表的下拉配置,值留空时支持多语言自动识别,若配置值,则以此值为准
//,'insertorderedlist':{
// //自定的样式
// 'num':'1,2,3...',
// 'num1':'1),2),3)...',
// 'num2':'(1),(2),(3)...',
// 'cn':'一,二,三....',
// 'cn1':'一),二),三)....',
// 'cn2':'(一),(二),(三)....',
// //系统自带
// 'decimal' : '' , //'1,2,3...'
// 'lower-alpha' : '' , // 'a,b,c...'
// 'lower-roman' : '' , //'i,ii,iii...'
// 'upper-alpha' : '' , lang //'A,B,C'
// 'upper-roman' : '' //'I,II,III...'
//}
//insertunorderedlist
//无序列表的下拉配置,值留空时支持多语言自动识别,若配置值,则以此值为准
//,insertunorderedlist : { //自定的样式
// 'dash' :'— 破折号', //-破折号
// 'dot':' 。 小圆圈', //系统自带
// 'circle' : '', // '○ 小圆圈'
// 'disc' : '', // '● 小圆点'
// 'square' : '' //'■ 小方块'
//}
//,listDefaultPaddingLeft : '30'//默认的左边缩进的基数倍
//,listiconpath : 'http://bs.baidu.com/listicon/'//自定义标号的路径
//,maxListLevel : 3 //限制可以tab的级数, 设置-1为不限制
//,autoTransWordToList:false //禁止word中粘贴进来的列表自动变成列表标签
//fontfamily
//字体设置 label留空支持多语言自动切换,若配置,则以配置值为准
//,'fontfamily':[
// { label:'',name:'songti',val:'宋体,SimSun'},
// { label:'',name:'kaiti',val:'楷体,楷体_GB2312, SimKai'},
// { label:'',name:'yahei',val:'微软雅黑,Microsoft YaHei'},
// { label:'',name:'heiti',val:'黑体, SimHei'},
// { label:'',name:'lishu',val:'隶书, SimLi'},
// { label:'',name:'andaleMono',val:'andale mono'},
// { label:'',name:'arial',val:'arial, helvetica,sans-serif'},
// { label:'',name:'arialBlack',val:'arial black,avant garde'},
// { label:'',name:'comicSansMs',val:'comic sans ms'},
// { label:'',name:'impact',val:'impact,chicago'},
// { label:'',name:'timesNewRoman',val:'times new roman'}
//]
//fontsize
//字号
//,'fontsize':[10, 11, 12, 14, 16, 18, 20, 24, 36]
//paragraph
//段落格式 值留空时支持多语言自动识别,若配置,则以配置值为准
//,'paragraph':{'p':'', 'h1':'', 'h2':'', 'h3':'', 'h4':'', 'h5':'', 'h6':''}
//rowspacingtop
//段间距 值和显示的名字相同
//,'rowspacingtop':['5', '10', '15', '20', '25']
//rowspacingBottom
//段间距 值和显示的名字相同
//,'rowspacingbottom':['5', '10', '15', '20', '25']
//lineheight
//行内间距 值和显示的名字相同
//,'lineheight':['1', '1.5','1.75','2', '3', '4', '5']
//customstyle
//自定义样式,不支持国际化,此处配置值即可最后显示值
//block的元素是依据设置段落的逻辑设置的,inline的元素依据BIU的逻辑设置
//尽量使用一些常用的标签
//参数说明
//tag 使用的标签名字
//label 显示的名字也是用来标识不同类型的标识符,注意这个值每个要不同,
//style 添加的样式
//每一个对象就是一个自定义的样式
//,'customstyle':[
// {tag:'h1', name:'tc', label:'', style:'border-bottom:#ccc 2px solid;padding:0 4px 0 0;text-align:center;margin:0 0 20px 0;'},
// {tag:'h1', name:'tl',label:'', style:'border-bottom:#ccc 2px solid;padding:0 4px 0 0;margin:0 0 10px 0;'},
// {tag:'span',name:'im', label:'', style:'font-style:italic;font-weight:bold'},
// {tag:'span',name:'hi', label:'', style:'font-style:italic;font-weight:bold;color:rgb(51, 153, 204)'}
//]
//打开右键菜单功能
//,enableContextMenu: true
//右键菜单的内容,可以参考plugins/contextmenu.js里边的默认菜单的例子,label留空支持国际化,否则以此配置为准
//,contextMenu:[
// {
// label:'', //显示的名称
// cmdName:'selectall',//执行的command命令,当点击这个右键菜单时
// //exec可选,有了exec就会在点击时执行这个function,优先级高于cmdName
// exec:function () {
// //this是当前编辑器的实例
// //this.ui._dialogs['inserttableDialog'].open();
// }
// }
//]
//快捷菜单
//,shortcutMenu:["fontfamily", "fontsize", "bold", "italic", "underline", "forecolor", "backcolor", "insertorderedlist", "insertunorderedlist"]
//elementPathEnabled
//是否启用元素路径,默认是显示
//,elementPathEnabled : true
//wordCount
//,wordCount:true //是否开启字数统计
//,maximumWords:10000 //允许的最大字符数
//字数统计提示,{#count}代表当前字数,{#leave}代表还可以输入多少字符数,留空支持多语言自动切换,否则按此配置显示
//,wordCountMsg:'' //当前已输入 {#count} 个字符,您还可以输入{#leave} 个字符
//超出字数限制提示 留空支持多语言自动切换,否则按此配置显示
//,wordOverFlowMsg:'' //<span style="color:red;">你输入的字符个数已经超出最大允许值,服务器可能会拒绝保存!</span>
//tab
//点击tab键时移动的距离,tabSize倍数,tabNode什么字符做为单位
//,tabSize:4
//,tabNode:' '
//removeFormat
//清除格式时可以删除的标签和属性
//removeForamtTags标签
//,removeFormatTags:'b,big,code,del,dfn,em,font,i,ins,kbd,q,samp,small,span,strike,strong,sub,sup,tt,u,var'
//removeFormatAttributes属性
//,removeFormatAttributes:'class,style,lang,width,height,align,hspace,valign'
//undo
//可以最多回退的次数,默认20
//,maxUndoCount:20
//当输入的字符数超过该值时,保存一次现场
//,maxInputCount:1
//autoHeightEnabled
// 是否自动长高,默认true
//,autoHeightEnabled:true
//scaleEnabled
//是否可以拉伸长高,默认true(当开启时,自动长高失效)
//,scaleEnabled:false
//,minFrameWidth:800 //编辑器拖动时最小宽度,默认800
//,minFrameHeight:220 //编辑器拖动时最小高度,默认220
//autoFloatEnabled
//是否保持toolbar的位置不动,默认true
//,autoFloatEnabled:true
//浮动时工具栏距离浏览器顶部的高度,用于某些具有固定头部的页面
//,topOffset:30
//编辑器底部距离工具栏高度(如果参数大于等于编辑器高度,则设置无效)
//,toolbarTopOffset:400
//pageBreakTag
//分页标识符,默认是_ueditor_page_break_tag_
//,pageBreakTag:'_ueditor_page_break_tag_'
//autotypeset
//自动排版参数
//,autotypeset: {
// mergeEmptyline: true, //合并空行
// removeClass: true, //去掉冗余的class
// removeEmptyline: false, //去掉空行
// textAlign:"left", //段落的排版方式,可以是 left,right,center,justify 去掉这个属性表示不执行排版
// imageBlockLine: 'center', //图片的浮动方式,独占一行剧中,左右浮动,默认: center,left,right,none 去掉这个属性表示不执行排版
// pasteFilter: false, //根据规则过滤没事粘贴进来的内容
// clearFontSize: false, //去掉所有的内嵌字号,使用编辑器默认的字号
// clearFontFamily: false, //去掉所有的内嵌字体,使用编辑器默认的字体
// removeEmptyNode: false, // 去掉空节点
// //可以去掉的标签
// removeTagNames: {标签名字:1},
// indent: false, // 行首缩进
// indentValue : '2em', //行首缩进的大小
// bdc2sb: false,
// tobdc: false
//}
//tableDragable
//表格是否可以拖拽
//,tableDragable: true
//,disabledTableInTable:true //禁止表格嵌套
//sourceEditor
//源码的查看方式,codemirror 是代码高亮,textarea是文本框,默认是codemirror
//注意默认codemirror只能在ie8+和非ie中使用
//,sourceEditor:"codemirror"
//如果sourceEditor是codemirror,还用配置一下两个参数
//codeMirrorJsUrl js加载的路径,默认是 URL + "third-party/codemirror/codemirror.js"
//,codeMirrorJsUrl:URL + "third-party/codemirror/codemirror.js"
//codeMirrorCssUrl css加载的路径,默认是 URL + "third-party/codemirror/codemirror.css"
//,codeMirrorCssUrl:URL + "third-party/codemirror/codemirror.css"
//编辑器初始化完成后是否进入源码模式,默认为否。
//,sourceEditorFirst:false
//iframeUrlMap
//dialog内容的路径 ~会被替换成URL,该属性一旦打开,将覆盖所有的dialog的默认路径
//,iframeUrlMap:{
// 'anchor':'~/dialogs/anchor/anchor.html',
//}
//webAppKey 百度应用的APIkey,每个站长必须首先去百度官网注册一个key后方能正常使用app功能,注册介绍,http://app.baidu.com/static/cms/getapikey.html
//, webAppKey: ""
};
function getUEBasePath(docUrl, confUrl) {
    // Fall back to the current page URL and to the config script's own URL
    // when the caller does not supply explicit values.
    var pageUrl = docUrl || self.document.URL || self.location.href;
    var configUrl = confUrl || getConfigFilePath();
    return getBasePath(pageUrl, configUrl);
}
function getConfigFilePath() {
    // While a <script> tag is being parsed/executed, it is the last script
    // element in the document, so its src is the URL of this config file.
    var scripts = document.getElementsByTagName('script');
    var current = scripts[scripts.length - 1];
    return current.src;
}
function getBasePath(docUrl, confUrl) {
    // Resolve confUrl against docUrl, then normalise the result.
    var basePath;
    if (/^(\/|\\\\)/.test(confUrl)) {
        // Root-relative path: keep only the scheme + host part of docUrl.
        basePath = /^.+?\w(\/|\\\\)/.exec(docUrl)[0] + confUrl.replace(/^(\/|\\\\)/, '');
    } else if (!/^[a-z]+:/i.test(confUrl)) {
        // Document-relative path: strip hash, query and file name from docUrl.
        var docDir = docUrl.split("#")[0].split("?")[0].replace(/[^\\\/]+$/, '');
        basePath = docDir + confUrl;
    } else {
        // Already absolute (has a scheme): use as-is.
        basePath = confUrl;
    }
    return optimizationPath(basePath);
}
function optimizationPath(path) {
    // Split off the scheme ("http://", "file://", ...).
    var protocol = /^[a-z]+:\/\//.exec(path)[0];
    // Drop scheme, query string and fragment.
    var rest = path.replace(protocol, "").split("?")[0].split("#")[0];
    // Normalise separators and discard the trailing file name segment.
    var segments = rest.replace(/\\/g, '/').split(/\//);
    segments[segments.length - 1] = "";
    // Collapse "." and ".." segments.
    var resolved = [];
    for (var i = 0; i < segments.length; i++) {
        var seg = segments[i];
        if (seg === "..") {
            resolved.pop();
        } else if (seg !== ".") {
            resolved.push(seg);
        }
    }
    return protocol + resolved.join("/");
}
// Publish the minimal UE namespace from this closure; only the base-path
// helper is exposed to the outside.
window.UE = {
    getUEBasePath: getUEBasePath
};
})();
| XieXianbin/ueditor-for-all-cdn | web/ueditor-plus-webapp/src/main/webapp/ueditor1_4_3-utf8-jsp/ueditor.config.update.js | JavaScript | apache-2.0 | 20,933 |
// ----------------------------------------------------------------------------------
//
// Copyright Microsoft Corporation
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------------
using Microsoft.Azure.Batch;
using System.Collections.Generic;
namespace Microsoft.Azure.Commands.Batch.Models
{
/// <summary>
/// Options for listing the files on a VM, or for retrieving a single VM file by name.
/// </summary>
public class ListVMFileOptions : VMOperationParameters
{
    /// <summary>
    /// Creates listing options scoped to the given Batch account, pool and VM.
    /// </summary>
    public ListVMFileOptions(BatchAccountContext context, string poolName, string vmName, PSVM vm, IEnumerable<BatchClientBehavior> additionalBehaviors = null)
        : base(context, poolName, vmName, vm, additionalBehaviors)
    { }
    /// <summary>
    /// If specified, the single vm file with this name will be returned
    /// </summary>
    public string VMFileName { get; set; }
    /// <summary>
    /// The OData filter to use when querying for vm files
    /// </summary>
    public string Filter { get; set; }
    /// <summary>
    /// The maximum number of vm files to return
    /// </summary>
    public int MaxCount { get; set; }
    /// <summary>
    /// If true, performs a recursive list of all files of the vm. If false, returns only the files at the vm directory root.
    /// </summary>
    public bool Recursive { get; set; }
}
}
| kagamsft/azure-powershell | src/ResourceManager/Batch/Commands.Batch/Models/ListVMFileOptions.cs | C# | apache-2.0 | 1,877 |
/*
* Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
/*
* Do not modify this file. This file is generated from the lambda-2014-11-11.normal.json service model.
*/
using System;
using System.Collections.Generic;
using System.Xml.Serialization;
using System.Text;
using System.IO;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
namespace Amazon.Lambda.Model
{
/// <summary>
/// Contains a list of event sources (see <a>API_EventSourceConfiguration</a>)
/// </summary>
public partial class ListEventSourcesResult : AmazonWebServiceResponse
{
    private List<EventSourceConfiguration> _eventSources = new List<EventSourceConfiguration>();
    private string _nextMarker;
    /// <summary>
    /// Gets and sets the property EventSources.
    /// <para>
    /// An array of <code>EventSourceConfiguration</code> objects.
    /// </para>
    /// </summary>
    public List<EventSourceConfiguration> EventSources
    {
        get { return this._eventSources; }
        set { this._eventSources = value; }
    }
    // Check to see if EventSources property is set (non-null and non-empty).
    internal bool IsSetEventSources()
    {
        return this._eventSources != null && this._eventSources.Count > 0;
    }
    /// <summary>
    /// Gets and sets the property NextMarker.
    /// <para>
    /// A string, present if there are more event source mappings.
    /// </para>
    /// </summary>
    public string NextMarker
    {
        get { return this._nextMarker; }
        set { this._nextMarker = value; }
    }
    // Check to see if NextMarker property is set
    internal bool IsSetNextMarker()
    {
        return this._nextMarker != null;
    }
}
} | ykbarros/aws-sdk-xamarin | AWS.XamarinSDK/AWSSDK_iOS/Amazon.Lambda/Model/ListEventSourcesResult.cs | C# | apache-2.0 | 2,346 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.igfs;
import org.apache.ignite.*;
import org.apache.ignite.cache.*;
import org.apache.ignite.compute.*;
import org.apache.ignite.configuration.*;
import org.apache.ignite.igfs.*;
import org.apache.ignite.igfs.mapreduce.*;
import org.apache.ignite.igfs.mapreduce.records.*;
import org.apache.ignite.internal.util.typedef.*;
import org.apache.ignite.internal.util.typedef.internal.*;
import org.apache.ignite.lang.*;
import org.apache.ignite.resources.*;
import org.apache.ignite.spi.discovery.tcp.*;
import org.apache.ignite.spi.discovery.tcp.ipfinder.*;
import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.*;
import java.io.*;
import java.util.*;
import static org.apache.ignite.cache.CacheAtomicityMode.*;
import static org.apache.ignite.cache.CacheMode.*;
import static org.apache.ignite.cache.CacheWriteSynchronizationMode.*;
import static org.apache.ignite.igfs.IgfsMode.*;
/**
* Tests for {@link IgfsTask}.
*/
public class IgfsTaskSelfTest extends IgfsCommonAbstractTest {
/** Predefined words dictionary. */
private static final String[] DICTIONARY = new String[] {"word0", "word1", "word2", "word3", "word4", "word5",
"word6", "word7"};
/** File path. */
private static final IgfsPath FILE = new IgfsPath("/file");
/** Shared IP finder. */
private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
/** Block size: 64 Kb. */
private static final int BLOCK_SIZE = 64 * 1024;
/** Total words in file. */
private static final int TOTAL_WORDS = 2 * 1024 * 1024;
/** Node count */
private static final int NODE_CNT = 4;
/** Repeat count. */
private static final int REPEAT_CNT = 10;
/** IGFS. */
private static IgniteFileSystem igfs;
/** {@inheritDoc} */
@Override protected void beforeTestsStarted() throws Exception {
for (int i = 0; i < NODE_CNT; i++) {
Ignite g = G.start(config(i));
if (i + 1 == NODE_CNT)
igfs = g.fileSystem("igfs");
}
}
/** {@inheritDoc} */
@Override protected void afterTestsStopped() throws Exception {
stopAllGrids(false);
}
/** {@inheritDoc} */
@Override protected void beforeTest() throws Exception {
igfs.format();
}
/**
* Create grid configuration.
*
* @param idx Node index.
* @return Grid configuration
*/
private IgniteConfiguration config(int idx) {
FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
igfsCfg.setDataCacheName("dataCache");
igfsCfg.setMetaCacheName("metaCache");
igfsCfg.setName("igfs");
igfsCfg.setBlockSize(BLOCK_SIZE);
igfsCfg.setDefaultMode(PRIMARY);
igfsCfg.setFragmentizerEnabled(false);
CacheConfiguration dataCacheCfg = new CacheConfiguration();
dataCacheCfg.setName("dataCache");
dataCacheCfg.setCacheMode(PARTITIONED);
dataCacheCfg.setAtomicityMode(TRANSACTIONAL);
dataCacheCfg.setWriteSynchronizationMode(FULL_SYNC);
dataCacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(1));
dataCacheCfg.setBackups(0);
CacheConfiguration metaCacheCfg = new CacheConfiguration();
metaCacheCfg.setName("metaCache");
metaCacheCfg.setCacheMode(REPLICATED);
metaCacheCfg.setAtomicityMode(TRANSACTIONAL);
dataCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
IgniteConfiguration cfg = new IgniteConfiguration();
TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
discoSpi.setIpFinder(IP_FINDER);
cfg.setDiscoverySpi(discoSpi);
cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg);
cfg.setFileSystemConfiguration(igfsCfg);
cfg.setGridName("node-" + idx);
return cfg;
}
/**
* Test task.
*
* @throws Exception If failed.
*/
public void testTask() throws Exception {
U.sleep(3000); // TODO: Sleep in order to wait for fragmentizing to finish.
for (int i = 0; i < REPEAT_CNT; i++) {
String arg = DICTIONARY[new Random(System.currentTimeMillis()).nextInt(DICTIONARY.length)];
generateFile(TOTAL_WORDS);
Long genLen = igfs.info(FILE).length();
IgniteBiTuple<Long, Integer> taskRes = igfs.execute(new Task(),
new IgfsStringDelimiterRecordResolver(" "), Collections.singleton(FILE), arg);
assert F.eq(genLen, taskRes.getKey());
assert F.eq(TOTAL_WORDS, taskRes.getValue());
}
}
/**
* Test task.
*
* @throws Exception If failed.
*/
public void testTaskAsync() throws Exception {
U.sleep(3000);
assertFalse(igfs.isAsync());
IgniteFileSystem igfsAsync = igfs.withAsync();
assertTrue(igfsAsync.isAsync());
for (int i = 0; i < REPEAT_CNT; i++) {
String arg = DICTIONARY[new Random(System.currentTimeMillis()).nextInt(DICTIONARY.length)];
generateFile(TOTAL_WORDS);
Long genLen = igfs.info(FILE).length();
assertNull(igfsAsync.execute(
new Task(), new IgfsStringDelimiterRecordResolver(" "), Collections.singleton(FILE), arg));
IgniteFuture<IgniteBiTuple<Long, Integer>> fut = igfsAsync.future();
assertNotNull(fut);
IgniteBiTuple<Long, Integer> taskRes = fut.get();
assert F.eq(genLen, taskRes.getKey());
assert F.eq(TOTAL_WORDS, taskRes.getValue());
}
igfsAsync.format();
IgniteFuture<?> fut = igfsAsync.future();
assertNotNull(fut);
fut.get();
}
/**
* Generate file with random data and provided argument.
*
* @param wordCnt Word count.
* @throws Exception If failed.
*/
private void generateFile(int wordCnt)
throws Exception {
Random rnd = new Random(System.currentTimeMillis());
try (OutputStreamWriter writer = new OutputStreamWriter(igfs.create(FILE, true))) {
int cnt = 0;
while (cnt < wordCnt) {
String word = DICTIONARY[rnd.nextInt(DICTIONARY.length)];
writer.write(word + " ");
cnt++;
}
}
}
/**
* Task.
*/
private static class Task extends IgfsTask<String, IgniteBiTuple<Long, Integer>> {
/** {@inheritDoc} */
@Override public IgfsJob createJob(IgfsPath path, IgfsFileRange range,
IgfsTaskArgs<String> args) {
return new Job();
}
/** {@inheritDoc} */
@Override public IgniteBiTuple<Long, Integer> reduce(List<ComputeJobResult> ress) {
long totalLen = 0;
int argCnt = 0;
for (ComputeJobResult res : ress) {
IgniteBiTuple<Long, Integer> res0 = (IgniteBiTuple<Long, Integer>)res.getData();
if (res0 != null) {
totalLen += res0.getKey();
argCnt += res0.getValue();
}
}
return F.t(totalLen, argCnt);
}
}
/**
* Job.
*/
private static class Job implements IgfsJob, Serializable {
@IgniteInstanceResource
private Ignite ignite;
@TaskSessionResource
private ComputeTaskSession ses;
@JobContextResource
private ComputeJobContext ctx;
/** {@inheritDoc} */
@Override public Object execute(IgniteFileSystem igfs, IgfsFileRange range, IgfsInputStream in)
throws IOException {
assert ignite != null;
assert ses != null;
assert ctx != null;
in.seek(range.start());
byte[] buf = new byte[(int)range.length()];
int totalRead = 0;
while (totalRead < buf.length) {
int b = in.read();
assert b != -1;
buf[totalRead++] = (byte)b;
}
String str = new String(buf);
String[] chunks = str.split(" ");
int ctr = 0;
for (String chunk : chunks) {
if (!chunk.isEmpty())
ctr++;
}
return F.t(range.length(), ctr);
}
/** {@inheritDoc} */
@Override public void cancel() {
// No-op.
}
}
}
| akuznetsov-gridgain/ignite | modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsTaskSelfTest.java | Java | apache-2.0 | 9,305 |
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/eh-frame.h"
namespace v8 {
namespace internal {
// DWARF register numbers for the ARM registers referenced by the unwinder.
static const int kR0DwarfCode = 0;
static const int kFpDwarfCode = 11;  // frame pointer (r11)
static const int kSpDwarfCode = 13;  // stack pointer (r13)
static const int kLrDwarfCode = 14;  // link register (r14)
// Instruction/data alignment factors encoded in the CIE.
const int EhFrameConstants::kCodeAlignmentFactor = 4;
const int EhFrameConstants::kDataAlignmentFactor = -4;
// Emit the CIE's return-address column: on ARM the return address is in lr.
void EhFrameWriter::WriteReturnAddressRegisterCode() {
  WriteULeb128(kLrDwarfCode);
}
// Record the initial unwind state in the CIE: the CFA is computed from fp
// with offset 0, and lr is marked as not modified.
void EhFrameWriter::WriteInitialStateInCie() {
  SetBaseAddressRegisterAndOffset(fp, 0);
  RecordRegisterNotModified(lr);
}
// static
// Translate a V8 register code into the DWARF register number used in
// .eh_frame; only the registers the writer ever references are supported.
int EhFrameWriter::RegisterToDwarfCode(Register name) {
  const int code = name.code();
  if (code == kRegCode_fp) return kFpDwarfCode;
  if (code == kRegCode_sp) return kSpDwarfCode;
  if (code == kRegCode_lr) return kLrDwarfCode;
  if (code == kRegCode_r0) return kR0DwarfCode;
  UNIMPLEMENTED();
  return -1;
}
#ifdef ENABLE_DISASSEMBLER
// static
// Human-readable name of a DWARF register code, for disassembler output.
const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
  if (code == kFpDwarfCode) return "fp";
  if (code == kSpDwarfCode) return "sp";
  if (code == kLrDwarfCode) return "lr";
  UNIMPLEMENTED();
  return nullptr;
}
#endif
} // namespace internal
} // namespace v8
| weolar/miniblink49 | v8_7_5/src/arm/eh-frame-arm.cc | C++ | apache-2.0 | 1,416 |
/*-------------------------------------------------------------------------
*
* openbsd.h
* port-specific prototypes for OpenBSD
*
* Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/port/dynloader/openbsd.h,v 1.17 2007/01/05 22:19:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#ifndef PORT_PROTOS_H
#define PORT_PROTOS_H
#include <sys/types.h>
#include <nlist.h>
#include <link.h>
#include <dlfcn.h>
#include "utils/dynamic_loader.h"
/*
 * Dynamic Loader on NetBSD 1.0.
 *
 * this dynamic loader uses the system dynamic loading interface for shared
 * libraries (ie. dlopen/dlsym/dlclose). The user must specify a shared
 * library as the file to be dynamically loaded.
 *
 * agc - I know this is all a bit crufty, but it does work, is fairly
 *	 portable, and works (the stipulation that the d.l. function must
 *	 begin with an underscore is fairly tricky, and some versions of
 *	 NetBSD (like 1.0, and 1.0A pre June 1995) have no dlerror.)
 */
/*
 * In some older systems, the RTLD_NOW flag isn't defined and the mode
 * argument to dlopen must always be 1.  The RTLD_GLOBAL flag is wanted
 * if available, but it doesn't exist everywhere.
 * If it doesn't exist, set it to 0 so it has no effect.
 */
#ifndef RTLD_NOW
#define RTLD_NOW 1
#endif
#ifndef RTLD_GLOBAL
#define RTLD_GLOBAL 0
#endif
/* Map the generic pg_dl* entry points onto the BSD 4.4-derived wrappers
 * declared below. */
#define pg_dlopen(f)	BSD44_derived_dlopen((f), RTLD_NOW | RTLD_GLOBAL)
#define pg_dlsym BSD44_derived_dlsym
#define pg_dlclose BSD44_derived_dlclose
#define pg_dlerror BSD44_derived_dlerror
/* Implemented in the matching port source file. */
char *BSD44_derived_dlerror(void);
void *BSD44_derived_dlopen(const char *filename, int num);
void *BSD44_derived_dlsym(void *handle, const char *name);
void BSD44_derived_dlclose(void *handle);
#endif   /* PORT_PROTOS_H */
| lpetrov-pivotal/gpdb | src/backend/port/dynloader/openbsd.h | C | apache-2.0 | 1,945 |
/****************************************************************************
*
* Copyright 2019 Samsung Electronics All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*
****************************************************************************/
// Copyright 2017 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "randombytes_default.h"
#include "esp_system.h"
/* Fill `buf` with `size` random bytes from the ESP32 hardware RNG.  Each
 * call to esp_random() yields a 32-bit word of which only the low byte is
 * kept (implicit unsigned truncation in the original; explicit cast here). */
static void randombytes_esp32_random_buf(void *const buf, const size_t size)
{
	uint8_t *out = (uint8_t *)buf;
	uint8_t *const end = out + size;
	while (out < end) {
		*out++ = (uint8_t)esp_random();
	}
}
/* Name reported for this custom libsodium RNG implementation. */
static const char *randombytes_esp32_implementation_name(void)
{
	return "esp32";
}
/*
Plug the ESP32 hardware RNG into libsodium's custom RNG support, as per
https://download.libsodium.org/doc/advanced/custom_rng.html
Note that this RNG is selected by default (see randombytes_default.h), so there
is no need to call randombytes_set_implementation().
*/
const struct randombytes_implementation randombytes_esp32_implementation = {
	.implementation_name = randombytes_esp32_implementation_name,
	.random = esp_random,                  /* 32-bit words from the HW RNG */
	.stir = NULL,                          /* no reseeding hook provided */
	.uniform = NULL,                       /* use libsodium's default */
	.buf = randombytes_esp32_random_buf,
	.close = NULL,                         /* nothing to release */
};
| jsdosa/TizenRT | external/libsodium/port/randombytes_esp32.c | C | apache-2.0 | 2,244 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.java.summarize.aggregation;
import org.apache.flink.annotation.Internal;
import org.apache.flink.api.java.tuple.Tuple;
/**
 * Aggregates tuples by delegating each tuple position ("column") to its own
 * per-column {@link Aggregator}.
 */
@Internal
public class TupleSummaryAggregator<R extends Tuple> implements Aggregator<Tuple,R> {
	private static final long serialVersionUID = 1L;
	private final Aggregator[] columnAggregators;
	public TupleSummaryAggregator(Aggregator[] columnAggregators) {
		this.columnAggregators = columnAggregators;
	}
	@Override
	@SuppressWarnings("unchecked")
	public void aggregate(Tuple value) {
		// Feed each field of the tuple into the aggregator for that position.
		int numColumns = columnAggregators.length;
		for (int pos = 0; pos < numColumns; pos++) {
			columnAggregators[pos].aggregate(value.getField(pos));
		}
	}
	@Override
	@SuppressWarnings("unchecked")
	public void combine(Aggregator<Tuple, R> other) {
		// Merge column-wise with the corresponding aggregator of `other`.
		TupleSummaryAggregator otherAggregator = (TupleSummaryAggregator) other;
		int numColumns = columnAggregators.length;
		for (int pos = 0; pos < numColumns; pos++) {
			columnAggregators[pos].combine(otherAggregator.columnAggregators[pos]);
		}
	}
	@Override
	@SuppressWarnings("unchecked")
	public R result() {
		try {
			// Build a tuple of the right arity and fill each position with
			// the corresponding column result.
			Class tupleClass = Tuple.getTupleClass(columnAggregators.length);
			R summary = (R) tupleClass.newInstance();
			int numColumns = columnAggregators.length;
			for (int pos = 0; pos < numColumns; pos++) {
				summary.setField(columnAggregators[pos].result(), pos);
			}
			return summary;
		}
		catch (InstantiationException | IllegalAccessException e) {
			throw new RuntimeException("Unexpected error instantiating Tuple class for aggregation results", e);
		}
	}
}
| WangTaoTheTonic/flink | flink-java/src/main/java/org/apache/flink/api/java/summarize/aggregation/TupleSummaryAggregator.java | Java | apache-2.0 | 2,389 |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.client.watcher;
import org.elasticsearch.client.Validatable;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.xcontent.ToXContentObject;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentType;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
/**
 * An execute watch request to execute a watch by id or inline.
 */
public class ExecuteWatchRequest implements Validatable, ToXContentObject {
    /** How an individual action of the watch is handled during the execution. */
    public enum ActionExecutionMode {
        SIMULATE,
        FORCE_SIMULATE,
        EXECUTE,
        FORCE_EXECUTE,
        SKIP
    }
    private final String id;
    private final BytesReference watchContent;
    private boolean ignoreCondition = false;
    private boolean recordExecution = false;
    private boolean debug = false;
    @Nullable
    private BytesReference triggerData = null;
    @Nullable
    private BytesReference alternativeInput = null;
    private Map<String, ActionExecutionMode> actionModes = new HashMap<>();
    /**
     * Execute an existing watch on the cluster
     *
     * @param id the id of the watch to execute
     */
    public static ExecuteWatchRequest byId(String id) {
        return new ExecuteWatchRequest(Objects.requireNonNull(id, "Watch id cannot be null"), null);
    }
    /**
     * Execute an inline watch
     * @param watchContent the JSON definition of the watch
     */
    public static ExecuteWatchRequest inline(String watchContent) {
        return new ExecuteWatchRequest(null, Objects.requireNonNull(watchContent, "Watch content cannot be null"));
    }
    private ExecuteWatchRequest(String id, String watchContent) {
        this.id = id;
        this.watchContent = watchContent == null ? null : new BytesArray(watchContent);
    }
    /** @return the watch id, or null for an inline watch */
    public String getId() {
        return this.id;
    }
    /**
     * @param ignoreCondition set if the condition for this execution be ignored
     */
    public void setIgnoreCondition(boolean ignoreCondition) {
        this.ignoreCondition = ignoreCondition;
    }
    public boolean ignoreCondition() {
        return ignoreCondition;
    }
    /**
     * @param recordExecution Sets if this execution be recorded in the history index
     */
    public void setRecordExecution(boolean recordExecution) {
        // Inline watches have no id, so their executions cannot be recorded.
        if (watchContent != null && recordExecution) {
            throw new IllegalArgumentException("The execution of an inline watch cannot be recorded");
        }
        this.recordExecution = recordExecution;
    }
    public boolean recordExecution() {
        return recordExecution;
    }
    /**
     * @param alternativeInput Sets the alternative input, a JSON string replacing the watch's input
     */
    public void setAlternativeInput(String alternativeInput) {
        this.alternativeInput = new BytesArray(alternativeInput);
    }
    /**
     * @param data A JSON string representing the data that should be associated with the trigger event.
     */
    public void setTriggerData(String data) {
        this.triggerData = new BytesArray(data);
    }
    /**
     * Sets the action execution mode for the give action (identified by its id).
     *
     * @param actionId the action id.
     * @param actionMode the execution mode of the action.
     */
    public void setActionMode(String actionId, ActionExecutionMode actionMode) {
        Objects.requireNonNull(actionId, "actionId cannot be null");
        // Consistency fix: previously a null mode was silently stored and only
        // failed later during request serialization; fail fast like actionId.
        Objects.requireNonNull(actionMode, "actionMode cannot be null");
        actionModes.put(actionId, actionMode);
    }
    public Map<String, ActionExecutionMode> getActionModes() {
        return this.actionModes;
    }
    /**
     * @param debug indicates whether the watch should execute in debug mode. In debug mode the
     *              returned watch record will hold the execution {@code vars}
     */
    public void setDebug(boolean debug) {
        this.debug = debug;
    }
    public boolean isDebug() {
        return debug;
    }
    @Override
    public String toString() {
        return "execute[" + id + "]";
    }
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        // Each section is optional; only emit what was explicitly configured.
        if (triggerData != null) {
            builder.rawField("trigger_data", triggerData.streamInput(), XContentType.JSON);
        }
        if (alternativeInput != null) {
            builder.rawField("alternative_input", alternativeInput.streamInput(), XContentType.JSON);
        }
        if (!actionModes.isEmpty()) {
            builder.field("action_modes", actionModes);
        }
        if (watchContent != null) {
            builder.rawField("watch", watchContent.streamInput(), XContentType.JSON);
        }
        builder.endObject();
        return builder;
    }
}
| jmluy/elasticsearch | client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/ExecuteWatchRequest.java | Java | apache-2.0 | 5,217 |
/*
* Copyright 2012 Amadeus s.a.s.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var Aria = require("../../Aria");
var ariaUtilsData = require("../../utils/Data");
var ariaUtilsDom = require("../../utils/Dom");
/**
* Template script for the default ErrorListTemplate
*/
module.exports = Aria.tplScriptDefinition({
    $classpath : "aria.widgets.errorlist.ErrorListTemplateScript",
    $statics : {
        /**
         * Links each type of message with an icon. The order in the array is important as the first entry for which
         * messages of that type exist in the messages list is used. Since it depends on aria.utils.Data wait for it to
         * be loaded by the dependency manager
         * @type Array
         */
        ICONS : [],
        /**
         * Icon used if there is no matching icon in ICONS.
         * @type String
         */
        DEFAULT_ICON : "std:missing"
    },
    $prototype : {
        /**
         * Initialize this class building the icons object. It's done here so we are sure that aria.utils.Data is
         * already loaded
         * @param {aria.widgets.errorlist.ErrorListTemplateScript} proto Class prototype
         */
        $init : function (proto) {
            // Using push instead of resetting the reference because items are not copied from proto but from the
            // parameter of tplScriptDefinition directly
            proto.ICONS.push({
                type : ariaUtilsData.TYPE_ERROR,
                icon : "std:error"
            }, {
                type : ariaUtilsData.TYPE_WARNING,
                icon : "std:warning"
            }, {
                type : ariaUtilsData.TYPE_INFO,
                icon : "std:info"
            }, {
                type : ariaUtilsData.TYPE_FATAL,
                icon : "std:error"
            }, {
                type : ariaUtilsData.TYPE_NOTYPE,
                icon : "std:info"
            }, {
                type : ariaUtilsData.TYPE_CRITICAL_WARNING,
                icon : "std:warning"
            }, {
                type : ariaUtilsData.TYPE_CONFIRMATION,
                icon : "std:confirm"
            });
        },
        /**
         * React to module events
         * @param {Event} evt
         */
        onModuleEvent : function (evt) {
            if (evt.name == "messagesChanged") {
                // Bring the field that triggered the change into view before refreshing.
                if (evt.domRef) {
                    ariaUtilsDom.scrollIntoView(evt.domRef);
                }
                this.$refresh();
            }
        },
        /**
         * Click on an error message
         * @param {Event} evt Not used
         * @param {Object} msg Error message
         */
        clickOnMessage : function (evt, msg) {
            // Delegate to the module controller, which focuses the field the message refers to.
            this.moduleCtrl.focusField(msg);
        },
        /**
         * Get the icon name for the current message type
         * @return {String} Icon name
         */
        getIcon : function () {
            // First ICONS entry whose message type has at least one message wins;
            // otherwise fall back to DEFAULT_ICON.
            var messageTypes = this.data.messageTypes;
            var res = this.DEFAULT_ICON;
            var icons = this.ICONS;
            for (var i = 0, l = icons.length; i < l; i++) {
                var curIcon = icons[i];
                if (messageTypes[curIcon.type] > 0) {
                    res = curIcon.icon;
                    break;
                }
            }
            return res;
        },
        /**
         * Get the message to be displayed as label of the error list item
         * @param {Object} msg Error message
         * @return {String} localized message
         */
        getDisplayMessage : function (msg) {
            // The explicit `=== 0` check keeps a valid code of 0 displayed.
            if (this.data.displayCodes && (msg.code || msg.code === 0)) {
                return msg.localizedMessage + " (" + msg.code + ")";
            }
            return msg.localizedMessage;
        }
    }
});
| lsimone/ariatemplates | src/aria/widgets/errorlist/ErrorListTemplateScript.js | JavaScript | apache-2.0 | 4,296 |
//
// UIDevice+ADJAdditions.h
// Adjust
//
// Created by Christian Wellenbrock on 23.07.12.
// Copyright (c) 2012-2014 adjust GmbH. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
#import "ADJActivityHandler.h"
// Category adding Adjust SDK helpers to UIDevice: device identifiers,
// device metadata, and iAd attribution hand-off.
@interface UIDevice(ADJAdditions)
// Identifier accessors; exact semantics are defined in the implementation file.
- (BOOL)adjTrackingEnabled;
- (NSString *)adjIdForAdvertisers;
- (NSString *)adjFbAttributionId;
- (NSString *)adjMacAddress;
// Device metadata (model/type strings).
- (NSString *)adjDeviceType;
- (NSString *)adjDeviceName;
- (NSString *)adjCreateUuid;
- (NSString *)adjVendorId;
// Forwards iAd attribution info to the given activity handler.
- (void)adjSetIad:(ADJActivityHandler *)activityHandler;
@end
| BlueRiverInteractive/robovm-ios-bindings | adjust/libproj/adjust/adjust/ADJAdditions/UIDevice+ADJAdditions.h | C | apache-2.0 | 587 |
/**
* Licensed to Apereo under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright ownership. Apereo
* licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use
* this file except in compliance with the License. You may obtain a copy of the License at the
* following location:
*
* <p>http://www.apache.org/licenses/LICENSE-2.0
*
* <p>Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apereo.portal.security;
import org.apereo.portal.AuthorizationException;
/**
 * An interface for retrieving <code>IAuthorizationPrincipals</code> and their <code>Permissions
 * </code> on behalf of a <code>Permission</code> owner.
 */
public interface IPermissionManager {
    /**
     * Returns <code>IAuthorizationPrincipals</code> granted <code>Permissions</code> by the owner
     * of this <code>IPermissionManager</code>, for the given <code>activity</code> and <code>target
     * </code>. If either parameter is null, it is ignored.
     *
     * @return IAuthorizationPrincipal[]
     * @param activity String - the Permission activity
     * @param target String - the Permission target
     */
    public IAuthorizationPrincipal[] getAuthorizedPrincipals(String activity, String target)
            throws AuthorizationException;
    /** @return java.lang.String the name of the owner these permissions belong to */
    public String getOwner();
    /**
     * Obtains all permissions records with the specified target.
     *
     * @param target the Permission target
     * @return all matching permission records
     */
    IPermission[] getPermissionsForTarget(String target);
    /**
     * Retrieve an array of IPermission objects based on the given parameters. Any null parameters
     * will be ignored.
     *
     * @param activity String
     * @param target String
     * @return Permission[]
     * @exception AuthorizationException
     */
    public IPermission[] getPermissions(String activity, String target)
            throws AuthorizationException;
}
| ChristianMurphy/uPortal | uPortal-security/uPortal-security-core/src/main/java/org/apereo/portal/security/IPermissionManager.java | Java | apache-2.0 | 2,232 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.connector.elasticsearch.sink;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.util.TestLoggerExtension;
import org.apache.http.HttpHost;
import org.junit.jupiter.api.DynamicTest;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestFactory;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.extension.ExtendWith;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertThrows;
/**
 * Tests for {@link ElasticsearchSinkBuilderBase}.
 *
 * <p>Concrete subclasses provide the version-specific builder instances through {@link
 * #createEmptyBuilder()} and {@link #createMinimalBuilder()}.
 */
@ExtendWith(TestLoggerExtension.class)
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
abstract class ElasticsearchSinkBuilderBaseTest<B extends ElasticsearchSinkBuilderBase<Object, B>> {

    /** Each builder configured below holds a valid option combination and must build cleanly. */
    @TestFactory
    Stream<DynamicTest> testValidBuilders() {
        Stream<B> validBuilders =
                Stream.of(
                        createMinimalBuilder(),
                        createMinimalBuilder()
                                .setDeliveryGuarantee(DeliveryGuarantee.AT_LEAST_ONCE),
                        createMinimalBuilder()
                                .setBulkFlushBackoffStrategy(FlushBackoffType.CONSTANT, 1, 1),
                        createMinimalBuilder()
                                .setConnectionUsername("username")
                                .setConnectionPassword("password"));
        return DynamicTest.stream(
                validBuilders,
                ElasticsearchSinkBuilderBase::toString,
                builder -> assertDoesNotThrow(builder::build));
    }

    /** EXACTLY_ONCE is rejected immediately when the guarantee is set, not at build() time. */
    @Test
    void testThrowIfExactlyOnceConfigured() {
        assertThrows(
                IllegalStateException.class,
                () -> createMinimalBuilder().setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE));
    }

    /** Building with an emitter but no hosts must fail fast with a NullPointerException. */
    @Test
    void testThrowIfHostsNotSet() {
        assertThrows(
                NullPointerException.class,
                () -> createEmptyBuilder().setEmitter((element, indexer, context) -> {}).build());
    }

    /** Building with hosts but no emitter must fail fast with a NullPointerException. */
    @Test
    void testThrowIfEmitterNotSet() {
        assertThrows(
                NullPointerException.class,
                () -> createEmptyBuilder().setHosts(new HttpHost("localhost:3000")).build());
    }

    /** Negative timeout values are rejected by each of the three timeout setters. */
    @Test
    void testThrowIfSetInvalidTimeouts() {
        assertThrows(
                IllegalStateException.class,
                () -> createEmptyBuilder().setConnectionRequestTimeout(-1).build());
        assertThrows(
                IllegalStateException.class,
                () -> createEmptyBuilder().setConnectionTimeout(-1).build());
        assertThrows(
                IllegalStateException.class,
                () -> createEmptyBuilder().setSocketTimeout(-1).build());
    }

    /** @return a builder with no options configured at all. */
    abstract B createEmptyBuilder();

    /** @return a builder with the minimal set of options required for build() to succeed. */
    abstract B createMinimalBuilder();
}
| wwjiang007/flink | flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchSinkBuilderBaseTest.java | Java | apache-2.0 | 3,686 |
/*! ******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.ui.trans.steps.addsequence;
import org.eclipse.swt.SWT;
import org.eclipse.swt.custom.CCombo;
import org.eclipse.swt.events.ModifyEvent;
import org.eclipse.swt.events.ModifyListener;
import org.eclipse.swt.events.SelectionAdapter;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.events.ShellAdapter;
import org.eclipse.swt.events.ShellEvent;
import org.eclipse.swt.layout.FormAttachment;
import org.eclipse.swt.layout.FormData;
import org.eclipse.swt.layout.FormLayout;
import org.eclipse.swt.widgets.Button;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Event;
import org.eclipse.swt.widgets.Group;
import org.eclipse.swt.widgets.Label;
import org.eclipse.swt.widgets.Listener;
import org.eclipse.swt.widgets.MessageBox;
import org.eclipse.swt.widgets.Shell;
import org.eclipse.swt.widgets.Text;
import org.pentaho.di.core.Const;
import org.pentaho.di.core.database.Database;
import org.pentaho.di.core.database.DatabaseMeta;
import org.pentaho.di.i18n.BaseMessages;
import org.pentaho.di.trans.TransMeta;
import org.pentaho.di.trans.step.BaseStepMeta;
import org.pentaho.di.trans.step.StepDialogInterface;
import org.pentaho.di.trans.steps.addsequence.AddSequenceMeta;
import org.pentaho.di.ui.core.dialog.EnterSelectionDialog;
import org.pentaho.di.ui.core.dialog.ErrorDialog;
import org.pentaho.di.ui.core.widget.TextVar;
import org.pentaho.di.ui.trans.step.BaseStepDialog;
/**
 * Step dialog for the "Add sequence" transformation step.
 *
 * <p>The dialog lets the user pick one of two mutually exclusive value sources: a database
 * sequence (connection + schema + sequence name) or a Kettle in-memory counter (counter name,
 * start, increment, maximum).  Checking one mode's checkbox unchecks the other and
 * enables/disables the corresponding widget group.
 */
public class AddSequenceDialog extends BaseStepDialog implements StepDialogInterface {
  private static Class<?> PKG = AddSequenceMeta.class; // for i18n purposes, needed by Translator2!!

  // "Name of value" line widgets.
  private Label wlValuename;
  private Text wValuename;
  // Group boxes (and their layout data) for the two modes: database sequence vs. counter.
  private Group gDatabase, gCounter;
  private FormData fdDatabase, fdCounter;
  // Database-mode widgets.
  private Label wlUseDatabase;
  private Button wUseDatabase;
  private Button wbSequence;
  private FormData fdbSequence;
  private Label wlConnection;
  private CCombo wConnection;
  private Button wbnConnection, wbeConnection, wbwConnection;
  private Label wlSchema;
  private TextVar wSchema;
  private FormData fdbSchema;
  private Button wbSchema;
  private Label wlSeqname;
  private TextVar wSeqname;
  // Counter-mode widgets.
  private Label wlUseCounter;
  private Button wUseCounter;
  private Label wlCounterName;
  private Text wCounterName;
  private Label wlStartAt;
  private TextVar wStartAt;
  private Label wlIncrBy;
  private TextVar wIncrBy;
  private Label wlMaxVal;
  private TextVar wMaxVal;
  // The step metadata edited by this dialog.
  private AddSequenceMeta input;

  /**
   * @param parent the parent shell
   * @param in the step metadata to edit (an {@link AddSequenceMeta})
   * @param transMeta the transformation the step belongs to
   * @param sname the step name
   */
  public AddSequenceDialog( Shell parent, Object in, TransMeta transMeta, String sname ) {
    super( parent, (BaseStepMeta) in, transMeta, sname );
    input = (AddSequenceMeta) in;
  }

  /**
   * Builds and opens the dialog, then runs the SWT event loop until it is closed.
   *
   * @return the (possibly changed) step name, or null if the user cancelled
   */
  public String open() {
    Shell parent = getParent();
    Display display = parent.getDisplay();
    shell = new Shell( parent, SWT.DIALOG_TRIM | SWT.RESIZE | SWT.MAX | SWT.MIN );
    props.setLook( shell );
    setShellImage( shell, input );
    // Any edit in a text widget marks the step metadata as changed.
    ModifyListener lsMod = new ModifyListener() {
      public void modifyText( ModifyEvent e ) {
        input.setChanged();
      }
    };
    changed = input.hasChanged();
    FormLayout formLayout = new FormLayout();
    formLayout.marginWidth = Const.FORM_MARGIN;
    formLayout.marginHeight = Const.FORM_MARGIN;
    shell.setLayout( formLayout );
    shell.setText( BaseMessages.getString( PKG, "AddSequenceDialog.Shell.Title" ) );
    int middle = props.getMiddlePct();
    int margin = Const.MARGIN;
    // Stepname line
    wlStepname = new Label( shell, SWT.RIGHT );
    wlStepname.setText( BaseMessages.getString( PKG, "AddSequenceDialog.StepName.Label" ) );
    props.setLook( wlStepname );
    fdlStepname = new FormData();
    fdlStepname.left = new FormAttachment( 0, 0 );
    fdlStepname.right = new FormAttachment( middle, -margin );
    fdlStepname.top = new FormAttachment( 0, margin );
    wlStepname.setLayoutData( fdlStepname );
    wStepname = new Text( shell, SWT.SINGLE | SWT.LEFT | SWT.BORDER );
    wStepname.setText( stepname );
    props.setLook( wStepname );
    wStepname.addModifyListener( lsMod );
    fdStepname = new FormData();
    fdStepname.left = new FormAttachment( middle, 0 );
    fdStepname.top = new FormAttachment( 0, margin );
    fdStepname.right = new FormAttachment( 100, 0 );
    wStepname.setLayoutData( fdStepname );
    // Valuename line
    wlValuename = new Label( shell, SWT.RIGHT );
    wlValuename.setText( BaseMessages.getString( PKG, "AddSequenceDialog.Valuename.Label" ) );
    props.setLook( wlValuename );
    FormData fdlValuename = new FormData();
    fdlValuename.left = new FormAttachment( 0, 0 );
    fdlValuename.top = new FormAttachment( wStepname, margin );
    fdlValuename.right = new FormAttachment( middle, -margin );
    wlValuename.setLayoutData( fdlValuename );
    wValuename = new Text( shell, SWT.SINGLE | SWT.LEFT | SWT.BORDER );
    wValuename.setText( "" );
    props.setLook( wValuename );
    wValuename.addModifyListener( lsMod );
    FormData fdValuename = new FormData();
    fdValuename.left = new FormAttachment( middle, 0 );
    fdValuename.top = new FormAttachment( wStepname, margin );
    fdValuename.right = new FormAttachment( 100, 0 );
    wValuename.setLayoutData( fdValuename );
    // Database group: connection / schema / sequence name.
    gDatabase = new Group( shell, SWT.NONE );
    gDatabase.setText( BaseMessages.getString( PKG, "AddSequenceDialog.UseDatabaseGroup.Label" ) );
    FormLayout databaseLayout = new FormLayout();
    databaseLayout.marginHeight = margin;
    databaseLayout.marginWidth = margin;
    gDatabase.setLayout( databaseLayout );
    props.setLook( gDatabase );
    fdDatabase = new FormData();
    fdDatabase.left = new FormAttachment( 0, 0 );
    fdDatabase.right = new FormAttachment( 100, 0 );
    fdDatabase.top = new FormAttachment( wValuename, 2 * margin );
    gDatabase.setLayoutData( fdDatabase );
    wlUseDatabase = new Label( gDatabase, SWT.RIGHT );
    wlUseDatabase.setText( BaseMessages.getString( PKG, "AddSequenceDialog.UseDatabase.Label" ) );
    props.setLook( wlUseDatabase );
    FormData fdlUseDatabase = new FormData();
    fdlUseDatabase.left = new FormAttachment( 0, 0 );
    fdlUseDatabase.top = new FormAttachment( 0, 0 );
    fdlUseDatabase.right = new FormAttachment( middle, -margin );
    wlUseDatabase.setLayoutData( fdlUseDatabase );
    wUseDatabase = new Button( gDatabase, SWT.CHECK );
    props.setLook( wUseDatabase );
    wUseDatabase.setToolTipText( BaseMessages.getString( PKG, "AddSequenceDialog.UseDatabase.Tooltip" ) );
    FormData fdUseDatabase = new FormData();
    fdUseDatabase.left = new FormAttachment( middle, 0 );
    fdUseDatabase.top = new FormAttachment( 0, 0 );
    wUseDatabase.setLayoutData( fdUseDatabase );
    // The two mode checkboxes are mutually exclusive: checking one unchecks the other.
    wUseDatabase.addSelectionListener( new SelectionAdapter() {
      public void widgetSelected( SelectionEvent e ) {
        wUseCounter.setSelection( !wUseDatabase.getSelection() );
        enableFields();
        input.setChanged();
      }
    } );
    // Connection line
    wlConnection = new Label( gDatabase, SWT.RIGHT );
    wbwConnection = new Button( gDatabase, SWT.PUSH );
    wbnConnection = new Button( gDatabase, SWT.PUSH );
    wbeConnection = new Button( gDatabase, SWT.PUSH );
    wConnection =
      addConnectionLine(
        gDatabase, wUseDatabase, middle, margin, wlConnection, wbwConnection, wbnConnection, wbeConnection );
    // Preselect the only connection when the step has none configured yet.
    if ( input.getDatabase() == null && transMeta.nrDatabases() == 1 ) {
      wConnection.select( 0 );
    }
    wConnection.addModifyListener( lsMod );
    wConnection.addSelectionListener( new SelectionAdapter() {
      public void widgetSelected( SelectionEvent e ) {
        activeSequence();
      }
    } );
    // Schema line...
    wlSchema = new Label( gDatabase, SWT.RIGHT );
    wlSchema.setText( BaseMessages.getString( PKG, "AddSequenceDialog.TargetSchema.Label" ) );
    props.setLook( wlSchema );
    FormData fdlSchema = new FormData();
    fdlSchema.left = new FormAttachment( 0, 0 );
    fdlSchema.right = new FormAttachment( middle, -margin );
    fdlSchema.top = new FormAttachment( wConnection, 2 * margin );
    wlSchema.setLayoutData( fdlSchema );
    wbSchema = new Button( gDatabase, SWT.PUSH | SWT.CENTER );
    props.setLook( wbSchema );
    wbSchema.setText( BaseMessages.getString( PKG, "AddSequenceDialog.GetSchemas.Label" ) );
    fdbSchema = new FormData();
    fdbSchema.top = new FormAttachment( wConnection, 2 * margin );
    fdbSchema.right = new FormAttachment( 100, 0 );
    wbSchema.setLayoutData( fdbSchema );
    wSchema = new TextVar( transMeta, gDatabase, SWT.SINGLE | SWT.LEFT | SWT.BORDER );
    props.setLook( wSchema );
    wSchema.addModifyListener( lsMod );
    FormData fdSchema = new FormData();
    fdSchema.left = new FormAttachment( middle, 0 );
    fdSchema.top = new FormAttachment( wConnection, 2 * margin );
    fdSchema.right = new FormAttachment( wbSchema, -margin );
    wSchema.setLayoutData( fdSchema );
    // Seqname line
    wlSeqname = new Label( gDatabase, SWT.RIGHT );
    wlSeqname.setText( BaseMessages.getString( PKG, "AddSequenceDialog.Seqname.Label" ) );
    props.setLook( wlSeqname );
    FormData fdlSeqname = new FormData();
    fdlSeqname.left = new FormAttachment( 0, 0 );
    fdlSeqname.right = new FormAttachment( middle, -margin );
    fdlSeqname.top = new FormAttachment( wbSchema, margin );
    wlSeqname.setLayoutData( fdlSeqname );
    wbSequence = new Button( gDatabase, SWT.PUSH | SWT.CENTER );
    props.setLook( wbSequence );
    wbSequence.setText( BaseMessages.getString( PKG, "AddSequenceDialog.GetSequences.Label" ) );
    fdbSequence = new FormData();
    fdbSequence.right = new FormAttachment( 100, -margin );
    fdbSequence.top = new FormAttachment( wbSchema, margin );
    wbSequence.setLayoutData( fdbSequence );
    wSeqname = new TextVar( transMeta, gDatabase, SWT.SINGLE | SWT.LEFT | SWT.BORDER );
    wSeqname.setText( "" );
    props.setLook( wSeqname );
    wSeqname.addModifyListener( lsMod );
    FormData fdSeqname = new FormData();
    fdSeqname.left = new FormAttachment( middle, 0 );
    fdSeqname.top = new FormAttachment( wbSchema, margin );
    fdSeqname.right = new FormAttachment( wbSequence, -margin );
    wSeqname.setLayoutData( fdSeqname );
    // Counter group: counter name / start / increment / maximum.
    gCounter = new Group( shell, SWT.NONE );
    gCounter.setText( BaseMessages.getString( PKG, "AddSequenceDialog.UseCounterGroup.Label" ) );
    FormLayout counterLayout = new FormLayout();
    counterLayout.marginHeight = margin;
    counterLayout.marginWidth = margin;
    gCounter.setLayout( counterLayout );
    props.setLook( gCounter );
    fdCounter = new FormData();
    fdCounter.left = new FormAttachment( 0, 0 );
    fdCounter.right = new FormAttachment( 100, 0 );
    fdCounter.top = new FormAttachment( gDatabase, 2 * margin );
    gCounter.setLayoutData( fdCounter );
    wlUseCounter = new Label( gCounter, SWT.RIGHT );
    wlUseCounter.setText( BaseMessages.getString( PKG, "AddSequenceDialog.UseCounter.Label" ) );
    props.setLook( wlUseCounter );
    FormData fdlUseCounter = new FormData();
    fdlUseCounter.left = new FormAttachment( 0, 0 );
    fdlUseCounter.top = new FormAttachment( wSeqname, margin );
    fdlUseCounter.right = new FormAttachment( middle, -margin );
    wlUseCounter.setLayoutData( fdlUseCounter );
    wUseCounter = new Button( gCounter, SWT.CHECK );
    props.setLook( wUseCounter );
    wUseCounter.setToolTipText( BaseMessages.getString( PKG, "AddSequenceDialog.UseCounter.Tooltip" ) );
    FormData fdUseCounter = new FormData();
    fdUseCounter.left = new FormAttachment( middle, 0 );
    fdUseCounter.top = new FormAttachment( wSeqname, margin );
    wUseCounter.setLayoutData( fdUseCounter );
    // Mirror image of the wUseDatabase listener above.
    wUseCounter.addSelectionListener( new SelectionAdapter() {
      public void widgetSelected( SelectionEvent e ) {
        wUseDatabase.setSelection( !wUseCounter.getSelection() );
        enableFields();
        input.setChanged();
      }
    } );
    // CounterName line
    wlCounterName = new Label( gCounter, SWT.RIGHT );
    wlCounterName.setText( BaseMessages.getString( PKG, "AddSequenceDialog.CounterName.Label" ) );
    props.setLook( wlCounterName );
    FormData fdlCounterName = new FormData();
    fdlCounterName.left = new FormAttachment( 0, 0 );
    fdlCounterName.right = new FormAttachment( middle, -margin );
    fdlCounterName.top = new FormAttachment( wUseCounter, margin );
    wlCounterName.setLayoutData( fdlCounterName );
    wCounterName = new Text( gCounter, SWT.SINGLE | SWT.LEFT | SWT.BORDER );
    wCounterName.setText( "" );
    props.setLook( wCounterName );
    wCounterName.addModifyListener( lsMod );
    FormData fdCounterName = new FormData();
    fdCounterName.left = new FormAttachment( middle, 0 );
    fdCounterName.top = new FormAttachment( wUseCounter, margin );
    fdCounterName.right = new FormAttachment( 100, 0 );
    wCounterName.setLayoutData( fdCounterName );
    // StartAt line
    wlStartAt = new Label( gCounter, SWT.RIGHT );
    wlStartAt.setText( BaseMessages.getString( PKG, "AddSequenceDialog.StartAt.Label" ) );
    props.setLook( wlStartAt );
    FormData fdlStartAt = new FormData();
    fdlStartAt.left = new FormAttachment( 0, 0 );
    fdlStartAt.right = new FormAttachment( middle, -margin );
    fdlStartAt.top = new FormAttachment( wCounterName, margin );
    wlStartAt.setLayoutData( fdlStartAt );
    wStartAt = new TextVar( transMeta, gCounter, SWT.SINGLE | SWT.LEFT | SWT.BORDER );
    wStartAt.setText( "" );
    props.setLook( wStartAt );
    wStartAt.addModifyListener( lsMod );
    FormData fdStartAt = new FormData();
    fdStartAt.left = new FormAttachment( middle, 0 );
    fdStartAt.top = new FormAttachment( wCounterName, margin );
    fdStartAt.right = new FormAttachment( 100, 0 );
    wStartAt.setLayoutData( fdStartAt );
    // IncrBy line
    wlIncrBy = new Label( gCounter, SWT.RIGHT );
    wlIncrBy.setText( BaseMessages.getString( PKG, "AddSequenceDialog.IncrBy.Label" ) );
    props.setLook( wlIncrBy );
    FormData fdlIncrBy = new FormData();
    fdlIncrBy.left = new FormAttachment( 0, 0 );
    fdlIncrBy.right = new FormAttachment( middle, -margin );
    fdlIncrBy.top = new FormAttachment( wStartAt, margin );
    wlIncrBy.setLayoutData( fdlIncrBy );
    wIncrBy = new TextVar( transMeta, gCounter, SWT.SINGLE | SWT.LEFT | SWT.BORDER );
    wIncrBy.setText( "" );
    props.setLook( wIncrBy );
    wIncrBy.addModifyListener( lsMod );
    FormData fdIncrBy = new FormData();
    fdIncrBy.left = new FormAttachment( middle, 0 );
    fdIncrBy.top = new FormAttachment( wStartAt, margin );
    fdIncrBy.right = new FormAttachment( 100, 0 );
    wIncrBy.setLayoutData( fdIncrBy );
    // MaxVal line
    wlMaxVal = new Label( gCounter, SWT.RIGHT );
    wlMaxVal.setText( BaseMessages.getString( PKG, "AddSequenceDialog.MaxVal.Label" ) );
    props.setLook( wlMaxVal );
    FormData fdlMaxVal = new FormData();
    fdlMaxVal.left = new FormAttachment( 0, 0 );
    fdlMaxVal.right = new FormAttachment( middle, -margin );
    fdlMaxVal.top = new FormAttachment( wIncrBy, margin );
    wlMaxVal.setLayoutData( fdlMaxVal );
    wMaxVal = new TextVar( transMeta, gCounter, SWT.SINGLE | SWT.LEFT | SWT.BORDER );
    wMaxVal.setText( "" );
    props.setLook( wMaxVal );
    wMaxVal.addModifyListener( lsMod );
    FormData fdMaxVal = new FormData();
    fdMaxVal.left = new FormAttachment( middle, 0 );
    fdMaxVal.top = new FormAttachment( wIncrBy, margin );
    fdMaxVal.right = new FormAttachment( 100, 0 );
    wMaxVal.setLayoutData( fdMaxVal );
    // Browse buttons query the selected database for sequences / schemas.
    wbSequence.addSelectionListener( new SelectionAdapter() {
      public void widgetSelected( SelectionEvent e ) {
        getSequences();
      }
    } );
    wbSchema.addSelectionListener( new SelectionAdapter() {
      public void widgetSelected( SelectionEvent e ) {
        getSchemaNames();
      }
    } );
    // THE BUTTONS
    wOK = new Button( shell, SWT.PUSH );
    wOK.setText( BaseMessages.getString( PKG, "System.Button.OK" ) );
    wCancel = new Button( shell, SWT.PUSH );
    wCancel.setText( BaseMessages.getString( PKG, "System.Button.Cancel" ) );
    setButtonPositions( new Button[] { wOK, wCancel }, margin, gCounter );
    // Add listeners
    lsOK = new Listener() {
      public void handleEvent( Event e ) {
        ok();
      }
    };
    lsCancel = new Listener() {
      public void handleEvent( Event e ) {
        cancel();
      }
    };
    wOK.addListener( SWT.Selection, lsOK );
    wCancel.addListener( SWT.Selection, lsCancel );
    // Pressing Enter in any text field is equivalent to clicking OK.
    lsDef = new SelectionAdapter() {
      public void widgetDefaultSelected( SelectionEvent e ) {
        ok();
      }
    };
    wStepname.addSelectionListener( lsDef );
    wValuename.addSelectionListener( lsDef );
    wSchema.addSelectionListener( lsDef );
    wSeqname.addSelectionListener( lsDef );
    wStartAt.addSelectionListener( lsDef );
    wIncrBy.addSelectionListener( lsDef );
    wMaxVal.addSelectionListener( lsDef );
    wCounterName.addSelectionListener( lsDef );
    // Detect X or ALT-F4 or something that kills this window...
    shell.addShellListener( new ShellAdapter() {
      public void shellClosed( ShellEvent e ) {
        cancel();
      }
    } );
    // Set the shell size, based upon previous time...
    setSize();
    getData();
    input.setChanged( changed );
    shell.open();
    while ( !shell.isDisposed() ) {
      if ( !display.readAndDispatch() ) {
        display.sleep();
      }
    }
    return stepname;
  }

  /**
   * Enables the widgets of the selected mode (database or counter) and disables those of the
   * other mode, then refreshes the "get sequences" button state.
   */
  public void enableFields() {
    boolean useDatabase = wUseDatabase.getSelection();
    boolean useCounter = wUseCounter.getSelection();
    wbSchema.setEnabled( useDatabase );
    wlConnection.setEnabled( useDatabase );
    wConnection.setEnabled( useDatabase );
    wbwConnection.setEnabled( useDatabase );
    wbnConnection.setEnabled( useDatabase );
    wbeConnection.setEnabled( useDatabase );
    wlSchema.setEnabled( useDatabase );
    wSchema.setEnabled( useDatabase );
    wlSeqname.setEnabled( useDatabase );
    wSeqname.setEnabled( useDatabase );
    wlCounterName.setEnabled( useCounter );
    wCounterName.setEnabled( useCounter );
    wlStartAt.setEnabled( useCounter );
    wStartAt.setEnabled( useCounter );
    wlIncrBy.setEnabled( useCounter );
    wIncrBy.setEnabled( useCounter );
    wlMaxVal.setEnabled( useCounter );
    wMaxVal.setEnabled( useCounter );
    activeSequence();
  }

  /**
   * Copy information from the meta-data input to the dialog fields.
   */
  public void getData() {
    logDebug( BaseMessages.getString( PKG, "AddSequenceDialog.Log.GettingKeyInfo" ) );
    if ( input.getValuename() != null ) {
      wValuename.setText( input.getValuename() );
    }
    wUseDatabase.setSelection( input.isDatabaseUsed() );
    if ( input.getDatabase() != null ) {
      wConnection.setText( input.getDatabase().getName() );
    } else if ( transMeta.nrDatabases() == 1 ) {
      // Default to the single available connection when none was configured.
      wConnection.setText( transMeta.getDatabase( 0 ).getName() );
    }
    if ( input.getSchemaName() != null ) {
      wSchema.setText( input.getSchemaName() );
    }
    if ( input.getSequenceName() != null ) {
      wSeqname.setText( input.getSequenceName() );
    }
    wUseCounter.setSelection( input.isCounterUsed() );
    wCounterName.setText( Const.NVL( input.getCounterName(), "" ) );
    wStartAt.setText( input.getStartAt() );
    wIncrBy.setText( input.getIncrementBy() );
    wMaxVal.setText( input.getMaxValue() );
    enableFields();
    wStepname.selectAll();
    wStepname.setFocus();
  }

  /** Discards any edits: restores the "changed" flag and closes the dialog. */
  private void cancel() {
    stepname = null;
    input.setChanged( changed );
    dispose();
  }

  /**
   * Copies the dialog fields back into the step metadata and closes the dialog.  Does nothing
   * when the step name is empty.
   */
  private void ok() {
    if ( Const.isEmpty( wStepname.getText() ) ) {
      return;
    }
    stepname = wStepname.getText(); // return value
    input.setUseCounter( wUseCounter.getSelection() );
    input.setUseDatabase( wUseDatabase.getSelection() );
    String connection = wConnection.getText();
    input.setDatabase( transMeta.findDatabase( connection ) );
    input.setSchemaName( wSchema.getText() );
    input.setSequenceName( wSeqname.getText() );
    input.setValuename( wValuename.getText() );
    input.setCounterName( wCounterName.getText() );
    input.setStartAt( wStartAt.getText() );
    input.setIncrementBy( wIncrBy.getText() );
    input.setMaxValue( wMaxVal.getText() );
    // NOTE(review): when database mode is on but the connection name does not resolve, only a
    // warning box is shown — the dialog is still disposed with the invalid state saved.
    if ( input.isDatabaseUsed() && transMeta.findDatabase( connection ) == null ) {
      MessageBox mb = new MessageBox( shell, SWT.OK | SWT.ICON_ERROR );
      mb.setMessage( BaseMessages.getString( PKG, "AddSequenceDialog.NoValidConnectionError.DialogMessage" ) );
      mb.setText( BaseMessages.getString( PKG, "AddSequenceDialog.NoValidConnectionError.DialogTitle" ) );
      mb.open();
    }
    dispose();
  }

  /**
   * Enables the "get sequences" button only when database mode is active and the currently
   * selected connection supports sequences.
   */
  private void activeSequence() {
    boolean useDatabase = wUseDatabase.getSelection();
    DatabaseMeta databaseMeta = transMeta.findDatabase( wConnection.getText() );
    wbSequence.setEnabled( databaseMeta == null ? false : useDatabase && databaseMeta.supportsSequences() );
  }

  /**
   * Connects to the selected database, lists its sequences and lets the user pick one into the
   * sequence-name field.  Shows an error dialog on failure and always disconnects.
   */
  private void getSequences() {
    DatabaseMeta databaseMeta = transMeta.findDatabase( wConnection.getText() );
    if ( databaseMeta != null ) {
      Database database = new Database( loggingObject, databaseMeta );
      try {
        database.connect();
        String[] sequences = database.getSequences();
        if ( null != sequences && sequences.length > 0 ) {
          sequences = Const.sortStrings( sequences );
          EnterSelectionDialog dialog =
            new EnterSelectionDialog( shell, sequences,
              BaseMessages.getString( PKG, "AddSequenceDialog.SelectSequence.Title", wConnection.getText() ),
              BaseMessages.getString( PKG, "AddSequenceDialog.SelectSequence.Message" ) );
          String d = dialog.open();
          if ( d != null ) {
            wSeqname.setText( Const.NVL( d.toString(), "" ) );
          }
        } else {
          MessageBox mb = new MessageBox( shell, SWT.OK | SWT.ICON_ERROR );
          mb.setMessage( BaseMessages.getString( PKG, "AddSequenceDialog.NoSequence.Message" ) );
          mb.setText( BaseMessages.getString( PKG, "AddSequenceDialog.NoSequence.Title" ) );
          mb.open();
        }
      } catch ( Exception e ) {
        new ErrorDialog( shell, BaseMessages.getString( PKG, "System.Dialog.Error.Title" ), BaseMessages
          .getString( PKG, "AddSequenceDialog.ErrorGettingSequences" ), e );
      } finally {
        if ( database != null ) {
          database.disconnect();
          database = null;
        }
      }
    }
  }

  /**
   * Connects to the selected database, lists its schemas and lets the user pick one into the
   * schema field.  Shows an error dialog on failure and always disconnects.
   */
  private void getSchemaNames() {
    if ( wSchema.isDisposed() ) {
      return;
    }
    DatabaseMeta databaseMeta = transMeta.findDatabase( wConnection.getText() );
    if ( databaseMeta != null ) {
      Database database = new Database( loggingObject, databaseMeta );
      try {
        database.connect();
        String[] schemas = database.getSchemas();
        if ( null != schemas && schemas.length > 0 ) {
          schemas = Const.sortStrings( schemas );
          // NOTE(review): this reuses the "SelectSequence" i18n keys for the schema picker —
          // looks like copy/paste from getSequences(); confirm whether dedicated
          // "SelectSchema" keys were intended.
          EnterSelectionDialog dialog =
            new EnterSelectionDialog( shell, schemas,
              BaseMessages.getString( PKG, "AddSequenceDialog.SelectSequence.Title", wConnection.getText() ),
              BaseMessages.getString( PKG, "AddSequenceDialog.SelectSequence.Message" ) );
          String d = dialog.open();
          if ( d != null ) {
            wSchema.setText( Const.NVL( d.toString(), "" ) );
          }
        } else {
          MessageBox mb = new MessageBox( shell, SWT.OK | SWT.ICON_ERROR );
          mb.setMessage( BaseMessages.getString( PKG, "AddSequenceDialog.NoSchema.Message" ) );
          mb.setText( BaseMessages.getString( PKG, "AddSequenceDialog.NoSchema.Title" ) );
          mb.open();
        }
      } catch ( Exception e ) {
        new ErrorDialog( shell, BaseMessages.getString( PKG, "System.Dialog.Error.Title" ), BaseMessages
          .getString( PKG, "AddSequenceDialog.ErrorGettingSchemas" ), e );
      } finally {
        if ( database != null ) {
          database.disconnect();
          database = null;
        }
      }
    }
  }
}
| tgf/pentaho-kettle | ui/src/org/pentaho/di/ui/trans/steps/addsequence/AddSequenceDialog.java | Java | apache-2.0 | 24,982 |
// AMD NLS resource bundle: Arabic ("ar") translations for the MyLocation widget.
// Keys must mirror the widget's root strings bundle; the values are runtime
// data displayed to the user and must not be edited for style.
define(
  ({
    _widgetLabel: "موقعي",
    title: "العثور على الموقع الخاص بي",
    browserError: "الموقع الجغرافي غير مدعوم من قبل هذا المستعرض.",
    failureFinding: "يتعذر إيجاد الموقع. يجرى التحقق من المتصفح للتأكد من أن الموقع قد تم مشاركته."
  })
);
package com.pedrogomez.renderers;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import com.pedrogomez.renderers.exception.NotInflateViewException;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.mockito.Spy;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Unit tests verifying the lifecycle contract of Renderer<T>: onCreate must
 * inflate, tag, set up and hook the root view, and both onCreate and onRecycle
 * must retain the content passed to them.
 *
 * @author Pedro Vicente Gómez Sánchez.
 */
public class RendererTest {

  @Spy private ObjectRenderer renderer;
  @Mock private Object content;
  @Mock private LayoutInflater inflater;
  @Mock private ViewGroup parent;
  @Mock private View inflatedView;

  @Before public void setUp() {
    // The spy target must exist before initMocks runs so that @Spy wraps this instance.
    renderer = new ObjectRenderer();
    MockitoAnnotations.initMocks(this);
  }

  @Test public void shouldKeepTheContentAfterOnCreateCall() {
    stubInflateToReturn(inflatedView);
    create();
    assertEquals(content, renderer.getContent());
  }

  @Test public void shouldInflateViewUsingLayoutInflaterAndParentAfterOnCreateCall() {
    stubInflateToReturn(inflatedView);
    create();
    verify(renderer).inflate(inflater, parent);
  }

  @Test(expected = NotInflateViewException.class)
  public void shouldThrowExceptionIfInflateReturnsAnEmptyViewAfterOnCreateCall() {
    stubInflateToReturn(null);
    create();
  }

  @Test public void shouldAssociateTheRendererToTheRootViewTagAfterOnCreateCall() {
    stubInflateToReturn(inflatedView);
    create();
    verify(inflatedView).setTag(renderer);
  }

  @Test public void shouldSetUpViewWithTheInflatedViewAfterOnCreateCall() {
    stubInflateToReturn(inflatedView);
    create();
    verify(renderer).setUpView(inflatedView);
  }

  @Test public void shouldHookListenersViewWithTheInflatedViewAfterOnCreateCall() {
    stubInflateToReturn(inflatedView);
    create();
    verify(renderer).hookListeners(inflatedView);
  }

  @Test public void shouldKeepTheContentAfterOnRecycleCall() {
    stubInflateToReturn(inflatedView);
    recycle();
    assertEquals(content, renderer.getContent());
  }

  // Drives Renderer#onCreate with the mocked collaborators.
  private void create() {
    renderer.onCreate(content, inflater, parent);
  }

  // Drives Renderer#onRecycle with the mocked content.
  private void recycle() {
    renderer.onRecycle(content);
  }

  // Stubs the spied renderer so that inflate() yields the given view (possibly null).
  private void stubInflateToReturn(View view) {
    when(renderer.inflate(inflater, parent)).thenReturn(view);
  }
}
| tmexcept/Renderers-Eclipse | renderers/test/com/pedrogomez/renderers/RendererTest.java | Java | apache-2.0 | 2,913 |
cask 'komodo-edit' do
version '10.0.1-17276'
sha256 '6ff89c1c0a43b16900889da6d7c6b7988f9aa82d04623bfc15c0b95b6e7fc591'
# activestate.com/Komodo was verified as official when first introduced to the cask
url "https://downloads.activestate.com/Komodo/releases/#{version.sub(%r{-.*}, '')}/Komodo-Edit-#{version}-macosx-x86_64.dmg"
name 'Komodo Edit'
homepage 'https://komodoide.com/komodo-edit/'
app "Komodo Edit #{version.major}.app"
end
| decrement/homebrew-cask | Casks/komodo-edit.rb | Ruby | bsd-2-clause | 452 |
import os
from flask import Flask, render_template_string, request
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
from flask_user import login_required, SQLAlchemyAdapter, UserManager, UserMixin
from flask_user import roles_required
# Use a Class-based config to avoid needing a 2nd file
# os.getenv() enables configuration through OS environment variables
class ConfigClass(object):
    """Application settings for Flask, Flask-Mail and Flask-User.

    Every value can be overridden through an OS environment variable of the
    same name; the second argument to ``os.getenv`` is the default used when
    the variable is unset.
    """

    # Flask settings
    SECRET_KEY = os.getenv('SECRET_KEY', 'THIS IS AN INSECURE SECRET')
    SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'sqlite:///single_file_app.sqlite')
    CSRF_ENABLED = True

    # Flask-Mail settings
    MAIL_USERNAME = os.getenv('MAIL_USERNAME', 'email@example.com')
    MAIL_PASSWORD = os.getenv('MAIL_PASSWORD', 'password')
    MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER', '"MyApp" <noreply@example.com>')
    MAIL_SERVER = os.getenv('MAIL_SERVER', 'smtp.gmail.com')
    MAIL_PORT = int(os.getenv('MAIL_PORT', '465'))
    # Bug fix: the previous ``int(os.getenv('MAIL_USE_SSL', True))`` raised
    # ValueError whenever the environment variable was set to a value like
    # 'True' or 'yes', because os.getenv returns the raw string.  Parse the
    # common truthy spellings instead; truthiness is unchanged for unset,
    # '0' and '1'.
    MAIL_USE_SSL = str(os.getenv('MAIL_USE_SSL', 'True')).strip().lower() in ('1', 'true', 'yes', 'on')

    # Flask-User settings
    USER_APP_NAME = "AppName"  # Used by email templates
def create_app(test_config=None):                     # For automated tests
    """Application factory.

    Builds the Flask app, configures it from ConfigClass (plus an optional
    local_settings module and the `test_config` dict), defines the ORM
    models, seeds a demo user, registers the routes and returns the app.
    """
    # Setup Flask and read config from ConfigClass defined above
    app = Flask(__name__)
    app.config.from_object(__name__+'.ConfigClass')

    # Load local_settings.py if file exists           # For automated tests
    try: app.config.from_object('local_settings')
    except: pass

    # Load optional test_config                       # For automated tests
    if test_config:
        app.config.update(test_config)

    # Initialize Flask extensions
    mail = Mail(app)                                  # Initialize Flask-Mail
    db = SQLAlchemy(app)                              # Initialize Flask-SQLAlchemy

    # Define the User data model. Make sure to add flask.ext.user UserMixin!!
    class User(db.Model, UserMixin):
        id = db.Column(db.Integer, primary_key=True)

        # User email information
        email = db.Column(db.String(255), nullable=False, unique=True)
        confirmed_at = db.Column(db.DateTime())

        # User information
        active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')
        first_name = db.Column(db.String(100), nullable=False, server_default='')
        last_name = db.Column(db.String(100), nullable=False, server_default='')

        # Relationships: one-to-one credentials record, many-to-many roles
        user_auth = db.relationship('UserAuth', uselist=False)
        roles = db.relationship('Role', secondary='user_roles',
                                backref=db.backref('users', lazy='dynamic'))

    # Define the UserAuth data model (credentials kept separate from User).
    class UserAuth(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))

        # User authentication information
        username = db.Column(db.String(50), nullable=False, unique=True)
        password = db.Column(db.String(255), nullable=False, server_default='')
        reset_password_token = db.Column(db.String(100), nullable=False, server_default='')

        # Relationships
        user = db.relationship('User', uselist=False)

    # Define the Role data model
    class Role(db.Model):
        id = db.Column(db.Integer(), primary_key=True)
        name = db.Column(db.String(50), unique=True)

    # Define the UserRoles data model (association table for User <-> Role)
    class UserRoles(db.Model):
        id = db.Column(db.Integer(), primary_key=True)
        user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))
        role_id = db.Column(db.Integer(), db.ForeignKey('role.id', ondelete='CASCADE'))

    # Reset all the database tables
    db.create_all()

    # Setup Flask-User
    db_adapter = SQLAlchemyAdapter(db, User, UserAuthClass=UserAuth)
    user_manager = UserManager(db_adapter, app)

    # Create 'user007' user with 'secret' and 'agent' roles (idempotent:
    # only seeded when the username does not already exist).
    if not UserAuth.query.filter(UserAuth.username=='user007').first():
        user1 = User(email='user007@example.com', first_name='James', last_name='Bond', active=True)
        db.session.add(user1)
        user_auth1 = UserAuth(user=user1, username='user007',
                              password=user_manager.hash_password('Password1')
                              )
        db.session.add(user_auth1)
        user1.roles.append(Role(name='secret'))
        user1.roles.append(Role(name='agent'))
        db.session.commit()

    # The Home page is accessible to anyone
    @app.route('/')
    def home_page():
        return render_template_string("""
            {% extends "base.html" %}
            {% block content %}
                <h2>Home page</h2>
                <p>This page can be accessed by anyone.</p><br/>
                <p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
                <p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
                <p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
            {% endblock %}
            """)

    # The Members page is only accessible to authenticated users
    @app.route('/members')
    @login_required                                 # Use of @login_required decorator
    def members_page():
        return render_template_string("""
            {% extends "base.html" %}
            {% block content %}
                <h2>Members page</h2>
                <p>This page can only be accessed by authenticated users.</p><br/>
                <p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
                <p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
                <p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
            {% endblock %}
            """)

    # The Special page requires the 'secret' role AND one of the 'sauce' or
    # 'agent' roles (a list argument to @roles_required means "any of").
    @app.route('/special')
    @roles_required('secret', ['sauce', 'agent'])   # Use of @roles_required decorator
    def special_page():
        return render_template_string("""
            {% extends "base.html" %}
            {% block content %}
                <h2>Special Page</h2>
                <p>This page can only be accessed by user007.</p><br/>
                <p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
                <p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
                <p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
            {% endblock %}
            """)

    return app
# Start development web server
if __name__=='__main__':
    app = create_app()
    # Serve on all interfaces, port 5000.  debug=True enables the reloader
    # and the interactive debugger -- development only, never production.
    app.run(host='0.0.0.0', port=5000, debug=True)
| jamescarignan/Flask-User | example_apps/user_auth_app.py | Python | bsd-2-clause | 6,986 |
"""distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate an import library for its dll
# - create a def-file for python??.dll
# - create an import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
# --export-all-symbols because it doesn't worked reliable in some
# tested configurations. And because other windows compilers also
# need their symbols specified this no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead dllwrap doesn't work without -static because
# it tries to link against dlls instead their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries,
# this is windows standard and there are normally not the necessary symbols
# in the dlls.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
import os
import sys
import copy
from subprocess import Popen, PIPE, check_output
import re
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import (DistutilsExecError, CCompilerError,
CompileError, UnknownFileError)
from distutils import log
from distutils.version import LooseVersion
from distutils.spawn import find_executable
def get_msvcr():
    """Include the appropriate MSVC runtime library if Python was built
    with MSVC 7.0 or later.

    Returns a one-element list with the runtime library name, or None when
    this interpreter was not built with MSVC.  Raises ValueError for an
    MSVC version this module does not know about.
    """
    marker_pos = sys.version.find('MSC v.')
    if marker_pos == -1:
        # Not an MSVC build -- nothing to link against.
        return None

    compiler_version = sys.version[marker_pos + 6:marker_pos + 10]
    runtime_by_version = {
        '1300': ['msvcr70'],   # MSVC 7.0
        '1310': ['msvcr71'],   # MSVC 7.1
        '1400': ['msvcr80'],   # VS2005 / MSVC 8.0
        '1500': ['msvcr90'],   # VS2008 / MSVC 9.0
        '1600': ['msvcr100'],  # VS2010 / MSVC 10.0
    }
    try:
        return runtime_by_version[compiler_version]
    except KeyError:
        raise ValueError("Unknown MS Compiler version %s " % compiler_version)
class CygwinCCompiler(UnixCCompiler):
    """ Handles the Cygwin port of the GNU C compiler to Windows.
    """
    compiler_type = 'cygwin'
    obj_extension = ".o"
    static_lib_extension = ".a"
    shared_lib_extension = ".dll"
    static_lib_format = "lib%s%s"
    shared_lib_format = "%s%s"
    exe_extension = ".exe"

    def __init__(self, verbose=0, dry_run=0, force=0):
        """Probe the gcc/ld/dllwrap toolchain and configure the compile and
        link command lines accordingly (warns if pyconfig.h looks unusable).
        """
        UnixCCompiler.__init__(self, verbose, dry_run, force)

        status, details = check_config_h()
        self.debug_print("Python's GCC status: %s (details: %s)" %
                         (status, details))
        if status is not CONFIG_H_OK:
            self.warn(
                "Python's pyconfig.h doesn't seem to support your compiler. "
                "Reason: %s. "
                "Compiling may fail because of undefined preprocessor macros."
                % details)

        self.gcc_version, self.ld_version, self.dllwrap_version = \
            get_versions()
        self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
                         (self.gcc_version,
                          self.ld_version,
                          self.dllwrap_version) )

        # ld_version >= "2.10.90" and < "2.13" should also be able to use
        # gcc -mdll instead of dllwrap
        # Older dllwraps had own version numbers, newer ones use the
        # same as the rest of binutils ( also ld )
        # dllwrap 2.10.90 is buggy
        if self.ld_version >= "2.10.90":
            self.linker_dll = "gcc"
        else:
            self.linker_dll = "dllwrap"

        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"

        # Hard-code GCC because that's what this is all about.
        # XXX optimization, warnings etc. should be customizable.
        self.set_executables(compiler='gcc -mcygwin -O -Wall',
                             compiler_so='gcc -mcygwin -mdll -O -Wall',
                             compiler_cxx='g++ -mcygwin -O -Wall',
                             linker_exe='gcc -mcygwin',
                             linker_so=('%s -mcygwin %s' %
                                        (self.linker_dll, shared_option)))

        # cygwin and mingw32 need different sets of libraries
        if self.gcc_version == "2.91.57":
            # cygwin shouldn't need msvcrt, but without the dlls will crash
            # (gcc version 2.91.57) -- perhaps something about initialization
            self.dll_libraries=["msvcrt"]
            self.warn(
                "Consider upgrading to a newer version of gcc")
        else:
            # Include the appropriate MSVC runtime library if Python was built
            # with MSVC 7.0 or later.
            self.dll_libraries = get_msvcr()

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compiles the source by spawning GCC and windres if needed."""
        if ext == '.rc' or ext == '.res':
            # gcc needs '.res' and '.rc' compiled to object files !!!
            try:
                self.spawn(["windres", "-i", src, "-o", obj])
            except DistutilsExecError as msg:
                raise CompileError(msg)
        else: # for other files use the C-compiler
            try:
                self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
                           extra_postargs)
            except DistutilsExecError as msg:
                raise CompileError(msg)

    def link(self, target_desc, objects, output_filename, output_dir=None,
             libraries=None, library_dirs=None, runtime_library_dirs=None,
             export_symbols=None, debug=0, extra_preargs=None,
             extra_postargs=None, build_temp=None, target_lang=None):
        """Link the objects.

        Export symbols are written to a .def file that is handed to the
        linker, because --export-all-symbols proved unreliable (see the
        module header notes); the .def/.a helper files are placed next to
        the object files.
        """
        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])

        # Additional libraries
        libraries.extend(self.dll_libraries)

        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if ((export_symbols is not None) and
            (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably better to check if we really need this,
            # but for this we had to insert some unchanged parts of
            # UnixCCompiler, and this is not what we want.)

            # we want to put some files in the same directory as the
            # object files are, build_temp doesn't help much
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename))

            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")
            lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")

            # Generate .def file
            contents = [
                "LIBRARY %s" % os.path.basename(output_filename),
                "EXPORTS"]
            for sym in export_symbols:
                contents.append(sym)
            self.execute(write_file, (def_file, contents),
                         "writing %s" % def_file)

            # next add options for def-file and to creating import libraries
            # dllwrap uses different options than gcc/ld
            if self.linker_dll == "dllwrap":
                extra_preargs.extend(["--output-lib", lib_file])
                # for dllwrap we have to use a special option
                extra_preargs.extend(["--def", def_file])
            # we use gcc/ld here and can be sure ld is >= 2.9.10
            else:
                # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
                #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
                # for gcc/ld the def-file is specified as any object files
                objects.append(def_file)

        #end: if ((export_symbols is not None) and
        #        (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):

        # who wants symbols and a many times larger output file
        # should explicitly switch the debug mode on
        # otherwise we let dllwrap/ld strip the output file
        # (On my machine: 10KiB < stripped_file < ??100KiB
        #   unstripped_file = stripped_file + XXX KiB
        #  ( XXX=254 for a typical python extension))
        if not debug:
            extra_preargs.append("-s")

        UnixCCompiler.link(self, target_desc, objects, output_filename,
                           output_dir, libraries, library_dirs,
                           runtime_library_dirs,
                           None, # export_symbols, we do this in our def-file
                           debug, extra_preargs, extra_postargs, build_temp,
                           target_lang)

    # -- Miscellaneous methods -----------------------------------------

    def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
        """Adds supports for rc and res files."""
        if output_dir is None:
            output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            base, ext = os.path.splitext(os.path.normcase(src_name))
            if ext not in (self.src_extensions + ['.rc','.res']):
                raise UnknownFileError("unknown file type '%s' (from '%s')" % \
                      (ext, src_name))
            if strip_dir:
                base = os.path.basename (base)
            if ext in ('.res', '.rc'):
                # these need to be compiled to object files
                obj_names.append (os.path.join(output_dir,
                                               base + ext + self.obj_extension))
            else:
                obj_names.append (os.path.join(output_dir,
                                               base + self.obj_extension))
        return obj_names
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(CygwinCCompiler):
    """ Handles the Mingw32 port of the GNU C compiler to Windows.
    """
    compiler_type = 'mingw32'

    def __init__(self, verbose=0, dry_run=0, force=0):
        """Initialize like CygwinCCompiler, then replace the command lines
        with plain (no-cygwin) gcc ones.  Refuses to run with a Cygwin gcc.
        """
        CygwinCCompiler.__init__ (self, verbose, dry_run, force)

        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"

        # A real mingw32 doesn't need to specify a different entry point,
        # but cygwin 2.91.57 in no-cygwin-mode needs it.
        if self.gcc_version <= "2.91.57":
            entry_point = '--entry _DllMain@12'
        else:
            entry_point = ''

        if is_cygwingcc():
            raise CCompilerError(
                'Cygwin gcc cannot be used with --compiler=mingw32')

        self.set_executables(compiler='gcc -O -Wall',
                             compiler_so='gcc -mdll -O -Wall',
                             compiler_cxx='g++ -O -Wall',
                             linker_exe='gcc',
                             linker_so='%s %s %s'
                                        % (self.linker_dll, shared_option,
                                           entry_point))
        # Maybe we should also append -mthreads, but then the finished
        # dlls need another dll (mingwm10.dll see Mingw32 docs)
        # (-mthreads: Support thread-safe exception handling on `Mingw32')

        # Include the appropriate MSVC runtime library if Python was built
        # with MSVC 7.0 or later.  (Removed the dead `self.dll_libraries=[]`
        # assignment that was immediately overwritten by this one.)
        self.dll_libraries = get_msvcr()
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using an unmodified
# version.

# Status codes returned (together with a human-readable detail string) by
# check_config_h() below.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
    """Check if the current Python installation appears amenable to building
    extensions with GCC.

    Returns a tuple (status, details), where 'status' is one of the following
    constants:

    - CONFIG_H_OK: all is well, go ahead and compile
    - CONFIG_H_NOTOK: doesn't look good
    - CONFIG_H_UNCERTAIN: not sure -- unable to read pyconfig.h

    'details' is a human-readable string explaining the situation.

    Note there are two ways to conclude "OK": either 'sys.version' contains
    the string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """
    # XXX since this function also checks sys.version, it's not strictly a
    # "pyconfig.h" check -- should probably be renamed...
    from distutils import sysconfig

    # if sys.version contains GCC then python was compiled with GCC, and the
    # pyconfig.h file should be OK
    if "GCC" in sys.version:
        return CONFIG_H_OK, "sys.version mentions 'GCC'"

    # let's see if __GNUC__ is mentioned in python.h
    fn = sysconfig.get_config_h_filename()
    try:
        # IDIOM FIX: 'with' guarantees the file is closed even if read()
        # raises, replacing the original's manual try/finally close.
        with open(fn) as config_h:
            if "__GNUC__" in config_h.read():
                return CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn
            else:
                return CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn
    except OSError as exc:
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))
# Matches a dotted version number (two or more components) in tool output.
RE_VERSION = re.compile(br'(\d+\.\d+(\.\d+)*)')

def _find_exe_version(cmd):
    """Find the version of an executable by running `cmd` in the shell.

    If the command is not found, or the output does not match
    `RE_VERSION`, returns None; otherwise returns a LooseVersion.
    """
    executable = cmd.split()[0]
    if find_executable(executable) is None:
        return None
    # RESOURCE FIX: use the process as a context manager so it is waited on
    # and its stdout pipe closed -- the original read the pipe but never
    # wait()ed, leaking a zombie process per probed tool.
    with Popen(cmd, shell=True, stdout=PIPE) as proc:
        out_string = proc.stdout.read()
    result = RE_VERSION.search(out_string)
    if result is None:
        return None
    # LooseVersion works with strings
    # so we need to decode our bytes
    return LooseVersion(result.group(1).decode())
def get_versions():
    """Probe gcc, ld and dllwrap and return their versions as a 3-tuple.

    Each entry is the value produced by _find_exe_version for that tool,
    i.e. a LooseVersion, or None when the tool is missing or its output
    could not be parsed.
    """
    version_probes = ('gcc -dumpversion', 'ld -v', 'dllwrap --version')
    return tuple(_find_exe_version(probe) for probe in version_probes)
def is_cygwingcc():
    '''Try to determine if the gcc that would be used is from cygwin.'''
    # Ask gcc for its target triplet; Cygwin toolchains report a triplet
    # ending in "cygwin".
    target_triplet = check_output(['gcc', '-dumpmachine']).strip()
    return target_triplet.endswith(b'cygwin')
| xyuanmu/XX-Net | python3.8.2/Lib/distutils/cygwinccompiler.py | Python | bsd-2-clause | 16,478 |
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_EXTENSIONS_APP_NOTIFICATION_STORAGE_H__
#define CHROME_BROWSER_EXTENSIONS_APP_NOTIFICATION_STORAGE_H__
#pragma once
#include <set>
#include "chrome/browser/extensions/app_notification.h"
class FilePath;
// Represents storage for app notifications for a particular extension id.
//
// IMPORTANT NOTE: Instances of this class should only be used on the FILE
// thread.
class AppNotificationStorage {
 public:
  // Must be called on the FILE thread. The storage will be created at |path|.
  static AppNotificationStorage* Create(const FilePath& path);

  // Virtual destructor so implementations are destroyed correctly through a
  // base-class pointer.
  virtual ~AppNotificationStorage();

  // Get the set of extension id's that have entries, putting them into
  // |result|.
  virtual bool GetExtensionIds(std::set<std::string>* result) = 0;

  // Gets the list of stored notifications for extension_id. On success, writes
  // results into |result|. On error, returns false.
  virtual bool Get(const std::string& extension_id,
                   AppNotificationList* result) = 0;

  // Writes the |list| for |extension_id| into storage.
  virtual bool Set(const std::string& extension_id,
                   const AppNotificationList& list) = 0;

  // Deletes all data for |extension_id|.
  virtual bool Delete(const std::string& extension_id) = 0;
};
#endif // CHROME_BROWSER_EXTENSIONS_APP_NOTIFICATION_STORAGE_H__
| aYukiSekiguchi/ACCESS-Chromium | chrome/browser/extensions/app_notification_storage.h | C | bsd-3-clause | 1,517 |
/**
* Copyright 2013-2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
* @emails oncall+relay
*/
'use strict';
// Keep the real range/segment implementations under test while mocking the
// warning() helper.
jest
  .dontMock('GraphQLMutatorConstants')
  .dontMock('GraphQLRange')
  .dontMock('GraphQLSegment')
  .mock('warning');

var RelayTestUtils = require('RelayTestUtils');
RelayTestUtils.unmockRelay();

var Relay = require('Relay');
var RelayConnectionInterface = require('RelayConnectionInterface');
var RelayQueryTracker = require('RelayQueryTracker');
var RelayChangeTracker = require('RelayChangeTracker');
var RelayMutationType = require('RelayMutationType');
var RelayQueryWriter = require('RelayQueryWriter');
var GraphQLMutatorConstants = require('GraphQLMutatorConstants');
var generateClientEdgeID = require('generateClientEdgeID');
var generateRQLFieldAlias = require('generateRQLFieldAlias');
var writeRelayUpdatePayload = require('writeRelayUpdatePayload');
describe('writePayload()', () => {
var RelayRecordStore;
var {getNode, writePayload} = RelayTestUtils;
beforeEach(() => {
jest.resetModuleRegistry();
RelayRecordStore = require('RelayRecordStore');
jest.addMatchers(RelayTestUtils.matchers);
});
describe('range delete mutations', () => {
var store, queueStore, commentID, connectionID, edgeID;
beforeEach(() => {
var records = {};
var queuedRecords = {};
var nodeConnectionMap = {};
var rootCallMaps = {rootCallMap: {}};
commentID = '123';
store = new RelayRecordStore(
{records},
rootCallMaps,
nodeConnectionMap
);
queueStore = new RelayRecordStore(
{records, queuedRecords},
rootCallMaps,
nodeConnectionMap,
undefined /* cacheManager */,
'mutationID'
);
var query = getNode(Relay.QL`
query {
node(id:"feedback_id") {
topLevelComments(first:"1") {
count,
edges {
node {
id,
},
},
},
}
}
`);
var payload = {
feedback_id: {
id: 'feedback_id',
[generateRQLFieldAlias('topLevelComments.first(1)')]: {
count: 1,
edges: [
{
cursor: commentID + ':cursor',
node: {
id: commentID,
},
},
],
},
},
};
writePayload(store, query, payload);
connectionID = store.getLinkedRecordID(
'feedback_id',
'topLevelComments'
);
edgeID = generateClientEdgeID(connectionID, commentID);
});
it('optimistically removes range edges', () => {
// create the mutation and payload
var input = {
[RelayConnectionInterface.CLIENT_MUTATION_ID]: '0',
deletedCommentId: commentID,
};
var mutation = getNode(Relay.QL`
mutation {
commentDelete(input:$input) {
deletedCommentId,
feedback {
topLevelComments {
count,
},
},
}
}
`, {
input: JSON.stringify(input),
});
var configs = [{
type: RelayMutationType.RANGE_DELETE,
deletedIDFieldName: 'deletedCommentId',
pathToConnection: ['feedback', 'topLevelComments'],
}];
var payload = {
[RelayConnectionInterface.CLIENT_MUTATION_ID]:
input[RelayConnectionInterface.CLIENT_MUTATION_ID],
deletedCommentId: commentID,
feedback: {
id: 'feedback_id',
topLevelComments: {
count: 0,
},
},
};
// write to the queued store
var changeTracker = new RelayChangeTracker();
var queryTracker = new RelayQueryTracker();
var writer = new RelayQueryWriter(
queueStore,
queryTracker,
changeTracker
);
writeRelayUpdatePayload(
writer,
mutation,
payload,
{configs, isOptimisticUpdate: true}
);
expect(changeTracker.getChangeSet()).toEqual({
created: {},
updated: {
[connectionID]: true, // range edge deleted & count changed
[edgeID]: true, // edge deleted
// `commentID` is not modified
},
});
expect(queueStore.getField(connectionID, 'count')).toBe(0);
expect(queueStore.getRecordState(edgeID)).toBe('NONEXISTENT');
expect(queueStore.getRecordState(commentID)).toBe('EXISTENT');
// the range no longer returns this edge
expect(queueStore.getRangeMetadata(
connectionID,
[{name: 'first', value: '1'}]
).requestedEdges.map(edge => edge.edgeID)).toEqual([]);
expect(store.getField(connectionID, 'count')).toBe(1);
expect(store.getRecordState(edgeID)).toBe('EXISTENT');
// the range still contains this edge
expect(store.getRangeMetadata(
connectionID,
[{name: 'first', value: '1'}]
).requestedEdges.map(edge => edge.edgeID)).toEqual([
edgeID,
]);
});
it('non-optimistically removes range edges', () => {
// create the mutation and payload
var input = {
[RelayConnectionInterface.CLIENT_MUTATION_ID]: '0',
deletedCommentId: commentID,
};
var mutation = getNode(Relay.QL`
mutation {
commentDelete(input:$input) {
deletedCommentId,
feedback {
topLevelComments {
count,
},
},
}
}
`, {
input: JSON.stringify(input),
});
var configs = [{
type: RelayMutationType.RANGE_DELETE,
deletedIDFieldName: 'deletedCommentId',
pathToConnection: ['feedback', 'topLevelComments'],
}];
var payload = {
[RelayConnectionInterface.CLIENT_MUTATION_ID]:
input[RelayConnectionInterface.CLIENT_MUTATION_ID],
deletedCommentId: commentID,
feedback: {
id: 'feedback_id',
topLevelComments: {
count: 0,
},
},
};
// write to the queued store
var changeTracker = new RelayChangeTracker();
var queryTracker = new RelayQueryTracker();
var writer = new RelayQueryWriter(
store,
queryTracker,
changeTracker
);
writeRelayUpdatePayload(
writer,
mutation,
payload,
{configs, isOptimisticUpdate: false}
);
expect(changeTracker.getChangeSet()).toEqual({
created: {},
updated: {
[connectionID]: true, // range edge deleted & count changed
[edgeID]: true, // edge deleted
// `commentID` is not modified
},
});
expect(store.getField(connectionID, 'count')).toBe(0);
expect(store.getRecordState(edgeID)).toBe('NONEXISTENT');
expect(store.getRecordState(commentID)).toBe('EXISTENT');
// the range no longer returns this edge
expect(store.getRangeMetadata(
connectionID,
[{name: 'first', value: '1'}]
).requestedEdges.map(edge => edge.edgeID)).toEqual([]);
});
});
describe('node/range delete mutations', () => {
var store, queueStore, feedbackID, connectionID, firstCommentID,
secondCommentID, firstEdgeID, secondEdgeID;
beforeEach(() => {
var records = {};
var queuedRecords = {};
var nodeConnectionMap = {};
var rootCallMaps = {rootCallMap: {}};
feedbackID = 'feedback123';
firstCommentID = 'comment456';
secondCommentID = 'comment789';
store = new RelayRecordStore(
{records},
rootCallMaps,
nodeConnectionMap
);
queueStore = new RelayRecordStore(
{records, queuedRecords},
rootCallMaps,
nodeConnectionMap,
undefined /* cacheManager */,
'mutationID'
);
var query = getNode(Relay.QL`
query {
node(id:"feedback123") {
topLevelComments(first:"1") {
count,
edges {
node {
id
}
}
}
}
}
`);
var alias = generateRQLFieldAlias('topLevelComments.first(1)');
var payload = {
feedback123: {
id: feedbackID,
[alias]: {
count: 1,
edges: [
{
cursor: firstCommentID + ':cursor',
node: {
id: firstCommentID,
},
},
{
cursor: secondCommentID + ':cursor',
node: {
id: secondCommentID,
},
},
],
},
},
};
writePayload(store, query, payload);
connectionID = store.getLinkedRecordID(feedbackID, 'topLevelComments');
firstEdgeID = generateClientEdgeID(connectionID, firstCommentID);
secondEdgeID = generateClientEdgeID(connectionID, secondCommentID);
});
it('optimistically deletes comments', () => {
// create the mutation and payload
var input = {
actor_id: 'actor:123',
[RelayConnectionInterface.CLIENT_MUTATION_ID]: '0',
deletedCommentId: firstCommentID,
};
var mutation = getNode(Relay.QL`
mutation {
commentDelete(input:$input) {
deletedCommentId,
feedback {
id,
topLevelComments {
count
}
}
}
}
`, {
input: JSON.stringify(input),
});
var configs = [{
type: RelayMutationType.NODE_DELETE,
deletedIDFieldName: 'deletedCommentId',
}];
var payload = {
[RelayConnectionInterface.CLIENT_MUTATION_ID]:
input[RelayConnectionInterface.CLIENT_MUTATION_ID],
deletedCommentId: firstCommentID,
feedback: {
id: feedbackID,
topLevelComments: {
count: 0,
},
},
};
// write to the queued store
var changeTracker = new RelayChangeTracker();
var queryTracker = new RelayQueryTracker();
var writer = new RelayQueryWriter(
queueStore,
queryTracker,
changeTracker
);
writeRelayUpdatePayload(
writer,
mutation,
payload,
{configs, isOptimisticUpdate: true}
);
expect(changeTracker.getChangeSet()).toEqual({
created: {},
updated: {
[connectionID]: true, // range item deleted & count changed
[firstEdgeID]: true, // edge deleted
[firstCommentID]: true, // node deleted
},
});
// node is deleted
expect(queueStore.getRecordState(firstCommentID)).toBe('NONEXISTENT');
expect(queueStore.getRecordState(secondCommentID)).toBe('EXISTENT');
// corresponding edge is deleted for every range this node appears in
expect(queueStore.getRecordState(firstEdgeID)).toBe('NONEXISTENT');
expect(queueStore.getRecordState(secondEdgeID)).toBe('EXISTENT');
// the range no longer returns this edge
expect(queueStore.getRangeMetadata(
connectionID,
[{name: 'first', value: '2'}]
).requestedEdges.map(edge => edge.edgeID)).toEqual([
secondEdgeID,
]);
// connection metadata is merged into the queued store
expect(queueStore.getField(connectionID, 'count')).toBe(0);
// base records are not modified: node & edge exist, the edge is still
// in the range, and the connection metadata is unchanged
expect(store.getRecordState(firstCommentID)).toBe('EXISTENT');
expect(store.getRecordState(secondCommentID)).toBe('EXISTENT');
expect(store.getRecordState(firstEdgeID)).toBe('EXISTENT');
expect(store.getRecordState(secondEdgeID)).toBe('EXISTENT');
expect(store.getField(connectionID, 'count')).toBe(1);
expect(store.getRangeMetadata(
connectionID,
[{name: 'first', value: '2'}]
).requestedEdges.map(edge => edge.edgeID)).toEqual([
firstEdgeID,
secondEdgeID,
]);
});
it('non-optimistically deletes comments', () => {
// create the mutation and payload
var input = {
actor_id: 'actor:123',
[RelayConnectionInterface.CLIENT_MUTATION_ID]: '0',
deletedCommentId: firstCommentID,
};
var mutation = getNode(Relay.QL`
mutation {
commentDelete(input:$input) {
deletedCommentId,
feedback {
id,
topLevelComments {
count
}
}
}
}
`, {
input: JSON.stringify(input),
});
var configs = [{
type: RelayMutationType.NODE_DELETE,
deletedIDFieldName: 'deletedCommentId',
}];
var payload = {
[RelayConnectionInterface.CLIENT_MUTATION_ID]:
input[RelayConnectionInterface.CLIENT_MUTATION_ID],
deletedCommentId: firstCommentID,
feedback: {
id: feedbackID,
topLevelComments: {
count: 0,
},
},
};
// write to the base store
var changeTracker = new RelayChangeTracker();
var queryTracker = new RelayQueryTracker();
var writer = new RelayQueryWriter(
store,
queryTracker,
changeTracker
);
writeRelayUpdatePayload(
writer,
mutation,
payload,
{configs, isOptimisticUpdate: false}
);
expect(changeTracker.getChangeSet()).toEqual({
created: {},
updated: {
[connectionID]: true, // range item deleted & count changed
[firstEdgeID]: true, // edge deleted
[firstCommentID]: true, // node deleted
},
});
// node is deleted
expect(store.getRecordState(firstCommentID)).toBe('NONEXISTENT');
expect(store.getRecordState(secondCommentID)).toBe('EXISTENT');
// corresponding edge is deleted for every range this node appears in
expect(store.getRecordState(firstEdgeID)).toBe('NONEXISTENT');
expect(store.getRecordState(secondEdgeID)).toBe('EXISTENT');
// the range no longer returns this edge
expect(store.getRangeMetadata(
connectionID,
[{name: 'first', value: '1'}]
).requestedEdges.map(edge => edge.edgeID)).toEqual([
secondEdgeID,
]);
// connection metadata is merged into the queued store
expect(store.getField(connectionID, 'count')).toBe(0);
});
});
describe('plural node delete mutation', () => {
var store, queueStore, firstRequestID, secondRequestID, thirdRequestID;
beforeEach(() => {
var records = {};
var queuedRecords = {};
var rootCallMaps = {rootCallMap: {}};
firstRequestID = 'request1';
secondRequestID = 'request2';
thirdRequestID = 'request3';
store = new RelayRecordStore(
{records},
rootCallMaps,
{}
);
queueStore = new RelayRecordStore(
{records, queuedRecords},
rootCallMaps,
{},
undefined /* cacheManager */,
'mutationID'
);
var query = getNode(Relay.QL`
query {
nodes(ids:["request1","request2","request3"]) {
id
}
}
`);
var payload = {
request1: {
id: firstRequestID,
},
request2: {
id: secondRequestID,
},
request3: {
id: secondRequestID,
},
};
writePayload(store, query, payload);
});
it('optimistically deletes requests', () => {
// create the mutation and payload
var input = {
actor_id: 'actor:123',
[RelayConnectionInterface.CLIENT_MUTATION_ID]: '0',
deletedRequestIds: [firstRequestID, secondRequestID],
};
var mutation = getNode(Relay.QL`
mutation {
applicationRequestDeleteAll(input:$input) {
deletedRequestIds,
}
}
`, {
input: JSON.stringify(input),
});
var configs = [{
type: RelayMutationType.NODE_DELETE,
deletedIDFieldName: 'deletedRequestIds',
}];
var payload = {
[RelayConnectionInterface.CLIENT_MUTATION_ID]:
input[RelayConnectionInterface.CLIENT_MUTATION_ID],
deletedRequestIds: [firstRequestID, secondRequestID],
};
// write to the queued store
var changeTracker = new RelayChangeTracker();
var queryTracker = new RelayQueryTracker();
var writer = new RelayQueryWriter(
queueStore,
queryTracker,
changeTracker
);
writeRelayUpdatePayload(
writer,
mutation,
payload,
{configs, isOptimisticUpdate: true}
);
expect(changeTracker.getChangeSet()).toEqual({
created: {},
updated: {
[firstRequestID]: true, // node deleted
[secondRequestID]: true, // node deleted
},
});
// node is deleted
expect(queueStore.getRecordState(firstRequestID)).toBe('NONEXISTENT');
expect(queueStore.getRecordState(secondRequestID)).toBe('NONEXISTENT');
// third node is not deleted
expect(queueStore.getRecordState(thirdRequestID)).toBe('EXISTENT');
// base records are not modified: node & edge exist, the edge is still
// in the range, and the connection metadata is unchanged
expect(store.getRecordState(firstRequestID)).toBe('EXISTENT');
expect(store.getRecordState(secondRequestID)).toBe('EXISTENT');
expect(store.getRecordState(thirdRequestID)).toBe('EXISTENT');
});
it('non-optimistically deletes requests', () => {
// create the mutation and payload
var input = {
actor_id: 'actor:123',
[RelayConnectionInterface.CLIENT_MUTATION_ID]: '0',
deletedRequestIds: [firstRequestID, secondRequestID],
};
var mutation = getNode(Relay.QL`
mutation {
applicationRequestDeleteAll(input:$input) {
deletedRequestIds,
}
}
`, {
input: JSON.stringify(input),
});
var configs = [{
type: RelayMutationType.NODE_DELETE,
deletedIDFieldName: 'deletedRequestIds',
}];
var payload = {
[RelayConnectionInterface.CLIENT_MUTATION_ID]:
input[RelayConnectionInterface.CLIENT_MUTATION_ID],
deletedRequestIds: [firstRequestID, secondRequestID]
};
// write to the base store
var changeTracker = new RelayChangeTracker();
var queryTracker = new RelayQueryTracker();
var writer = new RelayQueryWriter(
store,
queryTracker,
changeTracker
);
writeRelayUpdatePayload(
writer,
mutation,
payload,
{configs, isOptimisticUpdate: false}
);
expect(changeTracker.getChangeSet()).toEqual({
created: {},
updated: {
[firstRequestID]: true, // node deleted
[secondRequestID]: true,
},
});
// node is deleted
expect(store.getRecordState(firstRequestID)).toBe('NONEXISTENT');
expect(store.getRecordState(secondRequestID)).toBe('NONEXISTENT');
// third node is not deleted
expect(store.getRecordState(thirdRequestID)).toBe('EXISTENT');
});
});
describe('range add mutations', () => {
var store, queueStore, feedbackID, connectionID, commentID, edgeID;
beforeEach(() => {
var records = {};
var queuedRecords = {};
var nodeConnectionMap = {};
var rootCallMaps = {rootCallMap: {}};
feedbackID = 'feedback123';
commentID = 'comment456';
store = new RelayRecordStore(
{records},
rootCallMaps,
nodeConnectionMap
);
queueStore = new RelayRecordStore(
{records, queuedRecords},
rootCallMaps,
nodeConnectionMap,
undefined /* cacheManager */,
'mutationID'
);
var query = getNode(Relay.QL`
query {
node(id:"feedback123") {
topLevelComments(first:"1") {
count,
edges {
node {
id
}
}
}
}
}
`);
var alias = generateRQLFieldAlias('topLevelComments.first(1)');
var payload = {
feedback123: {
id: feedbackID,
[alias]: {
count: 1,
edges: [
{
cursor: commentID + ':cursor',
node: {
id: commentID,
},
},
],
},
},
};
writePayload(store, query, payload);
connectionID = store.getLinkedRecordID(feedbackID, 'topLevelComments');
edgeID = generateClientEdgeID(connectionID, commentID);
});
it('warns if the created `edge` field is missing in the payload', () => {
var input = {
actor_id: 'actor:123',
[RelayConnectionInterface.CLIENT_MUTATION_ID]: '0',
feedback_id: feedbackID,
};
var mutation = getNode(Relay.QL`
mutation {
commentCreate(input:$input) {
feedback {
id,
topLevelComments {
count,
},
},
}
}
`, {input: JSON.stringify(input)}
);
var configs = [{
type: RelayMutationType.RANGE_ADD,
connectionName: 'topLevelComments',
edgeName: 'feedbackCommentEdge',
rangeBehaviors: {'': GraphQLMutatorConstants.PREPEND},
}];
var payload = {
[RelayConnectionInterface.CLIENT_MUTATION_ID]:
input[RelayConnectionInterface.CLIENT_MUTATION_ID],
feedback: {
id: feedbackID,
topLevelComments: {
count: 2,
},
},
};
// write to queued store
var changeTracker = new RelayChangeTracker();
var queryTracker = new RelayQueryTracker();
var writer = new RelayQueryWriter(
queueStore,
queryTracker,
changeTracker
);
writeRelayUpdatePayload(
writer,
mutation,
payload,
{configs, isOptimisticUpdate: true}
);
expect([
'writeRelayUpdatePayload(): Expected response payload to include the ' +
'newly created edge `%s` and its `node` field. Did you forget to ' +
'update the `RANGE_ADD` mutation config?',
'feedbackCommentEdge',
]).toBeWarnedNTimes(1);
// feedback is updated, but the edge is not added
expect(queueStore.getField(connectionID, 'count')).toBe(2);
expect(queueStore.getRangeMetadata(
connectionID,
[{name: 'first', value: '2'}]
).requestedEdges.map(edge => edge.edgeID)).toEqual([edgeID]);
});
it('optimistically prepends comments', () => {
// create the mutation and payload
var input = {
actor_id: 'actor:123',
[RelayConnectionInterface.CLIENT_MUTATION_ID]: '0',
feedback_id: feedbackID,
message: {
text: 'Hello!',
ranges: [],
}
};
var mutation = getNode(Relay.QL`
mutation {
commentCreate(input:$input) {
feedback {
id,
topLevelComments {
count,
},
},
feedbackCommentEdge {
cursor,
node {
id,
body {
text,
},
},
source {
id,
},
},
}
}
`, {
input: JSON.stringify(input),
});
var configs = [{
type: RelayMutationType.RANGE_ADD,
connectionName: 'topLevelComments',
edgeName: 'feedbackCommentEdge',
rangeBehaviors: {'': GraphQLMutatorConstants.PREPEND},
}];
var nextCursor = 'comment789:cursor';
var nextNodeID = 'comment789';
var bodyID = 'client:2';
var nextEdgeID = generateClientEdgeID(connectionID, nextNodeID);
var payload = {
[RelayConnectionInterface.CLIENT_MUTATION_ID]:
input[RelayConnectionInterface.CLIENT_MUTATION_ID],
feedback: {
id: feedbackID,
topLevelComments: {
count: 2,
},
},
feedbackCommentEdge: {
cursor: nextCursor,
node: {
id: nextNodeID,
body: {
text: input.message.text,
},
},
source: {
id: feedbackID,
},
},
};
// write to queued store
var changeTracker = new RelayChangeTracker();
var queryTracker = new RelayQueryTracker();
var writer = new RelayQueryWriter(
queueStore,
queryTracker,
changeTracker
);
writeRelayUpdatePayload(
writer,
mutation,
payload,
{configs, isOptimisticUpdate: true}
);
expect(changeTracker.getChangeSet()).toEqual({
created: {
[nextNodeID]: true, // node added
[nextEdgeID]: true, // edge added
[bodyID]: true, // `body` subfield
},
updated: {
[connectionID]: true, // range item added & count changed
},
});
// queued records are updated: edge/node added
expect(queueStore.getField(connectionID, 'count')).toBe(2);
expect(queueStore.getLinkedRecordID(nextEdgeID, 'source')).toBe(
feedbackID
);
expect(queueStore.getField(nextEdgeID, 'cursor')).toBe(nextCursor);
expect(queueStore.getLinkedRecordID(nextEdgeID, 'node')).toBe(nextNodeID);
expect(queueStore.getField(nextNodeID, 'id')).toBe(nextNodeID);
expect(queueStore.getLinkedRecordID(nextNodeID, 'body')).toBe(bodyID);
expect(queueStore.getField(bodyID, 'text')).toBe(input.message.text);
expect(queueStore.getRangeMetadata(
connectionID,
[{name: 'first', value: '2'}]
).requestedEdges.map(edge => edge.edgeID)).toEqual([
nextEdgeID,
edgeID
]);
// base records are not modified
expect(store.getField(connectionID, 'count')).toBe(1);
expect(store.getRecordState(nextEdgeID)).toBe('UNKNOWN');
expect(store.getRecordState(nextNodeID)).toBe('UNKNOWN');
expect(store.getRecordState(bodyID)).toBe('UNKNOWN');
expect(store.getRangeMetadata(
connectionID,
[{name: 'first', value: '2'}]
).requestedEdges.map(edge => edge.edgeID)).toEqual([
edgeID
]);
});
it('non-optimistically prepends comments', () => {
// create the mutation and payload
var input = {
actor_id: 'actor:123',
[RelayConnectionInterface.CLIENT_MUTATION_ID]: '0',
feedback_id: feedbackID,
message: {
text: 'Hello!',
ranges: [],
}
};
var mutation = getNode(Relay.QL`
mutation {
commentCreate(input:$input) {
feedback {
id,
topLevelComments {
count,
},
},
feedbackCommentEdge {
cursor,
node {
id,
body {
text,
},
},
source {
id,
},
},
}
}
`, {
input: JSON.stringify(input),
});
var configs = [{
type: RelayMutationType.RANGE_ADD,
connectionName: 'topLevelComments',
edgeName: 'feedbackCommentEdge',
rangeBehaviors: {'': GraphQLMutatorConstants.PREPEND},
}];
var nextCursor = 'comment789:cursor';
var nextNodeID = 'comment789';
var bodyID = 'client:2';
var nextEdgeID = generateClientEdgeID(connectionID, nextNodeID);
var payload = {
[RelayConnectionInterface.CLIENT_MUTATION_ID]:
input[RelayConnectionInterface.CLIENT_MUTATION_ID],
feedback: {
id: feedbackID,
topLevelComments: {
count: 2,
},
},
feedbackCommentEdge: {
cursor: nextCursor,
node: {
id: nextNodeID,
body: {
text: input.message.text,
},
},
source: {
id: feedbackID,
},
},
};
// write to base store
var changeTracker = new RelayChangeTracker();
var queryTracker = new RelayQueryTracker();
var writer = new RelayQueryWriter(
store,
queryTracker,
changeTracker
);
writeRelayUpdatePayload(
writer,
mutation,
payload,
{configs, isOptimisticUpdate: false}
);
expect(changeTracker.getChangeSet()).toEqual({
created: {
[nextNodeID]: true, // node added
[nextEdgeID]: true, // edge added
[bodyID]: true, // `body` subfield
},
updated: {
[connectionID]: true, // range item added & count changed
},
});
// base records are updated: edge/node added
expect(store.getField(connectionID, 'count')).toBe(2);
expect(store.getLinkedRecordID(nextEdgeID, 'source')).toBe(
feedbackID
);
expect(store.getField(nextEdgeID, 'cursor')).toBe(nextCursor);
expect(store.getLinkedRecordID(nextEdgeID, 'node')).toBe(nextNodeID);
expect(store.getField(nextNodeID, 'id')).toBe(nextNodeID);
expect(store.getLinkedRecordID(nextNodeID, 'body')).toBe(bodyID);
expect(store.getField(bodyID, 'text')).toBe(input.message.text);
expect(store.getRangeMetadata(
connectionID,
[{name: 'first', value: '2'}]
).requestedEdges.map(edge => edge.edgeID)).toEqual([
nextEdgeID,
edgeID
]);
});
});
});