max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
code/parsing/aggregation/counting.py | mdheller/SPARQA | 1 | 12757651 | <gh_stars>1-10
from parsing import parsing_utils
from parsing import parsing_args
def is_count_funct(serialization_list):
    """Return True if any element of *serialization_list* is a counting NER tag.

    :param list serialization_list: NER tags produced by question serialization.
    :return: True when at least one tag is listed in ``parsing_args.count_ner_tags``.
    :rtype: bool
    """
    # any() short-circuits on the first counting tag, exactly like the
    # original break-out-of-loop implementation.
    return any(element in parsing_args.count_ner_tags for element in serialization_list)
def count_serialization(question):
    """Tag the tokens of *question* that belong to a counting phrase.

    :param str question: Natural-language question, tokens separated by single spaces.
    :return: One NER tag per token ('O' for outside, 'count' for counting-phrase tokens).
    :rtype: list
    """
    question_tokens_list = question.split(' ')
    # Default: every token is tagged 'O' (outside any counting mention).
    serialization_list = ['O' for _ in question_tokens_list]
    for count_mention in parsing_args.count_phrases:
        if count_mention not in question:
            continue
        # NOTE(review): each matching phrase rebuilds the tag list from scratch,
        # so only the last matching count phrase's tags survive — confirm this
        # is intended when a question contains multiple counting phrases.
        serialization_list = parsing_utils.serialization_mention(question_tokens_list, count_mention.split(' '), ner_tag='count')
    return serialization_list
def counting_recognition_interface():
    """Placeholder for the counting-recognition entry point (not yet implemented)."""
    pass
def counting_binding():
    """Placeholder for counting binding (not yet implemented)."""
    pass
def grounded_to_answers():
    """Placeholder for converting a grounded graph to answers (not yet implemented)."""
    pass
def grounded_graph_to_sparql():
    """Placeholder for converting a grounded graph to SPARQL (not yet implemented)."""
    pass
def is_count_by_token_ner_tag(token):
    """Return True if *token* carries the 'count' NER tag.

    :param token: Token object exposing an ``ner_tag`` attribute.
    :rtype: bool
    """
    # Comparing directly against 'count' already excludes None, so the
    # original explicit "is not None" guard and result flag are unnecessary.
    return token.ner_tag == 'count'
| 2.765625 | 3 |
pyqt/svg/generate.py | smartnova/samples | 0 | 12757652 | <reponame>smartnova/samples
#!/usr/bin/env python

# Demo script: builds a small SVG ('sample.svg') exercising several svgwrite
# primitives, then displays it with the local viewer helper.
# svgwrite Documentation: https://svgwrite.readthedocs.io/en/master/
import svgwrite as svg

g = svg.Drawing('sample.svg', profile='tiny')

# Horizontal line in a blue-dominant percent-RGB stroke.
g.add(g.line((0, 10), (100, 10), stroke=svg.rgb(20, 20, 80, '%')))
# Red text label.
g.add(g.text('Testing', insert=(0, 23), fill='red'))
# Unfilled zig-zag polyline in dark green.
g.add(g.polyline([(0, 30), (10, 40), (20, 30), (30, 40), (40, 30), (30, 40)], stroke=svg.rgb(0, 30, 0, '%'), fill='none'))
# Filled red-dominant rectangle.
g.add(g.rect((0, 50), (100, 10), fill=svg.rgb(80, 20, 20, '%')))

# Wave built from eight quadratic Bezier segments starting at (0, 70).
p = g.path(d=('m', 0, 70), stroke='red', fill='none', stroke_width=0.02)
p.push('q', 5,-10, 10,0); p.push('q', 5,10, 10,0)
p.push('q', 5,-10, 10,0); p.push('q', 5,10, 10,0)
p.push('q', 5,-10, 10,0); p.push('q', 5,10, 10,0)
p.push('q', 5,-10, 10,0); p.push('q', 5,10, 10,0)
g.add(p)

g.save()

# 'viewer' is presumably a sibling module in this sample project that renders
# the file in a Qt window — TODO confirm; not part of svgwrite.
from viewer import SVG
SVG('sample.svg')
| 2.265625 | 2 |
zlmdb/_schema.py | isabella232/zlmdb | 23 | 12757653 | <filename>zlmdb/_schema.py
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from zlmdb._pmap import PersistentMap
class Slot(object):
    """
    A single LMDB database slot.

    By convention, the first two bytes of every key in an LMDB database
    encode the slot index as an uint16 in big-endian byte order; a Slot
    bundles that index with a human-readable name and the persistent map
    stored under it.
    """
    def __init__(self, slot, name, pmap):
        """
        :param slot: Slot index (interpreted as big-endian uint16 on disk).
        :param name: Human-readable name of the slot.
        :param pmap: Persistent map stored in this slot.
        """
        self.pmap = pmap
        self.name = name
        self.slot = slot
class Schema(object):
    """
    ZLMDB database schema definition.
    """

    SLOT_DATA_EMPTY = 0
    """
    Database slot is empty (unused, not necessarily zero'ed, but uninitialized).
    """

    SLOT_DATA_METADATA = 1
    """
    FIXME.
    """

    SLOT_DATA_TYPE = 2
    """
    FIXME.
    """

    SLOT_DATA_SEQUENCE = 3
    """
    FIXME.
    """

    SLOT_DATA_TABLE = 4
    """
    Database slot contains a persistent map, for example a map of type OID to Pickle.
    """

    SLOT_DATA_INDEX = 5
    """
    FIXME.
    """

    SLOT_DATA_REPLICATION = 6
    """
    FIXME.
    """

    SLOT_DATA_MATERIALIZATION = 7
    """
    FIXME.
    """

    def __init__(self):
        # Registered slots, looked up either by numeric index or by the
        # class name of the decorated persistent map.
        self._name_to_slot = {}
        self._index_to_slot = {}

    def slot(self, slot_index, marshal=None, unmarshal=None, build=None, cast=None, compress=False):
        """
        Decorator for use on classes derived from zlmdb.PersistentMap. The decorator define slots
        in a LMDB database schema based on persistent maps, and slot configuration.

        :param slot_index:
        :param marshal:
        :param unmarshal:
        :param build:
        :param cast:
        :param compress:
        :return:
        """
        def _register(pmap_obj):
            # The decorated object must itself be a PersistentMap instance,
            # and both the slot index and the map's class name must be unique.
            assert isinstance(pmap_obj, PersistentMap)
            pmap_name = pmap_obj.__class__.__name__
            assert slot_index not in self._index_to_slot
            assert pmap_name not in self._name_to_slot

            # Attach the slot configuration to the persistent map itself.
            pmap_obj._zlmdb_slot = slot_index
            pmap_obj._zlmdb_marshal = marshal
            pmap_obj._zlmdb_unmarshal = unmarshal
            pmap_obj._zlmdb_build = build
            pmap_obj._zlmdb_cast = cast
            pmap_obj._zlmdb_compress = compress

            record = Slot(slot_index, pmap_name, pmap_obj)
            self._index_to_slot[slot_index] = record
            self._name_to_slot[pmap_name] = record

            return pmap_obj

        return _register
| 1.453125 | 1 |
luminaire/model/window_density.py | Dima2022/luminaire | 525 | 12757654 | from luminaire.model.base_model import BaseModel, BaseModelHyperParams
from luminaire.exploration.data_exploration import DataExploration
class WindowDensityHyperParams(BaseModelHyperParams):
    """
    Hyperparameter class for Luminaire Window density model.

    :param str freq: The frequency of the time-series. Luminaire supports default configuration for 'S', 'T', '15T',
        'H', 'D'. Any other frequency type should be specified as 'custom' and configuration should be set manually.
    :param float max_missing_train_prop: Maximum proportion of missing observation allowed in the training data.
    :param bool is_log_transformed: A flag to specify whether to take a log transform of the input data.
        If the data contain negatives, is_log_transformed is ignored even though it is set to True.
    :param str baseline_type: A string flag to specify whether to take set a baseline as the previous sub-window from
        the training data for scoring or to aggregate the overall window as a baseline. Possible values:

        - "last_window"
        - "aggregated"
    :param str detection_method: A string that select between two window testing method. Possible values:

        - "kldiv" (KL-divergence). This is recommended to be set for high frequency time series such as 'S', 'T' etc.
        - "sign_test" (Wilcoxon sign rank test). This is recommended to be set for low frequency time series such as 'H', 'D' etc.
    :param int min_window_length: Minimum size of the scoring window / a stable training sub-window length.

        .. Note :: This is not the minimum size of the whole training window which is the combination of stable sub-windows.
    :param int max_window_length: Maximum size of the scoring window / a stable training sub-window length.

        .. Note :: This is not the maximum size of the whole training window which is the combination of stable sub-windows.
    :param int window_length: Size of the scoring window / a stable training sub-window length.

        .. Note :: This is not the size of the whole training window which is the combination of stable sub-windows.
    :param str detrend_method: A string that select between two stationarizing method. Possible values:

        - "modeling" (model based; the default)
        - "diff" (differencing based).
    """
    def __init__(self,
                 freq=None,
                 max_missing_train_prop=0.1,
                 is_log_transformed=False,
                 baseline_type="aggregated",
                 detection_method=None,
                 min_window_length=None,
                 max_window_length=None,
                 window_length=None,
                 detrend_method='modeling'
                 ):
        super(WindowDensityHyperParams, self).__init__(
            model_name="WindowDensityModel",
            freq=freq,
            max_missing_train_prop=max_missing_train_prop,
            is_log_transformed=is_log_transformed,
            baseline_type=baseline_type,
            detection_method=detection_method,
            min_window_length=min_window_length,
            max_window_length=max_window_length,
            window_length=window_length,
            detrend_method=detrend_method
        )
class WindowDensityModel(BaseModel):
"""
This model detects anomalous windows using KL divergence (for high frequency data) and Wilcoxon sign rank test
(for low frequency data). This default monitoring frequency is set to pandas time frequency type 'T'.
:param dict hyper_params: Hyper parameters for Luminaire window density model.
See :class:`luminaire.model.window_density.WindowDensityHyperParams` for detailed information.
:return: Anomaly probability for the execution window and other related model outputs
:rtype: list[dict]
"""
__version__ = "0.1"
    def __init__(self,
                 hyper_params: dict,
                 **kwargs):
        """
        :param dict hyper_params: Hyper parameters for the window density model,
            typically ``WindowDensityHyperParams(...).params``.
        :param kwargs: Additional model metadata forwarded to the base model.
        """
        # Specifying the minimum and maximum number of training windows
        self.min_num_train_windows = 5
        self.max_num_train_windows = 10000
        self.hyper_params = hyper_params
        # Significance level shared by all statistical tests during scoring.
        self.sig_level = 0.001

        super(WindowDensityModel, self).__init__(**hyper_params, **kwargs)
def _volume_shift_detection(self, mean_list=None, sd_list=None, probability_threshold=0.5):
"""
This function detects any significant shift in the training data volume using a Bayesian change point detection
technique.
:param list mean_list: The list of means from each training sub-window.
:param list sd_list: The list of standard deviations from each training sub-window.
:param float probability_threshold: Threshold for the probability value to be flagged as a change point.
:return: Indices with significant vdata volume shift.
:rtype: int
"""
import numpy as np
from bayesian_changepoint_detection import offline_changepoint_detection as offcd
from functools import partial
# Volume shift detection over the means of the training window
q, p, pcp = offcd.offline_changepoint_detection(
data=np.array(mean_list),
prior_func=partial(offcd.const_prior, l=(len(mean_list) + 1)),
observation_log_likelihood_function=offcd.gaussian_obs_log_likelihood,
truncate=-10)
mask_mean = np.append(0, np.exp(pcp).sum(0)) > probability_threshold
# Volume shift detection over the standard deviations of the training window
change_points = np.array(mask_mean).nonzero()
last_mean_cp = change_points[0][-1] if len(change_points[0]) > 0 else []
q, p, pcp = offcd.offline_changepoint_detection(
data=np.array(sd_list),
prior_func=partial(offcd.const_prior, l=(len(sd_list) + 1)),
observation_log_likelihood_function=offcd.gaussian_obs_log_likelihood,
truncate=-10)
mask_sd = np.append(0, np.exp(pcp).sum(0)) > probability_threshold
change_points = np.array(mask_sd).nonzero()
last_sd_cp = change_points[0][-1] if len(change_points[0]) > 0 else []
# Change point is the maximum obtained from mean list and the standard deviation list
cdate = max(last_mean_cp, last_sd_cp)
return cdate
def _distance_function(self, data=None, called_for=None, baseline=None):
"""
This function finds the distance of the given data from the baseline using KL divergence.
:param list data: The list containing the scoring window (for scoring) / training sub-window (for training).
:param str distance_method: The method to be used to calculate the distance between two datasets.
:param str called_for: A flag to specify whether this function is called for training or scoring.
:param list baseline: A list containing the base line to be compared with the given data.
:return: KL divergence between two time windows.
:rtype: float
"""
import numpy as np
import scipy.stats as stats
float_min = 1e-50
float_max = 1e50
# If called for training, Kl divergence is performed over each pair of consecutive windows to create
# the past anomaly scores
if called_for == "training":
distance = []
for i in range(0, len(data) - 1):
q = stats.kde.gaussian_kde(data[i])
p = stats.kde.gaussian_kde(data[i + 1])
ts_min = min(np.min(data[i]), np.min(data[i + 1]))
ts_max = max(np.max(data[i]), np.max(data[i + 1]))
density_domain = np.linspace(ts_min, ts_max, 1000)
q = q(density_domain)
p = p(density_domain)
# approximating the zero probability regions to avoid divide by zero issue in KL divergence
q[q == 0] = min(np.array(q)[np.array(q) > 0])
p[p == 0] = min(np.array(p)[np.array(p) > 0])
q = np.clip(q, float_min, float_max)
p = np.clip(p, float_min, float_max)
distance.append(stats.entropy(pk=p, qk=q))
# If called for scoring, Kl divergence is performed between the scoring window and the baseline
elif called_for == "scoring":
q = stats.kde.gaussian_kde(baseline)
p = stats.kde.gaussian_kde(data)
ts_min = min(np.min(baseline), np.min(data))
ts_max = max(np.max(baseline), np.max(data))
density_domain = np.linspace(ts_min, ts_max, 1000)
q = q(density_domain)
p = p(density_domain)
q[q == 0] = min(np.array(q)[np.array(q) > 0])
p[p == 0] = min(np.array(p)[np.array(p) > 0])
q = np.clip(q, float_min, float_max)
p = np.clip(p, float_min, float_max)
distance = stats.entropy(pk=p, qk=q)
return distance
def _training_data_truncation(self, sliced_training_data=None):
"""
This function performs the truncation of the training data using the _volume_shift_detection function.
:param list sliced_training_data: The list containing the training data.
:return: Sliced training sample based on the most recent change point
:rtype: list
"""
import numpy as np
# Change point detection is performed over the means and standard deviations of the sub windows
window_means = []
window_sds = []
for ts in sliced_training_data:
window_means.append(np.mean(ts))
window_sds.append(np.std(ts))
change_point = self._volume_shift_detection(mean_list=window_means, sd_list=window_sds)
# Truncating the training data based on the last change point
if change_point:
sliced_training_data_truncated = sliced_training_data[change_point:]
return sliced_training_data_truncated
else:
return sliced_training_data
def _call_training(self, df=None, window_length=None, imputed_metric=None, detrend_method=None,
detection_method=None, freq=None, **kwargs):
"""
This function generates the baseline and training metrics to be used for scoring.
:param pandas.DataFrame df: Input training data frame.
:param int window_length: The length of a training sub-window.
:param str imputed_metric: Column storing the time series values.
:param str detrend_method: Detrend method "modeling" or "diff" for nonstationarity.
:param str detection_method: Detection method "kldiv" or "sign_test".
:param str freq: Data frequency.
:return: Returns past anomaly scores based on training data, baseline and other related metrics.
:rtype: tuple(list, float, float, float, int, list, luminaire.model, float, dict, list)
"""
import pandas as pd
past_anomaly_scores = dict()
gamma_alpha = dict()
gama_loc = dict()
gamma_beta = dict()
detrend_order = dict()
baseline = dict()
agg_data_model = dict()
agg_data = dict()
past_model = kwargs.get('past_model')
training_start = df.first_valid_index()
training_end = df.last_valid_index()
current_training_end = training_end
while (training_end - current_training_end) < pd.Timedelta('1D'):
df_current = df[df.index <= current_training_end]
past_anomaly_scores_current, gamma_alpha_current, gama_loc_current, gamma_beta_current, \
detrend_order_current, baseline_current, agg_data_model_current, \
agg_data_current = self._anomalous_region_detection(input_df=df_current,
window_length=window_length,
value_column=imputed_metric,
called_for="training",
detrend_method=detrend_method,
past_model=past_model,
detection_method=detection_method)
past_anomaly_scores.update({str(current_training_end.time().strftime('%H:%M:%S')): past_anomaly_scores_current})
gamma_alpha.update({str(current_training_end.time().strftime('%H:%M:%S')): float(gamma_alpha_current) if gamma_alpha_current else None})
gama_loc.update({str(current_training_end.time().strftime('%H:%M:%S')): float(gama_loc_current) if gama_loc_current else None})
gamma_beta.update({str(current_training_end.time().strftime('%H:%M:%S')): float(gamma_beta_current) if gamma_beta_current else None})
detrend_order.update({str(current_training_end.time().strftime('%H:%M:%S')): detrend_order_current})
baseline.update({str(current_training_end.time().strftime('%H:%M:%S')): baseline_current})
agg_data_model.update({str(current_training_end.time().strftime('%H:%M:%S')): agg_data_model_current})
agg_data.update({str(current_training_end.time().strftime('%H:%M:%S')): agg_data_current})
if isinstance(freq, str):
freq = pd.Timedelta('1' + freq)
current_training_end = current_training_end - min(pd.Timedelta('30T'), freq * 10)
return past_anomaly_scores, gamma_alpha, gama_loc, gamma_beta, \
detrend_order, baseline, agg_data_model, agg_data, training_start, training_end
    def _get_model(self, input_df=None, window_length=None, value_column=None, detrend_method=None, baseline_type=None,
                   detection_method=None, past_model=None):
        """
        This function runs the training process given the input parameters.

        :param pandas.DataFrame input_df: Input data containing the training and the scoring data.
        :param int window_length: The length of a training sub-window / scoring window.
        :param str value_column: Column containing the values.
        :param str detrend_method: Selects between "modeling" or "diff" detrend method.
        :param str baseline_type: Selects between "aggregated" or "last_window" baseline.
        :param str detection_method: Selects between "kldiv" or "sign_test" distance method.
        :param luminaire.model.window_density.WindowDensityModel past_model: luminaire.model to append model metadata from past
        :return: Returns past anomaly scores based on training data, baseline and other related metrics.
        :rtype: tuple(list, float, float, float, int, list, luminaire.model, float)
        """
        import numpy as np
        import pandas as pd
        from itertools import chain
        import scipy.stats as st

        model_history_truncation_prop = 0.25  # This is the proportion of history to truncate from both sides
        # everytime we store the past anomaly scores

        de_obj = DataExploration()
        # Partition the input series into fixed-length sub-windows.
        sliced_training_data, agg_datetime = de_obj._partition(input_df, window_length, value_column)

        # performing the stationarity test
        sliced_training_data_cleaned, detrend_order, agg_data_model, agg_data = de_obj._detrender(
            training_data_sliced=sliced_training_data,
            significance_level=0.05,
            detrend_method=detrend_method,
            agg_datetime=agg_datetime,
            past_model=past_model)

        # Obtain the past anomaly scores and the anomaly means and standard deviation if the detection method
        # is KL divergence
        if detection_method == "kldiv":
            past_anomaly_scores = np.array(self._distance_function(data=sliced_training_data_cleaned,
                                                                   called_for="training"))
            if past_model:
                # Find the stored snapshot timestamp closest to the current
                # training end, searching backwards and wrapping around midnight.
                model_timestamps = list(past_model._params['PastAnomalyScores'].keys())
                training_end = input_df.index[-1]
                current_min_timedelta = pd.Timedelta('10D')
                for timestamp in model_timestamps:
                    current_datetime = pd.Timestamp(str(training_end.date()) + ' ' + timestamp)
                    temp_timedelta = training_end - current_datetime
                    temp_timedelta = pd.Timedelta('1D') + temp_timedelta if temp_timedelta < pd.Timedelta(
                        0) else temp_timedelta
                    if temp_timedelta < current_min_timedelta:
                        opt_timestamp = timestamp
                        current_min_timedelta = temp_timedelta
                # NOTE(review): if no stored timestamp falls within the initial
                # 10-day bound, 'opt_timestamp' is never assigned and the next
                # statement raises UnboundLocalError — confirm whether that can
                # happen for the models produced upstream.
                past_anomaly_scores = np.concatenate([past_model._params['PastAnomalyScores'][opt_timestamp][
                                                      int(len(past_anomaly_scores) * model_history_truncation_prop):
                                                      -int(len(past_anomaly_scores) * model_history_truncation_prop)]
                                                         , past_anomaly_scores])

            if len(past_anomaly_scores) < 100:
                # Too few scores for a stable gamma fit: bootstrap 10 resamples
                # of size 100 and average the fitted parameters.
                alpha = []
                loc = []
                beta = []
                for i in range(10):
                    boot_scores = np.random.choice(past_anomaly_scores.tolist(), size=100, replace=True)
                    alpha_i, loc_i, beta_i = st.gamma.fit(boot_scores)
                    alpha.append(alpha_i)
                    loc.append(loc_i)
                    beta.append(beta_i)
                gamma_alpha = np.mean(alpha)
                gamma_loc = np.mean(loc)
                gamma_beta = np.mean(beta)
            else:
                gamma_alpha, gamma_loc, gamma_beta = st.gamma.fit(past_anomaly_scores)
        else:
            past_anomaly_scores, gamma_alpha, gamma_loc, gamma_beta = None, None, None, None

        # If aggregated baseline type is specified, we take the whole training window as a baseline, else we
        # take the last training sub window from the sliced training data
        if baseline_type == "aggregated":
            sliced_training_data_cleaned = self._training_data_truncation(
                sliced_training_data=sliced_training_data_cleaned)
            if detection_method == "kldiv":
                baseline = list(chain.from_iterable(sliced_training_data_cleaned))
            elif detection_method == "sign_test":
                baseline = sliced_training_data_cleaned
        elif baseline_type == "last_window":
            baseline = sliced_training_data_cleaned[-1]

        return past_anomaly_scores, gamma_alpha, gamma_loc, gamma_beta, detrend_order, \
            baseline, agg_data_model, agg_data
    def train(self, data, **kwargs):
        """
        Input time series for training.

        :param pandas.DataFrame data: Input time series.
        :return: Trained model with the training timestamp and a success flag
        :rtype: tuple(bool, str, python model object)

        >>> data
                               raw interpolated
        index
        2017-10-02 00:00:00 118870       118870
        2017-10-02 01:00:00 121914       121914
        2017-10-02 02:00:00 116097       116097
        2017-10-02 03:00:00  94511        94511
        2017-10-02 04:00:00  68330        68330
        ...                    ...          ...
        2018-10-10 19:00:00 219908       219908
        2018-10-10 20:00:00 219149       219149
        2018-10-10 21:00:00 207232       207232
        2018-10-10 22:00:00 198741       198741
        2018-10-10 23:00:00 213751       213751

        >>> hyper_params = WindowDensityHyperParams(freq='H').params
        >>> wdm_obj = WindowDensityModel(hyper_params=hyper_params)
        >>> success, model = wdm_obj.train(data)
        >>> success, model
        (True, "2018-10-10 23:00:00", <luminaire.model.window_density.WindowDensityModel object at 0x7fd7c5a34e80>)
        """
        import numpy as np
        import pandas as pd

        # Non-preset frequencies are converted to a Timedelta so they can be
        # compared numerically when choosing the detection method below.
        freq = pd.Timedelta(self._params['freq']) if self._params['freq'] not in ['S', 'T', '15T', 'H', 'D'] \
            else self._params['freq']

        if freq in ['S', 'T', '15T', 'H', 'D']:
            window_length = self._params['window_length']
        else:
            # Custom frequency: all window-length bounds must be set explicitly.
            min_window_length = self._params['min_window_length']
            max_window_length = self._params['max_window_length']
            window_length = self._params['window_length']
            if not min_window_length or not max_window_length or not window_length:
                raise ValueError(
                    'Training window length with min and max should be specified in case frequency not in the '
                    'specified list')

        is_log_transformed = self._params['is_log_transformed']
        detrend_method = self._params['detrend_method']
        target_metric = 'raw'
        imputed_metric = 'interpolated'

        if not self._params['detection_method']:
            # Default detection method by frequency: KL divergence for high
            # frequency data, sign test for low frequency data.
            if freq in ['S', 'T', '15T']:
                detection_method = 'kldiv'
            elif freq in ['H', 'D']:
                detection_method = 'sign_test'
            else:
                detection_method = 'sign_test' if freq > np.timedelta64(30, 'm') else 'kldiv'
        else:
            detection_method = self._params['detection_method']

        if len(data) == 0:
            # NOTE(review): this early return yields a 2-tuple while the
            # success path returns a 3-tuple — confirm callers handle both.
            model = {'ErrorMessage': 'DataFrame length is 0'}
            success = False
            return success, WindowDensityModel(**model)

        # Shift the interpolated value by +1 and get the log. This handles values with 0.
        # NOTE(review): this mutates the caller's data frame in place, and the
        # transform is silently skipped when the series contains negatives.
        if is_log_transformed:
            neg_flag = True if not data[data[target_metric] < 0].empty else False
            data[imputed_metric] = data[imputed_metric] if neg_flag else np.log(data[imputed_metric] + 1)

        past_anomaly_scores, anomaly_scores_gamma_alpha, anomaly_scores_gamma_loc, anomaly_scores_gamma_beta, \
        detrend_order, baseline, agg_data_model, agg_data, \
        training_start, training_end = self._call_training(df=data, window_length=window_length,
                                                           imputed_metric=imputed_metric,
                                                           detrend_method=detrend_method,
                                                           detection_method=detection_method,
                                                           freq=freq, **kwargs)

        success = True
        self.hyper_params['is_log_transformed'] = is_log_transformed
        self.hyper_params['detection_method'] = detection_method
        model = {'TrainingStartDate': str(training_start),
                 'PastAnomalyScores': past_anomaly_scores,
                 'AnomalyScoresGammaAlpha': anomaly_scores_gamma_alpha,
                 'AnomalyScoresGammaLoc': anomaly_scores_gamma_loc,
                 'AnomalyScoresGammaBeta': anomaly_scores_gamma_beta,
                 'NonStationarityOrder': detrend_order,
                 'Baseline': baseline,
                 'AggregatedDataModel': agg_data_model,
                 'AggregatedData': agg_data
                 }

        return success, str(training_end), WindowDensityModel(hyper_params=self.hyper_params, **model)
    def _call_scoring(self, df=None, target_metric=None, anomaly_scores_gamma_alpha=None, anomaly_scores_gamma_loc=None,
                      anomaly_scores_gamma_beta=None, baseline=None, detrend_order=None, detrend_method=None,
                      agg_data_model=None, detection_method=None, attributes=None, agg_data=None):
        """
        This function generates the anomaly flag and probability for the scoring window.

        :param pandas.DataFrame df: Input training data frame.
        :param str target_metric: Column storing the time series values.
        :param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter.
        :param float anomaly_scores_gamma_loc: Gamma fit location parameter.
        :param float anomaly_scores_gamma_beta: Gamma fit beta parameter.
        :param list baseline: A list storing a baseline window used to score the scoring window.
        :param int detrend_order: The order of detrending based on MA or differencing method.
        :param str detrend_method: Selects between "modeling" or "diff" detrend method.
        :param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data.
        :param str detection_method: Selects between "kldiv" or "sign_test" distance method.
        :param attributes: Model attributes (passed through unchanged to the result).
        :param agg_data: Aggregated Data per day.
        :return: Returns the anomaly flag with the corresponding anomaly probability.
        :rtype: tuple(bool, float, dict)
        """
        # Delegate to the shared training/scoring dispatcher in scoring mode.
        is_anomaly, prob_of_anomaly = self._anomalous_region_detection(input_df=df, value_column=target_metric,
                                                                       called_for="scoring",
                                                                       anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha,
                                                                       anomaly_scores_gamma_loc=anomaly_scores_gamma_loc,
                                                                       anomaly_scores_gamma_beta=anomaly_scores_gamma_beta,
                                                                       baseline=baseline,
                                                                       detrend_order=detrend_order,
                                                                       detrend_method=detrend_method,
                                                                       agg_data_model=agg_data_model,
                                                                       detection_method=detection_method,
                                                                       agg_data=agg_data)

        return is_anomaly, prob_of_anomaly, attributes
def _get_result(self, input_df=None, detrend_order=None, agg_data_model=None, value_column=None,
detrend_method=None, baseline_type=None, detection_method=None, baseline=None,
anomaly_scores_gamma_alpha=None, anomaly_scores_gamma_loc=None, anomaly_scores_gamma_beta=None,
agg_data=None):
"""
The function scores the scoring window for anomalies based on the training metrics and the baseline
:param pandas.DataFrame input_df: Input data containing the training and the scoring data.
:param int detrend_order: The non-negative order of detrending based on Modeling or differencing method. When
the detrend_order > 0, corresponding detrending need to be performed using the method specified in the model
config.
:param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data.
:param str value_column: Column containing the values.
:param str detrend_method: Selects between "modeling" or "diff" detrend method.
:param str baseline_type: Selects between "aggregated" or "last_window" baseline.
:param str detection_method: Selects between "kldiv" or "sign_test" distance method.
:param list baseline: A list storing a baseline window used to score the scoring window.
:param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter.
:param float anomaly_scores_gamma_loc: Gamma fit location parameter.
:param float anomaly_scores_gamma_beta: Gamma fit beta parameter.
:param agg_data: Aggregated Data per day.
:return: Returns the anomaly flag with the corresponding anomaly probability.
:rtype: tuple(bool, float)
"""
import numpy as np
import pandas as pd
import copy
import scipy.stats as st
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.covariance import EmpiricalCovariance, MinCovDet
import collections
import operator
is_anomaly = False
execution_data = input_df[value_column]
adjusted_execution_data = []
prob_of_anomaly = []
len_req_agg_data_model = 42 # Setting a hard threshold to have predictions from aggregated data
# for stationarity adjustment
if detrend_method == 'diff':
# Obtain the execution data and perform the necessary differencing
execution_data = list(execution_data)
adjusted_execution_data = np.diff(execution_data, detrend_order).tolist() if detrend_order > 0 \
else execution_data
elif detrend_method == 'modeling':
idx = input_df.index.normalize()
dates_freq_dist = dict(collections.Counter(idx))
scoring_datetime = str(max(dates_freq_dist.items(), key=operator.itemgetter(1))[0])
execution_data_avg = np.mean(execution_data)
# If detrending is needed, we scale the scoring data accordingly using the agg_dat_model forecast
if detrend_order > 0:
snapshot_len_max = min(len(agg_data), len_req_agg_data_model)
agg_data_trunc = np.array(agg_data)[:, 1][-snapshot_len_max:]
data_adjust_forecast = []
try:
# Setting the data adjustment window of the original data using the predictions and the CILower and
# CIUpper keeping the prediction uncertainty of the agg_model in mind
if agg_data_model and len(agg_data) > len_req_agg_data_model:
score = agg_data_model.score(execution_data_avg, scoring_datetime)
data_adjust_forecast.append(score['Prediction'])
data_adjust_forecast.append(score['CILower'])
data_adjust_forecast.append(score['CIUpper'])
else:
data_adjust_forecast.append(np.median(agg_data_trunc))
data_adjust_forecast.append(np.percentile(agg_data_trunc, 5)) # setting a 2-sigma limit
data_adjust_forecast.append(np.percentile(agg_data_trunc, 95)) # setting a 2-sigma limit
except:
# If the scoring for the agg_data_model fails for some reason, we use the latest agg_data for the
# detrending adjustment
data_adjust_forecast.append(np.median(agg_data_trunc))
data_adjust_forecast.append(np.percentile(agg_data_trunc, 5)) # setting a 2-sigma limit
data_adjust_forecast.append(np.percentile(agg_data_trunc, 95)) # setting a 2-sigma limit
for i in range(3):
if data_adjust_forecast[i] != 0:
adjusted_execution_data.append((execution_data / data_adjust_forecast[i]).tolist())
else:
adjusted_execution_data = list(execution_data)
# Kl divergence based anomaly detection
if detection_method == "kldiv":
if detrend_order > 0:
prob_of_anomaly = []
for i in range(3):
current_anomaly_score = self._distance_function(data=adjusted_execution_data[i],
called_for="scoring", baseline=baseline)
prob_of_anomaly.append(st.gamma.cdf(current_anomaly_score, anomaly_scores_gamma_alpha,
anomaly_scores_gamma_loc, anomaly_scores_gamma_beta))
prob_of_anomaly = np.min(prob_of_anomaly)
else:
current_anomaly_score = self._distance_function(data=adjusted_execution_data,
called_for="scoring", baseline=baseline)
prob_of_anomaly = st.gamma.cdf(current_anomaly_score, anomaly_scores_gamma_alpha,
anomaly_scores_gamma_loc, anomaly_scores_gamma_beta)
if 1 - prob_of_anomaly < self.sig_level:
is_anomaly = True
# Sign test based anomaly detection
elif detection_method == "sign_test":
# If last window is the baseline, we perform the Wilcoxon sign rank test for means and levene
# test for variance to detect anomalies
if baseline_type == "last_window":
test_stat_wilcoxon, pvalue_wilcoxon = st.wilcoxon(execution_data, baseline)
test_stat_levene, pvalue_levene = st.levene(execution_data, baseline)
if pvalue_wilcoxon < self.sig_level or pvalue_levene < self.sig_level:
is_anomaly = True
prob_of_anomaly = 1 - min(pvalue_wilcoxon, pvalue_levene)
# If aggregated is the baseline, we perform the Wilcoxon sign rank test for means and gamma distribution
# based test for the past standard deviations to detect anomalies
elif baseline_type == "aggregated":
baseline_sds = np.array(baseline).std(1).tolist()
if detrend_order == 0:
# crearing a 2d list to make it easy to loop through in the following for loop
adjusted_execution_data = [adjusted_execution_data]
for current_adjusted_data in adjusted_execution_data:
baseline_execution_data = copy.copy(baseline)
baseline_execution_data.append(current_adjusted_data)
pca = PCA()
scores = pca.fit_transform(StandardScaler().fit_transform(baseline_execution_data))
robust_cov = MinCovDet().fit(scores[:, :3])
mahalanobis_distance = robust_cov.mahalanobis(scores[:, :3]) # getting the top 3 dimensions
pvalue_mahalanobis = 1 - st.chi2.cdf(mahalanobis_distance[-1],
np.array(baseline_execution_data).shape[1])
gamma_alpha, gamma_loc, gamma_beta = st.gamma.fit(baseline_sds)
pvalue_gamma = 1 - st.gamma.cdf(np.std(current_adjusted_data), gamma_alpha, gamma_loc, gamma_beta)
if pvalue_mahalanobis < self.sig_level or pvalue_gamma < self.sig_level:
is_anomaly = True
prob_of_anomaly.append(1 - min(pvalue_mahalanobis, pvalue_gamma))
prob_of_anomaly = np.min(prob_of_anomaly)
return is_anomaly, prob_of_anomaly
    def score(self, data, **kwargs):
        """
        Score an input time series for anomalies against the trained
        window-density model.

        The scoring window is matched to the trained window (keyed by a
        time-of-day string) whose start time most closely precedes the first
        timestamp of ``data``, wrapping across midnight.

        :param pandas.DataFrame data: Input time series to score; expected to
            carry 'raw' and 'interpolated' columns indexed by timestamp.
        :return: Tuple of (scoring summary dict, data rows as a list).
        :rtype: tuple

        >>> model.score(data)
        ({'Success': True, 'ConfLevel': 99.9, 'IsAnomaly': False, 'AnomalyProbability': 0.6963188902776808}, [...])
        """
        import numpy as np
        import pandas as pd
        is_log_transformed = self._params['is_log_transformed']
        detrend_method = self._params['detrend_method']
        target_metric = 'raw'
        imputed_metric = 'interpolated'
        detection_method = self._params['detection_method']
        # We want to make sure the time series does not contain any negatives in case of log transformation
        if is_log_transformed:
            neg_flag = True if not data[data[target_metric] < 0].empty else False
            # Only apply log1p when the series is entirely non-negative;
            # otherwise score the raw interpolated values.
            data[imputed_metric] = data[imputed_metric] if neg_flag else np.log(data[imputed_metric] + 1)
        # Find the trained window whose (time-of-day) start is the closest one
        # at or before the scoring window's start; differences that come out
        # negative are wrapped forward by one day.
        model_timestamps = list(self._params['AnomalyScoresGammaAlpha'].keys())
        scoring_start = data.index[0]
        current_min_timedelta = pd.Timedelta('10D')
        for timestamp in model_timestamps:
            current_datetime = pd.Timestamp(str(scoring_start.date()) + ' ' + timestamp)
            temp_timedelta = scoring_start - current_datetime
            temp_timedelta = pd.Timedelta('1D') + temp_timedelta if temp_timedelta < pd.Timedelta(0) else temp_timedelta
            if temp_timedelta < current_min_timedelta:
                opt_timestamp = timestamp
                current_min_timedelta = temp_timedelta
        # Pull the stored parameters for the selected training window.
        anomaly_scores_gamma_alpha = self._params['AnomalyScoresGammaAlpha'][opt_timestamp]
        anomaly_scores_gamma_loc = self._params['AnomalyScoresGammaLoc'][opt_timestamp]
        anomaly_scores_gamma_beta = self._params['AnomalyScoresGammaBeta'][opt_timestamp]
        baseline = self._params['Baseline'][opt_timestamp]
        detrend_order = self._params['NonStationarityOrder'][opt_timestamp]
        agg_data_model = self._params['AggregatedDataModel'][opt_timestamp]
        agg_data = self._params['AggregatedData'][opt_timestamp]
        # NOTE(review): assumes _call_scoring returns a 3-tuple; the third
        # element (attributes) is unused here.
        is_anomaly, prob_of_anomaly, attributes = self._call_scoring(df=data,
                                                                     target_metric=target_metric,
                                                                     anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha,
                                                                     anomaly_scores_gamma_loc=anomaly_scores_gamma_loc,
                                                                     anomaly_scores_gamma_beta=anomaly_scores_gamma_beta,
                                                                     baseline=baseline,
                                                                     detrend_order=detrend_order,
                                                                     detrend_method=detrend_method,
                                                                     agg_data_model=agg_data_model,
                                                                     detection_method=detection_method,
                                                                     agg_data=agg_data)
        result = {'Success': True,
                  'ConfLevel': float(1.0 - self.sig_level) * 100,
                  'IsAnomaly': is_anomaly,
                  'AnomalyProbability': float(prob_of_anomaly),
                  }
        return result, data.reset_index().values.tolist()
def _anomalous_region_detection(self, input_df=None, window_length=None,
value_column=None, called_for=None,
anomaly_scores_gamma_alpha=None, anomaly_scores_gamma_loc=None,
anomaly_scores_gamma_beta=None, detrend_order=None, baseline=None,
detrend_method=None, agg_data_model=None, past_model=None, detection_method=None,
agg_data=None):
"""
This function detects anomaly given a training and a scoring window.
:param pandas.DataFrame input_df: Input data containing the training and the scoring data.
:param int window_length: The length of a training sub-window / scoring window.
:param str value_column: A string identifying the value column from the input dataframe
:param str called_for: A flag to specify whether this function is called for training or scoring.
:param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter.
:param float anomaly_scores_gamma_loc: Gamma fit location parameter.
:param float anomaly_scores_gamma_beta: Gamma fit beta parameter.
:param int detrend_order: Number of differencing for the scoring data. Only required if called for scoring.
:param list baseline: The baseline for the scoring. only required if called for scoring.
:param str detrend_method: Selects between "modeling" or "diff" detrend method.
:param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data.
:param luminaire.model.window_density.WindowDensityModel past_model: Past stored window density model.
:param str detection_method: Selects between "kldiv" or "sign_test" distance method.
:param agg_data: Aggregated Data per day.
:return: Anomaly flag with the corresponding probability of anomaly.
:rtype: tuple(bool, float)
"""
baseline_type = self._params['baseline_type']
input_df.fillna(0, inplace=True)
# The function can be called for either training or scoring
if called_for == "training":
return self._get_model(input_df=input_df,
window_length=window_length,
value_column=value_column,
detrend_method=detrend_method,
baseline_type=baseline_type,
detection_method=detection_method,
past_model=past_model)
elif called_for == "scoring":
return self._get_result(input_df=input_df,
detrend_order=detrend_order,
agg_data_model=agg_data_model,
value_column=value_column,
detrend_method=detrend_method,
baseline_type=baseline_type,
detection_method=detection_method,
baseline=baseline,
anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha,
anomaly_scores_gamma_loc=anomaly_scores_gamma_loc,
anomaly_scores_gamma_beta=anomaly_scores_gamma_beta,
agg_data=agg_data)
| 2.640625 | 3 |
housemate/__init__.py | intuso/housemate-python | 0 | 12757655 | import json
import traceback
from abc import abstractmethod
import stomp
def serialise(obj):
    """JSON serializer for objects not serializable by default json code.

    Returns the object's ``__dict__`` with every key converted from
    snake_case to camelCase; empty fragments (e.g. a leading underscore)
    are preserved as literal underscores.
    """
    def to_camel_case(value):
        def camelcase():
            # First fragment is lower-cased, every later one capitalised.
            yield str.lower
            while True:
                yield str.capitalize
        c = camelcase()
        # BUG FIX: use the built-in next() (Python 2.6+/3.x) instead of the
        # Python-2-only c.next() method. Empty fragments emit "_" and do not
        # consume a converter, so "_type" round-trips unchanged.
        return "".join(next(c)(x) if x else "_" for x in value.split("_"))
    # Special-case hooks (e.g. dates) could be added here before the
    # generic object path below.
    result = {}
    for key in obj.__dict__:
        result[to_camel_case(key)] = obj.__dict__[key]
    return result
# Implementation of stomp message listener that maintains a map of destination -> callback and calls that callback with
# the deserialised object
class MessageListener(stomp.ConnectionListener):
    def __init__(self):
        # destination path -> (callback, deserialiser) pairs registered
        # via subscribe() below.
        self.callbacks = {}
    def subscribe(self, path, callback, dict_to_obj):
        # dict_to_obj turns the parsed JSON dict into a domain object
        # before the callback sees it.
        self.callbacks[path] = (callback, dict_to_obj)
    def on_error(self, headers, message):
        print('received an error "%s"' % message)
    def on_message(self, headers, message):
        # Find the destination
        if 'destination' in headers:
            if headers['destination'] in self.callbacks:
                # Call the callback for the destination
                try:
                    # NOTE(review): this format string is missing a closing
                    # '"' after the second %s.
                    print 'Received "%s" on "%s' % (message, headers['destination'])
                    (callback, dict_to_obj) = self.callbacks[headers['destination']]
                    callback(dict_to_obj(json.loads(message)))
                except Exception:
                    # Swallow callback/deserialisation failures so one bad
                    # message does not kill the listener; log the traceback.
                    print 'Failed to process message:'
                    traceback.print_exc()
            else:
                print 'received a message for "%s" but no listener registered' % headers['destination']
        else:
            print 'received a message with no destination'
# Wrapper around a stomp connection with HM specific ways of sending messages and listening to a topic
class HMConnection:
    def __init__(self, host, port):
        # Connect eagerly; the single MessageListener fans messages out to
        # per-destination callbacks.
        self.conn = stomp.Connection(host_and_ports=[(host, port)])
        self.message_listener = MessageListener()
        self.conn.set_listener('', self.message_listener)
        self.conn.start()
        self.conn.connect(wait=True)
    def send(self, path, data, persist=True):
        # Objects are serialised to camelCase JSON via serialise() above.
        json_string = json.dumps(data, default=serialise)
        # NOTE(review): this format string is missing a closing '"' after
        # the second %s.
        print 'Sending "%s" on "%s' % (json_string, path)
        self.conn.send(body=json_string, destination=path, headers={'com.intuso.housemate.message.store': persist})
    def register(self, path, callback, dict_to_json):
        # The STOMP subscription id is deliberately set to the path itself.
        self.conn.subscribe(path, path)
        self.message_listener.subscribe(path, callback, dict_to_json)
    def disconnect(self):
        self.conn.disconnect()
# HM data class and its subclasses describe Housemate objects on the wire.
# _type is kept so the Java side can deserialise to the correct class;
# object_class, id, name and description are the common attributes.
class Data:
    """Base payload describing a Housemate object."""
    def __init__(self, subtype, object_class, object_id, name, description):
        self.__dict__.update(
            _type=subtype,
            object_class=object_class,
            id=object_id,
            name=name,
            description=description,
        )
class DeviceConnectedData(Data):
    """Data for a connected device, adding its abilities and classes."""
    def __init__(self, subtype, object_class, object_id, name, description, abilities, classes):
        # Old-style base class on Python 2, so the parent initialiser is
        # invoked explicitly rather than via super().
        Data.__init__(self, subtype, object_class, object_id, name, description)
        self.__dict__.update(abilities=abilities, classes=classes)
# HM value: a string value plus an optional map of child values.
class TypeInstance:
    """A wire value holding a string and an optional child-value map."""
    def __init__(self, value, children=None):
        # Coerce anything that is not already a string; basestring covers
        # both str and unicode on Python 2.
        self.value = value if isinstance(value, basestring) else str(value)
        # Default to a fresh empty map (never share a mutable default).
        self.children = children if children is not None else {}
# HM objects for issuing commands and reporting their status back.
class Perform:
    """A request to run operation ``op_id`` with the given value map."""
    def __init__(self, op_id, instance_map):
        # instance_map is expected to be TypeInstancesMap-shaped; the
        # camelCase attribute name matches the Java-side wire format.
        self.__dict__.update(op_id=op_id, instanceMap=instance_map)
class PerformStatus:
    """Progress/status of a previously requested Perform."""
    def __init__(self, op_id, finished=False, error=None):
        self.__dict__.update(op_id=op_id, finished=finished, error=error)
    def perform_finished(self, error=None):
        """Mark the operation as done, recording an error message if any."""
        self.finished = True
        self.error = error
# Types: converters between HM TypeInstance lists and Python values.
class Type:
    """Abstract converter interface."""
    def __init__(self):
        pass
    @abstractmethod
    def to_value(self, instances):
        raise NotImplementedError()
    @abstractmethod
    def from_value(self, value):
        raise NotImplementedError()
class PrimitiveType(Type):
    """Type backed by a single parse callable (bool, int, float, ...)."""
    def __init__(self, parse):
        Type.__init__(self)
        self.parse = parse
    def to_value(self, instances):
        # Only a non-empty list carries something to parse.
        if not isinstance(instances, list) or len(instances) == 0:
            return None
        return self.parse(instances[0]['value'])
    def from_value(self, value):
        return [{"value": str(value)}]  # temp fix while HM UI is case sensitive
class BooleanType(PrimitiveType):
    # NOTE(review): bool("false") is True in Python, so any non-empty string
    # parses as True here -- confirm against the wire format.
    def __init__(self):
        PrimitiveType.__init__(self, bool)
class FloatType(PrimitiveType):
    def __init__(self):
        PrimitiveType.__init__(self, float)
class IntegerType(PrimitiveType):
    def __init__(self):
        PrimitiveType.__init__(self, int)
class LongType(PrimitiveType):
    # Python 2 only: ``long`` does not exist on Python 3.
    def __init__(self):
        PrimitiveType.__init__(self, long)
class StringType(PrimitiveType):
    def __init__(self):
        PrimitiveType.__init__(self, str)
# Factories turning parsed-JSON dicts into domain objects.
def dict_to_perform(json_dict):
    """Build a Perform from a parsed JSON dict; None passes straight through."""
    if json_dict is None:
        return None
    return Perform(json_dict['opId'], json_dict['instanceMap'])
| 2.671875 | 3 |
ldpred/LDpred_gibbs.py | bbitarello/ldpred | 89 | 12757656 | import sys
import time
import scipy as sp
from scipy import stats
import h5py
from ldpred import LDpred_inf
from ldpred import util
from ldpred import ld
from ldpred import reporting
from ldpred import coord_genotypes
def get_LDpred_sample_size(n,ns,verbose):
    """Resolve the effective sample size(s) for the Gibbs sampler.

    :param n: Fixed sample size, or None to derive it from ``ns``.
    :param ns: Per-SNP sample sizes (used when ``n`` is None).
    :param verbose: Print a short description of the decision taken.
    :return: (ldpred_n, ldpred_inf_n). ldpred_n is None when per-SNP
        sample sizes should be used by the sampler; ldpred_inf_n is the
        (mean) sample size handed to LDpred-inf.
    """
    # Local import: this module only imports scipy, whose numpy aliases
    # (sp.mean / sp.std) were removed from modern SciPy releases.
    import numpy as np
    if n is None:
        #If coefficient of variation is very small, then use one N nevertheless.
        mean_n = np.mean(ns)
        n_cv = np.std(ns) / mean_n
        if n_cv < 0.01:
            ldpred_n = mean_n
            if verbose:
                print ("Sample size does not vary much (CV=%0.4f). Using a fixed sample size of %0.2f"%(n_cv,ldpred_n))
        else:
            if verbose:
                print ("Using varying sample sizes")
                print ("Sample size ranges between %d and %d"%(min(ns),max(ns)))
                print ("Average sample size is %0.2f "%(np.mean(ns)))
            ldpred_n = None
        # BUG FIX: ldpred_inf_n was previously assigned only on the high-CV
        # branch, so the (nearly) constant sample-size path raised
        # UnboundLocalError at the return below. LDpred-inf always uses the
        # mean sample size.
        ldpred_inf_n = mean_n
    else:
        ldpred_n = float(n)
        if verbose:
            print ("Using the given fixed sample size of %d"%(n))
        ldpred_inf_n = float(n)
    return ldpred_n,ldpred_inf_n
def prepare_constants(ldpred_n,ns,m,p,h2,sampl_var_shrink_factor):
    """Precompute the Gibbs-sampler constants for the point-normal prior.

    :param ldpred_n: Shared sample size, or None to use per-SNP sizes ``ns``.
    :param ns: Per-SNP sample sizes (used only when ``ldpred_n`` is None).
    :param m: Number of SNPs.
    :param p: Fraction of causal SNPs (prior mixing weight).
    :param h2: Heritability estimate.
    :param sampl_var_shrink_factor: Shrinkage applied to the sampling std-dev.
    :return: dict with 'Mp', 'hdmp', 'rv_scalars' and either shared constants
        or a per-SNP 'snp_dict'.
    """
    Mp = m * p
    # hdmp = h2 / (M*p): prior variance of a causal effect.
    hdmp = (h2 / Mp)
    const_dict = {'Mp':Mp, 'hdmp':hdmp}
    rv_scalars = sp.zeros(m)
    if ldpred_n is not None:
        # Single shared sample size: one set of constants for every SNP.
        hdmpn = hdmp + 1.0 / ldpred_n
        hdmp_hdmpn = (hdmp / hdmpn)
        c_const = (p / sp.sqrt(hdmpn))
        d_const = (1.0 - p) / (sp.sqrt(1.0 / ldpred_n))
        const_dict['n']=ldpred_n
        const_dict['hdmpn']=hdmpn
        const_dict['hdmp_hdmpn']=hdmp_hdmpn
        const_dict['c_const']=c_const
        const_dict['d_const']=d_const
        # Std-dev scale for the posterior Gaussian draws (shrunken).
        rv_scalars[:]=sampl_var_shrink_factor* sp.sqrt((hdmp_hdmpn) * (1.0 / ldpred_n))
    else:
        # Varying sample sizes: per-SNP constants keyed by SNP index.
        snp_dict = {}
        for i in range(m):
            ni = ns[i]
            hdmpn_i = hdmp + 1.0 / ni
            hdmp_hdmpn_i = (hdmp / hdmpn_i)
            c_const_i = (p / sp.sqrt(hdmpn_i))
            d_const_i = (1.0 - p) / (sp.sqrt(1.0 / ni))
            snp_dict[i]={'n':ni, 'hdmpn':hdmpn_i,
                         'hdmp_hdmpn':hdmp_hdmpn_i,
                         'c_const':c_const_i,
                         'd_const':d_const_i}
            rv_scalars[i]=sampl_var_shrink_factor* sp.sqrt((hdmp_hdmpn_i) * (1.0 / ni))
        const_dict['snp_dict']=snp_dict
    const_dict['rv_scalars']=rv_scalars
    return const_dict
def get_constants(snp_i,const_dict):
    """Return the sampling constants that apply to SNP ``snp_i``.

    When sample sizes vary per SNP, ``const_dict`` carries a per-SNP table
    under 'snp_dict'; otherwise the shared constants apply to every SNP.
    """
    try:
        per_snp = const_dict['snp_dict']
    except KeyError:
        return const_dict
    return per_snp[snp_i]
def ldpred_gibbs(beta_hats, genotypes=None, start_betas=None, h2=None, n=None, ns= None, ld_radius=100,
                 num_iter=60, burn_in=10, p=None, zero_jump_prob=0.01, sampl_var_shrink_factor=0.9,
                 tight_sampling=False,ld_dict=None, reference_ld_mats=None, ld_boundaries=None,
                 snp_lrld=None, verbose=False, print_progress=True):
    """
    LDpred (Gibbs Sampler).

    Samples posterior effect sizes under a point-normal prior with causal
    fraction ``p``, conditioning each SNP on its LD neighbours within
    ``ld_radius`` (or explicit ``ld_boundaries``). Returns the average of
    the per-iteration posterior means after ``burn_in`` iterations.

    :return: dict with 'betas' (averaged posterior means) and 'inf_betas'
        (the LDpred-inf starting values).
    """
    # Set random seed to stabilize results
    sp.random.seed(42)
    t0 = time.time()
    m = len(beta_hats)
    ldpred_n, ldpred_inf_n = get_LDpred_sample_size(n,ns,verbose)
    # If no starting values for effects were given, then use the infinitesimal model starting values.
    if start_betas is None and verbose:
        print('Initializing LDpred effects with posterior mean LDpred-inf effects.')
        print('Calculating LDpred-inf effects.')
    # NOTE(review): as written this recomputes LDpred-inf effects even when
    # start_betas were supplied by the caller -- confirm intended.
    start_betas = LDpred_inf.ldpred_inf(beta_hats, genotypes=genotypes, reference_ld_mats=reference_ld_mats,
                                        h2=h2, n=ldpred_inf_n, ld_window_size=2 * ld_radius, verbose=False)
    curr_betas = sp.copy(start_betas)
    assert len(curr_betas)==m,'Betas returned by LDpred_inf do not have the same length as expected.'
    curr_post_means = sp.zeros(m)
    avg_betas = sp.zeros(m)
    # Iterating over effect estimates in sequential order
    iter_order = sp.arange(m)
    # Setting up the marginal Bayes shrink
    const_dict = prepare_constants(ldpred_n,ns,m,p,h2,sampl_var_shrink_factor)
    for k in range(num_iter):  # Big iteration
        h2_est = max(0.00001, sp.sum(curr_betas ** 2))
        if tight_sampling:
            # Force an alpha shrink if estimates are way off compared to heritability estimates.
            # (May improve MCMC convergence.)
            alpha = min(1.0 - zero_jump_prob, 1.0 / h2_est, (h2 + 1.0 / sp.sqrt(ldpred_n)) / h2_est)
        else:
            alpha = 1.0 - zero_jump_prob
        # Pre-draw all uniforms and (scaled) normals for this sweep.
        rand_ps = sp.random.random(m)
        rand_norms = stats.norm.rvs(0.0, 1, size=m)*const_dict['rv_scalars']
        for i, snp_i in enumerate(iter_order):
            # Work out the local LD window around snp_i.
            if ld_boundaries is None:
                start_i = max(0, snp_i - ld_radius)
                focal_i = min(ld_radius, snp_i)
                stop_i = min(m, snp_i + ld_radius + 1)
            else:
                start_i = ld_boundaries[snp_i][0]
                stop_i = ld_boundaries[snp_i][1]
                focal_i = snp_i - start_i
            # Skip SNPs flagged as lying in long-range LD regions.
            if snp_lrld is not None:
                if snp_lrld[snp_i]:
                    continue
            # Figure out what sample size and constants to use
            cd = get_constants(snp_i,const_dict)
            # Local LD matrix
            D_i = ld_dict[snp_i]
            # Local (most recently updated) effect estimates
            local_betas = curr_betas[start_i: stop_i]
            # Calculate the local posterior mean, used when sampling.
            local_betas[focal_i] = 0.0
            # Residualise the marginal estimate against LD neighbours.
            res_beta_hat_i = beta_hats[snp_i] - sp.dot(D_i , local_betas)
            b2 = res_beta_hat_i ** 2
            d_const_b2_exp = cd['d_const'] * sp.exp(-b2 * cd['n'] / 2.0)
            # postp: posterior probability the SNP is causal; non-real
            # intermediate values (numerical under/overflow) are clamped.
            if sp.isreal(d_const_b2_exp):
                numerator = cd['c_const'] * sp.exp(-b2 / (2.0 * cd['hdmpn']))
                if sp.isreal(numerator):
                    if numerator == 0.0:
                        postp = 0.0
                    else:
                        postp = numerator / (numerator + d_const_b2_exp)
                        assert sp.isreal(postp), 'The posterior mean is not a real number? Possibly due to problems with summary stats, LD estimates, or parameter settings.'
                else:
                    postp = 0.0
            else:
                postp = 1.0
            curr_post_means[snp_i] = cd['hdmp_hdmpn'] * postp * res_beta_hat_i
            if rand_ps[i] < postp * alpha:
                # Sample from the posterior Gaussian dist.
                proposed_beta = rand_norms[snp_i] + cd['hdmp_hdmpn'] * res_beta_hat_i
            else:
                # Sample 0
                proposed_beta = 0.0
            curr_betas[snp_i] = proposed_beta  # UPDATE BETA
        if verbose and print_progress:
            sys.stdout.write('\r%0.2f%%' % (100.0 * (min(1, float(k + 1) / num_iter))))
            sys.stdout.flush()
        if k >= burn_in:
            avg_betas += curr_post_means  # Averaging over the posterior means instead of samples.
    if verbose and print_progress:
        sys.stdout.write('\r%0.2f%%\n' % (100.0))
        sys.stdout.flush()
    avg_betas = avg_betas / float(num_iter - burn_in)
    t1 = time.time()
    t = (t1 - t0)
    if verbose:
        print('Took %d minutes and %0.2f seconds' % (t / 60, t % 60))
    return {'betas':avg_betas, 'inf_betas':start_betas}
def ldpred_genomewide(data_file=None, ld_radius=None, ld_dict=None, out_file_prefix=None,
                      summary_dict=None, ps=None,n=None, h2=None, use_gw_h2=False,
                      sampl_var_shrink_factor=1, incl_long_range_ld=False,
                      num_iter=None, verbose=False, zero_jump_prob=0.01,
                      burn_in=5):
    """
    Calculate LDpred for a genome.

    Runs the Gibbs sampler per chromosome for each causal fraction in
    ``ps``, writes per-fraction SNP weight files (plus LDpred-inf weights),
    and records run statistics in ``summary_dict``. When phenotypes are
    present in the coordinated data file and ``verbose`` is set, prediction
    accuracy (R2, AUC, calibration slope) is also reported.

    :param data_file: Path to the coordinated HDF5 data file.
    :param ld_radius: LD window radius (SNPs on each side).
    :param ld_dict: Precomputed LD information (scores, matrices, boundaries).
    :param out_file_prefix: Prefix for the output SNP-weight files.
    :param summary_dict: Dict collecting summary rows for reporting.
    :param ps: Iterable of causal-fraction values to run.
    :param n: Fixed sample size, or None to use per-SNP sizes.
    :param h2: Heritability estimate (None to estimate via LD-score regression).
    """
    print('Applying LDpred with LD radius: %d' % ld_radius)
    df = h5py.File(data_file, 'r')
    has_phenotypes = False
    if 'y' in df:
        y = df['y'][...]  # Phenotype
        num_individs = len(y)
        risk_scores_pval_derived = sp.zeros(num_individs)
        has_phenotypes = True
    ld_scores_dict = ld_dict['ld_scores_dict']
    chrom_ld_dict = ld_dict['chrom_ld_dict']
    chrom_ref_ld_mats = ld_dict['chrom_ref_ld_mats']
    cord_data_g = df['cord_data']
    mean_n = coord_genotypes.get_mean_sample_size(n, cord_data_g)
    #Calculating genome-wide heritability using LD score regression, and partition heritability by chromsomes
    herit_dict = ld.get_chromosome_herits(cord_data_g, ld_scores_dict, mean_n, h2=h2, use_gw_h2=use_gw_h2,
                                          debug=verbose, summary_dict=summary_dict)
    if herit_dict['gw_h2_ld_score_est']>ld_radius/10.0:
        print ('\033[93m Warning: LD radius seems small in comparison to the average LD score. '
               'Please consider a larger one, or a smaller number of SNPs used in the analysis. \033[0m')
    # Per-chromosome LDpred-inf effects used as Gibbs starting values.
    LDpred_inf_chrom_dict = {}
    print('Calculating LDpred-inf weights')
    for chrom_str in util.chromosomes_list:
        if chrom_str in cord_data_g:
            if verbose:
                print('Calculating SNP weights for Chromosome %s' % ((chrom_str.split('_'))[1]))
            g = cord_data_g[chrom_str]
            # Filter monomorphic SNPs
            snp_stds = g['snp_stds_ref'][...]
            snp_stds = snp_stds.flatten()
            ok_snps_filter = snp_stds > 0
            pval_derived_betas = g['betas'][...]
            pval_derived_betas = pval_derived_betas[ok_snps_filter]
            h2_chrom = herit_dict[chrom_str]['h2']
            start_betas = LDpred_inf.ldpred_inf(pval_derived_betas, genotypes=None, reference_ld_mats=chrom_ref_ld_mats[chrom_str],
                                                h2=h2_chrom, n=mean_n, ld_window_size=2 * ld_radius, verbose=verbose)
            LDpred_inf_chrom_dict[chrom_str] = start_betas
    if not incl_long_range_ld:
        lrld_dict = util.load_lrld_dict()
        num_snps_in_lrld = 0
    results_dict = {}
    convergence_report = {}
    # One full genome-wide pass per causal fraction p.
    for p in ps:
        convergence_report[p] = False
        print('Starting LDpred gibbs with f=%0.4f' % p)
        p_str = '%0.4f' % p
        results_dict[p_str] = {}
        if out_file_prefix:
            # Preparing output files
            raw_effect_sizes = []
            ldpred_effect_sizes = []
            ldpred_inf_effect_sizes = []
            out_sids = []
            chromosomes = []
            out_positions = []
            out_nts = []
        chrom_i = 0
        num_chrom = len(cord_data_g.keys())
        for chrom_str in util.chromosomes_list:
            chrom_i+=1
            if chrom_str in cord_data_g:
                g = cord_data_g[chrom_str]
                if out_file_prefix:
                    positions = g['positions'][...]
                    sids = (g['sids'][...]).astype(util.sids_u_dtype)
                    log_odds = g['log_odds'][...]
                    nts = (g['nts'][...]).astype(util.nts_u_dtype)
                    chromosomes.extend([chrom_str] * len(positions))
                    out_positions.extend(positions)
                    out_sids.extend(sids)
                    raw_effect_sizes.extend(log_odds)
                    out_nts.extend(nts)
                pval_derived_betas = g['betas'][...]
                ns = g['ns'][...]
                h2_chrom = herit_dict[chrom_str]['h2']
                snp_lrld = None
                if not incl_long_range_ld:
                    snp_lrld = util.get_snp_lrld_status(chrom_i, positions, lrld_dict)
                    num_snps_in_lrld +=sp.sum(snp_lrld)
                ld_boundaries = None
                if 'chrom_ld_boundaries' in ld_dict:
                    ld_boundaries = ld_dict['chrom_ld_boundaries'][chrom_str]
                if verbose:
                    print('Calculating SNP weights for Chromosome %s' % ((chrom_str.split('_'))[1]))
                res_dict = ldpred_gibbs(pval_derived_betas,h2=h2_chrom, n=n, ns=ns, p=p, ld_radius=ld_radius,
                                        verbose=verbose, num_iter=num_iter, burn_in=burn_in, ld_dict=chrom_ld_dict[chrom_str],
                                        start_betas=LDpred_inf_chrom_dict[chrom_str], ld_boundaries=ld_boundaries,
                                        zero_jump_prob=zero_jump_prob,sampl_var_shrink_factor=sampl_var_shrink_factor,
                                        snp_lrld=snp_lrld, print_progress=False)
                updated_betas = res_dict['betas']
                updated_inf_betas = res_dict['inf_betas']
                # Sanity check: effects exceeding total h2 indicate divergence.
                sum_sqr_effects = sp.sum(updated_betas ** 2)
                if sum_sqr_effects > herit_dict['gw_h2_ld_score_est']:
                    print('Sum of squared updated effects estimates seems too large: %0.4f'% sum_sqr_effects)
                    print('This suggests that the Gibbs sampler did not convergence.')
                    convergence_report[p] = True
                # Rescale from standardised-genotype to allele-count scale.
                snp_stds = g['snp_stds_ref'][...]
                snp_stds = snp_stds.flatten()
                updated_betas = updated_betas / snp_stds
                updated_inf_betas = updated_inf_betas / snp_stds
                ldpred_effect_sizes.extend(updated_betas)
                ldpred_inf_effect_sizes.extend(updated_inf_betas)
                if not verbose:
                    sys.stdout.write('\r%0.2f%%' % (100.0 * (min(1, float(chrom_i) / num_chrom))))
                    sys.stdout.flush()
                else:
                    if has_phenotypes:
                        if 'raw_snps_val' in g:
                            raw_snps = g['raw_snps_val'][...]
                        else:
                            raw_snps = g['raw_snps_ref'][...]
                        prs = sp.dot(updated_betas, raw_snps)
                        risk_scores_pval_derived += prs
                        corr = sp.corrcoef(y, prs)[0, 1]
                        r2 = corr ** 2
                        print('The R2 prediction accuracy of PRS using %s was: %0.4f' % (chrom_str, r2))
        if not incl_long_range_ld:
            summary_dict[1.3]={'name':'SNPs in long-range LD regions','value':'%d'%num_snps_in_lrld}
        if not verbose:
            sys.stdout.write('\r%0.2f%%\n' % (100.0))
            sys.stdout.flush()
        if verbose and has_phenotypes:
            num_indivs = len(y)
            results_dict[p_str]['y'] = y
            results_dict[p_str]['risk_scores_pd'] = risk_scores_pval_derived
            print('Prediction accuracy was assessed using %d individuals.' % (num_indivs))
            corr = sp.corrcoef(y, risk_scores_pval_derived)[0, 1]
            r2 = corr ** 2
            results_dict[p_str]['r2_pd'] = r2
            print('The R2 prediction accuracy (observed scale) for the whole genome was: %0.4f (%0.6f)' % (r2, ((1 - r2) ** 2) / num_indivs))
            if corr < 0:
                risk_scores_pval_derived = -1 * risk_scores_pval_derived
            auc = util.calc_auc(y, risk_scores_pval_derived)
            print('AUC for the whole genome was: %0.4f' % auc)
            # Now calibration
            denominator = sp.dot(risk_scores_pval_derived.T, risk_scores_pval_derived)
            y_norm = (y - sp.mean(y)) / sp.std(y)
            numerator = sp.dot(risk_scores_pval_derived.T, y_norm)
            regression_slope = (numerator / denominator)  # [0][0]
            print('The slope for predictions with P-value derived effects is: %0.4f' % regression_slope)
            results_dict[p_str]['slope_pd'] = regression_slope
        weights_out_file = '%s_LDpred_p%0.4e.txt' % (out_file_prefix, p)
        with open(weights_out_file, 'w') as f:
            f.write('chrom    pos    sid    nt1    nt2    raw_beta    ldpred_beta\n')
            for chrom, pos, sid, nt, raw_beta, ldpred_beta in zip(chromosomes, out_positions, out_sids, out_nts, raw_effect_sizes, ldpred_effect_sizes):
                nt1, nt2 = nt[0], nt[1]
                f.write('%s    %d    %s    %s    %s    %0.4e    %0.4e\n' % (chrom, pos, sid, nt1, nt2, raw_beta, ldpred_beta))
        weights_out_file = '%s_LDpred-inf.txt' % (out_file_prefix)
        with open(weights_out_file, 'w') as f:
            f.write('chrom    pos    sid    nt1    nt2    raw_beta    ldpred_inf_beta \n')
            for chrom, pos, sid, nt, raw_beta, ldpred_inf_beta in zip(chromosomes, out_positions, out_sids, out_nts, raw_effect_sizes, ldpred_inf_effect_sizes):
                nt1, nt2 = nt[0], nt[1]
                f.write('%s    %d    %s    %s    %s    %0.4e    %0.4e\n' % (chrom, pos, sid, nt1, nt2, raw_beta, ldpred_inf_beta))
    summary_dict[2.0]={'name':'Gibbs sampler fractions used','value':str(ps)}
    # (A no-effect duplicate of the 2.3 comprehension used to sit here; removed.)
    summary_dict[2.1]={'name':'Number of burn-iterations used','value':'%i'%burn_in}
    summary_dict[2.2]={'name':'Number of iterations used','value':'%i'%num_iter}
    summary_dict[2.3]={'name':'Convergence issues (for each fraction)','value':str(['Yes' if convergence_report[p] else 'No' for p in ps])}
def main(p_dict):
    """Entry point for the LDpred 'gibbs' sub-command.

    Computes/loads LD information, runs the genome-wide Gibbs sampler with
    the parameters in ``p_dict``, and prints a run summary.

    :param dict p_dict: Parsed command-line parameters ('cf', 'out', 'ldf',
        'ldr', 'f', 'N', 'n_iter', 'n_burn_in', 'h2', 'use_gw_h2',
        'incl_long_range_ld', 'debug').
    """
    #Check parameters
    summary_dict = {}
    summary_dict[0]={'name':'Coordinated data filename','value':p_dict['cf']}
    summary_dict[0.1]={'name':'SNP weights output file (prefix)', 'value':p_dict['out']}
    summary_dict[0.2]={'name':'LD data filename (prefix)', 'value':p_dict['ldf']}
    summary_dict[1.01]={'name':'LD radius used','value':str(p_dict['ldr'])}
    t0 = time.time()
    summary_dict[1]={'name':'dash', 'value':'LD information'}
    ld_dict = ld.get_ld_dict_using_p_dict(p_dict, summary_dict)
    t1 = time.time()
    t = (t1 - t0)
    summary_dict[1.2]={'name':'Running time for calculating LD information:','value':'%d min and %0.2f secs'% (t / 60, t % 60)}
    # Re-use the timer for the Gibbs phase.
    t0 = time.time()
    summary_dict[1.9]={'name':'dash', 'value':'LDpred Gibbs sampler'}
    ldpred_genomewide(data_file=p_dict['cf'], out_file_prefix=p_dict['out'], ps=p_dict['f'], ld_radius=p_dict['ldr'],
                      ld_dict=ld_dict, n=p_dict['N'], num_iter=p_dict['n_iter'], burn_in=p_dict['n_burn_in'],
                      h2=p_dict['h2'], use_gw_h2=p_dict['use_gw_h2'], incl_long_range_ld=p_dict['incl_long_range_ld'],
                      sampl_var_shrink_factor=1, verbose=p_dict['debug'], summary_dict=summary_dict)
    t1 = time.time()
    t = (t1 - t0)
    summary_dict[3]={'name':'Running time for Gibbs sampler(s):','value':'%d min and %0.2f secs'% (t / 60, t % 60)}
    reporting.print_summary(summary_dict, 'Summary of LDpred Gibbs')
| 2.109375 | 2 |
tests/test_integration.py | lancelote/sml-test | 0 | 12757657 | from textwrap import dedent
from sml_test.cli import cli
def assert_result(result, ok=0, fail=0, err=0, exit_code=0, contains=""):
    """Check a CLI invocation result: summary counts, exit code and substring."""
    summary = f"OK={ok}, FAIL={fail}, ERR={err}"
    assert summary in result.output
    assert result.exit_code == exit_code
    assert contains in result.output
def test_ok_and_err(sml_test_file, cli_runner):
    # One passing and one failing test binding -> counted separately,
    # and a failure makes the exit code non-zero.
    sml_test_file.write_text(
        dedent(
            """
            val test_1 = 1 = 1
            val test_2 = 1 = 2
            """
        )
    )
    result = cli_runner.invoke(cli)
    assert_result(result, ok=1, fail=1, exit_code=1)
def test_err(sml_test_file, cli_runner):
    # Missing 'val' keyword: SML reports both a type mismatch and an
    # unbound variable for the same line.
    sml_test_file.write_text(
        dedent(
            """
            test_1 = 2 = 2
            """
        )
    )
    result = cli_runner.invoke(cli)
    # Two errors: mismatch type and unbound variable
    assert_result(result, err=2, exit_code=1)
def test_ok(sml_test_file, cli_runner):
    # Every test binding evaluates to true -> zero exit code.
    sml_test_file.write_text(
        dedent(
            """
            val test_1 = 1 = 1
            val test_2 = 2 = 2
            """
        )
    )
    result = cli_runner.invoke(cli)
    assert_result(result, ok=2)
def test_no_tests(sml_test_file, cli_runner):
    # Bindings not named test_* are ignored: all counters stay zero.
    sml_test_file.write_text(
        dedent(
            """
            val foo = 2 = 2
            """
        )
    )
    result = cli_runner.invoke(cli)
    assert_result(result)
def test_verbose(sml_test_file, cli_runner):
    sml_test_file.write_text(
        dedent(
            """
            val test_1 = 1 = 1
            """
        )
    )
    # With -v the raw SML binding output is echoed in the report.
    error_message = "val test_1 = true : bool"
    result = cli_runner.invoke(cli, ["-v"])
    assert_result(result, ok=1, exit_code=0, contains=error_message)
def test_unknown_symbol_in_impl(sml_test_file, sml_impl_file, cli_runner):
    # The implementation references undefined helpers (sum_list, firsts,
    # seconds), so loading it via 'use' produces multiple errors.
    sml_impl_file.write_text(
        dedent(
            """
            fun sum_pair_list(xs : (int * int) list) =
                sum_list(firsts xs) + sum_list(seconds xs)
            """
        )
    )
    sml_test_file.write_text(
        dedent(
            """
            use "sample.sml";
            val test_1 = sum_pair_list([(1, 2), (3, 4)]) = 10
            """
        )
    )
    result = cli_runner.invoke(cli)
    assert_result(result, err=4, exit_code=1)
def test_type_mismatch_in_impl(sml_test_file, sml_impl_file, cli_runner):
    # factorial applies list_product to countdown without parentheses,
    # which type-checks incorrectly -> one error.
    sml_impl_file.write_text(
        dedent(
            """
            fun list_product(xs : int list) =
                if null xs
                then 1
                else hd xs * list_product(tl xs)
            fun countdown(x : int) =
                if x = 0
                then []
                else x :: countdown(x - 1)
            fun factorial(x : int) =
                list_product countdown x
            """
        )
    )
    sml_test_file.write_text(
        dedent(
            """
            use "sample.sml";
            val test9_2 = factorial 4 = 24
            """
        )
    )
    result = cli_runner.invoke(cli)
    assert_result(result, err=1, exit_code=1)
def test_runtime_exception(sml_test_file, sml_impl_file, cli_runner):
    # valOf on NONE raises Option at runtime -> reported as an error.
    sml_impl_file.write_text(
        dedent(
            """
            fun max1(xs : int list) =
                if null xs
                then NONE
                else
                    let val tl_ans = max1(tl xs)
                    in if isSome tl_ans andalso valOf tl_ans > hd xs
                       then tl_ans
                       else SOME (hd xs)
                    end
            """
        )
    )
    sml_test_file.write_text(
        dedent(
            """
            use "sample.sml";
            val test3 = valOf(max1 []);
            """
        )
    )
    result = cli_runner.invoke(cli)
    assert_result(result, err=1, exit_code=1)
def test_usage_fail(sml_test_file, cli_runner):
    # 'use' on a file that does not exist surfaces the interpreter message.
    sml_test_file.write_text('use "foo_bar.sml";')
    error_message = "use failed: 'foo_bar.sml'"
    result = cli_runner.invoke(cli)
    assert_result(result, err=1, exit_code=1, contains=error_message)
| 2.578125 | 3 |
image_crawler/tests/test_face_encoding.py | javi-cortes/average-face-encoding | 1 | 12757658 | from unittest import TestCase
from unittest.mock import patch
import numpy as np
from exceptions import NoUriProviden
from main import calculate_average_face_encoding
from main import obtain_image_face_encodings
from main import parallelize_face_encodings
class TryTesting(TestCase):
    # Unit tests for the face-encoding helpers exposed by ``main``.
    def test_obtain_image_face_encodings_empty_uri(self):
        # An empty URI must be rejected explicitly.
        with self.assertRaises(NoUriProviden):
            obtain_image_face_encodings("")
    def test_obtain_image_face_encodings_uri_not_found(self):
        # A non-existent path propagates the filesystem error.
        with self.assertRaises(FileNotFoundError):
            obtain_image_face_encodings("uri/that/doesnt/exists")
    @patch("main.IMAGE_DIRECTORY", "/path/that/doesnt/exist")
    def test_parallelize_face_encodings_directory_not_found(self):
        # this is not checked on the actual code but the actual
        # face_recognition library is raising the exception
        with self.assertRaises(FileNotFoundError):
            parallelize_face_encodings()
    @patch("main.obtain_image_face_encodings", return_value=[])
    @patch("os.listdir", return_value=[])
    def test_parallelize_face_encodings_empty_directory_encoding_not_called(
        self, listdir_mock, obtain_mock
    ):
        # NOTE(review): nothing is invoked before this assertion, so the
        # mock can never have been called and the test passes vacuously.
        # It likely intended to call parallelize_face_encodings() first.
        self.assertFalse(obtain_mock.called)
    def test_calculate_average_face_encoding_with_empty_encodings(self):
        # No encodings -> no average to compute.
        self.assertIsNone(calculate_average_face_encoding([]))
    @patch("main.np.savetxt")
    def test_calculate_average_face_encoding_ensure_file_creation_called(
        self, mocked_np
    ):
        # A non-empty array must be persisted via np.savetxt.
        calculate_average_face_encoding(np.ndarray([1]))
        self.assertTrue(mocked_np.called)
| 3.03125 | 3 |
src/python/m5/util/smartdict.py | mandaltj/gem5_chips | 17 | 12757659 | <reponame>mandaltj/gem5_chips<gh_stars>10-100
# Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
# The SmartDict class fixes a couple of issues with using the content
# of os.environ or similar dicts of strings as Python variables:
#
# 1) Undefined variables should return False rather than raising KeyError.
#
# 2) String values of 'False', '0', etc., should evaluate to False
# (not just the empty string).
#
# #1 is solved by overriding __getitem__, and #2 is solved by using a
# proxy class for values and overriding __nonzero__ on the proxy.
# Everything else is just to (a) make proxies behave like normal
# values otherwise, (b) make sure any dict operation returns a proxy
# rather than a normal value, and (c) coerce values written to the
# dict to be strings.
from convert import *
from attrdict import attrdict
class Variable(str):
"""Intelligent proxy class for SmartDict. Variable will use the
various convert functions to attempt to convert values to useable
types"""
def __int__(self):
return toInteger(str(self))
def __long__(self):
return toLong(str(self))
def __float__(self):
return toFloat(str(self))
def __bool__(self):
return toBool(str(self))
# Python 2.7 uses __nonzero__ instead of __bool__
__nonzero__ = __bool__
def convert(self, other):
t = type(other)
if t == bool:
return bool(self)
if t == int:
return int(self)
if t == long:
return long(self)
if t == float:
return float(self)
return str(self)
def __lt__(self, other):
return self.convert(other) < other
def __le__(self, other):
return self.convert(other) <= other
def __eq__(self, other):
return self.convert(other) == other
def __ne__(self, other):
return self.convert(other) != other
def __gt__(self, other):
return self.convert(other) > other
def __ge__(self, other):
return self.convert(other) >= other
def __add__(self, other):
return self.convert(other) + other
def __sub__(self, other):
return self.convert(other) - other
def __mul__(self, other):
return self.convert(other) * other
def __div__(self, other):
return self.convert(other) / other
def __truediv__(self, other):
return self.convert(other) / other
def __radd__(self, other):
return other + self.convert(other)
def __rsub__(self, other):
return other - self.convert(other)
def __rmul__(self, other):
return other * self.convert(other)
def __rdiv__(self, other):
return other / self.convert(other)
def __rtruediv__(self, other):
return other / self.convert(other)
class UndefinedVariable(object):
"""Placeholder class to represent undefined variables. Will
generally cause an exception whenever it is used, but evaluates to
zero for boolean truth testing such as in an if statement"""
def __bool__(self):
return False
# Python 2.7 uses __nonzero__ instead of __bool__
__nonzero__ = __bool__
class SmartDict(attrdict):
"""Dictionary class that holds strings, but intelligently converts
those strings to other types depending on their usage"""
def __getitem__(self, key):
"""returns a Variable proxy if the values exists in the database and
returns an UndefinedVariable otherwise"""
if key in self:
return Variable(dict.get(self, key))
else:
# Note that this does *not* change the contents of the dict,
# so that even after we call env['foo'] we still get a
# meaningful answer from "'foo' in env" (which
# calls dict.__contains__, which we do not override).
return UndefinedVariable()
def __setitem__(self, key, item):
"""intercept the setting of any variable so that we always
store strings in the dict"""
dict.__setitem__(self, key, str(item))
def values(self):
for value in dict.values(self):
yield Variable(value)
def items(self):
for key,value in dict.items(self):
yield key, Variable(value)
def get(self, key, default='False'):
return Variable(dict.get(self, key, str(default)))
def setdefault(self, key, default='False'):
return Variable(dict.setdefault(self, key, str(default)))
__all__ = [ 'SmartDict' ]
| 1.421875 | 1 |
addons/mixer/blender_client/codec.py | trisadmeslek/V-Sekai-Blender-tools | 0 | 12757660 | # GPLv3 License
#
# Copyright (C) 2020 Ubisoft
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Define and register encodable/decodable message types
"""
from mixer import codec
from mixer.broadcaster.common import MessageType
from mixer.blender_client import messages
message_types = {
MessageType.TRANSFORM: messages.TransformMessage,
MessageType.LIGHT: messages.LightMessage,
}
def register():
codec.register_message_types(message_types)
def unregister():
codec.unregister_message_types(message_types)
| 1.875 | 2 |
Python_Exercicios/Mundo3/Dicionários em Python/python_072.py | jbauermanncode/Curso_Em_Video_Python | 0 | 12757661 | '''
Faça um programa que leia nome e média de um aluno, guardando também a situação em um dicionário. No final, mostre o conteúdo da estrutura na tela.
'''
# Fazer um dicionario para aluno
aluno = dict()
# Pedir nome e média do aluno
aluno['nome'] = str(input('Nome: '))
aluno['média'] = float(input(f'Média de {aluno["nome"]}: '))
# Definir a situação do aluno em relação a média
if aluno['média'] >= 7:
aluno['situação'] = 'Aprovado'
elif 5 <= aluno['média'] < 7:
aluno['situação'] = 'Recuperação'
else:
aluno['situação'] = 'Reprovado'
print('-=' * 30)
# Fazer um for para imprimir na tela
for k, v in aluno.items():
print(f'{k} é igual a {v}.')
| 3.90625 | 4 |
lane-finding/test_DemoWeightedAverageAndPrediction.py | Sashulik/Detroit-Autonomous-Vehicle-Group | 24 | 12757662 | import numpy as np
import cv2
import matplotlib.pyplot as plt
from davg.lanefinding.Prediction import Prediction
def plot_line(img, x, y, color=(255,255,0), thickness=2):
''' Takes an image and two arrays of x and y points similar to matplotlib
and writes the lines onto the image. If the points are floats, they
are rounded and converted to ints to satisfy opencv.
'''
points = np.rint(np.vstack([x,y]).T).astype(int)
#print(points)
cv2.polylines(img, [points], False, color, thickness)
def demonstrate_weighted_average_and_prediction():
# Create a blank array to be used as an image
test_img = np.zeros((128, 128, 3), dtype='uint8')
# Define common y-points
y = np.array([0,31,63,95,127])
# Define an array of x-point arrays
#recent_x = np.array([[40,40,40,40,40]])
#recent_x = np.array([[40,40,40,40,40], [30,35,37,39,40]])
#recent_x = np.array([[40,40,40,40,40], [30,35,37,39,40], [20,30,35,38,40], [10,25,32,37,40]])
#recent_x = np.array([[40,40,40,40,40], [30,35,37,39,40], [20,30,35,38,40], [10,25,32,37,40], [20,30,35,38,40]])
recent_x = np.array([[40,40,40,40,40], [30,35,37,39,40], [20,30,35,38,40], [10,25,32,37,40], [0,20,29,36,40]])
print ("recent_x", recent_x)
# Calculate the softmax weighted averages for the x-points
averages = Prediction.find_weighted_averages(recent_x, window=3)
print("weighted averages", averages)
# Calculate the differences between the each consecutive set of x-points
recent_xdiff = np.diff(recent_x, axis=0)
print ("recent_xdiff", recent_xdiff)
if len(recent_xdiff) != 0:
# Calculate the non-weighted average of the differences for a baseline
recent_xdiff_avg = np.average(recent_xdiff, axis=0)
print ("recent_xdiff_avg", recent_xdiff_avg)
# Calculate the softmax weighted averages for the differences in the x-points
xdiff_weighted_averages = Prediction.find_weighted_averages(recent_xdiff, window=2)
print("xdiff_weighted_averages[-1]:", xdiff_weighted_averages[-1])
# Predict the next line location by applying the last weighted diff to the last x-points
#predicted_x = np.add(xdiff_weighted_averages[-1], recent_x[-1])
predicted_x = Prediction.predict_next_values(recent_x, window=2)
print("predicted:", predicted_x)
# Plot the various lines
for i in range(len(recent_x)):
# Plot a red line for the weighted moving averages
plot_line(test_img, averages[i], y, thickness=1, color=(200,0,0))
# Plot a yellow line for the current points
plot_line(test_img, recent_x[i], y, thickness=1)
# Plot a green line for the predicted next line based on weighted averages of the diffs
plot_line(test_img, predicted_x, y, thickness=1, color=(0,200,0))
plt.imshow(test_img)
plt.show()
# UNCOMMENT TO RUN
demonstrate_weighted_average_and_prediction()
| 3.21875 | 3 |
Coding/Array/two-sum.py | Yong-Zhuang/Tutoring | 0 | 12757663 | <reponame>Yong-Zhuang/Tutoring
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
dic = {}
for i in range(len(nums)):
if dic.get(target - nums[i]) is not None:
return [dic[target - nums[i]], i]
dic[nums[i]] = i
| 3.375 | 3 |
tools/android/roll/update_support_library.py | google-ar/chromium | 777 | 12757664 | #!/usr/bin/env python
#
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Updates the Android support repository (m2repository).
"""
import argparse
import fnmatch
import os
import subprocess
import shutil
import sys
DIR_SOURCE_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', '..'))
ANDROID_SDK_PATH = os.path.abspath(os.path.join(DIR_SOURCE_ROOT, 'third_party',
'android_tools', 'sdk'))
TARGET_NAME = 'extra-android-m2repository'
# The first version we included was 23.2.1. Any folders that are older than
# that should not be included by Chrome's git repo. Unstable versions should
# also be excluded.
REMOVE_LIST = ['databinding', '13.*', '18.*', '19.*', '20.*', '21.*', '22.*',
'23.0.*', '23.1.*', '23.2.0', '*-alpha*', '*-beta*']
def main():
parser = argparse.ArgumentParser(description='Updates the Android support '
'repository in third_party/android_tools')
parser.add_argument('--sdk-dir',
help='Directory for the Android SDK.')
args = parser.parse_args()
sdk_path = ANDROID_SDK_PATH
if args.sdk_dir is not None:
sdk_path = os.path.abspath(os.path.join(DIR_SOURCE_ROOT, args.sdk_dir))
sdk_tool = os.path.abspath(os.path.join(sdk_path, 'tools', 'android'))
if not os.path.exists(sdk_tool):
print 'SDK tool not found at %s' % sdk_tool
return 1
# Run the android sdk update tool in command line.
subprocess.check_call([sdk_tool, 'update', 'sdk' , '--no-ui',
'--filter', TARGET_NAME])
m2repo = os.path.abspath(os.path.join(sdk_path, 'extras', 'android',
'm2repository'))
# Remove obsolete folders and unused folders according to REMOVE_LIST.
count = 0
for folder, _, _ in os.walk(m2repo):
for pattern in REMOVE_LIST:
if fnmatch.fnmatch(os.path.basename(folder), pattern):
count += 1
print 'Removing %s' % os.path.relpath(folder, sdk_path)
shutil.rmtree(folder)
if count == 0:
print ('No files were removed from the updated support library. '
'Did you update it successfully?')
return 1
if __name__ == '__main__':
sys.exit(main())
| 2.078125 | 2 |
scripts/build_coco_dataset.py | hnt4499/faster-rcnn | 0 | 12757665 | import os
import json
import sys
import argparse
from pathlib import Path
import pandas as pd
from tqdm import tqdm
DESCRIPTION = """
Build a csv file containing necessary information of a COCO dataset that is
compatible with this package.
"""
def get_bbox(bbox):
"""Get bbox of type (xmin, ymin, xmax, ymax) from a bbox of type
(x, y, w, h)"""
xmin, ymin, w, h = bbox
xmin = round(xmin)
ymin = round(ymin)
xmax = round(xmin + w) - 1
ymax = round(ymin + h) - 1
return [xmin, ymin, xmax, ymax]
def process_df(df_images, df_objects):
if df_objects is None:
df_merge = df_images[["id", "file_name", "width", "height"]]
df_merge = df_merge.set_index("id")
else:
# Merge
df = pd.merge(df_objects, df_images, left_on="image_id", right_on="id")
df = df[["image_id", "bbox", "category_id",
"file_name", "height", "width"]]
# Convert bboxes to integers
df["bbox"] = df["bbox"].apply(get_bbox)
# Merge all objects within each image
def transform(sub_df):
image_id, file_name, height, width = sub_df.iloc[0][
["image_id", "file_name", "height", "width"]]
category_ids = sub_df["category_id"].tolist()
category_ids = ",".join(map(str, category_ids))
bboxes = sub_df["bbox"].tolist()
bboxes = sum(bboxes, [])
bboxes = ",".join(map(str, bboxes))
return pd.Series({
"image_id": image_id, "img_name": file_name, "width": width,
"height": height, "bboxes": bboxes, "labels": category_ids
})
df_merge = df.groupby("image_id").apply(transform)
assert len(df_merge) == df_objects["image_id"].nunique()
return df_merge
def main(args):
# Read annotation file
print("Reading annotation file...")
with open(args.ann_path) as fin:
ann = json.load(fin)
print(f"Number of images: {len(ann['images'])}, number of annotations: "
f"{len(ann['annotations']) if 'annotations' in ann else -1}")
# Convert to dataframes
df_images = pd.DataFrame.from_records(ann["images"])
if "annotations" in ann:
df_objects = pd.DataFrame.from_records(ann["annotations"])
assert df_objects["image_id"].isin(df_images["id"]).all()
else:
df_objects = None
# Process dataframes
print("Processing dataframes...")
df = process_df(df_images, df_objects)
# Parse images
print("Parsing images...")
ids = []
file_paths = []
no_info_ids = []
paths = list(Path(args.image_dir).glob("*.jpg"))
for file_path in tqdm(paths):
_, file_name = os.path.split(file_path)
if not file_name.startswith("COCO"):
continue
name, _ = os.path.splitext(file_name)
id = int(name.split("_")[-1])
if id not in df.index:
no_info_ids.append(id)
else:
ids.append(id)
file_paths.append(file_path)
assert len(ids) == len(df) # make sure all images in `df` are found
df = df.loc[ids]
df["img_path"] = file_paths
if df_objects is None:
df = df[["img_path", "width", "height"]]
else:
df = df[["img_path", "width", "height", "bboxes", "labels"]]
df.to_csv(args.save_path, index=False)
print(f"There are {len(no_info_ids)} images that have no "
f"information: {no_info_ids}")
print("Done.")
def parse_arguments(argv):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=DESCRIPTION)
parser.add_argument(
'-d', '--image-dir', type=str, required=True,
help='Path(s) to the image directory.')
parser.add_argument(
'-a', '--ann-path', type=str, required=True,
help='Path to the annotation file (e.g., instances_train2014.json).')
parser.add_argument(
'-s', '--save-path', type=str, required=True,
help='Path(s) to save the dataset information.')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| 2.796875 | 3 |
custom_classifier/text_classifier.py | calebkoy/pulse-check | 0 | 12757666 | <filename>custom_classifier/text_classifier.py
import math
import sys
from collections import defaultdict, Counter
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils.validation import check_array, check_is_fitted, check_X_y
class NaiveBayesClassifier(ClassifierMixin, BaseEstimator):
def __init__(self, text_preprocessor):
self.text_preprocessor = text_preprocessor
def fit(self, X, y):
X, y = check_X_y(X, y, dtype='str')
self.classes_, self.y_ = np.unique(y, return_inverse=True)
self.X_ = X.reshape(len(X))
self.__group_data_by_class()
self.__compute_log_class_priors()
self.vocab_ = set()
self.class_total_word_counts_ = defaultdict(int)
for c, data in self.grouped_data_.items():
for index, text in enumerate(data):
processed_text = self.text_preprocessor.process(text)
data[index] = processed_text
split_text = processed_text.split()
self.class_total_word_counts_[c] += len(split_text)
for word in split_text:
self.vocab_.add(word)
self.tf_idf_matrices_ = {}
vectorizer = TfidfVectorizer(vocabulary=self.vocab_)
for c, data in self.grouped_data_.items():
self.tf_idf_matrices_[c] = vectorizer.fit_transform(data).toarray()
self.tf_idf_matrix_feature_names_ = vectorizer.get_feature_names()
return self
def predict(self, X):
check_is_fitted(self)
X = check_array(X, dtype='str')
vocab_size = len(self.vocab_)
predictions = np.empty(len(X))
for index, text in enumerate(X.reshape(len(X))):
predictions[index] = self.__compute_maximum_a_posteriori(text, vocab_size)
return predictions
def __group_data_by_class(self):
self.grouped_data_ = {}
for index, c in enumerate(self.classes_):
self.grouped_data_[c] = self.X_[np.asarray(self.y_ == index).nonzero()]
def __compute_log_class_priors(self):
self.log_class_priors_ = {}
number_of_samples = len(self.X_)
for c in self.classes_:
self.log_class_priors_[c] = math.log(len(self.grouped_data_[c]) / number_of_samples)
def __compute_maximum_a_posteriori(self, text, vocab_size):
max_posterior = -sys.maxsize
most_likely_class = -sys.maxsize
for c in self.classes_:
posterior = self.log_class_priors_[c]
processed_text = self.text_preprocessor.process(text)
word_counts = Counter(processed_text.split())
total_words_in_class_texts = self.class_total_word_counts_[c]
tf_idf_matrix_column_sums = self.tf_idf_matrices_[c].sum(axis=0)
for index, word in enumerate(self.vocab_):
word_count = word_counts[word]
if word_count == 0:
continue
tf_idf_matrix_word_column_index = self.tf_idf_matrix_feature_names_.index(word)
tf_idf_matrix_column_sum = tf_idf_matrix_column_sums[tf_idf_matrix_word_column_index]
laplace_probability = (tf_idf_matrix_column_sum + 1) / (total_words_in_class_texts + vocab_size)
posterior += (word_count * math.log(laplace_probability))
if posterior > max_posterior:
max_posterior = posterior
most_likely_class = c
return most_likely_class | 2.65625 | 3 |
src/infi.py | jonasvandervennet/advent-of-code-2021 | 0 | 12757667 | <gh_stars>0
from collections import defaultdict
from itertools import combinations_with_replacement
# PART 1: 428940
def main():
with open("inputs/infi.txt") as ifp:
lines = ifp.readlines()
# lines = [
# "46 onderdelen missen",
# "Zoink: 9 Oink, 5 Dink",
# "Floep: 2 Flap, 4 Dink",
# "Flap: 4 Oink, 3 Dink"
# ]
missing_parts = int(lines[0].split(" onderdelen missen")[0])
num_parts_per_key = defaultdict(lambda: 1)
rules = defaultdict(lambda: [])
for rule in lines[1:]:
key, parts = rule.strip().split(": ")
for consituent in parts.split(", "):
amount, material = consituent.split(" ")
rules[key].append((int(amount), material))
# part 1
done = False
while not done:
done = True
for key, parts in rules.items():
if key in num_parts_per_key.keys():
continue
if all([part[1] in num_parts_per_key.keys() or part[1] not in rules.keys() for part in parts]):
num_parts_per_key[key] = sum([part[0]*num_parts_per_key[part[1]] for part in parts])
done = False
print(f"part 1: {max(num_parts_per_key.values())}")
# part 2
materials: list[str] = [material_amount[1] for material_amounts in rules.values() for material_amount in material_amounts]
toys = [key for key in rules.keys() if key not in materials]
NUM_PRESENTS = 20
match_found = False
for comb in combinations_with_replacement(toys, NUM_PRESENTS):
parts = sum([num_parts_per_key[toy] for toy in comb])
if parts == missing_parts:
code = ''.join(sorted([name[0] for name in comb]))
print(f"part 2: {code}")
match_found = True
break
if not match_found:
print("ERROR: Could not find correct amount of used parts")
if __name__ == "__main__":
main()
| 2.859375 | 3 |
source/timeseries/multi_output/residual_wrapper.py | supercoder3000/py_tensorflow_experiments | 0 | 12757668 | <gh_stars>0
import tensorflow as tf
class ResidualWrapper(tf.keras.Model):
def __init__(self, model):
super().__init__()
self.model = model
def call(self, inputs, *args, **kwargs):
delta = self.model(inputs, *args, **kwargs)
# The prediction for each time step is the input
# from the previous time step plus the delta
# calculated by the model.
return inputs + delta
| 2.734375 | 3 |
src/kivy_paint/_utils.py | gottadiveintopython/kivypaint | 0 | 12757669 | <filename>src/kivy_paint/_utils.py
__all__ = ('show_yes_no_dialog', )
from kivy.core.text import DEFAULT_FONT
from kivy.lang import Builder
from kivy.factory import Factory as F
Builder.load_string('''
<KPYesNoDialog@ModalView>:
size_hint: .7, .3
BoxLayout:
orientation: 'vertical'
padding: '8dp'
spacing: '8dp'
Label:
id: main
font_size: max(sp(16), 30)
BoxLayout:
spacing: '4dp'
size_hint_y: None
height: self.minimum_height
Button:
id: yes
font_size: max(sp(16), 30)
size_hint_min: self.texture_size
Button:
id: no
font_size: max(sp(16), 30)
size_hint_min: self.texture_size
''')
async def show_yes_no_dialog(*, text_main='', font_name=DEFAULT_FONT, text_yes='Yes', text_no='No', _cache=[]):
import asynckivy as ak
try:
dialog = _cache.pop()
except IndexError:
dialog = F.KPYesNoDialog()
main_label = dialog.ids.main.__self__
yes_button = dialog.ids.yes.__self__
no_button = dialog.ids.no.__self__
main_label.text = text_main
main_label.font_name = font_name
yes_button.text = text_yes
yes_button.font_name = font_name
no_button.text = text_no
no_button.font_name = font_name
try:
dialog.open()
tasks = await ak.or_(
ak.event(yes_button, 'on_release'),
ak.event(no_button, 'on_release'),
ak.event(dialog, 'on_dismiss'),
)
if tasks[0].done:
return 'yes'
elif tasks[1].done:
return 'no'
else:
return 'cancelled'
finally:
dialog.dismiss()
_cache.append(dialog)
| 2.203125 | 2 |
covermaker/layout.py | xgmo9/comic-translation | 0 | 12757670 | <reponame>xgmo9/comic-translation
import os
import re
from collections import deque
from unicodedata import east_asian_width
from PIL import ImageFont
_DEBUG = True
_MIN_FONT_SIZE = 2
def _is_wide_char(uchar):
w = east_asian_width(uchar)
return w == 'W' or w == 'F'
def _get_font(font_name, font_size):
cur_dir = os.path.dirname(__file__)
font_file = os.path.join(cur_dir, 'fonts', font_name)
return ImageFont.truetype(font_file, font_size)
# 用于提取单词和数字
_RE_H_WORDS = re.compile(r"\w+|[!,.?|\"\'-]+", re.ASCII)
_RE_V_WORDS = re.compile(r"\+|[!,.?|\"\'-]+", re.ASCII)
def _splite_text_to_words(text,section):
'''将文本切分成单词。非宽字符将是单个的字,英文和数字将是词组
Returns:
list : 例如输入 '豆瓣2020 hello' 将返回 ['豆', '瓣', '2020', ' ', 'hello']
'''
if section.dir == 'v':
ascii_words_range = ((x.start(), x.end())
for x in _RE_V_WORDS.finditer(text))
else:
ascii_words_range = ((x.start(), x.end())
for x in _RE_H_WORDS.finditer(text))
i = 0
ret = []
for r in ascii_words_range:
while i < r[0]:
ret.append(text[i])
i += 1
ret.append(text[r[0]:r[1]])
i = r[1]
while i < len(text):
ret.append(text[i])
i += 1
return ret
# 行首禁止出现的标点符号
_PUNCTUATION_BLOCK_SET = {
',',
'.',
':',
';',
'!',
'?',
')',
'}',
']',
'\'',
',',
'。',
':',
';',
'!',
'?',
')',
'】',
'、',
'》',
'…',
'”',
'’',
}
class Line(object):
'''行。每行保存了很多词。'''
def __init__(self, font, letter_spacing, section):
self.words = deque()
self._font = font
self._letter_spacing = letter_spacing
self._words_width = 0
self._letter_count = 0
self._section = section
def _update(self, word, sign):
self._letter_count += sign * len(word)
if self._section.dir == 'h':
self._words_width += sign * self._font.getsize(word)[0]
else:
self._words_width += sign * self._font.getsize(word)[1]
def append(self, word):
self.words.append(word)
self._update(word, 1)
def append_left(self, word):
self.words.appendleft(word)
self._update(word, 1)
def pop(self):
word = self.words.pop()
self._update(word, -1)
return word
def pop_left(self):
word = self.words.popleft()
self._update(word, -1)
return word
def get_display_width(self):
'''返回当前行所有字在排版后的宽度。包含了字符间距'''
ls = (self._letter_count - 0) * self._letter_spacing
return int(ls + self._words_width)
def __str__(self):
return ''.join(self.words)
class Layout(object):
'''排版后最终向外展示的类'''
def __init__(self, lines, font, font_size, line_spacing, letter_spacing):
self.lines = lines
self.font = font
self.font_size = font_size
self.line_spacing = line_spacing
self.letter_spacing = letter_spacing
self._lines_start_pos = [[0, 0] for _ in range(len(lines))]
self._lines_height = len(lines) * (font_size +
line_spacing) - line_spacing
self._dir = None
def update(self, text_box, dir, valign, halign):
# 执行一次重排版
self._dir = dir
if dir == 'h':
self._update_h(text_box, valign, halign)
else:
self._update_v(text_box, valign, halign)
def iter_letters(self):
'''遍历所有的单个字,获取他们的排版信息
可用于绘制。
Yields:
str, tuple, int, tuple: 单个字,字的左上角坐标(x, y) ,旋转角度,(宽度,高度)
'''
if not self._dir:
return
if self._dir == 'h':
for i, line in enumerate(self.lines):
pos = self._lines_start_pos[i]
x = pos[0]
for word in line.words:
for c in word:
fw = self.font.getsize(c)[0]
fh = self.font.getsize(c)[1]
yield c, (x, pos[1]), 0, (fw, fh)
x += fw + self.letter_spacing
else:
for i, line in enumerate(self.lines):
pos = self._lines_start_pos[i]
y = pos[1]
for word in line.words:
for c in word:
is_wide = _is_wide_char(c)
degree = 0 if is_wide else -90
fw = self.font.getsize(c)[1]
fh = self.font.getsize(c)[0]
# 这里的宽高仍旧按照横向写,你需要根据角度自行计算
yield c, (pos[0], y), 0, (fw, fh)
y += fw + self.letter_spacing
def _update_h(self, box, valign, halign):
for i, line in enumerate(self.lines):
# 求 x 坐标
if valign == 'l':
xoff = 0
elif valign == 'r':
xoff = box.w - line.get_display_width()
else:
xoff = (box.w - line.get_display_width()) / 2
# 求 y 坐标
yoff = i * (self.line_spacing + self.font_size)
if halign == 'b':
yoff += box.h - self._lines_height
elif halign == 'c':
yoff += (box.h - self._lines_height) / 2
self._lines_start_pos[i][0] = int(box.lt[0]) + int(xoff)
self._lines_start_pos[i][1] = int(box.lt[1]) + int(yoff)
def _update_v(self, box, valign, halign):
for i, line in enumerate(self.lines):
# 求 x 坐标
xoff = self.font_size + (self.font_size + self.line_spacing) * i
if valign == 'l':
xoff += (box.w - self._lines_height)
elif valign == 'c':
xoff += (box.w - self._lines_height) / 2
# 求 y 坐标
if halign == 't':
yoff = 0
elif halign == 'b':
yoff = box.h - line.get_display_width()
else:
yoff = (box.h - line.get_display_width()) / 2
self._lines_start_pos[i][0] = int(box.rb[0]) - int(xoff)
self._lines_start_pos[i][1] = int(box.lt[1]) + int(yoff)
def _build_lines(text, font, words, boxw, boxh, font_size, lespc, lispc, section):
'''将text按照行分割后,返回每一行的数据
Returns:
list: [Line ...] 如果列表为空,表示不能按照指定配置在文本框内完成排版
'''
texth = 0
lines = []
prei, i = 0, 0
line = Line(font, lespc, section)
while i < len(words):
word = words[i]
line.append(word)
lw = line.get_display_width()
#print('line width '+ f'{lw} '+ f'{line} '+ f'{i} '+f'{prei} '+f'{boxw}')
if lw > boxw:
# 超框了直接返回
if i == prei:
return []
# 更新文本高V度
texth += font_size
if lines:
texth += lispc
# 判断行高是否超限
if texth > boxh:
return []
# 添加新行
line.pop()
lines.append(line)
line = Line(font, lespc, section)
prei = i
# 如果行首有违反排版规则的字符则从前面的行借字符
if word[0] in _PUNCTUATION_BLOCK_SET:
prei -= 1
i = prei
lines[-1].pop()
else:
i += 1
if line.words:
# 更新文本高V度
texth += font_size
if lines:
texth += lispc
# 判断行高是否超限
if texth > boxh:
return []
lines.append(line)
return lines
def _build_max_font_lines(text, section):
'''在文本框内寻找能最大利用文本框显示区域字号,并执行分行操作'''
# 1. 把文本块中所有的单词和数字找出来,保证他们不会被分割。这样符合排版规则
words = _splite_text_to_words(text,section)
# 3. 求字号范围
boxw, boxh = section.box.w, section.box.h
if section.dir == 'v':
boxw, boxh = boxh, boxw
max_font_size = int(min(boxw, boxh))
min_font_size = int(min(boxw, boxh, _MIN_FONT_SIZE))
# 4. 二分法查找最合适的字号分行操作
lfs, rfs = min_font_size, max_font_size
lines = []
while lfs <= rfs:
mfs = lfs + int((rfs - lfs) / 2)
lespc = int(section.letter_spacing_factor * mfs)
lispc = int(section.line_spacing_factor * mfs)
#print('fontsize', mfs, lfs, rfs)
font = _get_font(section.font, mfs)
lines = _build_lines(text, font, words, boxw, boxh, mfs, lespc, lispc, section)
if mfs == lfs:
break
if lines:
lfs = mfs
else:
rfs = mfs
return lines, font, mfs, lispc, lespc
def _build_trimed_lines(text, section):
if section.dir == 'h':
fs = section.box.h
width = section.box.w
else:
fs = section.box.w
width = section.box.h
lespc = int(section.letter_spacing_factor * fs)
lispc = int(section.line_spacing_factor * fs)
font = _get_font(section.font, fs)
line = Line(font, lespc, section)
limit = width - font.getsize('…')[0]
i = 0
while i < len(text):
line.append(text[i])
if line.get_display_width() > limit:
break
i += 1
if i < len(text):
line.pop()
line.append('…')
return [line], font, fs, lispc, lespc
def layout_text(text, section) -> Layout:
'''按照 section 指定的配置对 text 进行排版
Args:
text (str): 待排版字体
section (config.Section): 排版配置
Returns:
Layout: 排版好的 Layout 对象
'''
# 按规则执行分行
lines, font, font_size, line_spacing, letter_spacing = _build_max_font_lines(
text, section)
# 进行布局运算
ret = Layout(lines, font, font_size, line_spacing, letter_spacing)
ret.update(section.box, section.dir, section.valign, section.halign)
return ret
| 2.796875 | 3 |
model/df_generation.py | spacewaterbear/pubmed_data | 0 | 12757671 | import pandas as pd
from utils.date import to_datetime
from functools import wraps
nb_author = 16
class DataFrameGenertion:
def __init__(self):
self.columns = self._generate_columns()
self.df = pd.DataFrame(columns=self.columns)
self.d = None
self.article = None
def _generate_columns(self):
columns = ["pmid", "title", "abstract", "date", "journal", "substance", "author_list", "affiliation_list"]
for i in range(0, nb_author):
columns.append(f'author_{i}')
columns.append(f'affiliation_{i}')
return columns
def try_data(func):
"""
decorator to add before every function that get information from article :
"""
def inner(self,):
try:
return func(self)
except:
return None
return inner
@try_data
def get_abstract(self):
return self.article["MedlineCitation"]["Article"]["Abstract"]["AbstractText"][0]
@try_data
def get_affiliation_list(self):
return [ele['Affiliation'] for ele in self.article["MedlineCitation"]['Article']['AuthorList'][0]['AffiliationInfo']]
@try_data
def get_author_list(self):
return [ele['ForeName'] + " " + ele['LastName'] for ele in
self.article["MedlineCitation"]['Article']['AuthorList']]
@try_data
def get_chemical_list(self):
return ', '.join([str(ele['NameOfSubstance']) for ele in self.article["MedlineCitation"]["ChemicalList"]])
def explode_data(self, dic_key):
data_l = self.d[dic_key]
if data_l != None:
name = dic_key.strip('list')
for i in range(len(data_l)):
self.d[f'{name}{i}'] = self.d[dic_key][i]
if i > nb_author:
break
def update_df(self, article):
self.d = {}
self.article = article
self.d['abstract'] = self.get_abstract()
self.d['pmid'] = str(article["MedlineCitation"]["PMID"])
self.d['title'] = article["MedlineCitation"]['Article']['ArticleTitle']
self.d['date'] = to_datetime(article["MedlineCitation"]["DateCompleted"])
self.d['journal'] = article["MedlineCitation"]["MedlineJournalInfo"]['MedlineTA']
self.d['author_list'] = self.get_author_list()
self.d['affiliation_list'] = self.get_affiliation_list()
self.explode_data("affiliation_list")
self.d['author_list'] = self.get_author_list()
self.explode_data("author_list")
self.df = self.df.append(self.d, ignore_index=True)
| 2.9375 | 3 |
Examples/SPO/SPO_Robotarm.py | wanxinjin/Safe-PDP | 15 | 12757672 | <reponame>wanxinjin/Safe-PDP<gh_stars>10-100
from SafePDP import SafePDP
from SafePDP import PDP
from JinEnv import JinEnv
from casadi import *
import scipy.io as sio
import matplotlib.pyplot as plt
import time
import random
# --------------------------- load environment ----------------------------------------
env = JinEnv.RobotArm()
env.initDyn(m1=1, m2=1, l1=1, l2=1, g=0)  # two-link arm dynamics, gravity disabled
env.initCost(wq1=0.1, wq2=0.1, wdq1=0.1, wdq2=0.1, wu=0.01)  # quadratic state/control weights
env.initConstraints(max_u=1., max_q=pi)  # control-magnitude and joint-angle limits
dt = 0.2  # discretisation step used to build the discrete dynamics below
horizon = 25  # number of control intervals
init_state = [-pi / 2, 3 * pi / 4, 0, 0]  # [q1, q2, dq1, dq2]
# --------------------------- create Safe PDP OPT object ----------------------------------------
optimizer = SafePDP.CSysOPT()
optimizer.setStateVariable(env.X)
optimizer.setControlVariable(env.U)
dyn = env.X + dt * env.f  # forward-Euler discretisation of the continuous dynamics
optimizer.setDyn(dyn)
optimizer.setPathCost(env.path_cost)
optimizer.setFinalCost(env.final_cost)
optimizer.setPathInequCstr(env.path_inequ)
gamma = 1e-2  # barrier weight: inequality constraints folded into the cost
optimizer.convert2BarrierOC(gamma=gamma)
# ----create constrained OC object for result comparison and neural policy initialization ---------------
coc = SafePDP.COCsys()
coc.setStateVariable(optimizer.state)
coc.setControlVariable(optimizer.control)
coc.setDyn(optimizer.dyn)
coc.setPathCost(optimizer.path_cost)
coc.setFinalCost(optimizer.final_cost)
coc.setPathInequCstr(optimizer.path_inequ_cstr)
coc_sol = coc.ocSolver(init_state=init_state, horizon=horizon)  # ground-truth constrained solution
print('constrained cost', coc_sol['cost'])
# env.play_animation(l1=1, l2=1, dt=dt, state_traj=coc_sol['state_traj_opt'])
# plt.plot(coc_sol['control_traj_opt'], label='Control by COC')
# plt.show()
# --------------------------- Safe Policy Optimization ----------------------------------------
# set the neural policy (one hidden layer with 4 units)
optimizer.setNeuralPolicy(hidden_layers=[4])
# initialize the policy by supervised learning from OC solution traj, a good initialization can avoid local minima
nn_seed =100  # e.g. 100, 50, 500, 600, 700
init_parameter = SafePDP.Traning_NN(optimizer.neural_policy_fn, coc_sol['state_traj_opt'],
                                    0.60 * coc_sol['control_traj_opt'], display=False, max_iter=10000,
                                    seed=nn_seed)  # 0.60 is to make the control input small then the initial policy is more likely initially feasible,
current_parameter = init_parameter
# optimization parameter setting
max_iter = 2000
loss_barrier_trace, loss_trace = [], []
parameter_trace = np.empty((max_iter, init_parameter.size))
control_traj, state_traj = 0, 0
lr = 0.5e-2  # gradient-descent learning rate on the policy parameters
# start policy optimization
for k in range(max_iter):
    # one iteration of safe policy optimization (returns barrier cost, true cost
    # and the gradient dp w.r.t. the policy parameters)
    cost_barrier, cost, dp, state_traj, control_traj, = optimizer.step(init_state=init_state, horizon=horizon,
                                                                       control_auxvar_value=current_parameter,
                                                                       damping_flag=True, damping_lambda=1)
    # storage
    loss_barrier_trace += [cost_barrier]
    loss_trace += [cost]
    parameter_trace[k, :] = current_parameter
    # update (plain gradient descent)
    current_parameter -= lr * dp
    # print progress every 100 iterations
    if k % 100 == 0:
        print('Iter #:', k, 'Loss_barrier:', cost_barrier, 'Loss:', cost)
# save the results
if True:
    save_data = {'parameter_trace': parameter_trace,
                 'loss_trace': loss_trace,
                 'loss_barrier_trace': loss_barrier_trace,
                 'gamma': gamma,
                 'coc_sol': coc_sol,
                 'lr': lr,
                 'init_parameter': init_parameter,
                 'nn_seed': nn_seed}
    np.save('./Results/SPO_Robotarm_trial_2.npy', save_data)
# check and visualize the learning result
env.play_animation(l1=1, l2=1, dt=dt, state_traj=state_traj)
plt.plot(coc_sol['control_traj_opt'], label='Control by COC')
plt.plot(control_traj, label='Control by Neural Policy')
plt.legend()
plt.show()
| 2.046875 | 2 |
aldjemy/table.py | holtgrewe/aldjemy | 0 | 12757673 | <filename>aldjemy/table.py
from django.apps import apps
from django.conf import settings
from sqlalchemy import Column, Table, types, ForeignKey
from sqlalchemy.dialects.postgresql import ARRAY, UUID, JSONB, DATERANGE
def simple(typ):
    """Adapt a zero-argument SQLAlchemy type constructor to the (field) -> type protocol."""
    def make(field):
        # The Django field carries no extra information for these types.
        return typ()
    return make
def varchar(field):
    """Map a Django CharField-like field to a length-bounded SQLAlchemy String."""
    max_len = field.max_length
    return types.String(length=max_len)
def foreign_key(field):
    """Map a Django FK/O2O field to (Integer, ForeignKey('<table>.<pk column>'))."""
    meta = field.related_model._meta
    ref = "%s.%s" % (meta.db_table, meta.pk.column)
    return types.Integer, ForeignKey(ref)
def array_type(field):
    """
    Map a Django ArrayField to a one-dimensional SQLAlchemy ARRAY.

    The element type is resolved through DATA_TYPES; nested ArrayFields
    (multi-dimensional arrays) are not supported.
    """
    element_type = field.base_field.get_internal_type()
    if element_type not in DATA_TYPES or element_type == "ArrayField":
        raise RuntimeError("Unsupported array element type")
    # The converter receives the ArrayField itself (not base_field), matching
    # how scalar fields are converted.
    return ARRAY(DATA_TYPES[element_type](field))
# Maps a Django field's get_internal_type() name to a callable that takes the
# Django field instance and returns either a SQLAlchemy type or a
# (type, ForeignKey) tuple (see foreign_key above).
DATA_TYPES = {
    "AutoField": simple(types.Integer),
    "BigAutoField": simple(types.BigInteger),
    "BooleanField": simple(types.Boolean),
    "CharField": varchar,
    "CommaSeparatedIntegerField": varchar,
    "DateField": simple(types.Date),
    "DateTimeField": simple(types.DateTime),
    "DecimalField": lambda field: types.Numeric(
        scale=field.decimal_places, precision=field.max_digits
    ),
    "DurationField": simple(types.Interval),
    "FileField": varchar,
    "FilePathField": varchar,
    "FloatField": simple(types.Float),
    "IntegerField": simple(types.Integer),
    "BigIntegerField": simple(types.BigInteger),
    "IPAddressField": lambda field: types.CHAR(length=15),
    "NullBooleanField": simple(types.Boolean),
    "OneToOneField": foreign_key,
    "ForeignKey": foreign_key,
    "PositiveIntegerField": simple(types.Integer),
    "PositiveSmallIntegerField": simple(types.SmallInteger),
    "SlugField": varchar,
    "SmallIntegerField": simple(types.SmallInteger),
    "TextField": simple(types.Text),
    "TimeField": simple(types.Time),
    # PostgreSQL-specific types
    "ArrayField": array_type,
    "UUIDField": simple(UUID),
    "JSONField": simple(JSONB),
    "DateRangeField": simple(DATERANGE),
}
def generate_tables(metadata):
    """Build a SQLAlchemy Table in `metadata` for every Django model.

    Proxy models and tables already present in the metadata are skipped.
    Field types are mapped via DATA_TYPES, optionally overridden by the
    ALDJEMY_DATA_TYPES Django setting.
    """
    # Update with user specified data types
    COMBINED_DATA_TYPES = dict(DATA_TYPES)
    COMBINED_DATA_TYPES.update(getattr(settings, "ALDJEMY_DATA_TYPES", {}))
    models = apps.get_models(include_auto_created=True)
    for model in models:
        name = model._meta.db_table
        # Schema-qualify the lookup key when the metadata carries a schema.
        qualname = (metadata.schema + "." + name) if metadata.schema else name
        if qualname in metadata.tables or model._meta.proxy:
            continue
        columns = []
        # Keep concrete fields plus forward one-to-one / many-to-one relations;
        # pair each field with its declaring model (None when declared here).
        model_fields = [
            (f, f.model if f.model != model else None)
            for f in model._meta.get_fields()
            if not f.is_relation or f.one_to_one or (f.many_to_one and f.related_model)
        ]
        private_fields = model._meta.private_fields
        for field, parent_model in model_fields:
            if field not in private_fields:
                # Inherited fields live on the parent model's table.
                if parent_model:
                    continue
                try:
                    internal_type = field.get_internal_type()
                except AttributeError:
                    # Reverse accessors and similar objects have no internal type.
                    continue
                if internal_type in COMBINED_DATA_TYPES and hasattr(field, "column"):
                    typ = COMBINED_DATA_TYPES[internal_type](field)
                    # foreign_key returns (type, ForeignKey); normalise to a list
                    # so it can be splatted into Column either way.
                    if not isinstance(typ, (list, tuple)):
                        typ = [typ]
                    columns.append(
                        Column(field.column, *typ, primary_key=field.primary_key)
                    )
        Table(name, metadata, *columns)
| 2.609375 | 3 |
tests/test_R6_GameLoader.py | RainbowRedux/RainbowSixFileConverters | 6 | 12757674 | <reponame>RainbowRedux/RainbowSixFileConverters
"""Tests the RSEGameLoader module with Rainbow Six (1998)"""
import logging
import unittest
from FileUtilities.Settings import load_settings
from RainbowFileReaders import RSEGameLoader
from RainbowFileReaders.R6Constants import RSEEngineVersions, RSEGameVersions
# Settings file holding the local install paths used by these tests.
TEST_SETTINGS_FILE = "test_settings.json"
# Keep test output quiet; only critical log messages are shown.
logging.basicConfig(level=logging.CRITICAL)
class R6MAPTests(unittest.TestCase):
    """Exercises RSEGameLoader against local Rainbow Six (1998) installs."""

    def test_eagle_watch_detection(self):
        """Tests recognising Eagle Watch"""
        cfg = load_settings(TEST_SETTINGS_FILE)
        # An install with the Eagle Watch expansion must report exactly that mod.
        ew_loader = RSEGameLoader.RSEGameLoader()
        loaded = ew_loader.load_game(cfg["gamePath_R6_EW"])
        self.assertTrue(loaded, "Failed to load game with Eagle Watch")
        self.assertEqual(ew_loader.get_mod_list(), ["Eagle Watch"], "Failed to detect eagle watch")
        # The vanilla install must report no mods at all.
        base_loader = RSEGameLoader.RSEGameLoader()
        loaded = base_loader.load_game(cfg["gamePath_R6"])
        self.assertTrue(loaded, "Failed to load original game")
        self.assertEqual(base_loader.get_mod_list(), [], "Detected a mod where there shouldn't be one")

    def test_game_detection(self):
        """Tests recognising Rainbow Six"""
        cfg = load_settings(TEST_SETTINGS_FILE)
        # Loading from a nonsense path must fail cleanly.
        bogus_loader = RSEGameLoader.RSEGameLoader()
        loaded = bogus_loader.load_game("/ThisPathWillNEverWork/")
        self.assertFalse(loaded, "Incorrectly reported that an invalid game loaded")
        # A valid install is identified as Rainbow Six on the Sherman engine.
        base_loader = RSEGameLoader.RSEGameLoader()
        loaded = base_loader.load_game(cfg["gamePath_R6"])
        self.assertTrue(loaded, "Failed to load original game")
        self.assertEqual(base_loader.game_name, "Rainbow Six", "Didn't recognise game name")
        self.assertEqual(base_loader.game_version, RSEGameVersions.RAINBOW_SIX, "Didn't recognise game version")
        self.assertEqual(base_loader.engine_version, RSEEngineVersions.SHERMAN, "Didn't recognise engine version")
| 2.546875 | 3 |
ohmystars/tests.py | wolfg1969/oh-my-stars | 76 | 12757675 | import unittest
from index import update_inverted_index
__author__ = 'guoyong'
class IndexTest(unittest.TestCase):
    """Unit tests for update_inverted_index (posting-list maintenance)."""

    def setUp(self):
        # Fresh inverted index with one empty posting list per test.
        self.index = {
            'python': []
        }

    def test_update_inverted_index_empty(self):
        """Items added to an empty posting list are all stored."""
        update_inverted_index(self.index, 'python', 1, 2, 3)
        self.assertEqual([1, 2, 3], self.index.get('python'))

    def test_update_inverted_index_duplicate_item(self):
        """Adding an already-present item must not create a duplicate."""
        update_inverted_index(self.index, 'python', 1, 2, 3)
        update_inverted_index(self.index, 'python', 3)
        self.assertEqual([1, 2, 3], self.index.get('python'))

    def test_update_inverted_index_sorted(self):
        """The posting list is kept sorted in ascending order."""
        update_inverted_index(self.index, 'python', 3, 1, 2)
        self.assertEqual([1, 2, 3], self.index.get('python'))
| 3.375 | 3 |
src/atcoder/abc030/c/sol_0.py | kagemeka/competitive-programming | 1 | 12757676 | <gh_stars>1-10
import typing
import sys
import numpy as np
import numba as nb
@nb.njit((nb.i8, nb.i8, nb.i8[:], nb.i8[:]), cache=True)
def solve(
    x: int,
    y: int,
    a: np.ndarray,
    b: np.ndarray,
) -> typing.NoReturn:
    # Greedy two-pointer sweep (ABC030 C, per the file path): alternately take
    # the earliest a-event at or after time t (which occupies us until
    # a[i] + x), then the earliest b-event at or after t (occupying until
    # b[j] + y).  Each completed a->b pair increments the answer.
    # Written nopython-friendly for numba; a and b are assumed sorted
    # ascending -- TODO confirm against the problem statement.
    n, m = len(a), len(b)
    t = 0        # earliest time the next event may start
    cnt = 0      # number of completed a->b pairs
    i = j = 0
    while True:
        # advance to the first a[i] >= t
        while i < n and a[i] < t: i += 1
        if i == n: break
        t = a[i] + x
        # advance to the first b[j] >= t
        while j < m and b[j] < t: j += 1
        if j == m: break
        t = b[j] + y
        cnt += 1
    print(cnt)
def main() -> typing.NoReturn:
    """Read the problem input from stdin, run the jitted solver, print the answer."""
    n, m = map(int, input().split())   # array lengths (re-derived inside solve)
    x, y = map(int, input().split())
    a = np.array(sys.stdin.readline().split(), dtype=np.int64)
    b = np.array(sys.stdin.readline().split(), dtype=np.int64)
    solve(x, y, a, b)


main()
PINp/2015/TITOV_I_V/task_9_20.py | YukkaSarasti/pythonintask | 0 | 12757677 | <reponame>YukkaSarasti/pythonintask<gh_stars>0
# Task 9
# Create a game in which the computer picks a word and the player must guess
# it.  The computer tells the player how many letters the word has and allows
# five queries asking whether a given letter occurs in the word; the program
# may answer only "Да" (yes) or "Нет" (no).  The player then tries to guess
# the whole word.  (Translated from the original Russian task description.)
# <NAME>.
# 02.06.2016
import random

# Candidate words (Russian); one is chosen at random each run.
spisokWord = ("слон", "аватар", "мяч", "суперудар", "реал", "баскетбол", "яблоко")
zagadka = random.choice(spisokWord)
print ("Длина слова - ", len(zagadka))
# Five letter queries: answer yes/no on membership of the entered letter.
for i in range(5):
    print("\n")
    userLeter = input ("Введите букву - ")
    if userLeter in zagadka:
        print ("Да")
    else:
        print ("Нет")
# Final guess: compare the player's answer with the hidden word.
if (input("\nВведите ответ - ") == zagadka):
    print ("Неплохо для магла)")
else:
    print ("Просто вы из этих (")
    print (zagadka)
input ("\n Нажмите ENTER для выхода")
| 3.765625 | 4 |
application/services/PatientService.py | MickeyPa/soen344 | 0 | 12757678 | from application.TDG import PatientTDG
from passlib.hash import sha256_crypt
import datetime
# Returns True if patient exists
def patientExists(hcnumber):
    """True when a patient row exists for the given health-card number."""
    record = PatientTDG.find(hcnumber=hcnumber)
    return record is not None
# Returns Patient if found
def getPatient(hcnumber):
    """Fetch a patient row as a dict, or None when no record exists."""
    record = PatientTDG.find(hcnumber=hcnumber)
    return dict(record) if record is not None else None
# Returns true if patient is authenticated
def authenticate(hcnumber, password):
    """True when the password matches the stored hash for this patient."""
    record = getPatient(hcnumber)
    if record is None:
        return False
    return sha256_crypt.verify(password, record['password_hash'])
# Returns True if patient is created
def createPatient(hcnumber, fname, lname, birthday, gender, phone, email, address, password, lastAnnual):
    """Create a patient record; returns False when the health-card number is taken.

    `birthday` and `lastAnnual` are 'YYYY-MM-DD' strings; `lastAnnual` may
    be empty/None.  The password is stored as a sha256_crypt hash.
    """
    if patientExists(hcnumber):
        return False
    password_hash = sha256_crypt.hash(password)
    # Convert the dash-separated date strings into date objects.
    if lastAnnual:
        parts = lastAnnual.split("-")
        lastAnnual = datetime.datetime.strptime(parts[0] + parts[1] + parts[2], '%Y%m%d').date()
    else:
        lastAnnual = None
    parts = birthday.split("-")
    birthday = datetime.datetime.strptime(parts[0] + parts[1] + parts[2], '%Y%m%d').date()
    PatientTDG.create(hcnumber=hcnumber, fname=fname, lname=lname, birthday=birthday, gender=gender, phone=phone, email=email, address=address, password_hash=password_hash, lastAnnual=lastAnnual)
    return True
# Returns true if patient can book an annual appointment. If not, return false.
# Checks when last annual was (must be at least over a year ago).
def canBookAnnual(hcnumber):
if getPatient(hcnumber)['lastAnnual'] is None:
return True
else:
annual = getPatient(hcnumber)['lastAnnual']
now = datetime.datetime.now()
if (now-annual).days >= 365:
return True
else:
return False
# returns true if patient's annual has been changed
# TO DO: update this method to change lastAnnual to the day of the appointment, not the day of the booking
def updateAnnual(hcnumber, date):
if getPatient(hcnumber) is None:
return False
else:
if date is not None:
lastannualSplit = date.split("-")
date = datetime.datetime.strptime(lastannualSplit[0] + lastannualSplit[1] + lastannualSplit[2], '%Y%m%d').date()
PatientTDG.update(hcnumber=hcnumber, date=date)
return True
else:
PatientTDG.update(hcnumber=hcnumber, date=None)
return True
| 2.859375 | 3 |
tools/recursive_read.py | tony1945/EMGMonitor | 1 | 12757679 | <reponame>tony1945/EMGMonitor
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from numpy import dot, cumsum, where, array_split, savetxt, fromfile, float64, mean, array, sqrt, abs, sum, transpose, reshape, zeros, append
from numpy.fft import fft
from os import listdir, mkdir
from os.path import isfile, join
from scipy.signal import iirnotch, butter, lfilter
import shutil
def prepareFilter(w0, fs):
    """Design the three EMG filter stages.

    Returns (notch_b, notch_a, hp_b, hp_a, lp_b, lp_a): a Q=10 notch at the
    normalised frequency w0 (mains hum), an 8th-order 15 Hz high-pass and an
    8th-order 120 Hz low-pass for sampling rate fs.
    """
    notch_b, notch_a = iirnotch(w0, 10)  # 60Hz notch
    hp_b, hp_a = butter(8, 15 / (fs / 2), 'highpass')
    lp_b, lp_a = butter(8, 120 / (fs / 2), 'lowpass')
    return notch_b, notch_a, hp_b, hp_a, lp_b, lp_a
def addFilter(b, a, data):
    """Apply the IIR filter defined by (b, a) to data and return the result."""
    return lfilter(b, a, data)
def meanfreq(x, win_size):
    """Power-weighted mean frequency (in FFT-bin units) of signal x."""
    half = int(win_size / 2) + 1
    power = abs(fft(x)) ** 2
    spectrum = power[1:half] * 2e-06   # one-sided spectrum, DC bin excluded
    total = sum(spectrum)
    return dot(spectrum, range(1, half)) / total
def medfreq(x, win_size):
    """Median frequency: first spectrum index where cumulative power reaches 50%.

    NOTE(review): this returns the 0-based index into the DC-stripped
    spectrum, i.e. one less than the bin number meanfreq() weights by --
    confirm the off-by-one is intentional.
    """
    half = int(win_size / 2) + 1
    spectrum = (abs(fft(x)) ** 2)[1:half] * 2e-06
    total = sum(spectrum)
    return where(cumsum(spectrum) >= total * 0.5)[0][0]
def rms(x):
    """Root-mean-square amplitude of the array x."""
    return sqrt(mean(x * x))
def arv(x):
    """Average rectified value (mean absolute amplitude) of x."""
    rectified = abs(x)
    return rectified.mean()
#Simple context manager for directory operations
#attrib to <NAME> https://stackoverflow.com/a/13197763
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
if __name__ == '__main__':
    # Walk the tree and process every LEAF directory (one recording session
    # per directory of .bin files).
    # sub = [name for name in os.listdir(".") if os.path.isdir(name)]
    for dirpath, dirnames, filenames in os.walk("."):
        if not dirnames:
            # print(dirpath, "has 0 subdirectories and", len(filenames), "files")
            with cd(dirpath):
                # print(os.getcwd())
                print("Processing ", dirpath)
                # binPath = 'rawdata/'
                binPath = './'
                binNumber = 0
                try:
                    # Count raw .bin recordings in this directory.
                    binNumber = len([name for name in listdir(binPath) if isfile(join(binPath, name))])
                except FileNotFoundError:
                    print("rawdata directory not found.\n")
                print('Find bins: ', binNumber)
                # rmtree throws an error if data directory not extant
                try:
                    shutil.rmtree('./RAW')
                except FileNotFoundError:
                    print("Data directory not found.\nCreating new directory...")
                mkdir('./RAW')
                fileNumbers = binNumber
                fs = 4000  # Sampling rate
                f0 = 60  # Frequency to be removed from signal (Hz)
                w0 = f0/(fs/2)  # normalised notch frequency
                b, a, d, c, f, e = prepareFilter(w0, fs)
                win_len = 4000  # window length in samples (1 s at fs)
                max_freq = 500
                rawSize = 4000
                num_win = int(rawSize / win_len)  # windows per file (1 with these settings)
                print('Number of wins: ', num_win)
                MEF = []
                MDF = []
                ARV = []
                RMS = []
                RAW = []
                raw_out = array([])
                for i in range(fileNumbers):
                    binpath = []
                    binpath.append(binPath + str(i) + '.bin')
                    test_str = "".join(binpath)
                    raw = fromfile(test_str, dtype=float64)
                    sub_raw = raw[:fs * num_win]  # transforms raw data into 1 sec windows
                    sub = array_split(sub_raw, num_win)
                    for m in range(num_win):
                        inwin = sub[m]
                        dataAF = inwin
                        # Filter chain: high-pass, notch, low-pass, then notch again.
                        dataAF = addFilter(d, c, dataAF)
                        dataAF = addFilter(b, a, dataAF)
                        dataAF = addFilter(f, e, dataAF)
                        dataAF = addFilter(b, a, dataAF)
                        # NOTE(review): this filename is per-file (i), not per-window;
                        # fine while num_win == 1 but would overwrite otherwise.
                        savetxt("RAW/"+str(i)+'.csv', array(dataAF))
                        MEF.append(meanfreq(dataAF, win_len))
                        MDF.append(medfreq(dataAF, win_len))
                        ARV.append(arv(dataAF))
                        RMS.append(rms(dataAF))
                        # print(reshape(dataAF, (1, rawSize)).shape)
                        # RAW.append(transpose(dataAF))
                        # RAW.append(reshape(dataAF, (1, rawSize)))
                        # RAW.append(dataAF)
                        # RAW = RAW.rstrip()
                        raw_out = append(raw_out, dataAF)
                        # print(raw_out.shape)
                # One CSV per feature for this directory, plus the concatenated raw signal.
                savetxt("ARV1.csv", array(ARV))
                savetxt("RMS1.csv", array(RMS))
                savetxt("MEF1.csv", array(MEF))
                savetxt("MDF1.csv", array(MDF))
                # savetxt("RAW_full.csv", RAW)
                savetxt("RAW.csv", array(raw_out))
                print("Complete, saved CSV files for ", dirpath)
| 2.296875 | 2 |
backend/urls.py | ranwise/djangochannel | 45 | 12757680 | from django.urls import path, include
# URL configuration: every route is delegated to the v2 API urlconf.
urlpatterns = [
    # API
    path('', include('backend.api.v2.urls')),
]
| 1.421875 | 1 |
svg_visible.py | hvfrancesco/freestylesvg | 52 | 12757681 | <reponame>hvfrancesco/freestylesvg<gh_stars>10-100
# License : MIT
# Author : <NAME>, <NAME>
# Date : 2014-03-26
import os
import re
import bpy
from freestyle import *
from freestyle.functions import *
from freestyle.predicates import *
from freestyle.types import *
from freestyle.shaders import *
from parameter_editor import *
from freestyle.chainingiterators import *
try:
import xml.etree.cElementTree as et
except ImportError:
import xml.etree.ElementTree as et
# change these values to alter the visible-line style; default is black lines
# with 2px thickness
color = "black"
width = 2
# XML prologue written once when the output file is first created.
_HEADER = """\
<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n
"""
# Empty SVG root element; the %d placeholders take the render width/height.
_ROOT = '<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" width="%d" height="%d"></svg>\n'
SVG_NS = "http://www.w3.org/2000/svg"
# Register prefixes so ElementTree serialises Inkscape/Sodipodi attributes nicely.
et.register_namespace("", SVG_NS)
et.register_namespace("sodipodi", "http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd")
et.register_namespace("inkscape", "http://www.inkscape.org/namespaces/inkscape")
scene = getCurrentScene()
current_frame = scene.frame_current
# Output size in pixels, honouring the render percentage.
w = scene.render.resolution_x * scene.render.resolution_percentage / 100
h = scene.render.resolution_y * scene.render.resolution_percentage / 100
# Derive the output SVG path next to the .blend file.
path = re.sub(r'\.blend$|$', '.svg', bpy.data.filepath)
# write header only if the file does not yet exist
try:
    with open(path) as f:
        pass
except IOError:
    # Context manager so the handle is closed even if a write fails
    # (the original open()/write()/close() sequence leaked it on error).
    with open(path, "w") as f:
        f.write(_HEADER)
        f.write(_ROOT % (w, h))
# select: visible edges only (quantitative invisibility 0) that are
# silhouettes, creases or contours
upred = AndUP1D(QuantitativeInvisibilityUP1D(0),
                OrUP1D(pyNatureUP1D(Nature.SILHOUETTE),
                       pyNatureUP1D(Nature.CREASE),
                       ContourUP1D()))
Operators.select(upred)
# chain selected edges into strokes, following silhouette continuations
Operators.bidirectional_chain(ChainSilhouetteIterator())
# sort strokes by depth (back to front)
Operators.sort(pyZBP1D())
# shade and write svg: re-open the document so strokes are appended to it
tree = et.parse(path)
root = tree.getroot()
class SVGPathShader(StrokeShader):
    """Appends each stroke to the visible group as an SVG <path> element."""

    def shade(self, stroke):
        # Collect the stroke vertices as "x,y" pairs; SVG's y axis points
        # down, so flip against the render height h.
        coords = []
        for v in stroke:
            x, y = v.point
            coords.append('%.3f,%.3f' % (x, h - y))
        path_xml = '<path fill="none" stroke="%s" stroke-width="%d" d="\nM ' % (color, width)
        path_xml += ' '.join(coords) + ' " />'
        group_visible.append(et.XML(path_xml))
# Shader pipeline applied to every selected stroke chain.
shaders_list = [
    SamplingShader(50),
    SVGPathShader(),
    ConstantColorShader(0, 0, 1),
    ConstantThicknessShader(10)
]
# layer for the frame: reuse the per-frame <g> if this frame was already
# exported, otherwise create it as an Inkscape layer
if tree.find(".//{http://www.w3.org/2000/svg}g[@id='frame_%06d']" % current_frame) is None:
    layer_frame = et.XML('<g id="frame_%06d"></g>' % current_frame)
    layer_frame.set('{http://www.inkscape.org/namespaces/inkscape}groupmode', 'layer')
    layer_frame.set('{http://www.inkscape.org/namespaces/inkscape}label', 'frame_%06d' % current_frame)
    root.append(layer_frame)
else:
    layer_frame = tree.find(".//{http://www.w3.org/2000/svg}g[@id='frame_%06d']" % current_frame)
# layer for visible lines, nested inside the frame layer
layer_visible = et.XML('<g id="layer_visible"></g>')
layer_visible.set('{http://www.inkscape.org/namespaces/inkscape}groupmode', 'layer')
layer_visible.set('{http://www.inkscape.org/namespaces/inkscape}label', 'visible')
layer_frame.append(layer_visible)
group_visible = et.XML('<g id="visible"></g>' )
layer_visible.append(group_visible)
# Run the pipeline: SVGPathShader appends one <path> per stroke to group_visible.
Operators.create(TrueUP1D(), shaders_list)
# prettifies
def indent(elem, level=0):
    """Recursively pretty-print an ElementTree in place (2-space indent).

    Only whitespace-only text/tail values are touched, so real content
    is never overwritten.
    """
    pad = "\n" + "  " * level
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = pad + "  "
        if not elem.tail or not elem.tail.strip():
            elem.tail = pad
        for child in elem:
            indent(child, level + 1)
        # Dedent the last child's tail so the closing tag lines up with us.
        if not child.tail or not child.tail.strip():
            child.tail = pad
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = pad
# Pretty-print the whole document in place before saving.
indent(root)
# write SVG to file
tree.write(path, encoding='UTF-8', xml_declaration=True)
| 1.898438 | 2 |
setup.py | JEdward7777/GsFileLock | 0 | 12757682 | <reponame>JEdward7777/GsFileLock
from distutils.core import setup
# distutils packaging metadata for the GsFileLock library.
setup(
    name='GsFileLock',
    version='0.1.0',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['gsfilelock','gsfilelock.test'],
    url='https://github.com/JEdward7777/GsFileLock',
    license='LICENSE.txt',
    description='Google Storage File locking library',
    # NOTE: README.txt is read when this script runs, so sdists must ship it.
    long_description=open('README.txt').read(),
)
| 1.226563 | 1 |
src/conformity.py | terror/golf | 0 | 12757683 | <gh_stars>0
# https://open.kattis.com/problems/conformity
import collections

# Kattis "conformity": count how many first-years picked each (order-insensitive)
# course combination; the answer is the popularity of the most popular
# combination multiplied by how many combinations share that popularity.
n = int(input())
combos = collections.Counter(
    tuple(sorted(map(int, input().split()))) for _ in range(n)
)
popularity = sorted(combos.values(), reverse=True)
print(popularity.count(popularity[0]) * popularity[0])
| 2.65625 | 3 |
2021_legrest/legrest_recommendation_dtr.py | elecun/mlpack | 0 | 12757684 | '''
@brief Leg-Rest Pos Recommendataion with DecisionTree Regressor
@author <NAME> <<EMAIL>>
@date 2021. 05. 21
'''
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import progressbar
'''
Presets & Hyper-parameters
'''
CONFIGURATION_FILE_PATH = "./data/train/data_config.csv"
DATASET_PATH = "./data/train/"
pd.set_option('display.width', 200) # for display width
# FEATURE_LENGTH = 30 # n-dimensional data feature only use
# NUMBER_OF_SAMPLES = 299 # number of augmented data
# FEATURE_MAX_LENGTH = 115 # Maximum feature length
# NUMBER_OF_RANDOM_SELECTION = 5
# MAX_TRAIN_ITERATION = -1 # infinity
'''
1. Load configuration file
'''
data_config = pd.read_csv(CONFIGURATION_FILE_PATH, header=0, index_col=0)
'''
2. data extraction
'''
X = data_config.loc[:, ['user_height', 'user_weight', 'user_age']]
# Derived features: Harris-Benedict-style BMR and BMI from height/weight/age.
bmr = 66.47+(13.75*X['user_weight'])+(5*X['user_height'])-(6.76*X['user_age'])
bmi = X['user_weight']/(X['user_height']/100*X['user_height']/100)
X["bmr"] = bmr
X["bmi"] = bmi
ys = data_config.loc[:, ['bestfit_angle_standard']]
yr = data_config.loc[:, ['bestfit_angle_relax']]
# Age is dropped as a direct feature; it only enters through BMR.
del X["user_age"]
'''
DecisionTree Regression Model
'''
X_train, X_test, y_train, y_test = train_test_split(X, np.ravel(ys), test_size=0.33, shuffle=True)
print("------ Regression Model Evaluation (@standard) ------")
model_standard = DecisionTreeRegressor(
    criterion = "mse",
    max_depth=50,
    min_samples_leaf=1,
    random_state=1).fit(X_train, y_train)
print("* R2 Score with Trainset (@standard) :", model_standard.score(X_train, y_train))
print("* R2 Score with Testset (@standard) :", model_standard.score(X_test, y_test))
print("* Feature Impotances (@standard) :")
for name, value in zip(X_train.columns, model_standard.feature_importances_):
    print(' - {0}: {1:.3f}'.format(name, value))
print("------ Regression Model Evaluation (@relax) ------")
# NOTE(review): model_relax is fitted on the SAME y_train that came from the
# @standard targets (ys); the relax targets (yr) are never used anywhere.
# This looks like a bug -- confirm before trusting result_relax.csv.
model_relax = DecisionTreeRegressor(
    criterion = "mse", # mean square error
    max_depth=50,
    min_samples_leaf=1,
    random_state=1).fit(X_train, y_train)
print("* R-squared Score with Trainset (@relax) :", model_relax.score(X_train, y_train))
print("* R-squared Score with Testset (@relax) :", model_relax.score(X_test, y_test))
print("* Feature Impotances (@relax) :")
for name, value in zip(X_train.columns, model_relax.feature_importances_):
    print(' - {0}: {1:.3f}'.format(name, value))
'''
Output File Generation
'''
# min_age = 20
# max_age = 80
# ages = np.array([min_age+i for i in range(max_age-min_age+1)])
ages = np.arange(20, 80, step=10)
# min_height = 150
# max_height = 190
# heights = np.array([min_height+i for i in range(max_height-min_height+1)])
heights = np.arange(150, 190, step=10)
# min_weight = 40
# max_weight = 100
# weights = np.array([min_weight+i for i in range(max_weight-min_weight+1)])
weights = np.arange(40, 100, step=10)
print(X.head())
bar = progressbar.ProgressBar(maxval=len(ages)*len(heights)*len(weights), widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
bar.start()
output_standard = pd.DataFrame(columns=['height','weight','legrest'])
output_relax = pd.DataFrame(columns=['height','weight','legrest'])
count = 0
# Predict a leg-rest angle for every (age, height, weight) grid point; the
# model inputs mirror the training columns [height, weight, bmr, bmi].
for a in ages:
    for h in heights:
        for w in weights:
            bmr = 66.47+(13.75*w)+(5*h)-(6.76*a)
            bmi = w/(h/100*h/100)
            pvs = model_standard.predict([[h,w,bmr,bmi]])
            pvr = model_relax.predict([[h,w,bmr,bmi]])
            output_standard = output_standard.append({'height':h, 'weight':w, 'legrest':pvs[0]}, ignore_index=True)
            output_relax = output_relax.append({'height':h, 'weight':w, 'legrest':pvr[0]}, ignore_index=True)
            count = count+1
            bar.update(count)
bar.finish()
output_standard.to_csv('result_standard.csv', index=False)
output_relax.to_csv('result_relax.csv', index=False)
print("saved results")
main.py | johnnylord/shopee_banner_checker | 0 | 12757685 | import os
import os.path as osp
import re
import time
import shutil
import argparse
import subprocess
import multiprocessing
import cv2
import numpy as np
import pandas as pd
from requests_html import HTML
from selenium import webdriver
def check_banner(args):
    """Worker: decide whether a seller page showed every required banner.

    args is a (stage_dir, banner_dir) pair: stage_dir holds the images
    downloaded from one seller page, banner_dir the reference banners.
    Returns (seller_name, valid) where valid is True once at least
    len(banners) matches have been counted.
    """
    valid = False
    stage_dir = args[0]
    banner_dir = args[1]
    # Read banners to check (skip dotfiles such as .DS_Store)
    banners = [ cv2.imread(osp.join(banner_dir, banner))
                for banner in os.listdir(banner_dir)
                if not banner.startswith('.') ]
    count = len(banners)
    # Check downloaded images one by one
    for path in [ osp.join(stage_dir, f) for f in os.listdir(stage_dir) ]:
        # Read image
        img = cv2.imread(path)
        if img is None:
            continue  # not a decodable image -- skip
        # Match with banner: resize the candidate to the banner's size, then
        # compare by mean absolute per-pixel difference.
        for banner in banners:
            img = cv2.resize(img, (banner.shape[1], banner.shape[0]))
            ref = banner.astype('float')
            tar = img.astype('float')
            # Determine image volume (total number of scalar values)
            volume = 1
            for v in img.shape:
                volume *= v
            # Perform difference between two image
            diff = np.sum(np.abs(ref-tar)) / volume
            # NOTE(review): count is decremented once per matching banner; two
            # downloads matching the SAME banner decrement it twice, which can
            # mark the seller valid early -- confirm this is acceptable.
            if diff < 10:
                count -= 1
        # Early stopping
        if count <= 0:
            valid = True
            break
    return (osp.basename(stage_dir), valid)
def main(args):
    """Open each seller page in headless Chrome, download its images, and
    check them against the reference banners; writes a seller/result CSV."""
    # Read target sellers to check their banner (one URL per line)
    with open(args['input'], 'r') as f:
        sellers = [ line.strip('\n') for line in f.readlines() ]
    seller_names = [ osp.basename(seller) for seller in sellers ]
    # Instantiate chrome webdriver with default page google.com opened
    mobile_emulation = { "deviceName": "iPhone X" }  # currently unused (see commented option below)
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument("--headless")
    # chrome_options.add_experimental_option("mobileEmulation", mobile_emulation)
    driver = webdriver.Chrome(args['driver'], options=chrome_options)
    driver.get('http://google.com')
    # Load every seller page in its own named window/tab
    for name, seller in zip(seller_names, sellers):
        print(f"Open page '{name}'")
        driver.execute_script(f"window.open('about:blank', '{name}');")
        driver.switch_to.window(name)
        driver.get(seller)
        time.sleep(3)  # crude wait for the page's images to load
    # Parse every opened page for Shopee CDN image links
    pattern = r"https://cf.shopee.tw/file/[\d\w]+"
    for name in seller_names:
        # Create (fresh) staging directory for each seller
        stage_dir = osp.join(args['stage'], name)
        shutil.rmtree(stage_dir, ignore_errors=True)
        os.makedirs(stage_dir)
        # Extract links of each loaded image from the rendered HTML
        driver.switch_to.window(name)
        html = driver.page_source
        imgs = re.findall(pattern, html)
        # Download each image concurrently via wget
        print(f"Download images in '{driver.current_url}'")
        procs = []
        for img in imgs:
            # NOTE(review): shell=True with an interpolated URL; the URLs come
            # from the regex above so the shape is constrained, but a list
            # argv with shell=False would be safer.
            cmdline = f'wget -O {osp.join(stage_dir, osp.basename(img))} {img}'
            proc = subprocess.Popen(
                cmdline,
                shell=True,
                stderr=subprocess.DEVNULL,
                stdout=subprocess.DEVNULL)
            procs.append(proc)
        # Wait for download completion
        for proc in procs:
            proc.wait()
            proc.terminate()
    # Exit the driver
    driver.quit()
    # Check banners with multiple workers (one task per staged seller dir)
    stages = [
        osp.join(args['stage'], seller)
        for seller in os.listdir(args['stage'])
        if not seller.startswith('.')
    ]
    banners = [ args['banner'] ]*len(stages)
    tasks = list(zip(stages, banners))
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    results = pool.map(check_banner, tasks)
    # Collect (seller, valid) pairs into the report CSV
    data = { 'seller': [], 'result': [] }
    for result in results:
        data['seller'].append(result[0])
        data['result'].append(result[1])
    df = pd.DataFrame(data, columns=['seller', 'result'])
    df.to_csv(args['output'], index=False)
    print(f"Export result to {args['output']}")
if __name__ == "__main__":
    # Command-line entry point; all options are passed to main() as a dict.
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", required=True, help="list of urls of target sellers")
    parser.add_argument("--output", default="report.txt", help="report file")
    parser.add_argument("--banner", default="banner", help="directory containing banners need to check")
    parser.add_argument("--stage", default="stage", help="staging directories to hold download images")
    parser.add_argument("--driver", default="driver/chromedriver")
    args = vars(parser.parse_args())
    main(args)
| 2.578125 | 3 |
src/561.py | kkspeed/LeetCode | 0 | 12757686 | """
LeetCode #561:
https://leetcode.com/problems/array-partition-i/description/
"""
class Solution(object):
    def arrayPairSum(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # Sort ascending; pairing neighbours maximises the sum of the
        # per-pair minimums, which are exactly the even-indexed elements.
        ordered = sorted(nums)
        total = 0
        for k in range(0, len(ordered), 2):
            total += ordered[k]
        return total
| 3.140625 | 3 |
backend/app/services.py | teramundi/onceonly | 0 | 12757687 | import base64
import secrets
import hashlib
import requests
from requests.models import HTTPError
from datetime import date, datetime, time, timedelta
from typing import List
from sqlalchemy.exc import IntegrityError
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.scrypt import Scrypt
from app.settings import settings
from app.errors import CaptchaError, InvalidDaysValueException
from app.repository import LogRepository, SecretRepository
from app.models import Log, Secret
class CaptchaValidator:
    """Validates Google reCAPTCHA tokens; a configured bypass code skips the check."""

    def __init__(self, captcha_secret_key: str, captcha_bypass_code: str):
        self.captcha_secret_key = captcha_secret_key
        self.captcha_bypass_code = captcha_bypass_code

    def validate(self, value: str):
        """Raise CaptchaError unless value is the bypass code or verifies with Google."""
        if value == self.captcha_bypass_code:
            return
        try:
            resp = requests.post(
                "https://www.google.com/recaptcha/api/siteverify",
                data={'secret': self.captcha_secret_key, 'response': value},
            )
            resp.raise_for_status()
            if not resp.json()['success']:
                raise CaptchaError("Captcha validation error")
        except HTTPError:
            # raise_for_status() failures (non-2xx responses) land here.
            raise CaptchaError("Error trying to validate reCaptcha")
class RandomPasswdGenerator:
    """Generates fixed-length passwords from a URL-safe alphabet using `secrets`."""

    CHARS = 'ABCDEFGHIJKLMNOPQRSTUVXWYZabcdefghijklmnopqrstuvxwyz0123456789-_~.'

    def __init__(self, size: int) -> None:
        self.size = size

    def generate(self) -> str:
        """Return a new cryptographically random password of `self.size` characters."""
        picks = [secrets.choice(self.CHARS) for _ in range(self.size)]
        return ''.join(picks)
class SaltGenerator:
    """Generates urlsafe-base64-encoded random salts of `size` raw bytes."""

    def __init__(self, size: int) -> None:
        self.size = size

    def generate(self) -> bytes:
        """Return base64(token) for a fresh `self.size`-byte random token."""
        token = secrets.token_bytes(self.size)
        return base64.urlsafe_b64encode(token)
class Crypt:
    """Symmetric encryption helper: derives a Fernet key from (salt, passwd) via scrypt."""

    # NOTE(review): the original annotated salt as str and __init__ as -> str;
    # Scrypt requires a bytes salt (SaltGenerator.generate returns bytes), so
    # the annotations below are corrected accordingly.
    def __init__(self, salt: bytes, passwd: str) -> None:
        self.salt = salt
        self.passwd = passwd

    def generate_key(self) -> bytes:
        """Derive a urlsafe-base64, 32-byte Fernet key with scrypt (n=2**18, r=8, p=1)."""
        kdf = Scrypt(
            salt=self.salt,
            length=32,
            n=2**18,
            r=8,
            p=1
        )
        key = kdf.derive(self.passwd.encode())
        return base64.urlsafe_b64encode(key)

    def encrypt(self, secret: str) -> bytes:
        """Encrypt a unicode secret; returns a Fernet token (bytes)."""
        f = Fernet(self.generate_key())
        return f.encrypt(secret.encode())

    def decrypt(self, secret: bytes) -> bytes:
        """Decrypt a Fernet token; returns the plaintext as bytes."""
        f = Fernet(self.generate_key())
        return f.decrypt(secret)
class LogService:
    """Creates and queries audit log entries."""

    def __init__(self, repository: LogRepository) -> None:
        self.repository = repository

    def create_log(
        self,
        action: str,
        client_address: str,
        client_user_agent: str,
        secret: Secret = None,
        # BUG FIX: the default here was a corrupted "<PASSWORD>"
        # placeholder; restored to None to match the optional ``secret``.
        password: str = None,
    ) -> Log:
        """Persist a new log entry.

        Args:
            action: one of the ``Log`` action constants (e.g. ``Log.ADD``).
            client_address: requester IP address.
            client_user_agent: requester user-agent string.
            secret: optional secret whose metadata is denormalized onto
                the log entry.
            password: optional plain password associated with the event.
        """
        log = Log()
        log.action = action
        log.created_at = datetime.now()
        log.client_address = client_address
        log.client_user_agent = client_user_agent
        log.password = password
        if secret:
            # Copy secret metadata so the log entry survives deletion of
            # the secret row itself.
            log.secret_passwd_hash = secret.passwd_hash
            log.secret_subject = secret.subject
            log.secret_created_at = secret.created_at
            log.secret_expires_in = secret.expires_in
        return self.repository.save(log)

    def get_logs(
        self,
        limit: int,
        offset: int = 0,
        action: str = None,
        secret_subject: str = None,
        client_address: str = None,
        # FIX: was mis-annotated as ``datetime``; this is a user-agent string.
        client_user_agent: str = None,
        start_date: date = None,
        end_date: date = None,
    ) -> List[Log]:
        """Return log entries matching the given filters.

        ``start_date``/``end_date`` are inclusive: they are expanded to
        the very beginning / very end of the given day before querying.
        """
        start_datetime = None
        if start_date:
            start_datetime = datetime.combine(start_date, time(0, 0, 0))
        end_datetime = None
        if end_date:
            end_datetime = datetime.combine(end_date, time(23, 59, 59))
        return self.repository.get_all(
            offset=offset,
            limit=limit,
            action=action,
            secret_subject=secret_subject,
            client_address=client_address,
            client_user_agent=client_user_agent,
            start_datetime=start_datetime,
            end_datetime=end_datetime,
        )
class SecretService:
    """Creates, reveals and hashes one-time secrets."""

    def __init__(
        self,
        repository: SecretRepository,
        passwd_generator: RandomPasswdGenerator,
        salt_generator: SaltGenerator,
        log_service: LogService,
        secret_min_days: int,
        secret_max_days: int,
    ) -> None:
        self.repository = repository
        self.passwd_generator = passwd_generator
        self.salt_generator = salt_generator
        self.log_service = log_service
        self.secret_min_days = secret_min_days
        self.secret_max_days = secret_max_days

    def create_secret(
        self,
        subject: str,
        body: str,
        days: int,
        client_address: str,
        client_user_agent: str,
    ) -> Secret:
        """Encrypt and store a new secret valid for *days* days.

        The secret body is encrypted with a key derived from a freshly
        generated random password; only the password's hash is stored.

        Raises:
            InvalidDaysValueException: if *days* is outside the configured
                [secret_min_days, secret_max_days] range.
        """
        if days < self.secret_min_days or days > self.secret_max_days:
            raise InvalidDaysValueException()
        saved = False
        secret = Secret()
        secret.subject = subject
        while not saved:
            try:
                # BUG FIX: restored the generator attribute name (the
                # source contained a corrupted "passwd_<PASSWORD>" token).
                secret.password = self.passwd_generator.generate()
                secret.key_salt = self.salt_generator.generate()
                crypt = Crypt(secret.key_salt, secret.password)
                secret.body = crypt.encrypt(body)
                secret.passwd_hash = self.hash(secret.password)
                now = datetime.now()
                secret.created_at = now
                secret.expires_in = now + timedelta(days)
                secret = self.repository.save(secret)
                saved = True
            except IntegrityError:
                # passwd_hash unique constraint hit: retry with a new
                # random password.
                pass
            except Exception as e:
                # NOTE(review): any *persistent* non-integrity error makes
                # this loop retry forever; consider re-raising after a
                # bounded number of attempts.
                if settings.debug:
                    print('DEBUG >> {}'.format(e))
        self.log_service.create_log(
            secret=secret,
            action=Log.ADD,
            client_address=client_address,
            client_user_agent=client_user_agent
        )
        return secret

    def reveal_secret(
        self,
        password: str,
        client_address: str,
        client_user_agent: str,
    ) -> Secret:
        """Look up a secret by password, decrypt it, delete it and log access.

        Returns:
            The decrypted secret, or ``None`` when the password matches no
            stored secret or the secret has already expired.
        """
        passwd_hash = self.hash(password)
        secret = self.repository.get_by_passwd_hash(passwd_hash)
        if secret:
            crypt = Crypt(secret.key_salt, password)
            secret.body = crypt.decrypt(secret.body)
            # One-time semantics: the secret is removed even when expired.
            self.repository.remove(secret)
            is_expired = secret.expires_in < datetime.now()
            action = Log.EXPIRED if is_expired else Log.VIEW
            self.log_service.create_log(
                secret=secret,
                action=action,
                password=password,
                client_address=client_address,
                client_user_agent=client_user_agent
            )
            if is_expired:
                secret = None
        else:
            # NOTE(review): failed lookups are also logged as Log.VIEW
            # (with no secret attached) — presumably to record the attempt;
            # confirm a distinct "not found" action is not intended.
            self.log_service.create_log(
                action=Log.VIEW,
                password=password,
                client_address=client_address,
                client_user_agent=client_user_agent
            )
        return secret

    def hash(self, value: str) -> str:
        """Return the hex SHA-256 digest of *value*."""
        return hashlib.sha256(value.encode()).hexdigest()
| 2.4375 | 2 |
salad/Viterbi_Decoder_salad/eval.py | giulio93/NeuralNetwork-Viterbi | 0 | 12757688 | #!/usr/bin/python
import argparse
import glob
import re
def recog_file(filename, ground_truth_path):
    """Compare one framelevel recognition file against its ground truth.

    Returns:
        (number of frame errors, number of evaluated frames)
    """
    # Ground truth file has the same basename as the recognition file,
    # lives under ground_truth_path, and carries a .txt extension.
    gt_file = ground_truth_path + re.sub('.*/','/',filename) + '.txt'
    with open(gt_file, 'r') as f:
        ground_truth = f.read().split('\n')[0:-1]
    # Framelevel recognition is stored in the 6th line of the file.
    with open(filename, 'r') as f:
        recognized = f.read().split('\n')[5].split()
    n_frame_errors = sum(
        1 for i in range(len(recognized))
        if recognized[i] != ground_truth[i]
    )
    return n_frame_errors, len(recognized)
### MAIN #######################################################################
### arguments ###
### --recog_dir: the directory where the recognition files from inferency.py are placed
### --ground_truth_dir: the directory where the framelevel ground truth can be found
parser = argparse.ArgumentParser()
parser.add_argument('--recog_dir', default='results')
parser.add_argument('--ground_truth_dir', default='data/groundTruth')
args = parser.parse_args()
# One recognition file per video is expected directly under recog_dir.
filelist = glob.glob(args.recog_dir + '/*')
print('Evaluate %d video files...' % len(filelist))
# Accumulate totals over the whole dataset so the final accuracy is
# frame-weighted (longer videos contribute proportionally more).
n_frames = 0
n_errors = 0
# loop over all recognition files and evaluate the frame error
for filename in filelist:
    errors, frames = recog_file(filename, args.ground_truth_dir)
    n_errors += errors
    n_frames += frames
# print frame accuracy (1.0 - frame error rate)
# NOTE(review): raises ZeroDivisionError when filelist is empty — confirm
# recog_dir always contains results before running.
print('frame accuracy: %f' % (1.0 - float(n_errors) / n_frames))
| 3.09375 | 3 |
services/lights-on.py | santiagofacchini/home-automation | 0 | 12757689 | <reponame>santiagofacchini/home-automation
import os
from HueClient import Light
from dotenv import load_dotenv
# Load environment variables from .env file (must be in ~)
load_dotenv(f'{os.environ["HOME"]}/.env')
# Hue client
# Bridge credentials come from the environment; lights are addressed by
# their numeric Hue id (comedor = dining room, sala_de_estar = living
# room, dormitorio = bedroom).
hue_user = os.environ['HUE_USER']
hue_ip = os.environ['HUE_IP']
comedor = Light(hue_user, hue_ip, 1)
sala_de_estar = Light(hue_user, hue_ip, 4)
# dormitorio = Light(hue_user, hue_ip, 3)
# Turn the configured lights on.
# NOTE(review): the dormitorio lines are commented out — confirm the
# bedroom light is excluded on purpose.
comedor.turn_on()
sala_de_estar.turn_on()
# dormitorio.turn_on()
| 2.421875 | 2 |
!DepthBundleApp/ConvertBinaries.py | princeton-computational-imaging/Handheld-Neural-Depth-Refinement | 26 | 12757690 | import argparse
import numpy as np
import struct
from matplotlib import gridspec
import matplotlib.pyplot as plt
from glob import glob
import os
from os.path import join
from natsort import natsorted
from skimage.transform import resize
import re
from tqdm import tqdm
""" Code to process depth/image/pose binaries the ios DepthBundleRecorder app into more useable .npz files.
Usage: python ConvertBinaries.py -d data_folder_with_binaries
Output: a folder data_processed_folder_with_binaries containing the processed depth bundles
"""
def read_header(header):
    """Decode the ASCII metadata header of a capture binary.

    Brackets, parentheses, whitespace and quotes are stripped, leaving a
    comma-separated stream from which the named fields are extracted.
    """
    cleaned = re.sub("\[|\]|\(|\)|\s|\'", "", str(header))
    cleaned = cleaned.split("<ENDHEADER>")[0]  # drop padding after the header

    def values_after(key, count):
        # Comma-separated tokens immediately following *key*.
        return cleaned.split(key)[1].split(",")[0:count]

    timestamp = float(values_after("Time:", 1)[0])
    euler_angles = np.array(values_after("EulerAngles:SIMD3<Float>", 3),
                            dtype=np.float32)
    world_pose = np.array(values_after("WorldPose:simd_float4x4", 16),
                          dtype=np.float32).reshape((4, 4))
    intrinsics = np.array(values_after("Intrinsics:Optionalsimd_float3x3", 9),
                          dtype=np.float32).reshape((3, 3))
    world_to_camera = np.array(values_after("WorldToCamera:Optionalsimd_float4x4", 16),
                               dtype=np.float32).reshape((4, 4))
    # Matrices are transposed as in the original parser (presumably the
    # header stores them column-major — confirm against the recorder app).
    return {'timestamp': timestamp,
            'euler_angles': euler_angles,
            'world_pose': world_pose.T,
            'intrinsics': intrinsics.T,
            'world_to_camera': world_to_camera.T}
def load_info(info_name):
    """Read a capture binary and parse its leading 1024-byte metadata header."""
    with open(info_name, mode='rb') as fh:
        raw = fh.read()
    return read_header(raw[:1024])
def load_depth(depth_name):
    """Load a 256x192 float32 depth map plus its raw 1024-byte header."""
    with open(depth_name, mode='rb') as fh:
        raw = fh.read()
    header = raw[:1024]   # metadata header, parsed elsewhere (read_header)
    payload = raw[1024:]
    floats = struct.unpack('f' * (len(payload) // 4), payload)
    depth = np.reshape(floats, (192, 256))
    # Transpose + horizontal flip so the map matches the image orientation.
    depth = np.flip(depth.T, 1).astype(np.float32)
    return depth, header
def load_conf(conf_name):
    """Load a 256x192 uint8 confidence map (no header on these files)."""
    with open(conf_name, mode='rb') as fh:
        raw = fh.read()
    values = struct.unpack('B' * len(raw), raw)
    conf = np.reshape(values, (192, 256))
    # Transpose + horizontal flip so the map matches the image orientation.
    return np.flip(conf.T, 1).astype(np.uint8)
def load_img(img_name):
    """Load a 1920x1440 RGB uint8 image from a raw YUV 4:2:0 binary dump."""
    with open(img_name, mode='rb') as fh:
        raw = fh.read()

    # Full-resolution luma plane.
    y_bytes = raw[:1920 * 1440]
    y_plane = np.reshape(struct.unpack('B' * len(y_bytes), y_bytes), (1440, 1920))
    y_plane = np.flip(y_plane.T, 1)

    # Interleaved half-resolution chroma planes.
    uv_bytes = raw[1920 * 1440:]
    uv = struct.unpack('B' * len(uv_bytes), uv_bytes)
    u_plane = np.flip(np.reshape(uv[0::2], (720, 960)).T, 1)
    v_plane = np.flip(np.reshape(uv[1::2], (720, 960)).T, 1)

    # Re-center chroma around zero, then nearest-neighbour upsample to
    # the luma resolution.
    y_plane = y_plane.astype(np.float32)
    u_plane = u_plane.astype(np.float32) - 128
    v_plane = v_plane.astype(np.float32) - 128
    u_plane = resize(u_plane, (1920, 1440), order=0)
    v_plane = resize(v_plane, (1920, 1440), order=0)

    # YUV -> RGB (conversion coefficients preserved from the original).
    R = y_plane + (v_plane * 1 / 0.6350)
    B = y_plane + (u_plane * 1 / 0.5389)
    G = (y_plane - 0.2126 * R - 0.0722 * B) * (1 / 0.7152)

    img = np.stack((R, G, B), axis=-1)
    return np.clip(img, 0, 255).astype(np.uint8)
def _save_preview(npz_file, idx, save_path, out_name):
    """Save a side-by-side image / confidence / depth preview of frame *idx*."""
    fig = plt.figure(figsize=(14, 30))
    gs = gridspec.GridSpec(1, 3, wspace=0.0, hspace=0.0, width_ratios=[1, 1, 1.12])
    ax1 = plt.subplot(gs[0, 0])
    ax1.imshow(npz_file['img_{0}'.format(idx)])
    ax1.axis('off')
    ax1.set_title("Image")
    ax2 = plt.subplot(gs[0, 1])
    ax2.imshow(npz_file['conf_{0}'.format(idx)], cmap="gray")
    ax2.axis('off')
    ax2.set_title("Confidence")
    ax3 = plt.subplot(gs[0, 2])
    d = ax3.imshow(npz_file['depth_{0}'.format(idx)], cmap="Spectral", vmin=0, vmax=7)
    ax3.axis('off')
    ax3.set_title("Depth")
    fig.colorbar(d, fraction=0.055, label="Depth [m]")
    plt.savefig(join(save_path, out_name), bbox_inches='tight', pad_inches=0.05, facecolor='white')
    plt.close()


def main():
    """Convert recorded binaries into .npz bundles (see module docstring)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', default=None, type=str, required=True, help='Data directory')
    args = parser.parse_args()

    bundle_names = natsorted(glob(join(args.d, "*")))

    for bundle_name in bundle_names:
        print("Processing {0}.".format(bundle_name.split("/")[-1]))
        if "-poses" not in bundle_name:
            # Process image + depth bundle
            depth_names = natsorted(glob(join(bundle_name, "depth*.bin")))
            img_names = natsorted(glob(join(bundle_name, "image*.bin")))
            conf_names = natsorted(glob(join(bundle_name, "conf*.bin")))

            save_path = bundle_name.replace("data", "data_processed")
            os.makedirs(save_path, exist_ok=True)

            npz_file = {}
            for i, (img_name, depth_name, conf_name) in tqdm(enumerate(zip(img_names, depth_names, conf_names))):
                img = load_img(img_name)
                depth, header = load_depth(depth_name)
                info = read_header(header)
                conf = load_conf(conf_name)

                # Timestamps are stored relative to the first frame.
                if i == 0:
                    ref_time = info['timestamp']
                info['timestamp'] -= ref_time

                npz_file["img_{0}".format(i)] = img
                npz_file["depth_{0}".format(i)] = depth
                npz_file["conf_{0}".format(i)] = conf
                npz_file["info_{0}".format(i)] = info
            npz_file["num_frames"] = len(img_names)

            # Preview images for the first and last frame.
            # (Refactor: the two near-identical plotting sections were
            # collapsed into _save_preview.)
            _save_preview(npz_file, 0, save_path, "frame_first.png")
            _save_preview(npz_file, len(img_names) - 1, save_path, "frame_last.png")

            # Save bundle
            np.savez(join(save_path, "frame_bundle"), **npz_file)
        else:
            # Process only poses + info bundle
            info_names = natsorted(glob(join(bundle_name, "info*.bin")))

            save_path = bundle_name.replace("data", "data_processed")
            os.makedirs(save_path, exist_ok=True)

            npz_file = {}
            for i, info_name in tqdm(enumerate(info_names)):
                info = load_info(info_name)
                if i == 0:
                    ref_time = info['timestamp']
                info['timestamp'] -= ref_time
                npz_file["info_{0}".format(i)] = info
            npz_file["num_frames"] = len(info_names)

            # Save bundle
            np.savez(join(save_path, "info_bundle"), **npz_file)
if __name__ == '__main__':
main() | 2.03125 | 2 |
zxtools/zeus2txt.py | fossabot/zxtools | 13 | 12757691 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 <NAME>
# http://www.codeatcpp.com
#
# Licensed under the BSD 3-Clause license.
# See LICENSE file in the project root for full license information.
#
""" Convert Zeus Z80 assembler file to a plain text """
import argparse
import logging
import io
from zxtools import CHUNK_SIZE
from zxtools.common import default_main
CODE_ALIGN_WIDTH = 35
def show_info(*zeus_args):
    """Show some statistic about Zeus file.

    Placeholder implementation: currently echoes its arguments back.
    """
    # TODO Implement this function
    return zeus_args
def read_file(src_file):
    """Yield the bytes of *src_file* one value at a time.

    The file is read in CHUNK_SIZE pieces and closed when exhausted.
    """
    with src_file:
        chunk = src_file.read(CHUNK_SIZE)
        while chunk:
            for byte_value in chunk:
                yield byte_value
            chunk = src_file.read(CHUNK_SIZE)
# Byte values at or above this mark encode Zeus assembler keyword tokens;
# anything below is treated as a printable character.
ASM_FIRST_TOKEN = 128
# Keyword table indexed by (token - ASM_FIRST_TOKEN).  Entries carrying a
# trailing space are mnemonics/directives that are followed by an operand.
ASM_META = [
    "A", "ADC ", "ADD ", "AF'", "AF", "AND ", "B", "BC", "BIT ", "C",
    "CALL ", "CCF", "CP ", "CPD", "CPDR", "CPI", "CPIR", "CPL", "D", "DAA",
    "DE", "DEC ", "DEFB ", "DEFM ", "DEFS ", "DEFW ", "DI", "DISP ", "DJNZ ",
    "E", "EI", "ENT", "EQU ", "EX ", "EXX", "H", "HALT", "HL", "I", "IM ",
    "IN ", "INC ", "IND", "INDR", "INI", "INIR", "IX", "IY", "JP ", "JR ",
    "L", "LD ", "LDD", "LDDR", "LDI", "LDIR", "M", "NC", "NEG", "NOP", "NV",
    "NZ", "OR ", "ORG ", "OTDR", "OTIR", "OUT ", "OUTD", "OUTI", "P", "PE",
    "PO", "POP ", "PUSH ", "R", "RES ", "RET", "RETI", "RETN", "RL ", "RLA",
    "RLC ", "RLCA", "RLD", "RR ", "RRA", "RRC ", "RRCA", "RRD", "RST ",
    "SBC ", "SCF", "SET ", "SLA ", "SP", "SRA ", "SRL ", "SUB ", "V", "XOR ",
    "Z"]
def convert_file(parsed_args):
    """ Convert Zeus Z80 assembler file specified in zeus_file to the plain
    text and print it to the output_file """
    logger = logging.getLogger('convert_file')
    process_string = False       # inside the body of a source line?
    strnum_lo = False, 0         # (low byte seen?, low byte) of the 16-bit line number
    tab = False                  # next byte is a column count for indentation
    output = parsed_args.output_file
    strnum = 0                   # current decoded line number
    cur_buffer = ""              # hex dump of the raw bytes of the current line
    cur_line = io.StringIO()     # decoded text of the current line
    for cur_char in read_file(parsed_args.zeus_file):
        if process_string:
            # Collect the raw byte for the optional "--include-code" dump.
            cur_buffer += "0x%02X " % cur_char
            if not cur_char: # End of string
                # NUL terminator: flush the decoded line to the output.
                process_string = False
                strnum_lo = False, 0
                cur_str = cur_line.getvalue()
                print(cur_str, end="", file=output)
                if parsed_args.include_code:
                    # Pad to a fixed column and append the raw bytes as a
                    # ';' comment after the decoded text.
                    print(" "*(CODE_ALIGN_WIDTH-len(cur_str))+";",
                          "0x%04X " % strnum + cur_buffer, file=output)
                else:
                    print(file=output)
                continue
            if tab:
                # Previous byte was 0x0A: this byte is the number of spaces.
                print(" "*cur_char, end="", file=cur_line)
                tab = False
                continue
            if cur_char == 0x0A:
                tab = True
                continue
            if cur_char < ASM_FIRST_TOKEN: # Printable character
                print(chr(cur_char), end="", file=cur_line)
                continue
            try:
                # Expand an assembler keyword token via the lookup table.
                print(ASM_META[cur_char-ASM_FIRST_TOKEN], end="", file=cur_line)
            except IndexError:
                logger.warning("Token not defined: 0x%02X (%d), at line %05d. "
                               "Skipped.", cur_char, cur_char, strnum)
        else:
            # Between lines: assemble the two-byte little-endian line number.
            if not strnum_lo[0]:
                strnum_lo = True, cur_char
            else:
                strnum = strnum_lo[1] + cur_char*256
                if strnum == 0xFFFF: # End of file
                    print(file=output)
                    break
                cur_line = io.StringIO()
                cur_line.truncate(0)   # (redundant on a fresh buffer)
                cur_buffer = ""
                print("%05d" % strnum, end=" ", file=cur_line)
                process_string = True
    output.close()
def create_parser():
    """Build the command-line parser with the 'info' and 'convert' subcommands."""
    parser = argparse.ArgumentParser(
        description="Zeus Z80 assembler files converter")
    parser.add_argument(
        '-v', '--verbose', help="Increase output verbosity",
        action='store_true')

    subparsers = parser.add_subparsers(help="Available commands")
    subparsers.required = False

    # "info": statistics about a Zeus file.
    info_cmd = subparsers.add_parser(
        'info',
        help="Show information about the specified Zeus Z80 assembler file")
    info_cmd.add_argument(
        'zeus_file', metavar='zeus-file', type=argparse.FileType('rb', 0),
        help="Input file with Zeus Z80 assembler (usually FILENAME.$C)")
    info_cmd.set_defaults(func=show_info)

    # "convert": Zeus file -> plain-text listing.
    convert_cmd = subparsers.add_parser(
        'convert', help="Convert Zeus Z80 assembler file to a plain text file")
    convert_cmd.add_argument(
        'zeus_file', metavar='zeus-file', type=argparse.FileType('rb', 0),
        help="Input file with Zeus Z80 assembler (usually FILENAME.$C)")
    convert_cmd.add_argument(
        'output_file', metavar='output-file',
        type=argparse.FileType('w'), help="Path to the output file")
    convert_cmd.add_argument(
        '--include-code', dest='include_code',
        action='store_true', help="Include original code in the output file")
    convert_cmd.set_defaults(func=convert_file)
    return parser
def main():
    """Entry point"""
    # Delegate argument parsing / subcommand dispatch to the shared
    # zxtools runner.
    default_main(create_parser())


if __name__ == '__main__':
    main()
| 2.671875 | 3 |
backend/mopidy_jukebox/util.py | qurben/mopidy-jukebox | 0 | 12757692 | <reponame>qurben/mopidy-jukebox
from models import Vote
def votes_json(track):
    """Yield one JSON-ready dict per vote cast for *track*."""
    for vote in Vote.select().where(Vote.track_uri == track.uri):
        yield {
            'user': vote.user.name,
            'uid': vote.user.uid,
            'picture': vote.user.picture,
            'time': vote.timestamp.isoformat()
        }
def track_json(track):
    """
    Generate JSON from a Mopidy track
    :param track: A mopidy.models.Track
    :return: dict with track metadata and its vote list
    """
    artist_names = [artist.name for artist in track.artists]
    return {
        'track_name': track.name,
        'track_uri': track.uri,
        'artists': artist_names,
        'album': track.album.name,
        'images': list(track.album.images),
        'votes': list(votes_json(track))
    }
| 2.859375 | 3 |
class3_exercise1.py | nokn0k/Net_AutomationScripts | 0 | 12757693 | <reponame>nokn0k/Net_AutomationScripts
#!/usr/bin/env python3
ARP = [
{'mac_addr': '0062.ec29.70fe', 'ip_addr': '10.220.88.1', 'interface': 'gi0/0/0'},
{'mac_addr': 'c89c.1dea.0eb6', 'ip_addr': '10.220.88.20', 'interface': 'gi0/0/0'},
{'mac_addr': 'a093.5141.b780', 'ip_addr': '10.220.88.22', 'interface': 'gi0/0/0'},
{'mac_addr': '0001.00ff.0001', 'ip_addr': '10.220.88.37', 'interface': 'gi0/0/0'},
{'mac_addr': '0002.00ff.0001', 'ip_addr': '10.220.88.38', 'interface': 'gi0/0/0'},
]
print(ARP)
x = type(ARP)
print(x)
y = len(ARP)
print(y)
| 2.25 | 2 |
my_fit.py | richmanbtc/bot_snippets | 6 | 12757694 | <reponame>richmanbtc/bot_snippets
import inspect
import lightgbm as lgb
import xgboost as xgb
def my_fit(model, *args, **kwargs):
if kwargs.get('fit_context') is not None:
fit_context = kwargs['fit_context']
if isinstance(model, lgb.LGBMRegressor) or isinstance(model, lgb.LGBMClassifier):
kwargs['eval_set'] = [(fit_context['X_val'], fit_context['y_val'])]
if 'sample_weight_val' in fit_context and fit_context['sample_weight_val'] is not None:
kwargs['eval_sample_weight'] = [fit_context['sample_weight_val']]
kwargs['early_stopping_rounds'] = fit_context['early_stopping_rounds']
kwargs['verbose'] = False
del kwargs['fit_context']
print('early stopping is used lgbm')
if isinstance(model, xgb.XGBRegressor) or isinstance(model, xgb.XGBClassifier):
kwargs['eval_set'] = [(fit_context['X_val'], fit_context['y_val'])]
if 'sample_weight_val' in fit_context and fit_context['sample_weight_val'] is not None:
kwargs['eval_sample_weight'] = [fit_context['sample_weight_val']]
kwargs['early_stopping_rounds'] = fit_context['early_stopping_rounds']
kwargs['verbose'] = False
del kwargs['fit_context']
print('early stopping is used xgb')
argspec = inspect.getfullargspec(model.fit)
# print(argspec)
if 'fit_context' in kwargs and 'fit_context' not in argspec.args:
del kwargs['fit_context']
# print(model)
# print(kwargs.keys())
# print(argspec.args)
# print(argspec)
#
# if 'sample_weight' in kwargs and 'sample_weight' not in argspec.args:
# del kwargs['sample_weight']
return model.fit(*args, **kwargs)
| 2.03125 | 2 |
matches/apis.py | rmishra7/cms | 0 | 12757695 |
from rest_framework import generics, serializers
from .models import Match
from .serializers import MatchSerializer
class MatchListApi(generics.ListAPIView):
"""
match queryset/list API
"""
model = Match
serializer_class = MatchSerializer
def get_queryset(self):
queryset = self.model.objects.all()
kwargs = {}
for key, vals in self.request.GET.lists():
if key not in [x.name for x in self.model._meta.fields] and key not in ['page_size', 'page', 'ordering', 'teams']:
raise serializers.ValidationError("Invalid query param passed: " + str(key))
for v in vals:
kwargs[key] = v
if 'page_size' in kwargs:
kwargs.pop('page_size')
if 'page' in kwargs:
kwargs.pop('page')
if 'ordering' in kwargs:
kwargs.pop('ordering')
if 'teams' in kwargs:
kwargs['team1__in'] = kwargs['teams'].split(',')
kwargs['team2__in'] = kwargs['teams'].split(',')
kwargs.pop('teams')
print (kwargs)
queryset = queryset.filter(**kwargs)
if self.request.query_params.get('ordering', None) not in [None, ""]:
ordering = self.request.query_params.get('ordering', None)
return queryset.order_by(ordering)
return queryset
| 2.1875 | 2 |
hqme/ext/platform/youtube.py | zeroday0619/HQME | 2 | 12757696 | <filename>hqme/ext/platform/youtube.py
import asyncio
import functools
import youtube_dl
from hqme.exceptions.youtube import (
CategoryNotFound,
ChannelIdNotFound,
ChannelTitleNotFound,
ChannelUrlNotFound,
CommentCountNotFound,
DescriptionNotFound,
DislikeCountNotFound,
DurationNotFound,
LikeCountNotFound,
SearchListNotFound,
TegsNotFound,
ThumbnailNotFound,
TitleNotFound,
UploadDateNotFound,
UploaderIdNotFound,
UploaderNotFound,
UploaderUrlNotFound,
VideoUrlNotFound,
ViewCountNotFound,
)
from validator_collection.checkers import is_url
from youtube_dl import YoutubeDL
youtube_dl.utils.bug_reports_message = lambda: ""
yt_dl = YoutubeDL(params={})
class YouTube:
"""
You can use this class to get information about a youtube video.
"""
def __init__(
self,
url: str,
geo_bypass: bool = True,
quiet: bool = True,
ignoreerrors: bool = True,
no_playlist: bool = True,
no_warnings: bool = True,
simulate: bool = True,
skip_download: bool = True,
nocheckcertificate: bool = True,
no_part: bool = True,
updatetime: bool = True,
) -> None:
"""
Args:
url: The url of the video.
geo_bypass: Bypass geographic restriction.
quiet: If True, youtube_dl will not print anything to stdout.
ignoreerrors: If True, youtube_dl will not stop when it encounters an error.
no_playlist: If True, youtube_dl will not download the playlist.
no_warnings: If True, youtube_dl will not print anything to stdout.
simulate: If True, youtube_dl will not download the video.
skip_download: If True, youtube_dl will not download the video.
nocheckcertificate: If True, youtube_dl will not verify the server certificate.
no_part: If True, youtube_dl will not download the video.
updatetime: If True, youtube_dl will not download the video.
"""
self.url = url
self.ydl = yt_dl
self.ydl.params["format"] = "bestaudio/best"
self.ydl.params["geo-bypass"] = geo_bypass
self.ydl.params["quiet"] = quiet
self.ydl.params["ignoreerrors"] = ignoreerrors
self.ydl.params["noplaylist"] = no_playlist
self.ydl.params["no_warnings"] = no_warnings
self.ydl.params["simulate"] = simulate
self.ydl.params["skip_download"] = skip_download
self.ydl.params["nocheckcertificate"] = nocheckcertificate
self.ydl.params["nopart"] = no_part
self.ydl.params["updatetime"] = updatetime
self.ydl.params["default_search"] = "auto"
self.data = None
async def sync(self) -> None:
"""
Sync the data of the video.
"""
loop = asyncio.get_running_loop()
partial = functools.partial(self.ydl.extract_info, self.url, download=False)
self.data = await loop.run_in_executor(None, partial)
async def get_title(self) -> str:
"""
Get the title of the video.
Returns:
The title of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_title = entries.get("title")
if type(_title) is not str:
raise TitleNotFound("No title of the video.")
else:
title = str(_title)
return title
async def get_description(self) -> str:
"""
Get the description of the video.
Returns:
The description of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"]
_description = entries.get("description")
if type(_description) is None:
raise DescriptionNotFound("No description of the video.")
else:
description = str(_description)
return description
async def get_duration(self) -> int:
"""
Get the duration of the video.
Returns:
The duration of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_duration = entries.get("duration")
if _duration is None:
raise DurationNotFound("No duration of the video.")
else:
duration = int(_duration)
return duration
async def get_uploader(self) -> str:
"""
Get the uploader of the video.
Returns:
The uploader of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_uploader = entries.get("uploader")
if _uploader is None:
raise UploaderNotFound("No uploader of the video.")
else:
uploader = str(_uploader)
return uploader
async def get_upload_date(self) -> str:
"""
Get the upload date of the video.
Returns:
The upload date of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_upload_date = entries.get("upload_date")
if _upload_date is None:
raise UploadDateNotFound("No upload date of the video.")
else:
upload_date = str(_upload_date)
return upload_date
async def get_upload_time(self) -> str:
"""
Get the upload time of the video.
Returns:
The upload time of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_upload_time = entries.get("upload_time")
if _upload_time is None:
raise UploadDateNotFound("No upload time of the video.")
else:
upload_time = str(_upload_time)
return upload_time
async def get_thumbnail(self) -> str:
"""
Get the thumbnail of the video.
Returns:
The thumbnail of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_thumbnail = entries.get("thumbnail")
if _thumbnail is None:
raise ThumbnailNotFound("No thumbnail of the video.")
else:
thumbnail = str(_thumbnail)
return thumbnail
async def get_view_count(self) -> int:
"""
Get the view count of the video.
Returns:
The view count of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_view_count = entries.get("view_count")
if _view_count is None:
raise ViewCountNotFound("No view count of the video.")
else:
view_count = int(_view_count)
return view_count
async def get_like_count(self) -> int:
"""
Get the like count of the video.
Returns:
The like count of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_like_count = entries.get("like_count")
if _like_count is None:
raise LikeCountNotFound("No like count of the video.")
else:
like_count = int(_like_count)
return like_count
async def get_dislike_count(self) -> int:
"""
Get the dislike count of the video.
Returns:
The dislike count of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
dislike_count = entries.get("dislike_count")
if dislike_count is None:
raise DislikeCountNotFound("No dislike count of the video.")
else:
dislike_count = int(dislike_count)
return dislike_count
async def get_comment_count(self) -> int:
"""
Get the comment count of the video.
Returns:
The comment count of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_comment_count = entries.get("comment_count")
if _comment_count is None:
raise CommentCountNotFound("No comment count of the video.")
else:
comment_count = int(_comment_count)
return comment_count
async def get_categories(self) -> list:
"""
Get the categories of the video.
Returns:
The categories of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_categories = entries.get("categories")
if _categories is None:
raise CategoryNotFound("No categories of the video.")
else:
categories = list(_categories)
return categories
async def get_tags(self) -> list:
"""
Get the tags of the video.
Returns:
The tags of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_tags = entries.get("tags")
if _tags is None:
raise TegsNotFound("No tags of the video.")
else:
tags = list(_tags)
return tags
async def get_uploader_id(self) -> str:
"""
Get the uploader id of the video.
Returns:
The uploader id of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_uploader_id = entries.get("uploader_id")
if _uploader_id is None:
raise UploaderIdNotFound("No uploader id of the video.")
else:
uploader_id = str(_uploader_id)
return uploader_id
async def get_uploader_url(self) -> str:
"""
Get the uploader url of the video.
Returns:
The uploader url of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_uploader_url = entries.get("uploader_url")
if _uploader_url is None:
raise UploaderUrlNotFound("No uploader url of the video.")
else:
uploader_url = str(_uploader_url)
return uploader_url
async def get_channel_id(self) -> str:
"""
Get the channel id of the video.
Returns:
The channel id of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_channel_id = entries.get("channel_id")
if _channel_id is None:
raise ChannelIdNotFound("No channel id of the video.")
else:
channel_id = str(_channel_id)
return channel_id
async def get_channel_url(self) -> str:
"""
Get the channel url of the video.
Returns:
The channel url of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_channel_url = entries.get("channel_url")
if _channel_url is None:
raise ChannelUrlNotFound("No channel url of the video.")
else:
channel_url = str(_channel_url)
return channel_url
async def get_channel_title(self) -> str:
"""
Get the channel title of the video.
Returns:
The channel title of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_channel_title = entries.get("channel_title")
if _channel_title is None:
raise ChannelTitleNotFound("No channel title of the video.")
else:
channel_title = str(_channel_title)
return channel_title
async def get_video_url(self) -> str:
"""
Get the video url of the video.
Returns:
The video url of the video.
"""
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"][0]
_url = entries.get("url")
if _url is None:
raise VideoUrlNotFound("No url of the video.")
else:
url = str(_url)
return url
class YouTubeSearch(YouTube):
"""
Class for searching YouTube.
"""
def __init__(self, source: str, max_result: int = 10) -> None:
"""
Youtebe Search
``````````````
Args:
source: Youtbe search query. query is youtube url or search word.
max_result: Maximum number of results. default is 10.
"""
self.source = "%s%s:%s" % ("ytsearch", max_result, "".join(source))
super(YouTubeSearch, self).__init__(url=self.source)
async def parse_duration(self, duration: int) -> str:
"""
Get the duration of the video.
Args:
duration: The duration of the video.
Returns:
The duration of the video.
"""
value = None
if duration > 0:
minutes, seconds = divmod(duration, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
duration = []
_duration = duration.append
if days > 0:
_duration(f"{days} days")
if hours > 0:
_duration(f"{hours} hours")
if minutes > 0:
_duration(f"{minutes} minutes")
if seconds > 0:
_duration(f"{seconds} seconds")
value = ", ".join(duration)
elif duration == 0:
value = "LIVE STREAM"
return value
async def get_search_list(self) -> list:
"""
Get the search list of the video.
Returns:
The search list of the query.
"""
if not is_url(self.url):
if self.data is None:
await self.sync()
if "entries" in self.data:
entries = self.data["entries"]
else:
raise SearchListNotFound("Not Found")
search_list = []
_search_list = search_list.append
for entry in entries:
_search_list(
{
"title": entry.get("title"),
"url": entry.get("url"),
"thumbnail": entry.get("thumbnail"),
"description": entry.get("description"),
"webpage_url": entry.get("webpage_url"),
"uploader": entry.get("uploader"),
"uploader_url": entry.get("uploader_url"),
"uploader_id": entry.get("uploader_id"),
"channel_url": entry.get("channel_url"),
"view_count": entry.get("view_count"),
"like_count": entry.get("like_count"),
"dislike_count": entry.get("dislike_count"),
"duration": await self.parse_duration(entry.get("duration")),
"upload_date": entry.get("upload_date"),
"age_limit": entry.get("age_limit"),
}
)
return search_list
else:
raise Warning("Invalid url.")
| 2.96875 | 3 |
src/apps/sitio/forms.py | yrrodriguezb/wellsolutions | 0 | 12757697 | from django import forms
class ContactForm(forms.Form):
    """Plain (non-model) contact form: name, e-mail address and message.

    Field names are Spanish (nombre = name, mensaje = message).
    """
    nombre = forms.CharField(max_length=100)
    # NOTE(review): a CharField is used for the address; forms.EmailField
    # would add validation -- confirm intent before changing.
    email = forms.CharField( max_length=100)
    mensaje = forms.CharField(widget=forms.Textarea)
chapter_10/nn_by_hand.py | rkneusel9/MathForDeepLearning | 23 | 12757698 | <reponame>rkneusel9/MathForDeepLearning
#
# file: nn_by_hand.py
#
# Implement a simple feedforward neural network with
# backprop and gradient descent.
#
# RTK, 02-Feb-2021
# Last update: 02-Feb-2021
#
################################################################
import numpy as np
from sklearn.datasets import load_iris
def BuildDataset():
    """Build shuffled train/test splits from the first two iris features.

    Keeps only classes 0 and 1, standardizes the features, and returns
    (xtrn, ytrn, xtst, ytst): 70 training and 30 test samples.
    """
    # Load iris and keep only the first two features.
    bundle = load_iris()
    feats = bundle["data"][:, :2]
    labels = bundle["target"]
    # Zero-mean, unit-variance features.
    feats = (feats - feats.mean(axis=0)) / feats.std(axis=0)
    # Stack class-0 samples (50 rows) on top of class-1 samples (50 rows).
    rows0 = np.where(labels == 0)[0]
    rows1 = np.where(labels == 1)[0]
    both = np.vstack((feats[rows0], feats[rows1]))
    # First 35 samples of each class train; the remaining 15 each test.
    xtrn = np.vstack((both[:35], both[50:85]))
    ytrn = np.array([0] * 35 + [1] * 35)
    xtst = np.vstack((both[35:50], both[85:]))
    ytst = np.array([0] * 15 + [1] * 15)
    # Shuffle each split (same trick as before: argsort of random keys).
    order = np.argsort(np.random.random(70))
    xtrn, ytrn = xtrn[order], ytrn[order]
    order = np.argsort(np.random.random(30))
    xtst, ytst = xtst[order], ytst[order]
    return xtrn, ytrn, xtst, ytst
################################################################
# sigmoid
#
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); works elementwise on arrays."""
    ez = np.exp(-x)
    return 1.0 / (1.0 + ez)
################################################################
# Forward
#
def Forward(net, x):
    """Forward-propagate every row of x through the 2-2-1 network.

    net holds weights w0..w5 and biases b0..b2; the two hidden units are
    sigmoidal and the output is linear.  Returns a vector with one raw
    output per sample (Evaluate thresholds these at 0.5).
    """
    out = np.zeros(x.shape[0])
    for k in range(x.shape[0]):
        x0, x1 = x[k, 0], x[k, 1]
        hidden0 = sigmoid(net["w0"] * x0 + net["w2"] * x1 + net["b0"])
        hidden1 = sigmoid(net["w1"] * x0 + net["w3"] * x1 + net["b1"])
        out[k] = net["w4"] * hidden0 + net["w5"] * hidden1 + net["b2"]
    return out
################################################################
# Evaluate
#
def Evaluate(net, x, y):
    """Threshold the network outputs at 0.5 and tally a confusion matrix.

    Returns (tn, fp, fn, tp, predictions), where predictions is the list
    of 0/1 class guesses, one per row of x.
    """
    outputs = Forward(net, x)
    tn = fp = fn = tp = 0
    pred = []
    for idx in range(len(y)):
        guess = 0 if (outputs[idx] < 0.5) else 1
        pred.append(guess)
        if (guess == 0) and (y[idx] == 0):
            tn += 1
        elif (guess == 0) and (y[idx] == 1):
            fn += 1
        elif (guess == 1) and (y[idx] == 0):
            fp += 1
        else:
            tp += 1
    return tn, fp, fn, tp, pred
################################################################
# GradientDescent
#
def GradientDescent(net, x, y, epochs, eta):
    """Batch gradient descent on the 2-2-1 network.

    net    -- dict of weights w0..w5 and biases b0..b2; mutated in place
              and also returned
    x      -- (N, 2) feature matrix
    y      -- length-N 0/1 label vector
    epochs -- number of full passes over the training data
    eta    -- learning rate

    Each epoch accumulates the gradient over every sample and applies a
    single averaged update (true batch descent, not SGD).
    """
    for e in range(epochs):
        # Pass over training set accumulating deltas
        dw0 = dw1 = dw2 = dw3 = dw4 = dw5 = db0 = db1 = db2 = 0.0
        for k in range(len(y)):
            # Forward pass (two sigmoid hidden units, linear output)
            z0 = net["w0"]*x[k,0] + net["w2"]*x[k,1] + net["b0"]
            a0 = sigmoid(z0)
            z1 = net["w1"]*x[k,0] + net["w3"]*x[k,1] + net["b1"]
            a1 = sigmoid(z1)
            a2 = net["w4"]*a0 + net["w5"]*a1 + net["b2"]
            # Backward pass: chain rule; a*(1-a) is the sigmoid derivative,
            # (a2 - y[k]) the output error for squared-error loss.
            db2 += a2 - y[k]
            dw4 += (a2 - y[k]) * a0
            dw5 += (a2 - y[k]) * a1
            db1 += (a2 - y[k]) * net["w5"] * a1 * (1 - a1)
            dw1 += (a2 - y[k]) * net["w5"] * a1 * (1 - a1) * x[k,0]
            dw3 += (a2 - y[k]) * net["w5"] * a1 * (1 - a1) * x[k,1]
            db0 += (a2 - y[k]) * net["w4"] * a0 * (1 - a0)
            dw0 += (a2 - y[k]) * net["w4"] * a0 * (1 - a0) * x[k,0]
            dw2 += (a2 - y[k]) * net["w4"] * a0 * (1 - a0) * x[k,1]
        # Use average deltas to update the network
        m = len(y)
        net["b2"] = net["b2"] - eta * db2 / m
        net["w4"] = net["w4"] - eta * dw4 / m
        net["w5"] = net["w5"] - eta * dw5 / m
        net["b1"] = net["b1"] - eta * db1 / m
        net["w1"] = net["w1"] - eta * dw1 / m
        net["w3"] = net["w3"] - eta * dw3 / m
        net["b0"] = net["b0"] - eta * db0 / m
        net["w0"] = net["w0"] - eta * dw0 / m
        net["w2"] = net["w2"] - eta * dw2 / m
    # Training done, return the updated network
    return net
################################################################
# main
#
def main():
    """Train the hand-built 2-2-1 network on two iris classes and print
    confusion matrices before and after training."""
    epochs = 1000  # training epochs
    eta = 0.1      # learning rate

    xtrn, ytrn, xtst, ytst = BuildDataset()

    # Biases start at zero; weights get small random values.  Weights are
    # drawn in the same order as before (w5 down to w0) so the random
    # stream is consumed identically.
    net = {"b2": 0.0, "b1": 0.0, "b0": 0.0}
    for key in ("w5", "w4", "w3", "w2", "w1", "w0"):
        net[key] = 0.0001*(np.random.random() - 0.5)

    # Baseline performance with the untrained network.
    tn0,fp0,fn0,tp0,pred0 = Evaluate(net, xtst, ytst)

    net = GradientDescent(net, xtrn, ytrn, epochs, eta)

    # Final model performance.
    tn,fp,fn,tp,pred = Evaluate(net, xtst, ytst)

    print()
    print("Training for %d epochs, learning rate %0.5f" % (epochs, eta))
    print()
    print("Before training:")
    print("    TN:%3d  FP:%3d" % (tn0, fp0))
    print("    FN:%3d  TP:%3d" % (fn0, tp0))
    print()
    print("After training:")
    print("    TN:%3d  FP:%3d" % (tn, fp))
    print("    FN:%3d  TP:%3d" % (fn, tp))
    print()
# Run the demo when executed as a script.
if (__name__ == "__main__"):
    main()
| 3.546875 | 4 |
PyObjCTest/test_nsset.py | Khan/pyobjc-framework-Cocoa | 132 | 12757699 | <gh_stars>100-1000
from PyObjCTools.TestSupport import *
import objc
from Foundation import *
class TestNSSetInteraction(TestCase):
    """Basic Python <-> NSSet bridging tests (creation, membership,
    iteration, varargs constructors).

    Uses the modern unittest assert* spellings; the old ``self.assert_``
    alias was deprecated and removed in Python 3.12.
    """

    def __testRepeatedAllocInit( self ):
        # Disabled (name-mangled): repeated alloc/init should not leak.
        for i in range(1,1000):
            a = NSSet.alloc().init()

    def __testContains( self ):
        # Disabled (name-mangled): Python `in` bridges to NSSet membership.
        x = NSSet.setWithArray_( ["foo", "bar", "baz"] )
        self.assertIn("foo", x)
        self.assertNotIn("notfoo", x)

    def __testIteration( self ):
        # Disabled (name-mangled): iteration yields members of the set.
        x = NSSet.setWithArray_( ["foo", "bar", "baz"] )
        for i in x:
            self.assertIn(i, x)
            self.assertTrue(x.containsObject_(i))

    def test_varargsConstruction(self):
        # nil-terminated varargs constructors and objects/count variants
        w = NSSet.setWithObjects_(0,1,2,3,None)
        x = NSSet.alloc().initWithObjects_(0,1,2,3,None)
        y = NSSet.setWithObjects_count_(range(10), 4)
        z = NSSet.alloc().initWithObjects_count_(range(10), 4)

        self.assertEqual(len(w), 4)
        self.assertEqual(len(x), 4)
        self.assertEqual(len(y), 4)
        self.assertEqual(len(z), 4)

        self.assertIn(0, w)
        self.assertIn(1, x)
        self.assertIn(2, y)
        self.assertIn(3, z)

    def test_varargsConstruction2(self):
        w = NSMutableSet.setWithObjects_(0,1,2,3,None)
        x = NSMutableSet.alloc().initWithObjects_(0,1,2,3,None)
        y = NSMutableSet.setWithObjects_count_(range(10), 4)
        z = NSMutableSet.alloc().initWithObjects_count_(range(10), 4)

        self.assertEqual(len(w), 4)
        self.assertEqual(len(x), 4)
        self.assertEqual(len(y), 4)
        self.assertEqual(len(z), 4)

        self.assertIn(0, w)
        self.assertIn(1, x)
        self.assertIn(2, y)
        self.assertIn(3, z)
class TestVariadic (TestCase):
    """Varargs and objects/count constructors plus metadata assertions
    for NSSet / NSMutableSet.

    Uses the modern unittest assert* spellings; the old ``self.assert_``
    alias was deprecated and removed in Python 3.12.
    """

    def testSetWithObjects(self):
        o = NSSet.setWithObjects_()
        self.assertEqual(len(o), 0)
        self.assertIsInstance(o, NSSet)

        o = NSSet.setWithObjects_(1,2,3)
        self.assertEqual(len(o), 3)
        self.assertIsInstance(o, NSSet)
        self.assertIn(1, o)
        self.assertIn(2, o)
        self.assertIn(3, o)

        o = NSMutableSet.setWithObjects_()
        self.assertEqual(len(o), 0)
        self.assertIsInstance(o, NSMutableSet)

        o = NSMutableSet.setWithObjects_(1,2,3)
        self.assertEqual(len(o), 3)
        self.assertIsInstance(o, NSMutableSet)
        self.assertIn(1, o)
        self.assertIn(2, o)
        self.assertIn(3, o)

    def testInitWithObjects(self):
        o = NSSet.alloc().initWithObjects_()
        self.assertEqual(len(o), 0)
        self.assertIsInstance(o, NSSet)

        o = NSSet.alloc().initWithObjects_(1,2,3)
        self.assertEqual(len(o), 3)
        self.assertIsInstance(o, NSSet)
        self.assertIn(1, o)
        self.assertIn(2, o)
        self.assertIn(3, o)

        o = NSMutableSet.alloc().initWithObjects_()
        self.assertEqual(len(o), 0)
        self.assertIsInstance(o, NSMutableSet)

        o = NSMutableSet.alloc().initWithObjects_(1,2,3)
        self.assertEqual(len(o), 3)
        self.assertIsInstance(o, NSMutableSet)
        self.assertIn(1, o)
        self.assertIn(2, o)
        self.assertIn(3, o)

    def testSetWithObjectsCount(self):
        o = NSSet.setWithObjects_count_([1,2,3], 3)
        self.assertEqual(len(o), 3)
        self.assertIsInstance(o, NSSet)
        self.assertIn(1, o)
        self.assertIn(2, o)
        self.assertIn(3, o)
        self.assertNotIn(4, o)

        o = NSSet.setWithObjects_count_([1,2,3], 0)
        self.assertEqual(len(o), 0)
        self.assertIsInstance(o, NSSet)

        o = NSMutableSet.setWithObjects_count_([1,2,3], 3)
        self.assertEqual(len(o), 3)
        self.assertIsInstance(o, NSMutableSet)
        self.assertIn(1, o)
        self.assertIn(2, o)
        self.assertIn(3, o)

        o = NSMutableSet.setWithObjects_count_([1,2,3], 0)
        self.assertEqual(len(o), 0)
        self.assertIsInstance(o, NSMutableSet)

    def testInitWithObjectsCount(self):
        o = NSSet.alloc().initWithObjects_count_([1,2,3], 3)
        self.assertEqual(len(o), 3)
        self.assertIsInstance(o, NSSet)
        self.assertIn(1, o)
        self.assertIn(2, o)
        self.assertIn(3, o)
        self.assertNotIn(4, o)

        o = NSSet.alloc().initWithObjects_count_([1,2,3], 0)
        self.assertEqual(len(o), 0)
        self.assertIsInstance(o, NSSet)

        o = NSMutableSet.alloc().initWithObjects_count_([1,2,3], 3)
        self.assertEqual(len(o), 3)
        self.assertIsInstance(o, NSMutableSet)
        self.assertIn(1, o)
        self.assertIn(2, o)
        self.assertIn(3, o)

        o = NSMutableSet.alloc().initWithObjects_count_([1,2,3], 0)
        self.assertEqual(len(o), 0)
        self.assertIsInstance(o, NSMutableSet)

    def testMethods(self):
        # Metadata assertions about the bridged Objective-C signatures.
        self.assertResultIsBOOL(NSSet.containsObject_)
        self.assertResultIsBOOL(NSSet.intersectsSet_)
        self.assertResultIsBOOL(NSSet.isEqualToSet_)
        self.assertResultIsBOOL(NSSet.isSubsetOfSet_)

        self.assertArgIsIn(NSSet.setWithObjects_count_, 0)
        self.assertArgSizeInArg(NSSet.setWithObjects_count_, 0, 1)
        self.assertArgIsIn(NSSet.initWithObjects_count_, 0)
        self.assertArgSizeInArg(NSSet.initWithObjects_count_, 0, 1)

        self.assertArgIsBOOL(NSSet.initWithSet_copyItems_, 1)

    @min_os_level('10.6')
    def testMethods10_6(self):
        # Block-taking NSSet APIs introduced on OS X 10.6.
        self.assertArgIsBlock(NSSet.enumerateObjectsUsingBlock_, 0, b'v@o^' + objc._C_NSBOOL)
        self.assertArgIsBlock(NSSet.enumerateObjectsWithOptions_usingBlock_, 1, b'v@o^' + objc._C_NSBOOL)
        self.assertArgIsBlock(NSSet.objectsPassingTest_, 0, objc._C_NSBOOL + b'@o^' + objc._C_NSBOOL)
        self.assertArgIsBlock(NSSet.objectsWithOptions_passingTest_, 1, objc._C_NSBOOL + b'@o^' + objc._C_NSBOOL)
if __name__ == '__main__':
    # `main` comes from the PyObjCTools.TestSupport star import
    # (unittest-style test runner).
    main()
| 2.53125 | 3 |
tests/test_core.py | gururajrkatti/tinyalign | 2 | 12757700 | from tinyalign import edit_distance, hamming_distance
import random
import pytest
# Hand-picked (s, t) pairs covering empty strings, identical strings,
# overlaps, repeats, and very unequal lengths (DNA-like alphabets).
STRING_PAIRS = [
    ('', ''),
    ('', 'A'),
    ('A', 'A'),
    ('AB', ''),
    ('AB', 'ABC'),
    ('TGAATCCC', 'CCTGAATC'),
    ('ANANAS', 'BANANA'),
    ('SISSI', 'MISSISSIPPI'),
    ('GGAATCCC', 'TGAGGGATAAATATTTAGAATTTAGTAGTAGTGTT'),
    ('TCTGTTCCCTCCCTGTCTCA', 'TTTTAGGAAATACGCC'),
    ('TGAGACACGCAACATGGGAAAGGCAAGGCACACAGGGGATAGG', 'AATTTATTTTATTGTGATTTTTTGGAGGTTTGGAAGCCACTAAGCTATACTGAGACACGCAACAGGGGAAAGGCAAGGCACA'),
    ('TCCATCTCATCCCTGCGTGTCCCATCTGTTCCCTCCCTGTCTCA', 'TTTTAGGAAATACGCCTGGTGGGGTTTGGAGTATAGTGAAAGATAGGTGAGTTGGTCGGGTG'),
    ('A', 'TCTGCTCCTGGCCCATGATCGTATAACTTTCAAATTT'),
    ('GCGCGGACT', 'TAAATCCTGG'),
]
def py_edit_distance(s, t):
"""
Pure-Python edit distance
"""
m = len(s)
n = len(t)
costs = list(range(m + 1))
for j in range(1, n + 1):
prev = costs[0]
costs[0] += 1
for i in range(1, m + 1):
c = min(
prev + int(s[i-1] != t[j-1]),
costs[i] + 1,
costs[i-1] + 1,
)
prev = costs[i]
costs[i] = c
return costs[-1]
def random_string():
    """A random 'A'/'C' string of random length 0..20 (inclusive)."""
    length = random.randint(0, 20)
    return ''.join(random.choice('AC') for _ in range(length))
# 10,000 random (s, t) pairs used to fuzz edit_distance against the
# pure-Python reference implementation.
RANDOM_STRING_PAIRS = [(random_string(), random_string()) for _ in range(10000)]
def test_edit_distance():
    """edit_distance matches hand-computed values, is symmetric, and
    agrees with the pure-Python reference on all fixed and random pairs."""
    assert edit_distance('', '') == 0
    assert edit_distance('', 'A') == 1
    assert edit_distance('A', 'B') == 1
    assert edit_distance('A', 'A') == 0
    assert edit_distance('A', 'AB') == 1
    assert edit_distance('BA', 'AB') == 2
    for s, t in STRING_PAIRS + RANDOM_STRING_PAIRS:
        assert edit_distance(s, '') == len(s)
        assert edit_distance('', s) == len(s)
        assert edit_distance(s, t) == edit_distance(t, s)
        assert edit_distance(s, t) == py_edit_distance(s, t)
def assert_banded(s, t, maxdiff):
    """The banded distance must equal the true distance when it lies
    within the band, and exceed maxdiff otherwise (exact value free)."""
    banded_dist = edit_distance(s, t, maxdiff=maxdiff)
    true_dist = edit_distance(s, t)
    if true_dist > maxdiff:
        assert banded_dist > maxdiff
    else:
        assert banded_dist == true_dist
def test_edit_distance_banded():
    """Banded distance is consistent for band widths 0..4 over the
    fixed pairs (both argument orders) and empty-string cases."""
    for maxdiff in range(5):
        assert_banded('ABC', '', maxdiff)
        for s, t in STRING_PAIRS:
            assert_banded(s, '', maxdiff)
            assert_banded('', s, maxdiff)
            assert_banded(s, t, maxdiff)
            assert_banded(t, s, maxdiff)
def test_hamming_distance():
    """Hamming distance counts positional mismatches of equal-length
    strings."""
    assert hamming_distance('', '') == 0
    assert hamming_distance('A', 'A') == 0
    assert hamming_distance('HELLO', 'HELLO') == 0
    assert hamming_distance('ABC', 'DEF') == 3
    assert hamming_distance('ABCXDEF', 'ABCYDEF') == 1
def test_hamming_distance_incorrect_length():
    """Unequal-length inputs are rejected with IndexError."""
    with pytest.raises(IndexError):
        hamming_distance('A', 'BC')
| 2.6875 | 3 |
sovrin/test/persistence/test_identity_graph.py | sovrin-foundation/old-sovrin | 3 | 12757701 | import time
from datetime import datetime, timedelta
from ledger.util import F
from plenum.common.txn import TXN_TIME
from sovrin.persistence.identity_graph import IdentityGraph
def testMakeResultTxnTimeString():
    """A non-datetime TXN_TIME (here a string) is dropped from the result."""
    oRecordData = {
        F.seqNo.name: 1,
        TXN_TIME: 'some-datetime'
    }
    assert TXN_TIME not in IdentityGraph.makeResult(0, oRecordData)
def testMakeResultTxnTimeDatetime():
    """A valid datetime TXN_TIME is converted to a Unix epoch int
    (via time.mktime on the local timetuple)."""
    dt = datetime.now()
    oRecordData = {
        F.seqNo.name: 1,
        TXN_TIME: dt
    }
    assert IdentityGraph.makeResult(0, oRecordData)[TXN_TIME] == int(time.mktime(dt.timetuple()))
def testMakeResultTxnTimeDatetimeInvalidPast():
    """A TXN_TIME implausibly far in the past (1999) is dropped."""
    dt = datetime(1999, 1, 1)
    oRecordData = {
        F.seqNo.name: 1,
        TXN_TIME: dt
    }
    assert TXN_TIME not in IdentityGraph.makeResult(0, oRecordData)
def testMakeResultTxnTimeDatetimeInvalidFuture():
    """A TXN_TIME in the future (now + 1 day) is dropped."""
    dt = datetime.now() + timedelta(1)
    oRecordData = {
        F.seqNo.name: 1,
        TXN_TIME: dt
    }
    assert TXN_TIME not in IdentityGraph.makeResult(0, oRecordData)
def testMakeResultTxnTimeNone():
    """With no TXN_TIME key in the record data, none appears in the result.

    (The original body re-imported ``datetime`` locally and created an
    unused ``dt``; both were dead code and have been removed.)
    """
    oRecordData = {
        F.seqNo.name: 1,
    }
    assert TXN_TIME not in IdentityGraph.makeResult(0, oRecordData)
| 2.203125 | 2 |
src/nisqai/layer/_product_ansatz_test.py | obliviateandsurrender/nisqai-dev | 14 | 12757702 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nisqai.layer._product_ansatz import ProductAnsatz
def test_basic():
    """A ProductAnsatz can be instantiated and reports its qubit count."""
    # create an product ansatz on four qubits
    ansatz = ProductAnsatz(4)

    # check that the number of qubits is correct
    assert ansatz.num_qubits == 4
def test_params():
    """The params attribute has shape (num_qubits, gate_depth)."""
    # create a product ansatz
    ansatz = ProductAnsatz(5, gate_depth=4)

    # test if the params attribute has the correct shape
    assert ansatz.params.shape() == (5, 4)
def test_correct_small():
    """A one-qubit ProductAnsatz emits the expected Quil program:
    three parameter declarations and an RX(pi/2)/RZ(theta) sequence."""
    # create a small product ansatz
    ansatz = ProductAnsatz(1)

    # correct string representation of program
    correct = "DECLARE q_000_g_000 REAL[1]\nDECLARE q_000_g_001 REAL[1]\n" + \
              "DECLARE q_000_g_002 REAL[1]\nRX(pi/2) 0\nRZ(q_000_g_000) 0\n" + \
              "RX(pi/2) 0\nRZ(q_000_g_001) 0\nRX(pi/2) 0\nRZ(q_000_g_002) 0\n"

    # make sure the program is correct
    assert ansatz.circuit.__str__() == correct
if __name__ == "__main__":
    # Poor-man's runner: execute each check directly when run as a script.
    test_basic()
    test_params()
    test_correct_small()
    print("All tests for ProductAnsatz passed.")
| 2.4375 | 2 |
backend/users/viewsets.py | crowdbotics-apps/royal-cloud-33498 | 0 | 12757703 | from this import d
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.viewsets import ModelViewSet
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django_filters.rest_framework import DjangoFilterBackend
from royal_cloud_33498.settings import SECRET_KEY
from users.models import User
from users.authentication import ExpiringTokenAuthentication
from home.permissions import IsPostOrIsAuthenticated
from home.utility import auth_token, generateOTP, send_otp_sms
from users.serializers import ChangePasswordSerializer, CustomAuthTokenSerializer, OTPSerializer, UserProfileSerializer
class UserViewSet(ModelViewSet):
    """CRUD plus auth endpoints (OTP, login/logout, password, admin
    bootstrap) for non-superuser accounts."""
    serializer_class = UserProfileSerializer
    permission_classes = (IsPostOrIsAuthenticated,)
    authentication_classes = [ExpiringTokenAuthentication]
    queryset = User.objects.all()
    filter_backends = [DjangoFilterBackend]
    filterset_fields = ['name', 'last_name', 'phone', "email", "flagged"]

    def get_queryset(self):
        # Hide superusers from every list/detail endpoint.
        return super().get_queryset().filter(is_superuser=False)

    # Create User and return Token + Profile
    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        # Issue (or reuse) the auth token so signup doubles as login.
        token, created = Token.objects.get_or_create(user=serializer.instance)
        return Response({'token': token.key, 'user': serializer.data}, status=status.HTTP_201_CREATED, headers=headers)

    # Update Profile
    def partial_update(self, request, *args, **kwargs):
        partial = True
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        return Response(serializer.data, status=status.HTTP_200_OK)

    # Send a OTP
    @action(detail=False, methods=['post'])
    def otp(self, request):
        """Send a one-time password by SMS to an existing user's phone."""
        try:
            phone = request.data.get('phone')
            user = User.objects.get(phone=phone)
        except ObjectDoesNotExist:
            return Response({"detail": "Invalid Phone Number - Does not exist"}, status=status.HTTP_400_BAD_REQUEST)
        generateOTP(phone=phone, user=user)
        return Response(status=status.HTTP_200_OK)

    # Verify OTP
    @action(detail=False, methods=['post'])
    def verify(self, request):
        """Exchange a valid OTP for an auth token plus the user profile."""
        serializer = OTPSerializer(data=request.data)
        if serializer.is_valid():
            user = serializer.validated_data['user']
            token = auth_token(user)
            serializer = UserProfileSerializer(user)
            return Response({'token': token.key, 'user': serializer.data}, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    # Set password
    @action(detail=False, methods=['post'], permission_classes=[IsAuthenticated])
    def password(self, request):
        """Change the authenticated user's password."""
        serializer = ChangePasswordSerializer(data=request.data)
        if serializer.is_valid():
            user = request.user
            # NOTE(review): '<PASSWORD>' looks like an anonymization
            # placeholder; the serializer field is presumably 'password'
            # -- confirm against ChangePasswordSerializer.
            user.set_password(serializer.validated_data['<PASSWORD>'])
            user.save()
            return Response({'detail': "Password Updated Successfully"}, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    # Login a User
    @action(detail=False, methods=['post'])
    def login(self, request, **kwargs):
        """Credential login: returns an auth token plus the user profile."""
        serializer = CustomAuthTokenSerializer(data=request.data, context = {'request':request})
        if serializer.is_valid():
            user = serializer.validated_data['user']
            token = auth_token(user)
            serializer = UserProfileSerializer(user, context = {'request':request})
            return Response({'token': token.key, 'user': serializer.data}, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    # Logout a User
    @action(detail=False, methods=['post'])
    def logout(self, request):
        """Invalidate the caller's auth token (if any)."""
        try:
            request.user.auth_token.delete()
        except (AttributeError, ObjectDoesNotExist):
            return Response({'detail': 'Authentication Token Missing or Invalid'}, status=status.HTTP_401_UNAUTHORIZED)
        return Response(status=status.HTTP_200_OK)

    # Admin a User
    @action(detail=False, methods=['post'])
    def admin(self, request):
        """Bootstrap a superuser when the request carries the server's
        SECRET_KEY as a shared secret."""
        username = request.data.get('username')
        email = request.data.get('email')
        password = request.data.get('password')
        key = request.data.get('key')
        # NOTE(review): `!=` on a secret is not constant-time (prefer
        # hmac.compare_digest), and exposing superuser creation over an
        # unauthenticated endpoint keyed on SECRET_KEY is high-risk.
        if key != SECRET_KEY:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        User.objects.create_superuser(username, email, password)
        return Response(status=status.HTTP_200_OK)
| 1.945313 | 2 |
2020/day18/day18_v2.py | rystrauss/advent-of-code | 0 | 12757704 | from functools import reduce
from operator import add
class CustomInt:
    """Integer wrapper that repurposes operators for AoC 2020 day 18.

    ``+`` and ``**`` both perform addition, while ``*`` and ``-`` both
    perform multiplication.  Rewriting an expression's operator symbols
    (see ``main``) lets Python's own precedence rules evaluate it under
    the puzzle's precedence rules instead.
    """

    def __init__(self, *args):
        self._int = int(*args)

    def __pow__(self, power):
        # '**' stands in for '+' (high-precedence addition, part 2).
        return CustomInt(self._int + power._int)

    def __add__(self, other):
        return CustomInt(self._int + other._int)

    def __mul__(self, other):
        return CustomInt(self._int * other._int)

    def __sub__(self, other):
        # '-' stands in for '*' (same precedence as '+', part 1).
        return CustomInt(self._int * other._int)

    def __str__(self):
        return str(self._int)
def convert_string(expression):
    """Rewrite an arithmetic expression so each operand becomes a
    CustomInt(...) literal, leaving operators and parentheses as-is.

    Operands in the puzzle input are single digits, so the expression is
    processed character by character after stripping spaces.
    """
    stripped = expression.replace(" ", "")
    pieces = [
        ch if ch in "()*+" else f"CustomInt({ch})"
        for ch in stripped
    ]
    return "".join(pieces)
def main():
    """Solve both parts of AoC 2020 day 18 for the expressions in
    input.txt and print the two grand totals."""
    with open("input.txt", "r") as fp:
        raw_lines = [line.strip() for line in fp]
    converted = [convert_string(line) for line in raw_lines]

    # Part 1: '+' and '*' share precedence -> map '*' to '-' (also mult).
    part1_values = [eval(expr.replace("*", "-")) for expr in converted]
    print("Part I:", reduce(add, part1_values, CustomInt(0)))

    # Part 2: '+' binds tighter than '*' -> map '+' to '**'.
    part2_values = [eval(expr.replace("+", "**")) for expr in converted]
    print("Part II:", reduce(add, part2_values, CustomInt(0)))
# Run the solver when executed as a script.
if __name__ == "__main__":
    main()
| 3.703125 | 4 |
plugins/southidc_cms.py | cflq3/getcms | 22 | 12757705 | <filename>plugins/southidc_cms.py<gh_stars>10-100
#!/usr/bin/env python
# encoding: utf-8
def run(whatweb, pluginname):
    """Fingerprint the SouthIDC CMS via the whatweb plugin callbacks.

    Checks known static script paths and the page body for the
    "southidc" marker.
    """
    # presumably recog_from_file(plugin, path, keyword) fetches `path`
    # and records a match when `keyword` appears in the response, while
    # recog_from_content scans the already-fetched page body -- TODO
    # confirm against the getcms whatweb helper.
    whatweb.recog_from_file(pluginname, "Script/FocusSlide.js", "southidc")
    whatweb.recog_from_content(pluginname, "southidc")
    whatweb.recog_from_file(pluginname,"Script/Html.js", "southidc")
| 1.78125 | 2 |
generate_pairs_orders_profit/calculate_profit.py | Varun487/BigData_Pair_Trading | 0 | 12757706 | <reponame>Varun487/BigData_Pair_Trading<gh_stars>0
import os
from pyspark import SparkConf
from pyspark.sql import *
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType, IntegerType
# Local two-core Spark session used to read the per-pair order CSVs.
spark = SparkSession.builder \
    .master("local[2]") \
    .appName("CalculateProfit") \
    .config(conf=SparkConf()) \
    .getOrCreate()

# One CSV of orders per traded pair.
pairs_orders_csvs = os.listdir("../Storage/pairs_orders/")
pairs_orders_csvs_num = len(pairs_orders_csvs)
count = 0

# Ensure the output directory exists.
if not os.path.isdir("../Storage/pairs_profits"):
    os.mkdir("../Storage/pairs_profits")

# Risk budget per position.  NOTE(review): CAPITAL is never used, and
# calculate_shares hard-codes 20000 instead of reading RISK -- keep in sync.
CAPITAL = 1000000
RISK = 20000
def flip_orders(order):
    """Return the opposite side for LONG/SHORT; anything else unchanged.

    Used to mirror leg-1 orders onto the second leg of a pair trade.
    """
    opposite = {"LONG": "SHORT", "SHORT": "LONG"}
    return opposite.get(order, order)
def calculate_shares(prices, risk=20000):
    """Number of shares affordable per price with a fixed risk budget.

    Args:
        prices: iterable of prices (CSV columns arrive as strings).
        risk: capital allocated per position.  Defaults to the original
            hard-coded 20000 (the module-level RISK) so existing callers
            are unchanged.

    Returns:
        A list of floor-divided share counts (floats, since float //
        yields a float).
    """
    return [risk // float(price) for price in prices]
def calculate_profit(prices, orders, shares):
    """Per-row profit for one leg of a pair trade.

    Each LONG/SHORT entry row is closed at the first subsequent
    GET_OUT_OF_POSITION row (or at the final row), and its profit is the
    price move times the entry's share count, sign-flipped for shorts.
    Non-entry rows contribute 0, and the final row always contributes 0.

    Args:
        prices: close prices per row (strings straight from the CSV).
        orders: order label per row (LONG / SHORT / GET_OUT_OF_POSITION).
        shares: share count per row, parallel to prices.

    Returns:
        A list of profits, parallel to `orders`.
    """
    profits = []
    num_orders = len(orders)
    for i in range(num_orders):
        if i == num_orders - 1:
            # Last row: nothing left to close against.
            profits.append(0)
            break
        position = orders[i]
        if position == 'LONG' or position == 'SHORT':
            # Close at the first exit signal, or at the final row.
            for j in range(i + 1, num_orders):
                if (j == num_orders - 1) or orders[j] == 'GET_OUT_OF_POSITION':
                    profit = float(prices[j]) - float(prices[i])
                    profit *= shares[i]
                    if position == 'SHORT':
                        profit *= -1
                    profits.append(profit)
                    break
        else:
            profits.append(0)
    return profits
# Process each pair's order CSV: compute shares and per-leg profits,
# then write the enriched frame back out as a profits CSV.
for pair_order_csv in pairs_orders_csvs:
    count += 1
    print(f"In pair {count} of {pairs_orders_csvs_num}")
    df = spark.read.option("header", True).csv(f"../Storage/pairs_orders/{pair_order_csv}")
    # FLAT rows carry no position for either leg.
    df = df.filter(df.Orders != "FLAT")
    flipped_orders = udf(flip_orders, StringType())
    # Leg 2 trades the mirrored (flipped) orders of leg 1.
    df = df.withColumn("Flipped_Orders", flipped_orders("Orders"))
    # Symbol names are recovered from the "<SYMBOL>_Close" column names.
    symbol1 = df.columns[1][:-6]
    symbol2 = df.columns[2][:-6]
    # The per-row profit logic is plain Python, so drop to pandas here.
    orders_df = df.toPandas()
    orders_df[symbol1 + '_shares'] = calculate_shares(orders_df[symbol1 + '_Close'])
    orders_df[symbol2 + '_shares'] = calculate_shares(orders_df[symbol2 + '_Close'])
    orders_df[symbol1 + '_profits'] = calculate_profit(orders_df[symbol1 + '_Close'], orders_df['Orders'], orders_df[symbol1 + '_shares'])
    orders_df[symbol2 + '_profits'] = calculate_profit(orders_df[symbol2 + '_Close'], orders_df['Flipped_Orders'], orders_df[symbol2 + '_shares'])
    orders_df.to_csv(f"../Storage/pairs_profits/{pair_order_csv}", index=False)
    orders_spark_df = spark.createDataFrame(orders_df)
    # orders_spark_df.show()
| 2.75 | 3 |
objects/CSCG/_3d/forms/trace/_1tr/visualize.py | mathischeap/mifem | 1 | 12757707 |
from root.config.main import rAnk, mAster_rank, cOmm
from screws.freeze.main import FrozenOnly
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
class _3dCSCG_1Trace_Visualize(FrozenOnly):
    """The visualization property/component of standard forms."""
    def __init__(self, tf):
        # tf: the 3dCSCG 1-trace form this visualizer belongs to
        self._tf_ = tf
        self._freeze_self_()

    def __call__(self, **kwargs):
        """When this object is called, we call the default visualizing method: ``tecplot``."""
        # NOTE(review): despite the docstring, the default is `matplot`,
        # not `tecplot`.
        return self.matplot(**kwargs)

    def matplot(self, density=None, i=None,
        plot_type='contourf',
        colormap='RdBu',
        num_color_bar_ticks=5):
        """
        Plot the reconstructed trace form with matplotlib.  Data from all
        MPI ranks is gathered on the master rank; other ranks return None.

        :param density: Total point budget across all trace elements;
            defaults to 500 (quiver) or 10000 (contourf).
        :param i: Plot which trace elements? (passed to reconstruct)
        :param plot_type: 'quiver' (vector arrows) or 'contourf'
            (per-component colored surfaces).
        :param colormap: Matplotlib colormap name (contourf only).
        :param num_color_bar_ticks: Ticks on each colorbar (contourf only).
        :return:
        """
        if density is None:
            if plot_type == 'quiver':
                density = 500
            elif plot_type == 'contourf':
                density = 10000
            else:
                raise NotImplementedError(f'3dCSCG 1Trace plot type={plot_type} is not implemented.')
        else:
            pass

        mesh = self._tf_.mesh
        # Per-element 1D resolution derived from the global point budget.
        density = int(np.sqrt(density/mesh.trace.elements.GLOBAL_num)) + 1
        xi = eta = sigma = np.linspace(-1, 1, density)
        xyz, v = self._tf_.reconstruct(xi, eta, sigma, i=i)
        # Gather the per-rank reconstructions on the master rank.
        xyz = cOmm.gather(xyz, root=mAster_rank)
        v = cOmm.gather(v, root=mAster_rank)
        if rAnk != mAster_rank: return

        # Flatten the gathered per-rank dicts into parallel lists of
        # surfaces (one entry per trace element).
        XYZ = list()
        Vx = list()
        Vy = list()
        Vz = list()
        for _xyz_, _v_ in zip(xyz, v):
            for i in _xyz_:  # NOTE: `i` shadows the method argument here
                xyz_i = _xyz_[i]
                vx_i, vy_i, vz_i = _v_[i]
                XYZ.append(xyz_i)
                Vx.append(vx_i)
                Vy.append(vy_i)
                Vz.append(vz_i)

        Vx = np.array(Vx)
        Vy = np.array(Vy)
        Vz = np.array(Vz)
        del xyz, v

        if plot_type == 'quiver': # ================= quiver plot =====================================
            fig = plt.figure(figsize=(8, 7))
            ax = fig.add_subplot(111, projection='3d')
            for i, xyz in enumerate(XYZ):
                x, y, z = xyz
                # Grey translucent surface with red vector arrows on top.
                ax.plot_surface(x, y, z, color=(0.5,0.5,0.5,0.5))
                vx = Vx[i]
                vy = Vy[i]
                vz = Vz[i]
                ax.quiver(x, y, z, vx, vy, vz, color='r', linewidth=0.5)

            ax.set_xlabel(r'$x$')
            ax.set_ylabel(r'$y$')
            ax.set_zlabel(r'$z$')

            plt.show()

        elif plot_type == 'contourf': # ================= contourf plot =====================================
            cmap = getattr(cm, colormap)
            fig = plt.figure(figsize=(15,6))

            # x-component ----------------------------------------------------------
            ax = fig.add_subplot(131, projection='3d')
            ax.view_init(45, 60)
            # Normalize the component values to [0, 1] for facecolors;
            # the colorbar ticks still show the original value range.
            MAX = np.max(Vx)
            MIN = np.min(Vx)
            if MAX == MIN:
                MAX += 0.0001  # avoid a zero-width color range
            bounds = MAX - MIN
            Vx = Vx - MIN
            Vx = Vx / bounds
            ticks = np.linspace(MAX, MIN, num_color_bar_ticks)
            for i, xyz in enumerate(XYZ):
                x, y, z = xyz
                v = Vx[i]
                ax.plot_surface(x, y, z, facecolors=cmap(v))
            mappable = cm.ScalarMappable(cmap=cmap)
            mappable.set_array(np.array(ticks))
            cb = plt.colorbar(mappable, ax=ax, # ticks=np.linspace(0,1,num_ticks),
                              shrink=1, aspect=20,# extend='min',
                              orientation='vertical', )
            cb.ax.tick_params()  # labelsize=13.5)
            ax.set_xlabel(r'$x$')
            ax.set_ylabel(r'$y$')
            ax.set_zlabel(r'$z$')
            plt.title('x-component')

            # y-component -------------------------------------------------------------
            ax = fig.add_subplot(132, projection='3d')
            ax.view_init(45, 60)
            MAX = np.max(Vy)
            MIN = np.min(Vy)
            if MAX == MIN:
                MAX += 0.0001  # avoid a zero-width color range
            bounds = MAX - MIN
            Vy = Vy - MIN
            Vy = Vy / bounds
            ticks = np.linspace(MAX, MIN, num_color_bar_ticks)
            for i, xyz in enumerate(XYZ):
                x, y, z = xyz
                v = Vy[i]
                ax.plot_surface(x, y, z, facecolors=cmap(v))
            mappable = cm.ScalarMappable(cmap=cmap)
            mappable.set_array(np.array(ticks))
            cb = plt.colorbar(mappable, ax=ax, # ticks=np.linspace(0,1,num_ticks),
                              shrink=1, aspect=20, # extend='min',
                              orientation='vertical', )
            cb.ax.tick_params() # labelsize=13.5)
            ax.set_xlabel(r'$x$')
            ax.set_ylabel(r'$y$')
            ax.set_zlabel(r'$z$')
            plt.title('y-component')

            # z-component -------------------------------------------------------
            ax = fig.add_subplot(133, projection='3d')
            ax.view_init(45, 60)
            MAX = np.max(Vz)
            MIN = np.min(Vz)
            if MAX == MIN:
                MAX += 0.0001  # avoid a zero-width color range
            bounds = MAX - MIN
            Vz = Vz - MIN
            Vz = Vz / bounds
            ticks = np.linspace(MAX, MIN, num_color_bar_ticks)
            for i, xyz in enumerate(XYZ):
                x, y, z = xyz
                v = Vz[i]
                ax.plot_surface(x, y, z, facecolors=cmap(v))
            mappable = cm.ScalarMappable(cmap=cmap)
            mappable.set_array(np.array(ticks))
            cb = plt.colorbar(mappable, ax=ax, # ticks=np.linspace(0,1,num_ticks),
                              shrink=1, aspect=20, # extend='min',
                              orientation='vertical', )
            cb.ax.tick_params() # labelsize=13.5)
            ax.set_xlabel(r'$x$')
            ax.set_ylabel(r'$y$')
            ax.set_zlabel(r'$z$')
            plt.title('z-component')

            plt.show()

        else:
            raise NotImplementedError(f'3dCSCG 1Trace plot type={plot_type} is not implemented.')
tapystry/main.py | csenol/tapystry | 0 | 12757708 | <reponame>csenol/tapystry
from functools import partial
import threading
import queue
from concurrent.futures import ThreadPoolExecutor
import inspect
import abc
from collections import defaultdict, deque
from uuid import uuid4
import types
import time
def get_nth_frame(n):
    """FrameInfo for the stack frame `n` levels above this function's
    caller (n=0 is the direct caller)."""
    frame = inspect.currentframe()
    steps = n + 1
    while steps:
        frame = frame.f_back
        steps -= 1
    return inspect.getframeinfo(frame)
class Effect(metaclass=abc.ABCMeta):
    """
    Base class for effects which can be yielded to the tapystry event loop.
    """
    def __init__(self, type, oncancel=(lambda: None), name=None, caller=None, caller_stack_index=2, immediate=True):
        # type: label the engine dispatches on (e.g. "Broadcast", "Call")
        # oncancel: cleanup callback invoked if the effect is canceled
        # name: human-readable label used by __str__ (may stay None)
        # caller: frame info of the yielding code; captured automatically
        #   `caller_stack_index` frames up the stack when not supplied
        # immediate: scheduling hint; its exact semantics live in the
        #   event loop -- TODO confirm there
        self.type = type
        self.cancel = oncancel
        self.name = name
        if caller is None:
            caller = get_nth_frame(caller_stack_index)
        self._caller = caller
        self.immediate = immediate

    def __str__(self):
        # "Type(name)" when a name is set, otherwise just "Type".
        if self.name is not None:
            return f"{self.type}({self.name})"
        return f"{self.type}"
class Wrapper(Effect):
    """
    Wrapper around another effect which modifies the type
    """
    def __init__(self, effect, type, **effect_kwargs):
        # effect: the wrapped inner effect; presumably unwrapped by the
        #   engine when dispatching -- TODO confirm in the event loop
        self.effect = effect
        super().__init__(type=type, **effect_kwargs)
class Broadcast(Effect):
    """
    Effect which broadcasts a message for all strands to hear
    """
    def __init__(self, key, value=None, name=None, immediate=False, **effect_kwargs):
        # key: channel identifier matched against Receive.key
        # value: payload delivered to matching receivers
        # Note: broadcasts default to immediate=False, unlike other effects.
        self.key = key
        self.value = value
        if name is None:
            name = key
        super().__init__(type="Broadcast", name=name, immediate=immediate, **effect_kwargs)
class Receive(Effect):
    """
    Effect which waits until it hears a broadcast at the specified key, with value satisfying the specified predicate.
    The tapystry engine returns the matched message's value
    """
    def __init__(self, key, predicate=None, name=None, **effect_kwargs):
        # key: channel to listen on (matched against Broadcast.key)
        # predicate: optional filter over the broadcast value; None
        #   presumably matches any value -- confirm in the engine
        self.key = key
        self.predicate = predicate
        if name is None:
            name = key
        super().__init__(type="Receive", name=name, **effect_kwargs)
class Call(Effect):
    """
    Effect which spins up a new strand by calling generator on the specified arguments,
    The tapystry engine returns the generator's return value
    """
    def __init__(self, gen, args=(), kwargs=None, name=None, **effect_kwargs):
        self.gen = gen
        self.args = args
        # NOTE(review): kwargs stays None here (CallThread normalizes it
        # with `kwargs or dict()`); the engine presumably handles None --
        # confirm before unifying the two.
        self.kwargs = kwargs
        if name is None:
            name = gen.__name__
        super().__init__(type="Call", name=name, **effect_kwargs)
class CallFork(Effect):
    """
    Effect which spins up a new strand by calling generator on the specified arguments
    The tapystry engine immediately returns a Strand object.
    """
    def __init__(self, gen, args=(), kwargs=None, name=None, run_first=False, **effect_kwargs):
        self.gen = gen
        self.args = args
        # NOTE(review): kwargs stays None here, unlike CallThread which
        # normalizes to {} -- confirm the engine handles None.
        self.kwargs = kwargs
        # run_first: presumably asks the engine to schedule the forked
        #   strand before resuming the parent -- TODO confirm
        self.run_first = run_first
        if name is None:
            name = gen.__name__
        super().__init__(type="CallFork", name=name, **effect_kwargs)
class CallThread(Effect):
    """
    # TODO: make this thread able to yield back to the event loop?
    Effect which spins up a function in a new thread
    The tapystry engine returns the function's return value
    NOTE: what runs within the thread
    - is *not* a generator, it cannot yield effects back
    - can *not* be canceled
    """
    def __init__(self, f, args=(), kwargs=None, name=None, **effect_kwargs):
        self.f = f
        self.args = args
        # Unlike Call/CallFork, kwargs is normalized to a dict here.
        self.kwargs = kwargs or dict()
        if name is None:
            name = f.__name__
        super().__init__(type="CallThread", name=name, **effect_kwargs)
class First(Effect):
    """
    Effect which returns when one of the strands is done.
    The tapystry engine returns the index of the winning strand, and its value.
    NOTE: Use of this can be dangerous and can lead to deadlocks, as it cancels losers.
    It is safer to use higher-level APIs such as Race and Join
    """
    def __init__(self, strands, name=None, cancel_losers=True, ensure_cancel=None, **effect_kwargs):
        self.strands = strands
        self.cancel_losers = cancel_losers
        # ensure_cancel defaults to cancel_losers, and may only be set
        # when losers are actually being canceled.
        self.ensure_cancel = cancel_losers if ensure_cancel is None else ensure_cancel
        if not self.cancel_losers:
            assert not self.ensure_cancel
        if name is None:
            name = ", ".join([str(x) for x in self.strands])
        # (A redundant `self.name = name` assignment was removed here;
        # Effect.__init__ stores the name itself.)
        super().__init__(type="Race", name=name, **effect_kwargs)
# TODO: does this really need to be an effect? what's wrong with just exposing _canceled on Strand?
class Cancel(Effect):
    """
    Effect which cancels the strand specified
    """
    def __init__(self, strand, name=None, **effect_kwargs):
        self.strand = strand
        super().__init__(
            type="Cancel",
            name=str(strand) if name is None else name,
            **effect_kwargs,
        )
class Intercept(Effect):
    """
    Effect which waits until the engine finds an effect matching the given predicate, and allows you to modify the yielded value of that effect.
    This is intended for testing only, and can only be used in test_mode.
    The tapystry engine returns a tuple of (effect, inject), where `effect` is the effect intercepted, and `inject` is a function taking a value, and returning an effect that yields that value for the intercepted effect.
    """
    def __init__(self, predicate=None, name=None, **effect_kwargs):
        self.predicate = predicate
        super().__init__(
            type="Intercept",
            name="" if name is None else name,
            **effect_kwargs,
        )
class DebugTree(Effect):
    """
    Effect which returns the state of the entire tapystry engine
    (a textual tree of live strands -- see Strand.tree()).
    TODO: make the return value more structured (currently just a string)
    """
    def __init__(self, **effect_kwargs):
        # No payload: the engine replies with initial_strand.tree().
        super().__init__(type="DebugTree", **effect_kwargs)
class TapystryError(Exception):
    """Engine-level error: invalid effects, exceptions inside strands, or hanging strands."""
    pass
_noval = object()
class Strand():
    """A single running generator in the tapystry engine.

    Tracks the generator iterator, the effect it is currently waiting on,
    its parent/child relationships (for cancellation and debug trees), and
    its final result once the generator returns.
    """
    def __init__(self, caller, gen, args=(), kwargs=None, *, parent, edge=None):
        # caller: frame info recording where this strand was created
        # (used only for error messages / debug trees).
        # edge: label ("call"/"fork"/effect name) linking us to the parent.
        if kwargs is None:
            kwargs = dict()
        self._caller = caller
        self._future = None
        self._it = gen(*args, **kwargs)
        self._done = False
        self._result = None
        self.id = uuid4()
        # self._error = None
        self._live_children = []
        self._parent = parent
        self._canceled = False
        # If gen was not actually a generator function, treat its return
        # value as the strand's immediate result.
        if not isinstance(self._it, types.GeneratorType):
            self._result = self._it
            self._done = True
        self._effect = None
        if self._parent is None:
            self._parent_effect = None
            assert edge is None
        else:
            assert not self._parent._canceled
            self._parent._live_children.append(self)
            # Remember which of the parent's effects spawned us.
            self._parent_effect = self._parent._effect
            self._edge = edge
            assert self._parent_effect is not None
            assert self._edge is not None
    def remove_live_child(self, x):
        """Detach finished child *x*; propagate upward once we have no live children."""
        assert self._live_children
        self._live_children.remove(x)
        if (self._done or self._canceled) and not self._live_children:
            if self._parent is not None:
                assert self in self._parent._live_children
                self._parent.remove_live_child(self)
    def send(self, value=None):
        """Resume the generator with *value*.

        Returns dict(done=False, effect=...) when the generator yields the
        next effect, or dict(done=True) when it returns.  Any other
        exception raised inside the generator is re-raised as a
        TapystryError annotated with this strand's creation stack.
        """
        assert not self._canceled
        assert not self._done
        try:
            effect = self._it.send(value)
            self._effect = effect
            return dict(done=False, effect=effect)
        except StopIteration as e:
            self._done = True
            if self._parent is not None:
                if not self._live_children:
                    self._parent.remove_live_child(self)
            # StopIteration.value carries the generator's return value.
            self._result = e.value
            self._effect = None
            return dict(done=True)
        except Exception as e:
            # Skip the frame for this send() call itself.
            tb = e.__traceback__.tb_next
            line = tb.tb_lineno
            # line = tb.tb_frame.f_code.co_firstlineno
            # line number is not exactly right?
            raise TapystryError(
                "\n".join([
                    f"Exception caught at",
                    f"{self.stack()}",
                    f":",
                    f"File {tb.tb_frame.f_code.co_filename}, line {line}, in {tb.tb_frame.f_code.co_name}",
                    f"{type(e).__name__}: {e}",
                ])
            )
    def __hash__(self):
        # Hash on the unique uuid so strands can live in sets/dicts.
        return self.id.int
    def __str__(self):
        return f"Strand[{self.id.hex}] (waiting for {self._effect})"
    def _debuglines(self):
        """Two lines describing where this strand was created."""
        return [
            f"File {self._caller.filename}, line {self._caller.lineno}, in {self._caller.function}",
            f"  {self._caller.code_context[0].strip()}",
        ]
    def stack(self, indent=0):
        """Render the chain of parent strands leading to this one."""
        # if self._parent is None:
        #     return [f"Strand[{self.id.hex}]"]
        # else:
        #     stack = list(self._parent.stack())
        #     stack.append(f"{self._parent[1]} Strand[{self.id.hex}]")
        #     return stack
        s = "\n".join(self._debuglines())
        if self._parent is None:
            return s
        else:
            return "\n".join([
                self._parent.stack(indent=0),
                " " * indent + f"Yields effect {self._parent_effect}, created at",
                " " * indent + s
            ])
    def _treelines(self, indent=0):
        """Lines for this strand and (recursively) its live children."""
        lines = [" " * indent + line for line in self._debuglines()]
        for c in self._live_children:
            lines.extend(
                c._treelines(indent + 2)
            )
        return lines
    def tree(self):
        """Textual tree of this strand and all live descendants."""
        return "\n".join(self._treelines())
    def is_done(self):
        return self._done
    def get_result(self):
        if not self._done:
            raise TapystryError("Tried to get result on a Strand that was still running!")
        return self._result
    def cancel(self):
        """Mark this strand canceled and cancel its pending effect, if any."""
        # if self._done: ??
        if self._effect is not None:
            self._effect.cancel()
        self._canceled = True
    def is_canceled(self):
        return self._canceled
def _indented(lines):
indent = 0
s = ""
for line in lines:
s += " " * indent + line + "\n"
indent += 2
return s
class _QueueItem():
def __init__(self, effect, strand):
self.strand = strand
self.effect = effect
def run(gen, args=(), kwargs=None, debug=False, test_mode=False, max_threads=None):
    """Drive generator *gen* (and every strand it spawns) to completion.

    Returns the initial strand's result.  Raises TapystryError on invalid
    effects, exceptions raised inside strands, or hanging (deadlocked)
    strands left waiting when the queue drains.
    """
    # dict from string to waiting functions
    waiting = defaultdict(list)
    # dict from strand to waiting key
    # TODO: gc hanging strands
    hanging_strands = set()
    q = deque()
    # list of intercept items
    intercepts = []
    initial_strand = Strand(get_nth_frame(1), gen, args, kwargs, parent=None)
    if initial_strand.is_done():
        # wasn't even a generator
        return initial_strand.get_result()
    def queue_effect(effect, strand):
        # q.pop() takes from the right, so "immediate" effects (appended
        # right) are handled before deferred ones (appended left).
        if not isinstance(effect, Effect):
            raise TapystryError(f"Strand yielded non-effect {type(effect)}")
        if effect.immediate:
            q.append(_QueueItem(effect, strand))
        else:
            q.appendleft(_QueueItem(effect, strand))
    def advance_strand(strand, value=_noval):
        # Resume *strand*, then either announce its completion or queue
        # its next yielded effect.
        if strand.is_canceled():
            return
        # NOTE(review): comparing against the sentinel with == can misfire
        # for values with a broad __eq__ (e.g. numpy arrays); `is` would be
        # safer -- confirm before changing.
        if value == _noval:
            result = strand.send()
        else:
            result = strand.send(value)
        if result['done']:
            resolve_waiting("done." + strand.id.hex, strand.get_result())
            return
        effect = result['effect']
        queue_effect(effect, strand)
    def add_waiting_strand(key, strand, fn=None):
        # Park *strand* until something resolves *key*; fn (if given) must
        # accept the value, otherwise the strand keeps waiting.
        assert strand not in hanging_strands
        hanging_strands.add(strand)
        def receive(val):
            assert strand in hanging_strands
            if fn is not None and not fn(val):
                return False
            hanging_strands.remove(strand)
            advance_strand(strand, val)
            return True
        waiting[key].append(receive)
    def cancel_strand(strand):
        # Cancel the strand and, recursively, everything it spawned.
        strand.cancel()
        waiting.pop("done." + strand.id.hex, None)
        for child in strand._live_children:
            cancel_strand(child)
    def add_racing_strand(racing_strands, race_strand, cancel_losers, ensure_cancel):
        # Implements the First effect: resume race_strand with
        # (winner_index, winner_value) when any racer finishes.
        assert race_strand not in hanging_strands
        hanging_strands.add(race_strand)
        received = False
        def declare_winner(i, val):
            nonlocal received
            assert not (ensure_cancel and received)
            if received:
                return
            for j, strand in enumerate(racing_strands):
                if j == i:
                    assert strand.is_done()
                else:
                    if ensure_cancel:
                        assert not strand.is_done()
                    if cancel_losers:
                        cancel_strand(strand)
            received = True
            assert race_strand in hanging_strands
            hanging_strands.remove(race_strand)
            advance_strand(race_strand, (i, val))
        winner = None
        for i, strand in enumerate(racing_strands):
            if strand.is_done():
                if winner is not None and ensure_cancel:
                    raise TapystryError(f"Race between effects that are already completed")
                winner = (i, strand)
        if winner is not None:
            (i, strand) = winner
            declare_winner(i, strand.get_result())
        for i, strand in enumerate(racing_strands):
            waiting["done." + strand.id.hex].append(partial(declare_winner, i))
    def resolve_waiting(wait_key, value):
        # Wake everyone waiting on wait_key; keep callbacks that declined.
        fns = waiting[wait_key]
        if debug:
            print("resolving", wait_key, len(fns), value)
        # clear first in case it mutates
        waiting[wait_key] = [fn for fn in fns if not fn(value)]
    def make_injector(intercepted_strand):
        # Returns inject(x): an effect that resumes the intercepted strand
        # with x (used by the Intercept test machinery).
        def inject(value):
            advance_strand(intercepted_strand, value)
            hanging_strands.remove(intercepted_strand)
        return lambda x: Call(inject, (x,))
    threads_q = queue.Queue()
    executor = ThreadPoolExecutor(max_workers=max_threads)
    thread_strands = dict()  # dict from thread to callback
    def handle_call_thread(effect, strand):
        # Run effect.f on the pool; the done callback posts (result, id)
        # back to threads_q for the main loop to pick up.
        future = executor.submit(effect.f, *effect.args, **effect.kwargs)
        id = uuid4()
        def done_callback(f):
            assert f == future
            assert f.done()
            if future.cancelled():
                assert strand._canceled
                threads_q.put((None, id))
            else:
                threads_q.put((f.result(), id))
        thread_strands[id] = strand
        future.add_done_callback(done_callback)
    def handle_item(strand, effect):
        # Dispatch one queued effect for its strand.
        if strand.is_canceled():
            return
        if isinstance(effect, Intercept):
            if not test_mode:
                raise TapystryError(f"Cannot intercept outside of test mode!")
            intercepts.append((strand, effect))
            hanging_strands.add(strand)
            return
        if test_mode:
            # Give any registered interceptor first crack at this effect.
            intercepted = False
            for (intercept_strand, intercept_effect) in intercepts:
                if intercept_effect.predicate is None or intercept_effect.predicate(effect):
                    intercepted = True
                    break
            if intercepted:
                hanging_strands.remove(intercept_strand)
                intercepts.remove((intercept_strand, intercept_effect))
                hanging_strands.add(strand)
                advance_strand(intercept_strand, (effect, make_injector(strand)))
                return
        if debug:
            print(f"Handling {effect} (from {strand})")
            print(strand.stack(indent=2))
        if not isinstance(effect, Effect):
            raise TapystryError(f"Strand yielded non-effect {type(effect)}")
        if isinstance(effect, Broadcast):
            resolve_waiting("broadcast." + effect.key, effect.value)
            advance_strand(strand)
        elif isinstance(effect, Receive):
            add_waiting_strand("broadcast." + effect.key, strand, effect.predicate)
        elif isinstance(effect, Call):
            call_strand = Strand(effect._caller, effect.gen, effect.args, effect.kwargs, parent=strand, edge=effect.name or "call")
            if call_strand.is_done():
                # wasn't even a generator
                advance_strand(strand, call_strand.get_result())
            else:
                add_waiting_strand("done." + call_strand.id.hex, strand)
                advance_strand(call_strand)
        elif isinstance(effect, CallFork):
            fork_strand = Strand(effect._caller, effect.gen, effect.args, effect.kwargs, parent=strand, edge=effect.name or "fork")
            # run_first controls whether the fork advances before or after
            # the parent receives the Strand handle.
            if not effect.run_first:
                advance_strand(strand, fork_strand)
            if not fork_strand.is_done():
                # otherwise wasn't even a generator
                advance_strand(fork_strand)
            if effect.run_first:
                advance_strand(strand, fork_strand)
        elif isinstance(effect, CallThread):
            handle_call_thread(effect, strand)
        elif isinstance(effect, First):
            add_racing_strand(effect.strands, strand, effect.cancel_losers, effect.ensure_cancel)
        elif isinstance(effect, Cancel):
            cancel_strand(effect.strand)
            advance_strand(strand)
        elif isinstance(effect, DebugTree):
            advance_strand(strand, initial_strand.tree())
        elif isinstance(effect, Wrapper):
            handle_item(strand, effect.effect)
        else:
            raise TapystryError(f"Unhandled effect type {type(effect)}: {strand.stack()}")
    advance_strand(initial_strand)
    # Main loop: drain the effect queue, interleaving completed thread
    # results; blocks on threads_q only when the queue is empty.
    while True:
        if not (len(q) or len(thread_strands)):
            break
        while thread_strands:
            try:
                result, id = threads_q.get(block=len(q) == 0)
                strand = thread_strands[id]
                if not strand.is_canceled():
                    advance_strand(strand, value=result)
                del thread_strands[id]
            except queue.Empty:
                break
        if len(q):
            item = q.pop()
            handle_item(item.strand, item.effect)
    # Any un-canceled strand still waiting at this point is deadlocked.
    for strand in hanging_strands:
        if not strand.is_canceled():
            assert not (strand._parent and strand._parent.is_canceled())
            # TODO: add notes on how this can happen
            # forgetting to join fork or forgot to cancel subscription?
            # joining thread that never ends
            # receiving message that never gets broadcast
            raise TapystryError(f"Hanging strands detected waiting for {strand._effect}, in {strand.stack()}")
    assert initial_strand.is_done()
    return initial_strand.get_result()
| 2.359375 | 2 |
mqtt/topics/example/__init__.py | DarkbordermanTemplate/mqtt-consumer | 0 | 12757709 | """
Topic handler definition
"""
import os
from distutils.util import strtobool
from topics.utils import TopicHandler
from .handler import handler
# Registration for the "/example" MQTT topic; enabled only when the
# EXAMPLE_STREAMING environment variable is truthy (defaults to "false").
EXAMPLE_HANDLER = TopicHandler(
    handle=handler,
    topic="/example",
    enabled=strtobool(os.environ.get("EXAMPLE_STREAMING", "false")),
)
| 1.789063 | 2 |
problems/permutations/solution-1.py | MleMoe/LeetCode-1 | 2 | 12757710 | import itertools
class Solution:
    def permute(self, nums: [int]) -> [[int]]:
        """Return every permutation of *nums* as a list of lists.

        Fix: itertools.permutations yields tuples; convert each one so the
        result matches the annotated [[int]] return type.  (Also removes
        stray metadata text that had corrupted the original line.)
        """
        return [list(p) for p in itertools.permutations(nums)]
code/selfish_proxy/strategy/__init__.py | simonmulser/master-thesis | 0 | 12757711 | <reponame>simonmulser/master-thesis
from enum import Enum
class Action(Enum):
    """Selfish-mining action, encoded as a single-letter code."""
    adopt = 'a'
    override = 'o'
    match = 'm'
    wait = 'w'
class ForkState(Enum):
    """How relevant the current chain fork is to the strategy."""
    irrelevant = 0
    relevant = 1
    active = 2
class BlockOrigin(Enum):
    """Whether a block came from our (private) chain or the public network."""
    private = 0
    public = 1
def opposite_origin(block_origin):
    """Return the other BlockOrigin (private <-> public)."""
    return (
        BlockOrigin.public
        if block_origin is BlockOrigin.private
        else BlockOrigin.private
    )
class ActionException(Exception):
    """Raised when the strategy encounters an invalid or impossible action."""

    def __init__(self, message):
        # Fix: pass the message to Exception.__init__ so .args is populated
        # and the exception pickles/reprs normally.
        super().__init__(message)
        self.message = message

    def __str__(self):
        # Preserved from the original: render the message via repr().
        return repr(self.message)
| 3.0625 | 3 |
org/apache/helix/util/StringTemplate.py | davzhang/helix-python-binding | 3 | 12757712 | # package org.apache.helix.util
#from org.apache.helix.util import *
#from java.util import Arrays
#from java.util import HashMap
#from java.util import Map
#from java.util.regex import Matcher
#from java.util.regex import Pattern
#from org.apache.log4j import Logger
from org.apache.helix.util.logger import get_logger
from org.apache.helix.util.UserExceptions import IllegalArgumentException
import re
class StringTemplate:
    """Registry of brace-delimited string templates, keyed by type and arity.

    Port of org.apache.helix.util.StringTemplate (Java).  Templates contain
    "{...}" placeholders that are filled positionally from a list of keys.
    """

    LOG = get_logger(__name__)

    def __init__(self):
        # type -> {number of keys -> template string}
        self.templateMap = {}
        # Matches a single non-greedy "{...}" placeholder.
        self.pattern = re.compile("({.+?})")

    def addEntry(self, type, numKeys, template):
        """Register *template* for the given *type* and number of keys."""
        if type not in self.templateMap:
            self.templateMap[type] = {}
        self.LOG.trace("Add template for type: " + str(type) + ", arguments: " + str(numKeys) + ", template: " + template)
        self.templateMap[type][numKeys] = template

    def instantiate(self, type, keys):
        """Fill the template registered for (*type*, len(keys)) with *keys*.

        Raises:
            IllegalArgumentException: if no matching template is registered
                or placeholders remain unfilled after substitution.
        """
        if keys is None:
            keys = []
        template = None
        if type in self.templateMap:
            template = self.templateMap[type].get(len(keys))
        result = None
        if template is not None:
            result = template
            # Replace each "{...}" placeholder with the key at its position.
            for count, var in enumerate(self.pattern.findall(template)):
                result = result.replace(var, keys[count])
        if result is None or result.find('{') > -1 or result.find('}') > -1:
            # Fix: the original concatenated the list `keys` (and possibly
            # None `template`) onto a str, which raised TypeError and masked
            # the intended IllegalArgumentException.
            errMsg = ("Unable to instantiate template: " + str(template)
                      + " using keys: " + str(keys))
            self.LOG.error(errMsg)
            raise IllegalArgumentException(errMsg)
        return result
| 2.140625 | 2 |
FusionIIIT/applications/online_cms/forms.py | paras11agarwal/FusionIIIT | 0 | 12757713 | from django import forms
class AddDocuments(forms.Form):
    """Upload form for a course document plus a short description."""

    doc = forms.FileField(required=True)
    description = forms.CharField(
        label='Description',
        max_length=100,
        widget=forms.TextInput(attrs={'placeholder': 'Enter Description'}),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict the browser's file picker to common document formats.
        self.fields['doc'].widget.attrs.update(
            {'accept': '.pdf,.doc,.docx,.ppt,.pptx,.xls,.xlsx'}
        )
class AddVideos(forms.Form):
    """Upload form for a course video plus a short description."""

    vid = forms.FileField(required=True)
    description = forms.CharField(
        label='Description',
        max_length=100,
        widget=forms.TextInput(attrs={'placeholder': 'Enter Description'}),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict the browser's file picker to common video formats.
        self.fields['vid'].widget.attrs.update(
            {'accept': '.mp4,.3gp,.mpg,.mkv,.amv'}
        )
| 2.140625 | 2 |
Machine Learning Fundamentals with Python Datacamp/Unsupervised Learning in Python/Visualization with hierarchical clustering and t-SNE/extraxting the cluster labels.py | dibyanshushekhardey/Machine-Learning | 0 | 12757714 | <reponame>dibyanshushekhardey/Machine-Learning
# Perform the necessary imports
import pandas as pd
from scipy.cluster.hierarchy import fcluster
# Use fcluster to extract labels: labels
labels = fcluster(mergings, 6, criterion='distance')
# Create a DataFrame with labels and varieties as columns: df
df = pd.DataFrame({'labels': labels, 'varieties': varieties})
# Create crosstab: ct
ct = pd.crosstab(df['labels'], df['varieties'])
# Display ct
print(ct)
| 3.015625 | 3 |
pfxbrick/pfxexceptions.py | fx-bricks/pfx-brick-py | 11 | 12757715 | <reponame>fx-bricks/pfx-brick-py<filename>pfxbrick/pfxexceptions.py
#! /usr/bin/env python3
#
# Copyright (C) 2021 Fx Bricks Inc.
# This file is part of the pfxbrick python module.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# PFx Brick exceptions
import hid
import os
from pfxbrick import *
from pfxbrick.pfxhelpers import *
class InvalidResponseException(Exception):
    """Raised when a PFx Brick response is not in the expected form."""
    pass
class ResponseTimeoutException(Exception):
    """Raised when waiting for a PFx Brick response times out."""
    pass
class BLEConnectTimeoutException(Exception):
    """Raised when a BLE connection attempt times out."""
    pass
class BLEDeviceDisconnectedException(Exception):
    """Raised when the BLE device disconnects unexpectedly."""
    pass
class BLEDeviceMissingAddressException(Exception):
    """Raised when a BLE operation is attempted without a device address."""
    pass
| 2.0625 | 2 |
modules/platforms/python/examples/failover.py | DirectXceriD/gridgain | 1 | 12757716 | # GridGain Community Edition Licensing
# Copyright 2019 GridGain Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License") modified with Commons Clause
# Restriction; you may not use this file except in compliance with the License. You may obtain a
# copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
# Commons Clause Restriction
#
# The Software is provided to you by the Licensor under the License, as defined below, subject to
# the following condition.
#
# Without limiting other conditions in the License, the grant of rights under the License will not
# include, and the License does not grant to you, the right to Sell the Software.
# For purposes of the foregoing, “Sell” means practicing any or all of the rights granted to you
# under the License to provide to third parties, for a fee or other consideration (including without
# limitation fees for hosting or consulting/ support services related to the Software), a product or
# service whose value derives, entirely or substantially, from the functionality of the Software.
# Any license notice or attribution required by the License must also include this Commons Clause
# License Condition notice.
#
# For purposes of the clause above, the “Licensor” is Copyright 2019 GridGain Systems, Inc.,
# the “License” is the Apache License, Version 2.0, and the Software is the GridGain Community
# Edition software provided with this notice.
from pyignite import Client
from pyignite.datatypes.cache_config import CacheMode
from pyignite.datatypes.prop_codes import *
from pyignite.exceptions import SocketError
# Candidate server nodes; the client fails over between them.
nodes = [
    ('127.0.0.1', 10800),
    ('127.0.0.1', 10801),
    ('127.0.0.1', 10802),
]
client = Client(timeout=4.0)
client.connect(nodes)
print('Connected to {}'.format(client))
# REPLICATED so every node holds the data and any node can serve it.
my_cache = client.get_or_create_cache({
    PROP_NAME: 'my_cache',
    PROP_CACHE_MODE: CacheMode.REPLICATED,
})
my_cache.put('test_key', 0)
# abstract main loop (runs until interrupted; the client reconnects to the
# next node transparently on the operation after a failure)
while True:
    try:
        # do the work: read-increment-write a counter
        test_value = my_cache.get('test_key')
        my_cache.put('test_key', test_value + 1)
    except (OSError, SocketError) as e:
        # recover from error (repeat last command, check data
        # consistency or just continue − depends on the task)
        print('Error: {}'.format(e))
        print('Last value: {}'.format(my_cache.get('test_key')))
        print('Reconnected to {}'.format(client))
# Connected to 127.0.0.1:10800
# Error: [Errno 104] Connection reset by peer
# Last value: 6999
# Reconnected to 127.0.0.1:10801
# Error: Socket connection broken.
# Last value: 12302
# Reconnected to 127.0.0.1:10802
# Error: [Errno 111] Client refused
# Traceback (most recent call last):
# ...
# pyignite.exceptions.ReconnectError: Can not reconnect: out of nodes
| 1.648438 | 2 |
src/gui/MainPage.py | tinfins/CMSC495-Group-3 | 1 | 12757717 | <gh_stars>1-10
import logging.config
import tkinter as tk
from tkinter import ttk
from PIL import ImageTk, Image
class MainPage(tk.Frame):
    '''
    Class creates Main Page window.
    '''
    def __init__(self, master, controller):
        '''
        Initialize Main page: builds the frame hierarchy, logo, welcome
        label, and Home/Logout buttons inside *master*.
        '''
        # NOTE(review): the class inherits tk.Frame but initializes via
        # ttk.Frame.__init__ -- confirm which base is intended.
        ttk.Frame.__init__(self, master)
        self.logger = logging.getLogger(__name__)
        self.master = master
        self.controller = controller
        # Master frame for all widgets
        self.master_frame = ttk.Frame(self.master)
        # Frame for window elements
        self.top_frame = ttk.Frame(self.master_frame)
        self.mid_frame = ttk.Frame(self.master_frame)
        self.view_label_frame = ttk.LabelFrame(self.mid_frame, text='Home Page')
        self.content_frame = ttk.Frame(self.view_label_frame, width=800, height=600)
        self.bottom_frame = ttk.Frame(self.master_frame)
        # Pack root frame
        self.master_frame.pack(side=tk.TOP, fill=tk.BOTH)
        self.top_frame.pack(side=tk.TOP)
        self.mid_frame.pack(side=tk.TOP, fill=tk.BOTH)
        self.view_label_frame.pack(padx=75, pady=10)
        self.content_frame.pack(side=tk.TOP, fill=tk.BOTH)
        self.bottom_frame.pack(side=tk.BOTTOM)
        # Logo on MainPage
        img1 = ImageTk.PhotoImage(Image.open("src/assets/Logo.png").resize((80, 100), Image.ANTIALIAS))
        img_panel1 = ttk.Label(self.top_frame, image=img1)
        # Keep a reference to the image on the widget so it stays alive.
        img_panel1.image = img1
        # Welcome label
        self.welcome_label = ttk.Label(self.top_frame)
        # Home button
        self.home_button = ttk.Button(self.top_frame, text='Home')
        # Logout button
        self.logout_button = ttk.Button(self.bottom_frame, text='Logout')
        # Pack logo
        img_panel1.pack(side=tk.LEFT, padx=25, pady=25)
        # Pack frames with widgets
        self.welcome_label.pack(side=tk.LEFT, padx=25, pady=10)
        self.home_button.pack(padx=25, pady=25)
        self.logout_button.pack(padx=25, pady=25)
| 2.8125 | 3 |
core/models.py | ADiscordUser/adiscorduser-site | 2 | 12757718 | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from rest_framework.authtoken.models import Token
from django.dispatch import receiver
from django.core.validators import MinLengthValidator
class UserManager(BaseUserManager):
    """Manager that normalizes username/email when creating users."""

    def create_user(self, username, email, password=None, **extra_fields):
        """Create, save and return a user with a properly hashed password.

        Fix: the default value had been replaced by an `<PASSWORD>`
        anonymization artifact, which is not valid Python; restore the
        conventional `None` default (set_password(None) marks the password
        unusable).
        """
        user = self.model(
            username=self.model.normalize_username(username),
            email=self.normalize_email(email),
            **extra_fields
        )
        user.set_password(password)
        user.save()
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """Custom auth user identified by a unique, immutable username."""
    # 3-20 characters, unique, and not editable after creation.
    username = models.CharField(max_length=20, validators=[MinLengthValidator(3)], unique=True, editable=False)
    email = models.EmailField(unique=True)
    created_at = models.DateTimeField(auto_now_add=True)
    is_staff = models.BooleanField(default=False)
    is_superuser = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)
    objects = UserManager()
    # Authenticate by username; email is the contact field.
    USERNAME_FIELD = "username"
    EMAIL_FIELD = "email"
    REQUIRED_FIELDS = []
    def __str__(self):
        return self.normalize_username(self.username)
@receiver(models.signals.post_save, sender=User)
def create_api_key(sender, instance=None, created=False, **kwargs):
    """Create a DRF auth token for every newly created user.

    Fix: the final line had stray metadata text fused onto it
    ("| 2.078125 | 2 |"), which was a syntax error.
    """
    if created:
        Token.objects.create(user=instance)
src/inverse_kinematics.py | advaitp/rover | 1 | 12757719 | import sympy as sy
from sympy.physics import mechanics as mc
import numpy as np
from sympy import sympify, nsimplify
from forward_kinematics import forward
from sympy import Integral, Matrix, pi, pprint
def Inverse_kin(T0_4, T0_3, T0_2, T0_1, X):
    """Build the manipulator Jacobian from the chain transforms.

    The top rows are the Jacobian of the end-effector position with
    respect to the joint variables *X*; the bottom rows stack the joint
    z-axes taken from each cumulative transform.
    """
    position = T0_4[:3, 3]
    linear_part = position.jacobian(X)
    angular_part = T0_1[:3, 2].row_join(
        T0_2[:3, 2].row_join(T0_3[:3, 2].row_join(T0_4[:3, 2]))
    )
    jacobian = linear_part.col_join(angular_part)
    # Snap tiny numeric noise to exact rationals.
    return nsimplify(jacobian, tolerance=1e-3, rational=True)
if __name__ == "__main__" :
    # Symbolic joint variables and DH parameters.
    R, theta, alpha, a, d, theta1, theta2, theta3, theta4, theta5, d1, d2, d3, d4, d5 = sy.symbols('R, theta, alpha, a, d, theta1, theta2, theta3, theta4, theta5, d1, d2, d3, d4, d5')
    pi=np.pi
    X = [theta1, theta2, theta3, theta4]
    # Solution 0 0 550
    X_sub = [0,0,0,0]
    T0_4, T0_3, T0_2, T0_1 = forward()
    # Substitute the test joint angles and link offsets (mm) to get the
    # end-effector pose for validation.
    T0_f=T0_4.subs({theta1:X_sub[0],theta2:X_sub[1],theta3:X_sub[2],theta4:X_sub[3], d1:150, d2:0, d3:0, d4:400})
    f_x, f_y, f_z = T0_f[0,3], T0_f[1,3], T0_f[2,3]
    print(f'Locations : {f_x}, {f_y}, {f_z}')
    print('Location calculated from input theta value it is validated using thetas.Using location values we validate joint angles')
    J = Inverse_kin(T0_4, T0_3, T0_2, T0_1, X)
    # Evaluate the Jacobian numerically at the same configuration.
    J_val=J.subs({theta1:X_sub[0],theta2:X_sub[1],theta3:X_sub[2],theta4:X_sub[3], d1:150, d2:0, d3:0, d4:400})
    J_val= nsimplify(J_val,tolerance=1e-3,rational=True)
    J_val=np.array(J_val,dtype='float')
    # print(f'Jacobian for joint angles: {X_sub}')
    # pprint(J_val)
    # Pseudo-inverse maps end-effector velocity/pose back to joint space.
    J_inv=np.linalg.pinv(J_val)
    J_inv= nsimplify(J_inv,tolerance=1e-3,rational=True)
    print("Inverse kinematics Validation")
    print(f'Location of end effector {[f_x, f_y, f_z, 0, 0, 0]}')
    pos = np.matrix([f_x, f_y, f_z, 0, 0, 0])
    # pos = np.matrix([0, 0, -150, 0, 0, 0])
    # NOTE(review): the trailing *pi scaling looks like a radian/degree
    # adjustment -- confirm it is intentional.
    j_a =(J_inv@pos.T)*pi
    print('Joint Angles')
    print(f'Theta1 : {j_a[0][0].flatten()}')
    print(f'Theta2 : {j_a[1][0].flatten()}')
    print(f'Theta3 : {j_a[2][0].flatten()}')
    print(f'Theta4 : {j_a[3][0].flatten()}')
    print(f'Theta5 : [[0]]')
| 2.890625 | 3 |
trabalhos/metodos_busca/busca_largura.py | brunocampos01/inteligencia-artificial | 11 | 12757720 | '''
width search
Explore all of the neighbors nodes at the present depth '''
three_d_array = [[[i for k in range(4)] for j in range(4)] for i in range(4)]
sum = three_d_array[1][2][3] + three_d_array[2][3][1] + three_d_array[0][0][0]
print(sum) | 3.421875 | 3 |
Algorithms/Implementation/jumping-on-the-clouds.py | vishu006/Hackerrank | 1 | 12757721 | # Author: <NAME>
#!/bin/python
import sys
# Python 2 script: greedily prefer 2-step jumps over safe clouds (0s),
# falling back to 1-step jumps; counts the total jumps needed.
count,pos,i=0,0,0
n = int(raw_input().strip())
c = map(int,raw_input().strip().split(' '))
# Sentinel so c[i+2] never runs off the end of the list.
c.append(1)
while i<n-1:
    pos=i
    #print pos,c[i+2]
    # Jump two clouds ahead whenever that cloud is safe.
    if c[i+2]==0:
        i=i+2
        #print i
    else:
        i+=1
    count+=1
    #print pos,count,i
print count
| 2.96875 | 3 |
fuzzymetaphone.py | sskadamb/csvmatch | 146 | 12757722 | import doublemetaphone
def match(value1, value2):
    """Return 1.0 if the Double Metaphone encodings of the two values agree.

    A match is any primary/secondary code collision, except that two empty
    secondary codes do not count as a match.
    """
    primary1, secondary1 = doublemetaphone.doublemetaphone(value1)
    primary2, secondary2 = doublemetaphone.doublemetaphone(value2)
    if primary1 == primary2 or primary1 == secondary2 or secondary1 == primary2:
        return 1.0
    if secondary1 == secondary2 and secondary1 != '':
        return 1.0
    return 0.0
| 3.03125 | 3 |
src/utils/main_util.py | jsiloto/adaptive-cob | 1 | 12757723 | import builtins as __builtin__
import json
import os
import time
import torch
from models import get_iou_types
from utils import misc_util
from utils.coco_eval_util import CocoEvaluator
from utils.coco_util import get_coco_api_from_dataset
def overwrite_dict(org_dict, sub_dict):
    """Recursively merge *sub_dict* into *org_dict* in place.

    A dict value whose key already exists in *org_dict* is merged
    key-by-key; any other value simply overwrites (or adds) the key.
    """
    for key, value in sub_dict.items():
        should_recurse = key in org_dict and isinstance(value, dict)
        if should_recurse:
            overwrite_dict(org_dict[key], value)
        else:
            org_dict[key] = value
def overwrite_config(config, json_str):
    """Overwrite *config* in place with values parsed from a JSON string."""
    overrides = json.loads(json_str)
    overwrite_dict(config, overrides)
def setup_for_distributed(is_master):
    """Silence print() on non-master processes.

    The replacement still honors an explicit ``force=True`` keyword, which
    is always popped so it never reaches the real print.
    """
    original_print = __builtin__.print

    def filtered_print(*args, **kwargs):
        forced = kwargs.pop('force', False)
        if is_master or forced:
            original_print(*args, **kwargs)

    __builtin__.print = filtered_print
def init_distributed_mode(world_size=1, dist_url='env://'):
    """Initialize torch.distributed (NCCL) from env vars set by the launcher.

    Supports torchrun-style (RANK/WORLD_SIZE/LOCAL_RANK) and SLURM
    (SLURM_PROCID) environments.  Returns (True, [device_id]) when
    distributed mode was initialized, or (False, None) otherwise.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        rank = int(os.environ['RANK'])
        world_size = int(os.environ['WORLD_SIZE'])
        device_id = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        rank = int(os.environ['SLURM_PROCID'])
        # Map the global rank onto a local GPU.
        device_id = rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        return False, None
    torch.cuda.set_device(device_id)
    dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(rank, dist_url), flush=True)
    torch.distributed.init_process_group(backend=dist_backend, init_method=dist_url,
                                         world_size=world_size, rank=rank)
    torch.distributed.barrier()
    # Only the master process keeps printing after this point.
    setup_for_distributed(rank == 0)
    return True, [device_id]
def warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor):
    """LambdaLR that ramps the LR multiplier linearly from warmup_factor to 1.

    The multiplier reaches 1 at *warmup_iters* and stays there afterwards.
    """
    def scale(iteration):
        if iteration >= warmup_iters:
            return 1
        progress = float(iteration) / warmup_iters
        return warmup_factor * (1 - progress) + progress

    return torch.optim.lr_scheduler.LambdaLR(optimizer, scale)
@torch.no_grad()
def evaluate(model, data_loader, device):
    """Run COCO-style evaluation of *model* over *data_loader*.

    Moves inputs to *device*, collects per-image predictions on CPU, feeds
    them to a CocoEvaluator, and returns it after accumulating/summarizing.
    """
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device('cpu')
    model.eval()
    metric_logger = misc_util.MetricLogger(delimiter=' ')
    header = 'Test:'
    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    for id, (image, targets) in enumerate(metric_logger.log_every(data_loader, 100, header)):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        # Synchronize so model_time measures the forward pass accurately.
        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)
        # Evaluation happens on CPU tensors.
        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time
        # print(targets, outputs)
        res = {target['image_id'].item(): output for target, output in zip(targets, outputs)}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)
        # exit()
        #
        # if id > 30:
        #     break
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    # print('Averaged stats:', metric_logger)
    coco_evaluator.synchronize_between_processes()
    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    # Restore the thread count changed at the top of this function.
    torch.set_num_threads(n_threads)
    return coco_evaluator
| 1.976563 | 2 |
scripts/edenise.py | rhayes777/PyAutoF | 39 | 12757724 | #!/usr/bin/env python
from configparser import ConfigParser
from sys import argv
from autofit.tools import edenise
def main(root_directory):
    """Read eden.ini from *root_directory* and run the edenise conversion.

    Prints a usage message and exits with status 1 when the configuration
    is missing or malformed.
    """
    # Local import: ConfigParser failures raise configparser.Error
    # subclasses, not ValueError.
    from configparser import Error as ConfigError
    try:
        config = ConfigParser()
        config.read(
            f"{root_directory}/eden.ini"
        )
        # Comma-separated dependency list, whitespace-tolerant.
        eden_dependencies = [
            dependency.strip()
            for dependency
            in config.get(
                "eden",
                "eden_dependencies"
            ).split(",")
        ]
        edenise.edenise(
            root_directory=root_directory,
            name=config.get("eden", "name"),
            prefix=config.get("eden", "prefix"),
            eden_prefix=config.get("eden", "eden_prefix"),
            eden_dependencies=eden_dependencies,
            should_rename_modules=config.get("eden", "should_rename_modules").lower().startswith("t"),
            should_remove_type_annotations=config.get("eden", "should_remove_type_annotations").lower().startswith("t"),
        )
    except (ValueError, ConfigError):
        # Fix: the original caught only ValueError, but a missing/invalid
        # eden.ini raises configparser errors (e.g. NoSectionError), which
        # escaped as raw tracebacks instead of this usage message.
        print("Usage: ./edenise.py root_directory")
        exit(1)
if __name__ == "__main__":
    # Fix: invoking the script with no argument previously raised
    # IndexError before any usage message could be printed.
    if len(argv) < 2:
        print("Usage: ./edenise.py root_directory")
        exit(1)
    main(argv[1])
| 2.484375 | 2 |
kairon/shared/tornado/handlers/index.py | rit1200/kairon | 97 | 12757725 | from abc import ABC
from .base import BaseHandler
class IndexHandler(BaseHandler, ABC):
    """Health-check endpoint that reports the server is up on GET and POST."""

    _STATUS_MESSAGE = "Kairon Server Running"

    async def get(self):
        self.write(self._STATUS_MESSAGE)

    async def post(self):
        self.write(self._STATUS_MESSAGE)
| 2.78125 | 3 |
app/templates/backend/settings/base.py | JTarball/generator-django-polymer | 0 | 12757726 | #!/usr/bin/env python
"""
This is the project main settings file. (Source controlled).
If you need to override a setting locally, use settings/local.py (imported at the
bottom of this file when DEV_SETTINGS is enabled).
"""
GENERATOR_DJANGO_POLYMER_VERSION = "<%= generator_version %>"
import os
import sys
import logging
from backend.utils import logger
# Django settings for project.
DEBUG = False # As of Django 1.5 all logging messages reaching the django logger are sent to Console if (DEBUG=True)
TEMPLATE_DEBUG = False
DEV_SETTINGS = <%- dj_devsettings %>
LOG_LEVEL = <%- dj_loglevel %>
# General Directory Structure
# +++ apps
# ++ someApp
# ++ templatetags
# -- someAppTags.py
# - models.py
# - urls.py
# - views.py
# - utils.py
# ++ someOtherApp
# ++ templatetags
# -- someOtherAppTags.py
# - models.py
# - urls.py
# - views.py
# - utils.py
# +++ project
# ++ settings
# - base.py (THIS FILE)
# - dev.py
# - global.py* (optional)
# ++ templates
# # someApp
# - someTemplate.html
# ++ test_factories
# - usefulFactoryScript.py
# ++ utils
# - usefulFunctions.py
# ++ views
# - mixins.py
# ++ static
# - staticFile.html/css/png
# ++ media
# - urls.py
# - views.py
# Database connection; all values are template placeholders filled in by the
# generator at project-creation time.
DATABASES = {
    'default': {
        'ENGINE': <%- db_engine %>,  # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': <%- db_name %>,  # Or path to database file if using sqlite3.
        'USER': <%- db_user %>,  # Not used with sqlite3.
        'PASSWORD': <%- db_password %>,  # Not used with sqlite3.
        'HOST': <%- db_host %>,  # Set to empty string for localhost. Not used with sqlite3.
        'PORT': <%- db_port %>,  # Set to empty string for default. Not used with sqlite3.
    }
}

# ============================================================================ #
# Main Paths
# ============================================================================ #
# Make filepaths relative to settings. This dynamically discovers the path to the project.
# Layout (see tree above): SITE_ROOT / project (= PROJECT_ROOT) and SITE_ROOT / apps.
PROJECT_ROOT = os.path.dirname(os.path.realpath(os.path.dirname(__file__)))
SITE_ROOT = os.path.dirname(PROJECT_ROOT)
APP_ROOT = os.path.join(SITE_ROOT, 'apps')

# Add App/Library Path so apps can be imported without the 'apps.' prefix.
sys.path.append(APP_ROOT)
# ---------------------------------------------------------------------------- #
# General
# ---------------------------------------------------------------------------- #
# Make this unique, and don't share it with anybody.
# (This should be unique if this page was created using generator-django-polymer.)
SECRET_KEY = '<%= secret_key %>'

ADMINS = ((<%- admin_name %>, <%- admin_email %>),)
MANAGERS = ADMINS

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

# Extend Django's default context processors rather than replacing them.
import django.conf.global_settings as DEFAULT_SETTINGS
TEMPLATE_CONTEXT_PROCESSORS = DEFAULT_SETTINGS.TEMPLATE_CONTEXT_PROCESSORS + (
    'django.core.context_processors.request',
)

ROOT_URLCONF = 'project.urls'

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-GB'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# ---------------------------------------------------------------------------- #
# Media / Static
# ---------------------------------------------------------------------------- #
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')

# URL that handles the media served from MEDIA_ROOT. Make sure to use a trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'

# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
# use "manage.py collectstatic" collects to STATIC_ROOT
STATIC_ROOT = os.path.join(PROJECT_ROOT, '_auto_static')

# URL prefix for static files. Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# Additional locations of static files (absolute)
STATICFILES_DIRS = (os.path.join(PROJECT_ROOT, 'static'),)
# Other ways
#STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
#)

# List of finder classes that know how to find static files in various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# ---------------------------------------------------------------------------- #
# Templating
# ---------------------------------------------------------------------------- #
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    'django.template.loaders.eggs.Loader',
)

# NOTE: It makes sense to keep all templates under /templates/<app> for easier use.
# The applications in this project dont need to be portable so this decision makes sense.
# For the sake of keeping the ability to add templates under an application we add the
# following code: (but it is not technically needed)
# Dir Structure
# + someApplication
#   + templates
#     + someApplication
#       - someTemplate.html
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, 'templates'), )
# ---------------------------------------------------------------------------- #
# Fixtures
# ---------------------------------------------------------------------------- #
# The list of directories to search for fixtures
# Note: It's bad practice to use fixtures so uncomment this.
# Use test factories or migrations for data instead
#FIXTURE_DIRS = (os.path.join(PROJECT_ROOT, 'fixtures'))
# ---------------------------------------------------------------------------- #
# Installed Applications
# ---------------------------------------------------------------------------- #
DJANGO_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.admindocs',
)

THIRD_PATH_APPS = (
    'django_extensions',
    'mptt',
    'taggit',
)

# HACK BEFORE DJANGO.CONTRIB.AUTH
# NOTE(review): all local apps are currently commented out, so LOCAL_APPS is
# an empty tuple; the generator is expected to enable entries as needed.
LOCAL_APPS = (
    #'search',
    #'accounts',
    #'accounts.registration',
    #'accounts.profiles',
    #'blog',
    #'content',
    #'mailbot',
    #'search',
    #'shop',
    #'cms',
    #'stats',
    #'users',
    #'users.reg',
    #'admin',
)

INSTALLED_APPS = DJANGO_APPS + THIRD_PATH_APPS + LOCAL_APPS
# ---------------------------------------------------------------------------- #
# Third Party App Dependencies
# ---------------------------------------------------------------------------- #
##############################################################################
# Accounts App
# NOTE(review): 'accounts' is commented out of LOCAL_APPS above — this custom
# user model will only resolve once that app is enabled; verify before use.
AUTH_USER_MODEL = 'accounts.AccountsUser'
REGISTRATION_OPEN = True

##############################################################################
# Reg App
# Actually from django.contrib.auth (AUTHENTICATION)
LOGIN_URL = '/accounts/auth/login/'
LOGIN_REDIRECT_URL = '/accounts/auth/login/'  # (global_settings.py) '/users/profile/' - - default
# One-week activation window; you may, of course, use a different value.
ACCOUNT_ACTIVATION_DAYS = 7

##############################################################################
# Django Shop
SHOP_SHIPPING_FLAT_RATE = '-30'
#SHOP_SHIPPING_BACKENDS = [
#    'shop.shipping.backends.flat_rate.FlatRateShipping',
#]
SHOP_PAYMENT_BACKENDS = [
    'payment.shipping.SkipShippingBackend',
    # 'shop.payment.backends.pay_on_delivery.PayOnDeliveryBackend'
]
SHOP_CART_MODIFIERS = [
    'shop_simplevariations.cart_modifier.ProductOptionsModifier',
    'payment.modifiers.FixedShippingCosts',
    'payment.modifiers.FixedTaxRate',
]

##############################################################################
# CORS Django App
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
#CORS_ALLOW_ALL_ORIGIN = True
#CORS_ORIGIN_REGEX_WHITELIST = ('^(https?://)?$', )

##############################################################################
# Django Dynamic Fixture
DDF_DEFAULT_DATA_FIXTURE = 'sequential'  # or 'static_sequential' or 'random' or 'path.to.yout.DataFixtureClass'
IMPORT_DDF_MODELS = ''
DDF_FILL_NULLABLE_FIELDS = True

##############################################################################
# Django Nose
# overrides global settings for default test runner
# (we dont want to use default django testing as it has many limitations)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# ---------------------------------------------------------------------------- #
# LOGGING - logging configuration
# ---------------------------------------------------------------------------- #
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        # NOTE(review): several fields are duplicated in this format string
        # (levelname, module, process) — probably unintentional; verify.
        'verbose': {
            'format': '%(levelname)s %(created)f %(filename)s %(funcName)s %(levelname)s %(module)s %(pathname)s %(process)d %(processName)s %(lineno)s %(levelno)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
        # NOTE(review): the bare name ``utils`` is not bound by this module's
        # imports ("from backend.utils import logger" binds only ``logger``);
        # this line raises NameError at import time unless ``backend.utils``
        # is also imported as ``utils`` — verify against backend/utils.
        'custom': {
            '()': utils.DjangoProjectLogFormatter,
        },
    },
    # special filter: e.g. only log when debug=False (Django only provides two filters) (make a custom if needed)
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        }
    },
    'handlers': {
        # Daily-rotated file log kept for 30 days under SITE_ROOT/var/log/.
        'file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'when': 'midnight',
            'interval': 1,
            'backupCount': 30,
            'filename': SITE_ROOT + '/var/log/' + 'project' + '.log',
            'formatter': 'verbose',
        },
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'custom'
        },
        # Emails site ADMINS on errors, but only when DEBUG is False.
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
            'filters': ['require_debug_false']
        }
    },
    # This is the logger you use e.g. logging.getLogger(django)
    'loggers': {
        'django': {
            'handlers': ['console'],
            'propagate': True,
            'level': 'DEBUG',
        },
        'django.request': {
            'handlers': ['console', 'file'],
            'level': 'INFO',
            'propagate': False,
        },
        'project_logger': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
    }
}

# ---------------------------------------------------------------------------- #
# Finally, Import any other settings
# ---------------------------------------------------------------------------- #
# Development-only overrides live in settings/local.py.
if DEV_SETTINGS:
    from .local import *
agent/src/agent/pipeline/config/jython_scripts/influx2.py | anodot/daria | 16 | 12757727 | global sdc
try:
sdc.importLock()
import time
from datetime import datetime, timedelta
import sys
import os
sys.path.append(os.path.join(os.environ['SDC_DIST'], 'python-libs'))
import requests
finally:
sdc.importUnlock()
def get_interval():
    """Polling interval in seconds, read from the pipeline parameters."""
    configured = sdc.userParams['INTERVAL_IN_SECONDS']
    return int(configured)
def get_now_with_delay():
    """Current Unix time shifted back by the configured ingestion delay."""
    delay = int(sdc.userParams['DELAY_IN_SECONDS'])
    return int(time.time()) - delay
def to_timestamp(date):
    """Convert a naive UTC datetime to integer Unix seconds (truncated)."""
    since_epoch = date - datetime(1970, 1, 1)
    return int(since_epoch.total_seconds())
def _filter(list_):
return list(filter(lambda x: bool(x), list_))
def csv_to_json(csv_data, last_timestamp):
    """Parse Influx-style CSV output into a list of dicts.

    The payload may contain several result blocks separated by blank lines
    (``\\r\\n\\r\\n``); each block's first row is its header. Every record is
    stamped with ``last_timestamp``. Empty fields are dropped before the
    header/value zip, mirroring Influx's annotated-CSV padding columns.
    """
    if not str(csv_data).strip():
        return []

    records = []
    for block in _filter(csv_data.split('\r\n\r\n')):
        lines = block.split('\r\n')
        header = _filter(lines[0].split(','))
        for line in lines[1:]:
            record = dict(zip(header, _filter(line.split(','))))
            record['last_timestamp'] = last_timestamp
            records.append(record)
    return records
# Main origin loop: repeatedly query the configured URL for one interval's
# worth of data and feed the parsed records into the pipeline in batches.
# ``sdc`` is injected by the StreamSets Jython runtime.
entityName = ''
interval = timedelta(seconds=get_interval())

# Resume from the stored offset; otherwise use the configured initial offset,
# else start one interval before the current minute.
if sdc.lastOffsets.containsKey(entityName):
    offset = int(float(sdc.lastOffsets.get(entityName)))
elif sdc.userParams['INITIAL_OFFSET']:
    offset = to_timestamp(datetime.strptime(sdc.userParams['INITIAL_OFFSET'], '%d/%m/%Y %H:%M'))
else:
    offset = to_timestamp(datetime.utcnow().replace(second=0, microsecond=0) - interval)

sdc.log.info('OFFSET: ' + str(offset))

N_REQUESTS_TRIES = 3

while True:
    if sdc.isStopped():
        break
    # Wait until the window [offset, offset + interval] is fully in the past
    # (accounting for the configured delay).
    now_with_delay = get_now_with_delay() - interval.total_seconds()
    if offset > now_with_delay:
        time.sleep(offset - now_with_delay)

    start = int(offset)
    stop = int(offset + interval.total_seconds())
    session = requests.Session()
    session.headers = sdc.userParams['HEADERS']
    for i in range(1, N_REQUESTS_TRIES + 1):
        try:
            res = session.post(
                sdc.userParams['URL'],
                data=sdc.userParams['QUERY'].format(start, stop),
                timeout=sdc.userParams['TIMEOUT']
            )
            res.raise_for_status()
        except requests.HTTPError as e:
            # raise_for_status() guarantees ``res`` is bound here.
            requests.post(sdc.userParams['MONITORING_URL'] + str(res.status_code))
            sdc.log.error(str(e))
            if i == N_REQUESTS_TRIES:
                raise
            # Exponential backoff before the next retry.
            time.sleep(2 ** i)
        else:
            # FIX: stop retrying once the request succeeds; previously the
            # loop had no break, so every successful query was POSTed
            # N_REQUESTS_TRIES times per interval.
            break

    cur_batch = sdc.createBatch()
    for obj in csv_to_json(res.text, int(offset)):
        record = sdc.createRecord('record created ' + str(datetime.now()))
        record.value = obj
        cur_batch.add(record)
        # Flush full batches, committing the current offset with each one.
        if cur_batch.size() == sdc.batchSize:
            cur_batch.process(entityName, str(offset))
            cur_batch = sdc.createBatch()
            if sdc.isStopped():
                break
    cur_batch.process(entityName, str(offset))
    offset += interval.total_seconds()
glue/plugins/ginga_viewer/client.py | aak65/glue | 0 | 12757728 | from __future__ import print_function, division
import logging
from time import time
import numpy as np
from ...core.exceptions import IncompatibleAttribute
from ...core.util import Pointer, split_component_view
from ...utils import view_shape, stack_view, color2rgb
from ...clients.image_client import ImageClient
from ...clients.layer_artist import (LayerArtistBase,
ImageLayerBase, SubsetImageLayerBase)
from ginga.util import wcsmod
from ginga.misc import Bunch
wcsmod.use('astropy')
from ginga import AstroImage, BaseImage
class GingaClient(ImageClient):
    """ImageClient implementation that renders glue data on a ginga canvas.

    Layer artists are created per-layer; RGB and scatter layers are not
    supported by this backend.
    """

    def __init__(self, data, canvas=None, artist_container=None):
        super(GingaClient, self).__init__(data, artist_container)
        self._setup_ginga(canvas)

    def _setup_ginga(self, canvas):
        """Store the canvas and initialise crosshair bookkeeping.

        :raises ValueError: if no canvas is supplied — this client cannot
            create one itself.
        """
        if canvas is None:
            raise ValueError("GingaClient needs a canvas")
        self._canvas = canvas
        self._wcs = None
        self._crosshair_id = '_crosshair'

    def _new_rgb_layer(self, layer):
        # RGB composite layers are not supported by the ginga backend.
        raise NotImplementedError()

    def _new_subset_image_layer(self, layer):
        return GingaSubsetImageLayer(layer, self._canvas)

    def _new_image_layer(self, layer):
        return GingaImageLayer(layer, self._canvas)

    def _new_scatter_layer(self, layer):
        # Scatter layers are not supported by the ginga backend.
        raise NotImplementedError()

    def _update_axis_labels(self):
        # Axis labels are managed by ginga itself; nothing to do here.
        pass

    def set_cmap(self, cmap):
        self._canvas.set_cmap(cmap)

    def show_crosshairs(self, x, y):
        """Draw a red plus-style marker at (x, y), replacing any old one."""
        self.clear_crosshairs()
        c = self._canvas.viewer.getDrawClass('point')(x, y, 6, color='red',
                                                      style='plus')
        self._canvas.add(c, tag=self._crosshair_id, redraw=True)

    def clear_crosshairs(self):
        """Best-effort removal of the crosshair marker (may not exist)."""
        try:
            self._canvas.deleteObjectsByTag([self._crosshair_id], redraw=False)
        except Exception:  # FIX: was a bare except; keep best-effort semantics
            pass
class GingaLayerArtist(LayerArtistBase):
    """Common base for ginga layer artists: holds the canvas and visibility.

    The ``Pointer`` descriptors proxy the public ``zorder``/``visible``
    attributes onto the private ``_zorder``/``_visible`` slots, which
    subclasses override with real properties where extra behavior is needed.
    """

    zorder = Pointer('_zorder')
    visible = Pointer('_visible')

    def __init__(self, layer, canvas):
        super(GingaLayerArtist, self).__init__(layer)
        self._canvas = canvas
        self._visible = True

    def redraw(self, whence=0):
        # Delegate to the ginga canvas; ``whence`` controls how much of the
        # rendering pipeline ginga re-runs (0 = full redraw).
        self._canvas.redraw(whence=whence)
class GingaImageLayer(GingaLayerArtist, ImageLayerBase):
    """Layer artist that displays a glue Data object as the main ginga image."""

    # Part of the ImageLayerBase interface but unused: ginga manages its own
    # colormap and normalization.
    cmap = None
    norm = None

    def __init__(self, layer, canvas):
        super(GingaImageLayer, self).__init__(layer, canvas)
        self._override_image = None
        self._tag = "layer%s_%s" % (layer.label, time())
        self._img = None  # DataImage instance, created lazily in update()
        self._enabled = True

    @property
    def visible(self):
        return self._visible

    @visible.setter
    def visible(self, value):
        if self._visible == value:
            return
        self._visible = value
        if not value:
            self.clear()
        elif self._img:
            self._canvas.set_image(self._img)

    @property
    def zorder(self):
        return self._zorder

    @zorder.setter
    def zorder(self, value):
        self._zorder = value
        try:
            canvas_img = self._canvas.getObjectByTag('_image')
            canvas_img.set_zorder(value)
        except KeyError:
            # object does not yet exist on canvas
            pass

    def set_norm(self, **kwargs):
        # NOP for ginga
        pass

    def clear_norm(self):
        # NOP for ginga
        pass

    def override_image(self, image):
        """Temporarily show a different image"""
        self._override_image = image

    def clear_override(self):
        self._override_image = None

    def clear(self):
        """Remove the previously-added image from the canvas (best-effort)."""
        try:
            self._canvas.deleteObjectsByTag(['_image'], redraw=False)
        except Exception:  # FIX: was a bare except; keep best-effort semantics
            pass

    @property
    def enabled(self):
        return self._enabled

    def update(self, view, transpose=False):
        """Sync the ginga image model with the given view into the data."""
        if not self.visible:
            return

        # update ginga model; create the DataImage lazily on first use
        comp, view = split_component_view(view)
        if self._img is None:
            self._img = DataImage(self.layer, comp, view, transpose)
            self._canvas.set_image(self._img)

        self._img.data = self.layer
        self._img.component = comp
        self._img.view = view
        self._img.transpose = transpose
        self._img.override_image = self._override_image
        self.redraw()
class GingaSubsetImageLayer(GingaLayerArtist, SubsetImageLayerBase):
    """Layer artist that overlays a glue Subset mask on the ginga canvas."""

    def __init__(self, layer, canvas):
        super(GingaSubsetImageLayer, self).__init__(layer, canvas)
        self._img = None   # SubsetImage: live view of the subset mask
        self._cimg = None  # ginga canvas wrapper around self._img
        self._tag = "layer%s_%s" % (layer.label, time())
        self._enabled = True

    @property
    def visible(self):
        return self._visible

    @property
    def enabled(self):
        return self._enabled

    @visible.setter
    def visible(self, value):
        if value is self._visible:
            return
        self._visible = value
        if not value:
            self.clear()
        elif self._cimg:
            self._canvas.add(self._cimg, tag=self._tag, redraw=True)

    @property
    def zorder(self):
        return self._zorder

    @zorder.setter
    def zorder(self, value):
        self._zorder = value
        try:
            canvas_img = self._canvas.getObjectByTag(self._tag)
            canvas_img.set_zorder(value)
        except KeyError:
            # object does not yet exist on canvas
            pass

    def clear(self):
        """Remove this layer's canvas object (best-effort; may be absent)."""
        try:
            self._canvas.deleteObjectsByTag([self._tag], redraw=True)
        except Exception:  # FIX: was a bare except; keep best-effort semantics
            pass

    def _update_ginga_models(self, view, transpose=False):
        """Create/refresh the SubsetImage and its canvas wrapper."""
        subset = self.layer
        logging.getLogger(__name__).debug("View into subset %s is %s", self.layer, view)

        _, view = split_component_view(view)  # discard ComponentID
        r, g, b = color2rgb(self.layer.style.color)

        if self._img is None:
            self._img = SubsetImage(subset, view)
        if self._cimg is None:
            # SubsetImages can't be added to canvases directly. Need
            # to wrap into a ginga canvas type.
            Image = self._canvas.getDrawClass('image')
            self._cimg = Image(0, 0, self._img, alpha=0.5, flipy=False)

        self._img.view = view
        self._img.color = (r, g, b)
        self._img.transpose = transpose

    def _check_enabled(self):
        """
        Sync the enabled/disabled status, based on whether
        mask is computable
        """
        self._enabled = True
        try:
            # probe the first pixel only — cheap way to test computability
            view = tuple(0 for _ in self.layer.data.shape)
            self.layer.to_mask(view)
        except IncompatibleAttribute as exc:
            self._enabled = False
            self.disable_invalid_attributes(*exc.args)

        return self._enabled

    def _ensure_added(self):
        """ Add artist to canvas if needed """
        try:
            self._canvas.getObjectByTag(self._tag)
        except KeyError:
            self._canvas.add(self._cimg, tag=self._tag, redraw=False)

    def update(self, view, transpose=False):
        """Refresh the overlay; add or remove it depending on state."""
        self._check_enabled()
        self._update_ginga_models(view, transpose)

        if self._enabled and self._visible:
            self._ensure_added()
        else:
            self.clear()

        self.redraw(whence=0)
def forbidden(*args):
    """Stand-in for disabled inherited methods: always raises ValueError.

    Assigned over ginga's data-mutation API on the image subclasses below so
    that accidental direct pixel access fails loudly.
    """
    raise ValueError("Forbidden")
class DataImage(AstroImage.AstroImage):
    """
    A Ginga image subclass to interface with Glue Data objects
    """

    # Disable the parent class's direct data-access/mutation API: this image
    # is a lazy view into a glue Data object, so raw pixel access must go
    # through _slice() instead.
    get_data = _get_data = copy_data = set_data = get_array = transfer = forbidden

    def __init__(self, data, component, view, transpose=False,
                 override_image=None, **kwargs):
        """
        Parameters
        ----------
        data : glue.core.data.Data
            The data to image
        component : glue.core.data.ComponentID
            The ComponentID in the data to image
        view : numpy-style view
            The view into the data to image. Must produce a 2D array
        transpose : bool
            Whether to transpose the view
        override_image : numpy array (optional)
            Whether to show override_image instead of the view into the data.
            The override image must have the same shape as the 2D view into
            the data.
        kwargs : dict
            Extra kwargs are passed to the superclass
        """
        self.transpose = transpose
        self.view = view
        self.data = data
        self.component = component
        # NOTE(review): the override_image argument is discarded here (always
        # reset to None) — callers assign the attribute later via update();
        # verify this is intentional.
        self.override_image = None
        super(DataImage, self).__init__(**kwargs)

    @property
    def shape(self):
        """
        The shape of the 2D view into the data
        """
        result = view_shape(self.data.shape, self.view)
        if self.transpose:
            result = result[::-1]
        return result

    def _get_fast_data(self):
        # Coarse 10x-downsampled view, used by ginga for quick statistics.
        return self._slice((slice(None, None, 10), slice(None, None, 10)))

    def _slice(self, view):
        """
        Extract a view from the 2D image.
        """
        if self.override_image is not None:
            return self.override_image[view]

        # Combining multiple views: First a 2D slice into an ND array, then
        # the requested view from this slice
        if self.transpose:
            views = [self.view, 'transpose', view]
        else:
            views = [self.view, view]
        view = stack_view(self.data.shape, *views)
        return self.data[self.component, view]
class SubsetImage(BaseImage.BaseImage):
    """
    A Ginga image subclass to interface with Glue subset objects
    """

    # Disable the parent class's direct data-access/mutation API: this image
    # is a live, read-only RGBA rendering of the subset mask.
    get_data = _get_data = copy_data = set_data = get_array = transfer = forbidden

    def __init__(self, subset, view, color=(0, 1, 0), transpose=False, **kwargs):
        """
        Parameters
        ----------
        subset : glue.core.subset.Subset
            The subset to image
        view : numpy-style view
            The view into the subset to image. Must produce a 2D array
        color : tuple of 3 floats in range [0, 1]
            The color to image the subset as
        transpose : bool
            Whether to transpose the view
        kwargs : dict
            Extra kwargs are passed to the ginga superclass
        """
        super(SubsetImage, self).__init__(**kwargs)
        self.subset = subset
        self.view = view
        self.transpose = transpose
        self.color = color
        self.order = 'RGBA'

    @property
    def shape(self):
        """
        Shape of the 2D view into the subset mask
        """
        result = view_shape(self.subset.data.shape, self.view)
        if self.transpose:
            result = result[::-1]
        return tuple(list(result) + [4])  # 4th dim is RGBA channels

    def _rgb_from_mask(self, mask):
        """
        Turn a boolean mask into a 4-channel RGBA image
        """
        r, g, b = self.color
        ones = mask * 0 + 255
        alpha = mask * 127  # selected pixels are half-transparent
        result = np.dstack((ones * r, ones * g, ones * b, alpha)).astype(np.uint8)
        return result

    def _get_fast_data(self):
        # Coarse 10x-downsampled view, used by ginga for quick statistics.
        return self._slice((slice(None, None, 10), slice(None, None, 10)))

    def _slice(self, view):
        """
        Extract a view from the 2D subset mask.
        """
        # Combining multiple views: First a 2D slice into an ND array, then
        # the requested view from this slice
        if self.transpose:
            views = [self.view, 'transpose', view]
        else:
            views = [self.view, view]
        view = stack_view(self.subset.data.shape, *views)
        mask = self.subset.to_mask(view)
        return self._rgb_from_mask(mask)

    def _set_minmax(self):
        # we already know the data bounds (8-bit RGBA channels)
        self.minval = 0
        self.maxval = 256
        self.minval_noinf = self.minval
        self.maxval_noinf = self.maxval

    def get_scaled_cutout_wdht(self, x1, y1, x2, y2, new_wd, new_ht):
        # The _doit flag alternates between calls, so the expensive upsampling
        # path below runs at most every other call.
        doit = getattr(self, '_doit', False)
        self._doit = not doit

        # default implementation if downsampling
        if doit or new_wd <= (x2 - x1 + 1) or new_ht <= (y2 - y1 + 1):
            return super(SubsetImage, self).get_scaled_cutout_wdht(x1, y1, x2, y2, new_wd, new_ht)

        # if upsampling, prevent extra to_mask() computation
        # FIX: use the builtin ``int`` — the ``np.int`` alias was deprecated in
        # NumPy 1.20 and removed in 1.24, so astype(np.int) now raises.
        x1, x2 = np.clip([x1, x2], 0, self.width - 2).astype(int)
        y1, y2 = np.clip([y1, y2], 0, self.height - 2).astype(int)

        result = self._slice(np.s_[y1:y2 + 1, x1:x2 + 1])

        yi = np.linspace(0, result.shape[0], new_ht).astype(int).reshape(-1, 1).clip(0, result.shape[0] - 1)
        xi = np.linspace(0, result.shape[1], new_wd).astype(int).reshape(1, -1).clip(0, result.shape[1] - 1)
        yi, xi = [np.array(a) for a in np.broadcast_arrays(yi, xi)]

        result = result[yi, xi]
        scale_x = 1.0 * result.shape[1] / (x2 - x1 + 1)
        scale_y = 1.0 * result.shape[0] / (y2 - y1 + 1)
        return Bunch.Bunch(data=result, scale_x=scale_x, scale_y=scale_y)
| 2.109375 | 2 |
src/medbert.py | confstantine/nlp-task | 1 | 12757729 | import codecs
import json
import os
import sys
sys.path.append("../")
sys.path.append("../transformers/src")
import copy
import gc
import torch
import pickle
from tqdm import tqdm
from utils import set_seed, get_task_data, random_split_train_and_dev
from dataset import PairSentenceClassificationDataset
from transformers import AutoTokenizer, BertConfig
from tokenizer import TransfomerTokenizer
from sklearn.model_selection import KFold
from model import Bert, TMPredictor
from finetune import SequenceClassificationTask
class TMDataset(PairSentenceClassificationDataset):
    """Pair-sentence dataset that additionally tracks a secondary label.

    Besides the base class's 'label' mapping, every record carries a
    'label_b', for which parallel category/id mappings are maintained.
    """

    def __init__(self, *args, **kwargs):
        super(TMDataset, self).__init__(*args, **kwargs)
        # Mirror the base class's category bookkeeping for the second label.
        self.categories_b = sorted({record['label_b'] for record in self.dataset})
        self.cat2id_b = {cat: idx for idx, cat in enumerate(self.categories_b)}
        self.id2cat_b = {idx: cat for idx, cat in enumerate(self.categories_b)}

    def _convert_to_transfomer_ids(self, bert_tokenizer):
        """Encode every (text_a, text_b) pair into transformer feature dicts."""
        encoded = []
        for _, sample in tqdm(enumerate(self.dataset)):
            (input_ids, input_mask, segment_ids,
             speaker_ids, e1_mask) = bert_tokenizer.sequence_to_ids(sample['text_a'],
                                                                    sample['text_b'])

            feature = {
                'input_ids': input_ids,
                'attention_mask': input_mask,
                'token_type_ids': segment_ids,
                'speaker_ids': speaker_ids,
                'e1_mask': e1_mask,
            }

            if not self.is_test:
                # Both label spaces are attached during training/evaluation.
                feature['label_ids'] = self.cat2id[sample['label']]
                feature['label_ids_b'] = self.cat2id_b[sample['label_b']]

            encoded.append(feature)

        return encoded
def freeze_params(model):
    """Set requires_grad=False for each of model.parameters()"""
    for parameter in model.parameters():
        parameter.requires_grad = False
if __name__ == "__main__":
set_seed(2021)
model_name_or_path = "../pretrained_models/medbert"
max_seq_length = 256
data_df = get_task_data('../data/source_datasets/train.jsonl')
train_data_df, dev_data_df = random_split_train_and_dev(data_df, split_rate=0.8)
tm_train_dataset = TMDataset(train_data_df)
tm_dev_dataset = TMDataset(dev_data_df, categories=tm_train_dataset.categories)
bert_vocab = AutoTokenizer.from_pretrained(model_name_or_path)
bert_vocab.add_special_tokens({'additional_special_tokens': ["[unused1]", "[unused2]", "|"]})
tokenizer = TransfomerTokenizer(bert_vocab, max_seq_length)
if os.path.exists("../cache/tm_dataset.pkl"):
tm_dataset = pickle.load(open("../cache/tm_dataset.pkl", "rb"))
else:
tm_dataset = TMDataset(data_df)
tm_dataset.convert_to_ids(tokenizer)
pickle.dump(tm_dataset, open("../cache/tm_dataset.pkl", "wb"))
# kf = KFold(5, shuffle=True, random_state=42)
# examples = copy.deepcopy(tm_dataset.dataset)
# for fold_, (train_ids, dev_ids) in enumerate(kf.split(examples)):
# print(f"start fold{fold_}")
# tm_train_dataset.dataset = [examples[_idx] for _idx in train_ids]
# tm_dev_dataset.dataset = [examples[_idx] for _idx in dev_ids]
#
# bert_config = BertConfig.from_pretrained(model_name_or_path,
# num_labels=len(tm_train_dataset.cat2id))
# bert_config.gradient_checkpointing = True
# dl_module = Bert.from_pretrained(model_name_or_path,
# config=bert_config)
# # freeze_params(dl_module.bert.embeddings)
# param_optimizer = list(dl_module.named_parameters())
# param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
# no_decay = ["bias", "LayerNorm.weight"]
# optimizer_grouped_parameters = [
# {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
# 'weight_decay': 0.01},
# {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
# ]
#
# model = SequenceClassificationTask(dl_module, 'adamw', 'lsce', cuda_device=0, ema_decay=0.995)
#
# save_module_path = '../checkpoint/medbert2/'
# os.makedirs(save_module_path, exist_ok=True)
# model.fit(tm_train_dataset,
# tm_dev_dataset,
# lr=2e-5,
# epochs=1,
# batch_size=64,
# params=optimizer_grouped_parameters,
# evaluate_save=True,
# save_module_path=save_module_path + str(fold_) + '.pth'
# )
#
# del dl_module
# del model
# gc.collect()
# torch.cuda.empty_cache()
# predict
ensemble_dl_modules = []
bert_config = BertConfig.from_pretrained(model_name_or_path,
num_labels=len(tm_dataset.cat2id))
for file_name_ in os.listdir('../checkpoint/medbert2/'):
if file_name_.startswith('.'):
continue
ensemble_dl_module = Bert(config=bert_config)
ensemble_dl_module.load_state_dict(torch.load('../checkpoint/medbert2/' + file_name_))
ensemble_dl_module.eval()
ensemble_dl_module.to('cuda:0')
ensemble_dl_modules.append(ensemble_dl_module)
tm_predictor_instance = TMPredictor(ensemble_dl_modules, tokenizer, tm_dataset.cat2id)
submit_result = []
with codecs.open('../data/source_datasets/testa.txt', mode='r', encoding='utf8') as f:
reader = f.readlines(f)
data_list = []
for dialogue_ in tqdm(reader):
dialogue_ = json.loads(dialogue_)
for content_idx_, contents_ in enumerate(dialogue_['dialog_info']):
terms_ = contents_['ner']
if len(terms_) != 0:
idx_ = 0
for _ner_idx, term_ in enumerate(terms_):
entity_ = dict()
entity_['dialogue'] = dialogue_
_text = dialogue_['dialog_info'][content_idx_]['text']
_text_list = list(_text)
_text_list.insert(term_['range'][0], '[unused1]')
_text_list.insert(term_['range'][1] + 1, '[unused2]')
_text = ''.join(_text_list)
if content_idx_ - 1 >= 0 and len(dialogue_['dialog_info'][content_idx_ - 1]) < 40:
forward_text = dialogue_['dialog_info'][content_idx_ - 1]['sender'] + ':' + \
dialogue_['dialog_info'][content_idx_ - 1]['text'] + ';'
else:
forward_text = ''
if contents_['sender'] == '医生':
if content_idx_ + 1 >= len(dialogue_['dialog_info']):
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_][
'sender'] + ':' + _text
else:
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_][
'sender'] + ':' + _text + ';'
temp_index = copy.deepcopy(content_idx_) + 1
speaker_flag = False
sen_counter = 0
while True:
if dialogue_['dialog_info'][temp_index]['sender'] == '患者':
sen_counter += 1
speaker_flag = True
entity_['text_a'] += dialogue_['dialog_info'][temp_index]['sender'] + ':' + \
dialogue_['dialog_info'][temp_index]['text'] + ';'
if sen_counter > 3:
break
temp_index += 1
if temp_index >= len(dialogue_['dialog_info']):
break
elif contents_['sender'] == '患者':
if content_idx_ + 1 >= len(dialogue_['dialog_info']):
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_][
'sender'] + ':' + _text
else:
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_][
'sender'] + ':' + _text + ';'
temp_index = copy.deepcopy(content_idx_) + 1
speaker_flag = False
sen_counter = 0
while True:
sen_counter += 1
speaker_flag = True
entity_['text_a'] += dialogue_['dialog_info'][temp_index]['sender'] + ':' + \
dialogue_['dialog_info'][temp_index]['text'] + ';'
if sen_counter > 3:
break
temp_index += 1
if temp_index >= len(dialogue_['dialog_info']):
break
else:
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_][
'sender'] + ':' + _text
if term_['name'] == 'undefined':
add_text = '|没有标准化'
else:
add_text = '|标准化为' + term_['name']
entity_['text_b'] = term_['mention'] + add_text
entity_['start_idx'] = term_['range'][0]
entity_['end_idx'] = term_['range'][1] - 1
entity_['label'] = term_['attr']
idx_ += 1
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx][
'attr'] = tm_predictor_instance.predict_one_sample([entity_['text_a'], entity_['text_b']])
submit_result.append(dialogue_)
with open('../CHIP-MDCFNPC_test.jsonl', 'w', encoding="utf-8") as output_data:
for json_content in submit_result:
output_data.write(json.dumps(json_content, ensure_ascii=False) + '\n')
| 2.0625 | 2 |
example-data/ecommerce-simple/scripts/genData.py | Alicloud-Academy/big-data | 2 | 12757730 | <reponame>Alicloud-Academy/big-data
#
# ~ Fake Retail Data Generator ~
# This script generates fake item, user, and order data for demonstration purposes
#
# Author: <NAME>
# Created: 2020-11-04
# Updated: 2021-03-15
#
import names, random
userNum = 1000 # Generate 1000 users
orderNum = 10000 # Generate 10000 orders
productNum = 100 # Generate 100 products
#
# Generate users
#
# Pull in list of country names and country codes, which we will
# select from at random when creating new users
f = open('countries.csv', 'r')
countries = [x.split(',') for x in f.read().split('\n')]
f.close()
# Open CSV file to store user data
f = open('users.csv','w')
f.write('user_id,name,age,sex,country,country_code\n') # Header row for CSV file
for i in range(0,userNum):
    # Generate user ID (sequential, from 0 to N)
    user_id = str(i)
    # Randomly select user's age
    age = random.randint(18,80)
    # Choose user's gender, and generate a name that matches the
    # selected gender
    if random.random() > 0.5:
        sex = 'M'
        name = names.get_full_name(gender='male')
    else:
        sex = 'F'
        name = names.get_full_name(gender='female')
    # Choose random country of origin for user
    # NOTE(review): index starts at 1, presumably to skip a header row in
    # countries.csv -- confirm against the actual file contents
    index = random.randint(1,len(countries)-1)
    location = countries[index][0]
    country_code = countries[index][1]
    # Write user data out to new row in CSV file
    # (fields are not CSV-escaped; a value containing a comma would break the row)
    f.write('{},{},{},{},{},{}\n'.format(user_id, name, age, sex, location, country_code))
f.close()
#
# Generate product data
#
def fakename():
    """Build a fake, IKEA-style product name.

    Six random lowercase letters in the consonant/vowel pattern CVCCVC,
    returned title-cased (first letter capitalized).
    """
    vowels = 'aeiou'
    consonants = 'bcdfghjklmnpqrstvwxyz'
    max_c = len(consonants) - 1
    max_v = len(vowels) - 1

    def pick_consonant():
        return consonants[random.randint(0, max_c)]

    def pick_vowel():
        return vowels[random.randint(0, max_v)]

    # Draw letters in CVCCVC order (same random-call order as before).
    letters = [
        pick_consonant(),
        pick_vowel(),
        pick_consonant(),
        pick_consonant(),
        pick_vowel(),
        pick_consonant(),
    ]
    return ''.join(letters).title()
# Write the product catalog to products.csv. The context manager guarantees
# the file is flushed and closed -- the previous version rebound `f` to
# orders.csv without ever closing the products.csv handle.
with open('products.csv', 'w') as f:
    f.write('product_id,product_name,price\n') # Header row for CSV file
    for i in range(0, productNum):
        # Generate ID, name, and price for each product
        product_id = str(i)
        product_name = fakename()
        price = round(random.random() * 1000, 2)  # random price in [0, 1000), 2 decimals
        # Write product info out to new line in CSV file
        f.write('{},{},{}\n'.format(product_id, product_name, price))
#
# Generate orders
#
with open('orders.csv', 'w') as f:
    f.write('order_id,year,month,product_code,quantity,user_id\n') # Header row for CSV file
    for i in range(0, orderNum):
        # Sequential order ID, 0..N-1.
        order_id = str(i)
        # Random year and month for the order.
        year = str(random.randint(2018,2020))
        month = str(random.randint(1,12))
        # Product code and user ID must stay inside the ranges generated
        # above so that orders always JOIN 1-to-1 with products and users.
        product_code = random.randint(0,productNum - 1)
        quantity = random.randint(1,10)
        user_id = random.randint(0,userNum - 1)
        # Append this order as a new CSV row.
        f.write('{},{},{},{},{},{}\n'.format(order_id, year, month, product_code, quantity, user_id))
| 3.671875 | 4 |
libguides/2_tokenize_and_vectorize.py | dlacy/spark_playground | 0 | 12757731 | from pyspark.ml.feature import HashingTF, IDF, Tokenizer, StopWordsRemover, CountVectorizer, RegexTokenizer, Word2Vec
from pyspark.sql import SparkSession
from pyspark.ml.clustering import LDA
spark = SparkSession.builder.appName("tokenizer").getOrCreate()

# Load the scraped LibGuides pages and drop rows with missing fields.
raw = spark.read.load("data/libguides_txt.parquet")
nullified = raw.na.drop()
guidesDFrame = nullified.select("guide_id", "guide_name", "page_id", "page_name", "words")

# Split the raw page text into word tokens.
tokenizer = Tokenizer(inputCol="words", outputCol="word_tokens")
guidesDFrame = tokenizer.transform(guidesDFrame)

# Remove standard English stop words plus domain-specific noise terms
# (site boilerplate, librarian names, URL fragments).
# https://spark.apache.org/docs/latest/ml-features.html#stopwordsremover
remover = StopWordsRemover(inputCol="word_tokens", outputCol="stop_removed")
my_sw = ["guide", "books", "database", "meta", "results", "https", "login", "updated", "david", "dillard", "use", "guide", "www", "search", "edu", "guides", "eric", "library", "find", "check", "doc", "check", "administration", "want", "ebsco", "http", "r", "f", "google", "com", "less", "tinyurl", "isbn", "call", "number", "date", "c", "paley", "temple", "research"]
sw = remover.loadDefaultStopWords("english")
remover.setStopWords(sw + my_sw)
guidesDFrame = remover.transform(guidesDFrame)

# Build term-count vectors over the cleaned tokens.
cv = CountVectorizer(inputCol="stop_removed", outputCol="CountVectorizer", vocabSize=1000, minDF=1.0, minTF=10.0)
transformer = cv.fit(guidesDFrame)
print(" ----------- ", transformer.vocabulary)
vocabulary = transformer.vocabulary  # fixed: was misspelled 'vacabulary'
guidesDFrame = transformer.transform(guidesDFrame)

# Train an LDA topic model on the count vectors.
lda = LDA(k=10, maxIter=15, featuresCol="CountVectorizer")
model = lda.fit(guidesDFrame)
print("------------")
model.vocabSize()
print("------------")
model.describeTopics(maxTermsPerTopic=20).show()
topics = model.describeTopics(maxTermsPerTopic=20).collect()
print(topics)
# Print the top terms for each discovered topic.
for topic in topics:
    print(topic["topic"])
    for word_id in topic["termIndices"]:
        print(word_id, " - ", vocabulary[word_id])
    print("------------")
guidesDFrame = model.transform(guidesDFrame)
guidesDFrame.show(10)
| 2.71875 | 3 |
tests/test_filter.py | slouchart/pyetllib | 2 | 12757732 | from unittest import TestCase, main as run_tests
from src.pyetllib.etllib import filtertruefalse
class TestFilter(TestCase):
def test_filter_1(self):
data = list(range(5))
_, evens = filtertruefalse(
lambda x: bool(x % 2),
data
)
self.assertListEqual(list(evens), [0, 2, 4])
def test_filter_2(self):
data = [10, 5, 6, 11, 21, 2, 7]
digits, nondigits = filtertruefalse(
lambda x: 0 <= x <= 9,
data
)
self.assertSetEqual(set(digits), {5, 6, 7, 2})
self.assertSetEqual(set(nondigits), {10, 11, 21})
# Allow running the tests directly with `python test_filter.py`.
if __name__ == '__main__':
    run_tests(verbosity=2)
| 3.140625 | 3 |
cnmodel/an_model/wrapper.py | asasmal/cnmodel | 1 | 12757733 | <reponame>asasmal/cnmodel
import os
import numpy as np
from ..util import matlab_proc
_proc = None
def get_matlab():
    """ Return a running MatlabProcess instance.

    The process is created lazily on first call and cached in the module
    global ``_proc``; later calls return the same instance. On first start,
    the MEX files for the auditory periphery model are compiled if the
    compiled ``model_IHC`` function is not already available in MATLAB.
    (Note: this module is Python 2 -- `print` statements.)
    """
    global _proc
    if _proc is None:
        path = os.path.dirname(__file__)
        model_path = os.path.join(path, 'model')
        _proc = matlab_proc.MatlabProcess(cwd=model_path)
        # Try building the model mex files
        if _proc.exist('model_IHC') == 0:
            print "\nCompiling MEX for auditory periphery model..."
            try:
                _proc('mexANmodel;')
            except Exception as err:
                # Surface the MATLAB compiler output before failing.
                print err.output
                print ""
                raise RuntimeError(
                    "An error occurred while compiling the auditory periphery model.\n" +
                    "The complete output is printed above. " +
                    "See cnmodel/an_model/model/readme.txt for more information.")
            print "Done."
    return _proc
def model_ihc(pin, CF, nrep=1, tdres=1e-5, reptime=1, cohc=1, cihc=1, species=1, **kwds):
    """
    Return the output of model_IHC() from the AN model
    (Zilany, Bruce, Ibrahim and Carney, 2014; requires MATLAB)

    This function takes a sound waveform as input and models a single inner
    hair cell (IHC). The return value is the IHC potential in volts.

    Parameters
    ----------
    pin : array
        The input sound wave in Pa sampled at the rate specified by *tdres*
    CF : float
        The characteristic frequency of the IHC in Hz
    nrep : int
        The number of times to repeat the stimulus
    tdres : float
        The binsize in seconds, i.e., the reciprocal of the sampling rate
    reptime : float
        The time between stimulus repetitions in seconds.
        NOTE should be equal to or longer than the duration of pin
    cohc : float
        The OHC scaling factor: 1 is normal OHC function;
        0 is complete OHC dysfunction
    cihc : float
        The IHC scaling factor: 1 is normal IHC function;
        0 is complete IHC dysfunction
    species : int
        The model species: "1" for cat, "2" for human with BM tuning from
        Shera et al. (PNAS 2002), or "3" for human BM tuning from
        Glasberg & Moore (Hear. Res. 1990)
    """
    # make sure pin is a row vector
    pin = pin.reshape(1, pin.size)
    # convert all args to double, as required by model_IHC
    args = [pin]
    for arg in (CF, nrep, tdres, reptime, cohc, cihc, species):
        # MatlabReference values already live in the MATLAB workspace,
        # so they are passed through without conversion.
        if not isinstance(arg, matlab_proc.MatlabReference):
            arg = float(arg)
        args.append(arg)
    # A repetition window shorter than the stimulus would truncate it.
    assert reptime >= pin.size * tdres
    ml = get_matlab()
    fn = ml.model_IHC
    fn.nargout = 1 # necessary because nargout(model_IHC) fails
    return fn(*args, **kwds)
def model_synapse(vihc, CF, nrep=1, tdres=1e-5, fiberType=0, noiseType=1, implnt=1, **kwds):
    """
    Return the output of model_Synapse() from the AN model
    (Zilany, Bruce, Ibrahim and Carney, 2014; requires MATLAB)

    This function takes an IHC voltage waveform as input (see model_ihc) and
    models a synapse and spiral ganglion cell. The return values are:

    * meanrate: The estimated instantaneous mean rate (incl. refractoriness)
    * varrate: The estimated instantaneous variance in the discharge rate (incl. refractoriness)
    * psth: The peri-stimulus time histogram

    Parameters
    ----------
    vihc : array
        IHC voltage as generated by model_ihc()
    CF : float
        The characteristic frequency of the IHC in Hz
    nrep : int
        The number of times to repeat the stimulus
    tdres : float
        The binsize in seconds, i.e., the reciprocal of the sampling rate
    fiberType : int
        The type of the fiber based on spontaneous rate (SR) in spikes/s:
        1 for Low SR; 2 for Medium SR; 3 for High SR
    noiseType : 0 or 1
        Fixed fGn (noise will be same in every simulation) or variable fGn:
        "0" for fixed fGn and "1" for variable fGn
    implnt : 0 or 1
        "Approxiate" or "actual" implementation of the power-law functions:
        "0" for approx. and "1" for actual implementation
    """
    # make sure vihc is a row vector (unless it is a reference to a matlab variable)
    if isinstance(vihc, np.ndarray):
        vihc = vihc.reshape(1, vihc.size)
    # convert all args to double, as required by model_Synapse
    args = [vihc]
    for arg in (CF, nrep, tdres, fiberType, noiseType, implnt):
        # MatlabReference values already live in the MATLAB workspace,
        # so they are passed through without conversion.
        if not isinstance(arg, matlab_proc.MatlabReference):
            arg = float(arg)
        args.append(arg)
    ml = get_matlab()
    fn = ml.model_Synapse
    fn.nargout = 3 # necessary because nargout(model_Synapse) fails
    return fn(*args, **kwds)
def seed_rng(seed):
    """
    Seed the random number generator used by model_ihc and model_synapse.
    """
    command = "RandStream.setGlobalStream(RandStream('mcg16807','seed',%d));" % int(seed)
    # Evaluate the seeding command inside the shared MATLAB session.
    get_matlab()(command)
| 2.390625 | 2 |
example/17k/k.py | Reimenn/requests-magic | 0 | 12757734 | <reponame>Reimenn/requests-magic
""" 爬取 17k 小说网。
这里只爬 《化龙:开局菜花蛇,用我煲蛇羹?》
"""
import os
from typing import List
from lxml import etree
from requests_magic import \
Spider, Scheduler, Item, Saver, logger
from requests_magic.request import \
Request, Response
class KSpider(Spider):
    """Spider for the 17k novel site: walks the chapter list of one book
    and downloads every chapter body."""

    def start(self):
        # Seed request: the chapter index page of the target book.
        yield Request(
            url="https://www.17k.com/list/3375693.html",
            callback=self.parse
        )

    def parse(self, response: Response, request: Request):
        # Parse the chapter index and emit one request per chapter link.
        doc = etree.HTML(response.text)
        book_title = doc.xpath(r'//h1[@class="Title"]/text()')[0]
        for anchor in doc.xpath(r'//dl[@class="Volume"]//dd//a'):
            link: str = anchor.xpath('@href')[0].strip()
            chapter_name: str = anchor.xpath('./span/text()')[0].strip()
            yield Request(
                url=f"https://www.17k.com/{link}",
                callback=self.parse_content,
                tags={
                    'book': book_title,
                    'chapter': chapter_name
                }
            )

    def parse_content(self, response: Response, request: Request):
        # Collect every paragraph of the chapter body, joined by blank lines.
        doc = etree.HTML(response.text)
        paragraphs = [
            node.xpath('text()')[0]
            for node in doc.xpath('//div[@class="readAreaBox content"]/div[@class="p"]/p')
        ]
        return Item(
            data={
                'text': '\n\n'.join(paragraphs)
            },
            tags=request.tags.copy()
        )
class KSaver(Saver):
    """Saver that writes each chapter Item to '<book>/<chapter>.txt'."""

    def save(self, item: 'Item'):
        """Persist one chapter as a UTF-8 text file under the book's directory.

        Fixes over the previous version: the local ``dir`` shadowed the
        builtin, and the exists-check/makedirs pair was racy -- makedirs
        with ``exist_ok=True`` is atomic with respect to that race.
        """
        out_dir = f'./{item.tags["book"]}/'
        os.makedirs(out_dir, exist_ok=True)
        path = os.path.join(out_dir, f'{item.tags["chapter"]}.txt')
        with open(path, 'w', encoding='utf-8') as f:
            f.write(item['text'])
Scheduler(KSpider, KSaver).start()
| 2.84375 | 3 |
tests/tensorflow/quantization/test_overflow_issue.py | vshampor/nncf | 1 | 12757735 | """
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
import numpy as np
import tensorflow as tf
from nncf.tensorflow.layers.wrapper import NNCFWrapper
from nncf.tensorflow.layers.custom_objects import NNCF_QUANTIZATION_OPERATIONS
from nncf.tensorflow.quantization.quantizers import TFQuantizerSpec
from nncf.tensorflow.quantization.quantizers import QuantizerConfig
from nncf.tensorflow.quantization.quantizers import Quantizer
from nncf.tensorflow.quantization.utils import apply_overflow_fix_to_layer
from nncf.common.quantization.structs import QuantizationMode
DIM_SPLIT = 1000
EPS = 1e-6
def check_quantized_values_equals(y_train, y_val, eps, range_len, narrow_range):
    """Assert that two quantized tensors agree elementwise within *eps*.

    When they differ, distinguish an off-by-one-quant shift (a value sitting
    near a quant midpoint that rounded the other way) from a genuine mismatch:
    a mismatch that is not exactly one quant wide fails with
    'Quants are completely different'; otherwise the neighbor-quant message
    is raised.
    """
    deviation = np.abs(y_val - y_train)
    if np.max(deviation) <= eps:
        return
    mismatched = deviation[deviation > eps]
    levels = 128 - (2 if narrow_range else 1)
    quant_len = range_len / levels
    assert (np.abs(mismatched - quant_len) < eps).all(), 'Quants are completely different'
    assert False, 'Some values moved to the neighbor quant, possibly due to this values gets in ' \
                  'really close to the middle of the quant. ' \
                  f'Position of values: {np.where(deviation > eps)[0].tolist()}'
@pytest.mark.parametrize('bits,low,range_,narrow_range,ref',
                         [(7, -1, 2, False, -128 / 127),
                          (7, -2, 2, True, -2)], ids=['full_range', 'narrow_range'])
def test_min_adj(bits, low, range_, narrow_range, ref):
    """Quantizer._min_adj should return the adjusted range minimum for the
    7-bit (half-range) symmetric cases above."""
    res = Quantizer._min_adj(bits, low, range_, narrow_range).numpy() # pylint: disable=protected-access
    assert abs(res - ref) < EPS
def get_weights_for_overflow_issue_test(low, range_len, narrow_range, init_w_as_middle_points):
    """Build a (1, DIM_SPLIT) float32 weight tensor for the overflow-fix tests.

    If *init_w_as_middle_points* is set, weights are placed on the midpoints
    of the 127 int7 quantization bins (the worst case for rounding when
    switching from half-range int7 to full int8); otherwise they are spread
    uniformly over [low - 0.5, low + range_len + 0.5].
    """
    if init_w_as_middle_points:
        quant_len = range_len / (128 - (2 if narrow_range else 1))
        if low > EPS:
            # Range greater than zero
            mid_points = [(i + 1 / 2) * quant_len for i in range(127)]
        elif low + range_len < EPS:
            # Range lower than zero
            mid_points = [-(i + 1 / 2) * quant_len for i in range(127)]
        else:
            # Range with zero
            min_adj = Quantizer._min_adj(7, low, range_len, narrow_range).numpy() # pylint: disable=protected-access
            mid_points = [min_adj + (i + 1 / 2) * quant_len for i in range(127)]
        # Tile the 127 midpoints to cover all DIM_SPLIT output channels.
        new_w = mid_points * int(np.round(0.5 + DIM_SPLIT / 128))
        new_w = tf.reshape(tf.constant(new_w[:DIM_SPLIT], dtype=tf.float32), (1, -1))
    else:
        new_w = tf.reshape(tf.constant(
            np.linspace(low - 0.5, low + range_len + 0.5, DIM_SPLIT),
            dtype=tf.float32), (1, -1))
    return new_w
@pytest.mark.parametrize('per_ch', [False, True], ids=['per_tensor', 'per_channel'])
@pytest.mark.parametrize('init_w_as_middle_points', [False, True], ids=['', 'middle_points'])
@pytest.mark.parametrize('narrow_range', [False, True], ids=['full_range', 'narrow_range'])
class TestQuantizedWeightsEqualAfterFixApplied:
    """Checks that weights quantized in half-range (int7) mode stay on the
    same quantization levels after apply_overflow_fix_to_layer switches the
    quantizer to full int8 range."""
    @pytest.mark.parametrize('signedness_to_force', [True, False], ids=['signed', 'unsigned'])
    def test_symmetric_quantized_weights_equal_after_fix_applied(self, per_ch, signedness_to_force,
                                                                 init_w_as_middle_points, narrow_range):
        """Symmetric quantizer: int7 vs int8 quantized kernels must match."""
        qconfig = QuantizerConfig(
            num_bits=8,
            mode=QuantizationMode.SYMMETRIC,
            signedness_to_force=signedness_to_force,
            per_channel=per_ch)
        qspec = TFQuantizerSpec.from_config(
            qconfig,
            narrow_range=narrow_range,
            half_range=True)
        op_name = 'quantizer'
        weight_attr = 'kernel'
        # Wrap a Dense layer and attach the weight quantizer under test.
        layer = tf.keras.layers.Dense(DIM_SPLIT)
        layer = NNCFWrapper(layer)
        quantizer_cls = NNCF_QUANTIZATION_OPERATIONS.get(qspec.mode)
        quantizer = quantizer_cls(op_name, qspec)
        layer.registry_weight_operation(weight_attr, quantizer)
        layer.build(1)
        # Set layer weights
        ref_signed_var = -1 if signedness_to_force else 0
        ref_scale = 1
        low = ref_scale * ref_signed_var
        range_len = (1 - ref_signed_var) * ref_scale
        new_w = get_weights_for_overflow_issue_test(low, range_len, narrow_range, init_w_as_middle_points)
        layer.get_layer_weight(weight_attr).assign(new_w)
        # Check quantizer weights
        ops_weights = layer.ops_weights[op_name]
        assert (ops_weights['scale_var'].numpy() == ref_scale).all()
        assert (ops_weights['signed_var'].numpy() == ref_signed_var).all()
        # Forward pass through a 1x1 input yields the quantized kernel (int7 regime).
        w_int7 = layer(tf.ones((1, 1))).numpy()
        if init_w_as_middle_points:
            quant_len = range_len / (128 - (2 if narrow_range else 1))
            assert (np.abs(np.abs(w_int7 - new_w) - quant_len / 2) < 1e-6).all(), 'Middle points calculated incorrectly'
        apply_overflow_fix_to_layer(layer, 'kernel', quantizer)
        assert not quantizer._half_range # pylint: disable=protected-access
        w_int8 = layer(tf.ones((1, 1))).numpy()
        check_quantized_values_equals(w_int7, w_int8, EPS, range_len, narrow_range)
    @pytest.mark.parametrize('low,range_len', [(-1, 2), (-5, 4), (3, 2)],
                             ids=['zero_in_range', 'max_less_than_zero', 'low_greater_than_zero'])
    def test_asymmetric_quantized_weights_equal_after_fix_applied(self, low, range_len, per_ch,
                                                                  init_w_as_middle_points, narrow_range):
        """Asymmetric quantizer: int7 vs int8 quantized kernels must match."""
        qconfig = QuantizerConfig(
            num_bits=8,
            mode=QuantizationMode.ASYMMETRIC,
            per_channel=per_ch)
        qspec = TFQuantizerSpec.from_config(
            qconfig,
            narrow_range=narrow_range,
            half_range=True)
        op_name = 'quantizer'
        weight_attr = 'kernel'
        # Wrap a Dense layer and attach the weight quantizer under test.
        layer = tf.keras.layers.Dense(DIM_SPLIT)
        layer = NNCFWrapper(layer)
        quantizer_cls = NNCF_QUANTIZATION_OPERATIONS.get(qspec.mode)
        quantizer = quantizer_cls(op_name, qspec)
        layer.registry_weight_operation(weight_attr, quantizer)
        layer.build(1)
        # Set layer weights
        new_w = get_weights_for_overflow_issue_test(low, range_len, narrow_range, init_w_as_middle_points)
        layer.get_layer_weight(weight_attr).assign(new_w)
        # Set quantizer weights
        if per_ch:
            # Broadcast the scalar range parameters across all channels.
            low = tf.repeat(tf.constant([low], dtype=tf.float32), repeats=[DIM_SPLIT])
            range_len = tf.repeat(tf.constant([range_len], dtype=tf.float32), repeats=[DIM_SPLIT])
        ops_weights = layer.ops_weights[op_name]
        ops_weights['input_low_var'].assign(low)
        ops_weights['input_range_var'].assign(range_len)
        # Forward pass through a 1x1 input yields the quantized kernel (int7 regime).
        w_int7 = layer(tf.ones((1, 1))).numpy()
        if init_w_as_middle_points:
            quant_len = range_len / (128 - (2 if narrow_range else 1))
            assert (np.abs(np.abs(w_int7 - new_w) - quant_len / 2) < EPS).all(), 'Middle points calculated incorrectly'
        apply_overflow_fix_to_layer(layer, 'kernel', quantizer)
        assert not quantizer._half_range # pylint: disable=protected-access
        w_int8 = layer(tf.ones((1, 1))).numpy()
        check_quantized_values_equals(w_int7, w_int8, EPS, range_len, narrow_range)
| 2.0625 | 2 |
src/_mappers/exceptions.py | ditansu/mappers | 0 | 12757736 | <reponame>ditansu/mappers<gh_stars>0
class MapperError(Exception):
    """Raised when a mapper's configuration is broken."""
| 1.578125 | 2 |
example_main.py | tdj-lomf/cpie | 0 | 12757737 | <filename>example_main.py
from cpie import CPie
def sphere(x):
    """Return the sum of squared components of *x* (the sphere benchmark)."""
    return sum(map(lambda component: component * component, x))
def objective_func(x):
    """Multimodal test objective: the lower envelope of three shifted sphere bowls."""
    candidates = (
        sphere(x - 2) + 0.1,
        10 * sphere(x + 2),
        sphere(x + 4) + 0.05,
    )
    return min(candidates)
if __name__ == "__main__":
dimension = 2
bounds_min = [-10.0] * dimension
bounds_max = [ 10.0] * dimension
cpie = CPie(bounds_min, bounds_max, Ns=7*dimension)
for i in range(2000):
solution = cpie.sample()
f_value = objective_func(solution)
cpie.update(f_value)
cpie.print()
print("global best x", cpie.best.x)
print("global best f", cpie.best.f)
bests = cpie.get_bests()
for i, b in enumerate(bests):
print("mode", i, " f", b.f)
| 2.859375 | 3 |
src/ks33requests/schemas/s3_api.py | tanbro/ks33requests | 1 | 12757738 | <filename>src/ks33requests/schemas/s3_api.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Mon Jun 10 11:49:52 2019 by generateDS.py version 2.32.0.
# Python 3.6.7 (default, Oct 22 2018, 11:32:17) [GCC 8.2.0]
#
# Command line options:
# ('-f', '')
# ('-o', 's3_api.py')
# ('-s', 's3_sub.py')
# ('--super', 's3_api')
#
# Command line arguments:
# schemas/AmazonS3.xsd
#
# Command line:
# generateDS.py -f -o "s3_api.py" -s "s3_sub.py" --super="s3_api" schemas/AmazonS3.xsd
#
# Current working directory (os.getcwd()):
# ks33requests
#
import base64
import datetime as datetime_
import decimal as decimal_
import os
import re as re_
import sys
import warnings as warnings_
try:
from lxml import etree as etree_
except ImportError:
from xml.etree import ElementTree as etree_
Validate_simpletypes_ = True
if sys.version_info.major == 2:
BaseStrType_ = basestring
else:
BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
    """Parse *infile* (a path, PathLike, or file object) into an ElementTree."""
    if parser is None:
        # Prefer the lxml ElementTree-compatible parser (which, e.g.,
        # ignores comments); fall back to xml.etree when unavailable.
        try:
            parser = etree_.ETCompatXMLParser()
        except AttributeError:
            parser = etree_.XMLParser()
    try:
        # Normalize PathLike objects to a plain path string.
        if isinstance(infile, os.PathLike):
            infile = os.path.join(infile)
    except AttributeError:
        pass
    return etree_.parse(infile, parser=parser, **kwargs)
def parsexmlstring_(instring, parser=None, **kwargs):
    """Parse an XML document given as a string and return its root element."""
    if parser is None:
        # Prefer the lxml ElementTree-compatible parser (which, e.g.,
        # ignores comments); fall back to xml.etree when unavailable.
        try:
            parser = etree_.ETCompatXMLParser()
        except AttributeError:
            parser = etree_.XMLParser()
    return etree_.fromstring(instring, parser=parser, **kwargs)
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for an example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
# Additionally, the generatedsnamespaces module can contain a python
# dictionary named GenerateDSNamespaceTypePrefixes that associates element
# types with the namespace prefixes that are to be added to the
# "xsi:type" attribute value. See the exportAttributes method of
# any generated element type and the generation of "xsi:type" for an
# example of the use of this table.
# An example table:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceTypePrefixes = {
# "ElementtypeC": "aaa:",
# "ElementtypeD": "bbb:",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ImportError:
GenerateDSNamespaceDefs_ = {}
try:
from generatedsnamespaces import GenerateDSNamespaceTypePrefixes as GenerateDSNamespaceTypePrefixes_
except ImportError:
GenerateDSNamespaceTypePrefixes_ = {}
#
# The super-class for enum types
#
try:
from enum import Enum
except ImportError:
Enum = object
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_parse_string(self, input_data, node=None, input_name=''):
return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_parse_integer(self, input_data, node=None, input_name=''):
try:
ival = int(input_data)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'requires integer: %s' % exp)
return ival
def gds_validate_integer(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return values
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_parse_float(self, input_data, node=None, input_name=''):
try:
fval_ = float(input_data)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'requires float or double: %s' % exp)
return fval_
def gds_validate_float(self, input_data, node=None, input_name=''):
try:
value = float(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return value
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_float_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return values
def gds_format_decimal(self, input_data, input_name=''):
return ('%0.10f' % input_data).rstrip('0')
def gds_parse_decimal(self, input_data, node=None, input_name=''):
try:
decimal_.Decimal(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires decimal value')
return input_data
def gds_validate_decimal(self, input_data, node=None, input_name=''):
try:
value = decimal_.Decimal(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires decimal value')
return value
def gds_format_decimal_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_decimal_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
decimal_.Decimal(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of decimal values')
return values
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_parse_double(self, input_data, node=None, input_name=''):
try:
fval_ = float(input_data)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'requires float or double: %s' % exp)
return fval_
def gds_validate_double(self, input_data, node=None, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_double_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return values
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_parse_boolean(self, input_data, node=None, input_name=''):
if input_data in ('true', '1'):
bval = True
elif input_data in ('false', '0'):
bval = False
else:
raise_parse_error(node, 'requires boolean')
return bval
def gds_validate_boolean(self, input_data, node=None, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0',):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (
time_parts[0], "{}".format(micro_seconds).rjust(6, "0"),)
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(
hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
# pat is a list of lists of strings/patterns.
# The target value must match at least one of the patterns
# in order for the test to succeed.
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
mo = re_.search(patterns2, target)
if mo is not None and len(mo.group(0)) == len(target):
found2 = True
break
if not found2:
found1 = False
break
return found1
    @classmethod
    def gds_parse_time(cls, input_data):
        """Parse an XSD ``xs:time`` string (with optional fractional
        seconds and 'Z'/'+HH:MM'/'-HH:MM' suffix) into a ``datetime.time``.

        NOTE(review): like gds_parse_date, the tzinfo is attached to the
        intermediate datetime but ``.time()`` drops it — confirm callers
        do not rely on an aware time being returned.
        """
        tz = None
        if input_data[-1] == 'Z':
            tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
            input_data = input_data[:-1]
        else:
            results = GeneratedsSuper.tzoff_pattern.search(input_data)
            if results is not None:
                tzoff_parts = results.group(2).split(':')
                # offset expressed in whole minutes east of UTC
                tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                if results.group(1) == '-':
                    tzoff *= -1
                tz = GeneratedsSuper._FixedOffsetTZ(
                    tzoff, results.group(0))
                # strip the six-character offset suffix (e.g. '+05:30')
                input_data = input_data[:-6]
        # pick the strptime format based on presence of fractional seconds
        if len(input_data.split('.')) > 1:
            dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
        else:
            dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
        dt = dt.replace(tzinfo=tz)
        return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
    def get_path_(self, node):
        """Return the slash-separated tag path from the document root down
        to *node*, with namespace prefixes stripped."""
        path_list = []
        self.get_path_list_(node, path_list)
        # the list is built leaf-first, so reverse before joining
        path_list.reverse()
        path = '/'.join(path_list)
        return path
    # Matches a Clark-notation '{namespace-uri}' prefix on a tag name.
    Tag_strip_pattern_ = re_.compile(r'\{.*\}')
    def get_path_list_(self, node, path_list):
        """Recursively collect (namespace-stripped) tag names from *node*
        up to the root into *path_list*.

        NOTE(review): relies on ``node.getparent()``, i.e. lxml elements;
        plain xml.etree elements have no such method — confirm lxml use.
        """
        if node is None:
            return
        tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
        if tag:
            path_list.append(tag)
        self.get_path_list_(node.getparent(), path_list)
    def get_class_obj_(self, node, default_class=None):
        """Resolve the binding class to instantiate for *node*.

        Honors an ``xsi:type`` attribute when the element declares an
        ``xsi`` namespace prefix, looking the (unprefixed) type name up
        in this module's globals; falls back to *default_class*.
        """
        class_obj1 = default_class
        if 'xsi' in node.nsmap:
            classname = node.get('{%s}type' % node.nsmap['xsi'])
            if classname is not None:
                names = classname.split(':')
                if len(names) == 2:
                    # drop the 'prefix:' part of a qualified type name
                    classname = names[1]
                class_obj2 = globals().get(classname)
                if class_obj2 is not None:
                    class_obj1 = class_obj2
        return class_obj1
    def gds_build_any(self, node, type_name=None):
        """Serialize *node* (content of an xs:any wildcard) back to its
        XML text, unchanged."""
        content = etree_.tostring(node, encoding="unicode")
        return content
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.items()))
    @staticmethod
    def gds_encode(instring):
        """On Python 2 encode *instring* to bytes using the module-level
        ExternalEncoding (utf-8 when unset); on Python 3 return it as-is."""
        if sys.version_info.major == 2:
            if ExternalEncoding:
                encoding = ExternalEncoding
            else:
                encoding = 'utf-8'
            return instring.encode(encoding)
        else:
            return instring
    @staticmethod
    def convert_unicode(instring):
        """Coerce *instring* to XML-escaped text: str values are escaped,
        Python 2 unicode values are escaped then utf-8 encoded, and any
        other object is stringified first."""
        if isinstance(instring, str):
            result = quote_xml(instring)
        elif sys.version_info.major == 2 and isinstance(instring, unicode):
            result = quote_xml(instring).encode('utf8')
        else:
            result = GeneratedsSuper.gds_encode(str(instring))
        return result
    def __eq__(self, other):
        """Value equality: same concrete type and identical attribute dict.

        NOTE(review): no matching __hash__ is defined, so instances are
        unhashable on Python 3 — confirm that is acceptable for callers.
        """
        if type(self) != type(other):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # complement of __eq__ (needed explicitly for Python 2)
        return not self.__eq__(other)
    # Django ETL transform hooks.
    # No-op extension points: subclasses may override these to
    # post-process instances during Django database export.
    def gds_djo_etl_transform(self):
        pass
    def gds_djo_etl_transform_db_obj(self, dbobj):
        pass
    # SQLAlchemy ETL transform hooks.
    def gds_sqa_etl_transform(self):
        # default: no transformation performed
        return 0, None
    def gds_sqa_etl_transform_db_obj(self, dbobj):
        pass
def getSubclassFromModule_(module, class_):
    '''Get the subclass of a class from a specific module.

    The subclass is looked up by the naming convention
    ``<ClassName>Sub``; returns None when *module* defines no such name.
    '''
    return getattr(module, class_.__name__ + 'Sub', None)
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
#    ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
# Encoding used by gds_encode() on Python 2; '' means default to utf-8.
ExternalEncoding = ''
# Splits a tag into its optional Clark '{namespace}' part and local name.
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
# Collapses runs of whitespace when cleaning string content.
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
# Captures the namespace URI and local name from a '{uri}name' tag.
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
# Matches CDATA sections so quote_xml() can pass them through verbatim.
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
def showIndent(outfile, level, pretty_print=True):
    """Write *level* units of four-space indentation to *outfile*;
    a no-op when pretty printing is disabled."""
    if pretty_print:
        outfile.write('    ' * level)
def quote_xml(inStr):
    "Escape markup chars, but do not modify CDATA sections."
    if not inStr:
        return ''
    # coerce non-string values to text first
    s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
    s2 = ''
    pos = 0
    # Escape only the text BETWEEN CDATA sections; copy each CDATA
    # section through verbatim.
    matchobjects = CDATA_pattern_.finditer(s1)
    for mo in matchobjects:
        s3 = s1[pos:mo.start()]
        s2 += quote_xml_aux(s3)
        s2 += s1[mo.start():mo.end()]
        pos = mo.end()
    # escape the tail after the last CDATA section (or the whole string)
    s3 = s1[pos:]
    s2 += quote_xml_aux(s3)
    return s2
def quote_xml_aux(inStr):
    """Escape the XML markup characters '&', '<' and '>' in *inStr*
    ('&' first so already-produced entities are not double-escaped)."""
    escaped = inStr.replace('&', '&amp;')
    escaped = escaped.replace('<', '&lt;')
    return escaped.replace('>', '&gt;')
def quote_attrib(inStr):
    """Escape *inStr* for use as an XML attribute value and wrap it in
    quotes: double quotes by default, single quotes when the value
    contains double quotes, and &quot; entities when it contains both."""
    s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
    """Wrap *inStr* in quotes suitable for embedding in generated Python
    source: single quotes when possible, double quotes when the string
    contains single quotes (escaping any double quotes), and the triple
    form of either for multi-line strings."""
    text = inStr
    if "'" in text:
        if '"' in text:
            text = text.replace('"', '\\"')
        quote = '"""' if '\n' in text else '"'
    else:
        quote = "'''" if '\n' in text else "'"
    return '%s%s%s' % (quote, text, quote)
def get_all_text_(node):
    """Concatenate the text content of *node*: its leading text plus the
    tail text of each direct child (i.e. the mixed-content text)."""
    parts = [node.text] if node.text is not None else []
    parts.extend(child.tail for child in node if child.tail is not None)
    return ''.join(parts)
def find_attr_value_(attr_name, node):
    """Look up attribute *attr_name* on *node*.

    A plain name is looked up directly; a 'prefix:name' form is resolved
    through the element's namespace map (lxml ``nsmap``) to the Clark
    notation '{uri}name' key.  Returns None when absent or unresolvable.
    """
    attrs = node.attrib
    parts = attr_name.split(':')
    if len(parts) == 1:
        return attrs.get(attr_name)
    if len(parts) == 2:
        prefix, local = parts
        uri = node.nsmap.get(prefix)
        if uri is not None:
            return attrs.get('{%s}%s' % (uri, local,))
    return None
class GDSParseError(Exception):
    """Raised for structural errors found while building objects from XML."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError with *msg*, annotated with the element tag and
    source line number when *node* is available (lxml sets ``sourceline``)."""
    if node is not None:
        msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline,)
    raise GDSParseError(msg)
class MixedContainer:
    """Holder for one piece of mixed XML content (text, a simple-typed
    value, or a nested complex object), remembering its category and
    content type so it can be re-serialized in order."""
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    TypeBase64 = 8
    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    def getContenttype(self, content_type):
        # NOTE(review): the content_type parameter is unused; the stored
        # value is returned regardless.
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace,
               pretty_print=True):
        """Write this piece of content to *outfile*, dispatching on category."""
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:    # category == MixedContainer.CategoryComplex
            self.value.export(
                outfile, level, namespace, name_=name,
                pretty_print=pretty_print)
    def exportSimple(self, outfile, level, name):
        """Write a simple-typed value as <name>value</name>, formatting
        the value according to its content type."""
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeBase64:
            outfile.write('<%s>%s</%s>' % (
                self.name,
                base64.b64encode(self.value),
                self.name))
    def to_etree(self, element):
        """Attach this piece of content to lxml/ElementTree *element*,
        appending text to the proper text/tail slot to preserve order."""
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                if len(element) > 0:
                    # text after a child element belongs in that child's tail
                    if element[-1].tail is None:
                        element[-1].tail = self.value
                    else:
                        element[-1].tail += self.value
                else:
                    if element.text is None:
                        element.text = self.value
                    else:
                        element.text += self.value
        elif self.category == MixedContainer.CategorySimple:
            subelement = etree_.SubElement(
                element, '%s' % self.name)
            subelement.text = self.to_etree_simple()
        else:    # category == MixedContainer.CategoryComplex
            self.value.to_etree(element)
    def to_etree_simple(self):
        """Render a simple-typed value as text for an etree node.

        NOTE(review): content types TypeNone/TypeText fall through every
        branch, leaving ``text`` unbound (UnboundLocalError) — confirm
        those types never reach this method.
        """
        if self.content_type == MixedContainer.TypeString:
            text = self.value
        elif (self.content_type == MixedContainer.TypeInteger or
                self.content_type == MixedContainer.TypeBoolean):
            text = '%d' % self.value
        elif (self.content_type == MixedContainer.TypeFloat or
                self.content_type == MixedContainer.TypeDecimal):
            text = '%f' % self.value
        elif self.content_type == MixedContainer.TypeDouble:
            text = '%g' % self.value
        elif self.content_type == MixedContainer.TypeBase64:
            text = '%s' % base64.b64encode(self.value)
        return text
    def exportLiteral(self, outfile, level, name):
        """Write this content as a Python literal MixedContainer(...) call."""
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type,
                    self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type,
                    self.name, self.value))
        else:    # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s",\n' % (
                    self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
    """Description of one member (element or attribute) of a generated
    binding class: its name, schema data type(s), container flag,
    optionality, child attributes and choice group."""
    def __init__(self, name='', data_type='', container=0,
                 optional=0, child_attrs=None, choice=None):
        self.name = name
        self.data_type = data_type
        self.container = container
        self.child_attrs = child_attrs
        self.choice = choice
        self.optional = optional
    def set_name(self, name):
        self.name = name
    def get_name(self):
        return self.name
    def set_data_type(self, data_type):
        self.data_type = data_type
    def get_data_type_chain(self):
        # the full (possibly list-valued) type chain, unreduced
        return self.data_type
    def get_data_type(self):
        # When a chain of types is recorded, the most-derived (last)
        # entry wins; an empty chain falls back to xs:string.
        if not isinstance(self.data_type, list):
            return self.data_type
        return self.data_type[-1] if self.data_type else 'xs:string'
    def set_container(self, container):
        self.container = container
    def get_container(self):
        return self.container
    def set_child_attrs(self, child_attrs):
        self.child_attrs = child_attrs
    def get_child_attrs(self):
        return self.child_attrs
    def set_choice(self, choice):
        self.choice = choice
    def get_choice(self):
        return self.choice
    def set_optional(self, optional):
        self.optional = optional
    def get_optional(self):
        return self.optional
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class MetadataDirective(Enum):
    """Allowed values for the MetadataDirective field (copy object
    metadata from the source, or replace it)."""
    COPY = 'COPY'
    REPLACE = 'REPLACE'
class MfaDeleteStatus(Enum):
    """Allowed values for a bucket's MFA-delete status."""
    ENABLED = 'Enabled'
    DISABLED = 'Disabled'
class Payer(Enum):
    """Allowed values for the request-payment Payer field."""
    BUCKET_OWNER = 'BucketOwner'
    REQUESTER = 'Requester'
class Permission(Enum):
    """Allowed permission values used in access control lists."""
    READ = 'READ'
    WRITE = 'WRITE'
    READ_ACP = 'READ_ACP'
    WRITE_ACP = 'WRITE_ACP'
    FULL_CONTROL = 'FULL_CONTROL'
class StorageClass(Enum):
    """Allowed storage-class values for stored objects."""
    STANDARD = 'STANDARD'
    REDUCED_REDUNDANCY = 'REDUCED_REDUNDANCY'
    GLACIER = 'GLACIER'
    UNKNOWN = 'UNKNOWN'
class VersioningStatus(Enum):
    """Allowed values for a bucket's versioning status."""
    ENABLED = 'Enabled'
    SUSPENDED = 'Suspended'
class CreateBucket(GeneratedsSuper):
    """Generated binding for the S3 ``CreateBucket`` request element
    (bucket name, optional ACL, and request-signing fields)."""
    subclass = None
    superclass = None
    def __init__(self, Bucket=None, AccessControlList=None, AWSAccessKeyId=None, Timestamp=None, Signature=None,
                 **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Bucket = Bucket
        self.AccessControlList = AccessControlList
        self.AWSAccessKeyId = AWSAccessKeyId
        # accept either a pre-parsed datetime or an ISO-8601 string
        if isinstance(Timestamp, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
    def factory(*args_, **kwargs_):
        # Prefer a subclass registered via CurrentSubclassModule_, then
        # the ``subclass`` hook, then this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, CreateBucket)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if CreateBucket.subclass:
            return CreateBucket.subclass(*args_, **kwargs_)
        else:
            return CreateBucket(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Bucket(self):
        return self.Bucket
    def set_Bucket(self, Bucket):
        self.Bucket = Bucket
    def get_AccessControlList(self):
        return self.AccessControlList
    def set_AccessControlList(self, AccessControlList):
        self.AccessControlList = AccessControlList
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def hasContent_(self):
        # True when at least one child element would be emitted on export
        if (
            self.Bucket is not None or
            self.AccessControlList is not None or
            self.AWSAccessKeyId is not None or
            self.Timestamp is not None or
            self.Signature is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='CreateBucket',
               pretty_print=True):
        """Write this object as an XML element to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('CreateBucket')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # re-use the tag name this object was originally parsed from
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='CreateBucket')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='CreateBucket', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # self-closing form for an empty element
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='CreateBucket'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='CreateBucket',
                       fromsubclass_=False, pretty_print=True):
        """Write the child elements (Bucket, AccessControlList, ...)."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Bucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBucket>%s</%sBucket>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
                namespaceprefix_, eol_))
        if self.AccessControlList is not None:
            self.AccessControlList.export(outfile, level, namespaceprefix_, namespacedef_='', name_='AccessControlList',
                                          pretty_print=pretty_print)
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate this object from an etree element; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Dispatch one child element by (namespace-stripped) tag name."""
        if nodeName_ == 'Bucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Bucket')
            value_ = self.gds_validate_string(value_, node, 'Bucket')
            self.Bucket = value_
        elif nodeName_ == 'AccessControlList':
            obj_ = AccessControlList.factory(parent_object_=self)
            obj_.build(child_)
            self.AccessControlList = obj_
            obj_.original_tagname_ = 'AccessControlList'
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
# end class CreateBucket
class MetadataEntry(GeneratedsSuper):
    """Generated binding for a Name/Value metadata pair."""
    subclass = None
    superclass = None
    def __init__(self, Name=None, Value=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Name = Name
        self.Value = Value
    def factory(*args_, **kwargs_):
        # Prefer a subclass registered via CurrentSubclassModule_, then
        # the ``subclass`` hook, then this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, MetadataEntry)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if MetadataEntry.subclass:
            return MetadataEntry.subclass(*args_, **kwargs_)
        else:
            return MetadataEntry(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Name(self):
        return self.Name
    def set_Name(self, Name):
        self.Name = Name
    def get_Value(self):
        return self.Value
    def set_Value(self, Value):
        self.Value = Value
    def hasContent_(self):
        # True when at least one child element would be emitted on export
        if (
            self.Name is not None or
            self.Value is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='MetadataEntry',
               pretty_print=True):
        """Write this object as an XML element to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('MetadataEntry')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # re-use the tag name this object was originally parsed from
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MetadataEntry')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='MetadataEntry', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # self-closing form for an empty element
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MetadataEntry'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='MetadataEntry',
                       fromsubclass_=False, pretty_print=True):
        """Write the Name and Value child elements."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Name is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sName>%s</%sName>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Name), input_name='Name')),
                namespaceprefix_, eol_))
        if self.Value is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sValue>%s</%sValue>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Value), input_name='Value')),
                namespaceprefix_, eol_))
    def build(self, node):
        """Populate this object from an etree element; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Dispatch one child element by (namespace-stripped) tag name."""
        if nodeName_ == 'Name':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Name')
            value_ = self.gds_validate_string(value_, node, 'Name')
            self.Name = value_
        elif nodeName_ == 'Value':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Value')
            value_ = self.gds_validate_string(value_, node, 'Value')
            self.Value = value_
# end class MetadataEntry
class CreateBucketResponse(GeneratedsSuper):
    """Generated binding for the CreateBucket response wrapper, holding
    a single CreateBucketReturn result."""
    subclass = None
    superclass = None
    def __init__(self, CreateBucketReturn=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.CreateBucketReturn = CreateBucketReturn
    def factory(*args_, **kwargs_):
        # Prefer a subclass registered via CurrentSubclassModule_, then
        # the ``subclass`` hook, then this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, CreateBucketResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if CreateBucketResponse.subclass:
            return CreateBucketResponse.subclass(*args_, **kwargs_)
        else:
            return CreateBucketResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_CreateBucketReturn(self):
        return self.CreateBucketReturn
    def set_CreateBucketReturn(self, CreateBucketReturn):
        self.CreateBucketReturn = CreateBucketReturn
    def hasContent_(self):
        # True when the child element would be emitted on export
        if (
            self.CreateBucketReturn is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='CreateBucketResponse',
               pretty_print=True):
        """Write this object as an XML element to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('CreateBucketResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # re-use the tag name this object was originally parsed from
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='CreateBucketResponse')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='CreateBucketResponse',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # self-closing form for an empty element
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='CreateBucketResponse'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='CreateBucketResponse', fromsubclass_=False, pretty_print=True):
        """Write the CreateBucketReturn child element."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.CreateBucketReturn is not None:
            self.CreateBucketReturn.export(outfile, level, namespaceprefix_, namespacedef_='',
                                           name_='CreateBucketReturn', pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an etree element; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Build the CreateBucketReturn child (a CreateBucketResult)."""
        if nodeName_ == 'CreateBucketReturn':
            obj_ = CreateBucketResult.factory(parent_object_=self)
            obj_.build(child_)
            self.CreateBucketReturn = obj_
            obj_.original_tagname_ = 'CreateBucketReturn'
# end class CreateBucketResponse
class Status(GeneratedsSuper):
    """Generated binding for a Status element: an integer Code plus a
    textual Description."""
    subclass = None
    superclass = None
    def __init__(self, Code=None, Description=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Code = Code
        self.Description = Description
    def factory(*args_, **kwargs_):
        # Prefer a subclass registered via CurrentSubclassModule_, then
        # the ``subclass`` hook, then this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, Status)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if Status.subclass:
            return Status.subclass(*args_, **kwargs_)
        else:
            return Status(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Code(self):
        return self.Code
    def set_Code(self, Code):
        self.Code = Code
    def get_Description(self):
        return self.Description
    def set_Description(self, Description):
        self.Description = Description
    def hasContent_(self):
        # True when at least one child element would be emitted on export
        if (
            self.Code is not None or
            self.Description is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='Status', pretty_print=True):
        """Write this object as an XML element to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('Status')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # re-use the tag name this object was originally parsed from
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Status')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='Status', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # self-closing form for an empty element
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Status'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='Status',
                       fromsubclass_=False, pretty_print=True):
        """Write the Code (integer) and Description (string) children."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Code is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCode>%s</%sCode>%s' % (
                namespaceprefix_, self.gds_format_integer(self.Code, input_name='Code'), namespaceprefix_, eol_))
        if self.Description is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sDescription>%s</%sDescription>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Description), input_name='Description')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate this object from an etree element; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Dispatch one child element by (namespace-stripped) tag name."""
        # Code is only parsed when non-empty text is present
        if nodeName_ == 'Code' and child_.text:
            sval_ = child_.text
            ival_ = self.gds_parse_integer(sval_, node, 'Code')
            ival_ = self.gds_validate_integer(ival_, node, 'Code')
            self.Code = ival_
        elif nodeName_ == 'Description':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Description')
            value_ = self.gds_validate_string(value_, node, 'Description')
            self.Description = value_
# end class Status
class Result(GeneratedsSuper):
    """Generated binding for the abstract Result type: carries a Status
    child and supports xsi:type-based extension (``extensiontype_``)."""
    subclass = None
    superclass = None
    def __init__(self, Status=None, extensiontype_=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Status = Status
        self.extensiontype_ = extensiontype_
    def factory(*args_, **kwargs_):
        # Prefer a subclass registered via CurrentSubclassModule_, then
        # the ``subclass`` hook, then this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, Result)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if Result.subclass:
            return Result.subclass(*args_, **kwargs_)
        else:
            return Result(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Status(self):
        return self.Status
    def set_Status(self, Status):
        self.Status = Status
    def get_extensiontype_(self):
        return self.extensiontype_
    def set_extensiontype_(self, extensiontype_):
        self.extensiontype_ = extensiontype_
    def hasContent_(self):
        # True when the Status child would be emitted on export
        if (
            self.Status is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='Result', pretty_print=True):
        """Write this object as an XML element to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('Result')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # re-use the tag name this object was originally parsed from
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Result')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='Result', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # self-closing form for an empty element
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Result'):
        # Emit the xsi:type attribute when this object represents a
        # derived type parsed via xsi:type.
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            if ":" not in self.extensiontype_:
                imported_ns_type_prefix_ = GenerateDSNamespaceTypePrefixes_.get(self.extensiontype_, '')
                outfile.write(' xsi:type="%s%s"' % (imported_ns_type_prefix_, self.extensiontype_))
            else:
                outfile.write(' xsi:type="%s"' % self.extensiontype_)
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='Result',
                       fromsubclass_=False, pretty_print=True):
        """Write the Status child element."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Status is not None:
            self.Status.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Status',
                               pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an etree element; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # capture an xsi:type attribute so export can reproduce it
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Build the Status child element."""
        if nodeName_ == 'Status':
            obj_ = Status.factory(parent_object_=self)
            obj_.build(child_)
            self.Status = obj_
            obj_.original_tagname_ = 'Status'
# end class Result
class CreateBucketResult(GeneratedsSuper):
    """Generated binding for a CreateBucket result: the created bucket's
    name."""
    subclass = None
    superclass = None
    def __init__(self, BucketName=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.BucketName = BucketName
    def factory(*args_, **kwargs_):
        # Prefer a subclass registered via CurrentSubclassModule_, then
        # the ``subclass`` hook, then this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, CreateBucketResult)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if CreateBucketResult.subclass:
            return CreateBucketResult.subclass(*args_, **kwargs_)
        else:
            return CreateBucketResult(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_BucketName(self):
        return self.BucketName
    def set_BucketName(self, BucketName):
        self.BucketName = BucketName
    def hasContent_(self):
        # True when the child element would be emitted on export
        if (
            self.BucketName is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='CreateBucketResult',
               pretty_print=True):
        """Write this object as an XML element to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('CreateBucketResult')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # re-use the tag name this object was originally parsed from
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='CreateBucketResult')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='CreateBucketResult',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # self-closing form for an empty element
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='CreateBucketResult'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='CreateBucketResult',
                       fromsubclass_=False, pretty_print=True):
        """Write the BucketName child element."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.BucketName is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBucketName>%s</%sBucketName>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.BucketName), input_name='BucketName')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate this object from an etree element; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Build the BucketName child element."""
        if nodeName_ == 'BucketName':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'BucketName')
            value_ = self.gds_validate_string(value_, node, 'BucketName')
            self.BucketName = value_
# end class CreateBucketResult
class DeleteBucket(GeneratedsSuper):
    """Binding for the S3 ``DeleteBucket`` request element (2006-03-01 schema).

    Carries the bucket name plus the per-request authentication fields:
    AWSAccessKeyId, Timestamp, Signature, and Credential.
    """
    # Hooks letting callers substitute a subclass for all factory-created
    # instances (see factory() below).
    subclass = None
    superclass = None
    def __init__(self, Bucket=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Bucket = Bucket
        self.AWSAccessKeyId = AWSAccessKeyId
        # Accept either a datetime or an ISO 'YYYY-MM-DDTHH:MM:SS' string.
        # NOTE(review): format has no fractional seconds or timezone suffix —
        # confirm callers never pass those.
        if isinstance(Timestamp, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
        self.Credential = Credential
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass, if any, over this class.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, DeleteBucket)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if DeleteBucket.subclass:
            return DeleteBucket.subclass(*args_, **kwargs_)
        else:
            return DeleteBucket(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Bucket(self):
        return self.Bucket
    def set_Bucket(self, Bucket):
        self.Bucket = Bucket
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def get_Credential(self):
        return self.Credential
    def set_Credential(self, Credential):
        self.Credential = Credential
    def hasContent_(self):
        # True when at least one child element would be exported.
        if (
            self.Bucket is not None or
            self.AWSAccessKeyId is not None or
            self.Timestamp is not None or
            self.Signature is not None or
            self.Credential is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='DeleteBucket',
               pretty_print=True):
        """Write this object as XML to *outfile*, indented to *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('DeleteBucket')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # Round-trip the tag name this object was originally parsed from.
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='DeleteBucket')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='DeleteBucket', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='DeleteBucket'):
        pass  # this element carries no XML attributes
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='DeleteBucket',
                       fromsubclass_=False, pretty_print=True):
        """Write the child elements of this object as XML."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Bucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBucket>%s</%sBucket>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
                namespaceprefix_, eol_))
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
        if self.Credential is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCredential>%s</%sCredential>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Credential), input_name='Credential')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate this object from an ElementTree *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace qualifier from the tag before dispatch.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass  # this element carries no XML attributes
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Dispatch one child element into the matching field."""
        if nodeName_ == 'Bucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Bucket')
            value_ = self.gds_validate_string(value_, node, 'Bucket')
            self.Bucket = value_
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
        elif nodeName_ == 'Credential':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Credential')
            value_ = self.gds_validate_string(value_, node, 'Credential')
            self.Credential = value_
# end class DeleteBucket
class DeleteBucketResponse(GeneratedsSuper):
    """Binding for the S3 ``DeleteBucketResponse`` element (2006-03-01 schema).

    Wraps a single nested ``DeleteBucketResponse`` child, which is parsed
    as a Status object (see buildChildren).
    """
    # Hooks letting callers substitute a subclass for all factory-created
    # instances (see factory() below).
    subclass = None
    superclass = None
    def __init__(self, DeleteBucketResponse_member=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        # The child tag shares the class name, hence the _member suffix on
        # the parameter.
        self.DeleteBucketResponse = DeleteBucketResponse_member
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass, if any, over this class.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, DeleteBucketResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if DeleteBucketResponse.subclass:
            return DeleteBucketResponse.subclass(*args_, **kwargs_)
        else:
            return DeleteBucketResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_DeleteBucketResponse(self):
        return self.DeleteBucketResponse
    def set_DeleteBucketResponse(self, DeleteBucketResponse):
        self.DeleteBucketResponse = DeleteBucketResponse
    def hasContent_(self):
        # True when at least one child element would be exported.
        if (
            self.DeleteBucketResponse is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='DeleteBucketResponse',
               pretty_print=True):
        """Write this object as XML to *outfile*, indented to *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('DeleteBucketResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # Round-trip the tag name this object was originally parsed from.
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='DeleteBucketResponse')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='DeleteBucketResponse',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='DeleteBucketResponse'):
        pass  # this element carries no XML attributes
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='DeleteBucketResponse', fromsubclass_=False, pretty_print=True):
        """Write the child elements of this object as XML."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.DeleteBucketResponse is not None:
            # Complex child: delegate to its own export().
            self.DeleteBucketResponse.export(outfile, level, namespaceprefix_, namespacedef_='',
                                             name_='DeleteBucketResponse', pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an ElementTree *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace qualifier from the tag before dispatch.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass  # this element carries no XML attributes
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'DeleteBucketResponse':
            # The nested element has Status structure per the schema.
            obj_ = Status.factory(parent_object_=self)
            obj_.build(child_)
            self.DeleteBucketResponse = obj_
            obj_.original_tagname_ = 'DeleteBucketResponse'
# end class DeleteBucketResponse
class BucketLoggingStatus(GeneratedsSuper):
    """Binding for the S3 ``BucketLoggingStatus`` element (2006-03-01 schema).

    Holds an optional ``LoggingEnabled`` child, parsed as a
    LoggingSettings object. Absence of the child means logging is off.
    """
    # Hooks letting callers substitute a subclass for all factory-created
    # instances (see factory() below).
    subclass = None
    superclass = None
    def __init__(self, LoggingEnabled=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.LoggingEnabled = LoggingEnabled
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass, if any, over this class.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, BucketLoggingStatus)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if BucketLoggingStatus.subclass:
            return BucketLoggingStatus.subclass(*args_, **kwargs_)
        else:
            return BucketLoggingStatus(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_LoggingEnabled(self):
        return self.LoggingEnabled
    def set_LoggingEnabled(self, LoggingEnabled):
        self.LoggingEnabled = LoggingEnabled
    def hasContent_(self):
        # True when at least one child element would be exported.
        if (
            self.LoggingEnabled is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='BucketLoggingStatus',
               pretty_print=True):
        """Write this object as XML to *outfile*, indented to *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('BucketLoggingStatus')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # Round-trip the tag name this object was originally parsed from.
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='BucketLoggingStatus')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='BucketLoggingStatus',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='BucketLoggingStatus'):
        pass  # this element carries no XML attributes
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='BucketLoggingStatus',
                       fromsubclass_=False, pretty_print=True):
        """Write the child elements of this object as XML."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.LoggingEnabled is not None:
            # Complex child: delegate to its own export().
            self.LoggingEnabled.export(outfile, level, namespaceprefix_, namespacedef_='', name_='LoggingEnabled',
                                       pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an ElementTree *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace qualifier from the tag before dispatch.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass  # this element carries no XML attributes
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'LoggingEnabled':
            obj_ = LoggingSettings.factory(parent_object_=self)
            obj_.build(child_)
            self.LoggingEnabled = obj_
            obj_.original_tagname_ = 'LoggingEnabled'
# end class BucketLoggingStatus
class LoggingSettings(GeneratedsSuper):
    """Binding for the S3 ``LoggingSettings`` type (2006-03-01 schema).

    Fields: TargetBucket and TargetPrefix (strings naming where access
    logs are delivered) and TargetGrants (an AccessControlList object).
    """
    # Hooks letting callers substitute a subclass for all factory-created
    # instances (see factory() below).
    subclass = None
    superclass = None
    def __init__(self, TargetBucket=None, TargetPrefix=None, TargetGrants=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.TargetBucket = TargetBucket
        self.TargetPrefix = TargetPrefix
        self.TargetGrants = TargetGrants
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass, if any, over this class.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, LoggingSettings)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if LoggingSettings.subclass:
            return LoggingSettings.subclass(*args_, **kwargs_)
        else:
            return LoggingSettings(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_TargetBucket(self):
        return self.TargetBucket
    def set_TargetBucket(self, TargetBucket):
        self.TargetBucket = TargetBucket
    def get_TargetPrefix(self):
        return self.TargetPrefix
    def set_TargetPrefix(self, TargetPrefix):
        self.TargetPrefix = TargetPrefix
    def get_TargetGrants(self):
        return self.TargetGrants
    def set_TargetGrants(self, TargetGrants):
        self.TargetGrants = TargetGrants
    def hasContent_(self):
        # True when at least one child element would be exported.
        if (
            self.TargetBucket is not None or
            self.TargetPrefix is not None or
            self.TargetGrants is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='LoggingSettings',
               pretty_print=True):
        """Write this object as XML to *outfile*, indented to *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('LoggingSettings')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # Round-trip the tag name this object was originally parsed from.
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='LoggingSettings')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='LoggingSettings',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='LoggingSettings'):
        pass  # this element carries no XML attributes
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='LoggingSettings',
                       fromsubclass_=False, pretty_print=True):
        """Write the child elements of this object as XML."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.TargetBucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTargetBucket>%s</%sTargetBucket>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.TargetBucket), input_name='TargetBucket')), namespaceprefix_,
                eol_))
        if self.TargetPrefix is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTargetPrefix>%s</%sTargetPrefix>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.TargetPrefix), input_name='TargetPrefix')), namespaceprefix_,
                eol_))
        if self.TargetGrants is not None:
            # Complex child: delegate to its own export().
            self.TargetGrants.export(outfile, level, namespaceprefix_, namespacedef_='', name_='TargetGrants',
                                     pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an ElementTree *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace qualifier from the tag before dispatch.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass  # this element carries no XML attributes
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Dispatch one child element into the matching field."""
        if nodeName_ == 'TargetBucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'TargetBucket')
            value_ = self.gds_validate_string(value_, node, 'TargetBucket')
            self.TargetBucket = value_
        elif nodeName_ == 'TargetPrefix':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'TargetPrefix')
            value_ = self.gds_validate_string(value_, node, 'TargetPrefix')
            self.TargetPrefix = value_
        elif nodeName_ == 'TargetGrants':
            obj_ = AccessControlList.factory(parent_object_=self)
            obj_.build(child_)
            self.TargetGrants = obj_
            obj_.original_tagname_ = 'TargetGrants'
# end class LoggingSettings
class GetBucketLoggingStatus(GeneratedsSuper):
    """Binding for the S3 ``GetBucketLoggingStatus`` request element
    (2006-03-01 schema).

    Carries the bucket name plus the per-request authentication fields:
    AWSAccessKeyId, Timestamp, Signature, and Credential.
    """
    # Hooks letting callers substitute a subclass for all factory-created
    # instances (see factory() below).
    subclass = None
    superclass = None
    def __init__(self, Bucket=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Bucket = Bucket
        self.AWSAccessKeyId = AWSAccessKeyId
        # Accept either a datetime or an ISO 'YYYY-MM-DDTHH:MM:SS' string.
        # NOTE(review): format has no fractional seconds or timezone suffix —
        # confirm callers never pass those.
        if isinstance(Timestamp, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
        self.Credential = Credential
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass, if any, over this class.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, GetBucketLoggingStatus)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if GetBucketLoggingStatus.subclass:
            return GetBucketLoggingStatus.subclass(*args_, **kwargs_)
        else:
            return GetBucketLoggingStatus(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Bucket(self):
        return self.Bucket
    def set_Bucket(self, Bucket):
        self.Bucket = Bucket
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def get_Credential(self):
        return self.Credential
    def set_Credential(self, Credential):
        self.Credential = Credential
    def hasContent_(self):
        # True when at least one child element would be exported.
        if (
            self.Bucket is not None or
            self.AWSAccessKeyId is not None or
            self.Timestamp is not None or
            self.Signature is not None or
            self.Credential is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='GetBucketLoggingStatus',
               pretty_print=True):
        """Write this object as XML to *outfile*, indented to *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('GetBucketLoggingStatus')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # Round-trip the tag name this object was originally parsed from.
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GetBucketLoggingStatus')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='GetBucketLoggingStatus',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='GetBucketLoggingStatus'):
        pass  # this element carries no XML attributes
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='GetBucketLoggingStatus', fromsubclass_=False, pretty_print=True):
        """Write the child elements of this object as XML."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Bucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBucket>%s</%sBucket>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
                namespaceprefix_, eol_))
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
        if self.Credential is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCredential>%s</%sCredential>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Credential), input_name='Credential')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate this object from an ElementTree *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace qualifier from the tag before dispatch.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass  # this element carries no XML attributes
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Dispatch one child element into the matching field."""
        if nodeName_ == 'Bucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Bucket')
            value_ = self.gds_validate_string(value_, node, 'Bucket')
            self.Bucket = value_
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
        elif nodeName_ == 'Credential':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Credential')
            value_ = self.gds_validate_string(value_, node, 'Credential')
            self.Credential = value_
# end class GetBucketLoggingStatus
class GetBucketLoggingStatusResponse(GeneratedsSuper):
    """Binding for the S3 ``GetBucketLoggingStatusResponse`` element
    (2006-03-01 schema).

    Wraps a single nested ``GetBucketLoggingStatusResponse`` child, which
    is parsed as a BucketLoggingStatus object (see buildChildren).
    """
    # Hooks letting callers substitute a subclass for all factory-created
    # instances (see factory() below).
    subclass = None
    superclass = None
    def __init__(self, GetBucketLoggingStatusResponse_member=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        # The child tag shares the class name, hence the _member suffix on
        # the parameter.
        self.GetBucketLoggingStatusResponse = GetBucketLoggingStatusResponse_member
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass, if any, over this class.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, GetBucketLoggingStatusResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if GetBucketLoggingStatusResponse.subclass:
            return GetBucketLoggingStatusResponse.subclass(*args_, **kwargs_)
        else:
            return GetBucketLoggingStatusResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_GetBucketLoggingStatusResponse(self):
        return self.GetBucketLoggingStatusResponse
    def set_GetBucketLoggingStatusResponse(self, GetBucketLoggingStatusResponse):
        self.GetBucketLoggingStatusResponse = GetBucketLoggingStatusResponse
    def hasContent_(self):
        # True when at least one child element would be exported.
        if (
            self.GetBucketLoggingStatusResponse is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
               name_='GetBucketLoggingStatusResponse', pretty_print=True):
        """Write this object as XML to *outfile*, indented to *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('GetBucketLoggingStatusResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # Round-trip the tag name this object was originally parsed from.
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_,
                              name_='GetBucketLoggingStatusResponse')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='GetBucketLoggingStatusResponse',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='GetBucketLoggingStatusResponse'):
        pass  # this element carries no XML attributes
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='GetBucketLoggingStatusResponse', fromsubclass_=False, pretty_print=True):
        """Write the child elements of this object as XML."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.GetBucketLoggingStatusResponse is not None:
            # Complex child: delegate to its own export().
            self.GetBucketLoggingStatusResponse.export(outfile, level, namespaceprefix_, namespacedef_='',
                                                       name_='GetBucketLoggingStatusResponse',
                                                       pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an ElementTree *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace qualifier from the tag before dispatch.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass  # this element carries no XML attributes
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'GetBucketLoggingStatusResponse':
            # The nested element has BucketLoggingStatus structure.
            obj_ = BucketLoggingStatus.factory(parent_object_=self)
            obj_.build(child_)
            self.GetBucketLoggingStatusResponse = obj_
            obj_.original_tagname_ = 'GetBucketLoggingStatusResponse'
# end class GetBucketLoggingStatusResponse
class SetBucketLoggingStatus(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Bucket=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None,
BucketLoggingStatus=None, **kwargs_):
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.Bucket = Bucket
self.AWSAccessKeyId = AWSAccessKeyId
if isinstance(Timestamp, BaseStrType_):
initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
else:
initvalue_ = Timestamp
self.Timestamp = initvalue_
self.Signature = Signature
self.Credential = Credential
self.BucketLoggingStatus = BucketLoggingStatus
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SetBucketLoggingStatus)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SetBucketLoggingStatus.subclass:
return SetBucketLoggingStatus.subclass(*args_, **kwargs_)
else:
return SetBucketLoggingStatus(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Bucket(self):
return self.Bucket
def set_Bucket(self, Bucket):
self.Bucket = Bucket
def get_AWSAccessKeyId(self):
return self.AWSAccessKeyId
def set_AWSAccessKeyId(self, AWSAccessKeyId):
self.AWSAccessKeyId = AWSAccessKeyId
def get_Timestamp(self):
return self.Timestamp
def set_Timestamp(self, Timestamp):
self.Timestamp = Timestamp
def get_Signature(self):
return self.Signature
def set_Signature(self, Signature):
self.Signature = Signature
def get_Credential(self):
return self.Credential
def set_Credential(self, Credential):
self.Credential = Credential
def get_BucketLoggingStatus(self):
return self.BucketLoggingStatus
def set_BucketLoggingStatus(self, BucketLoggingStatus):
self.BucketLoggingStatus = BucketLoggingStatus
def hasContent_(self):
if (
self.Bucket is not None or
self.AWSAccessKeyId is not None or
self.Timestamp is not None or
self.Signature is not None or
self.Credential is not None or
self.BucketLoggingStatus is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='',
namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='SetBucketLoggingStatus',
pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SetBucketLoggingStatus')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SetBucketLoggingStatus')
if self.hasContent_():
outfile.write('>%s' % (eol_,))
self.exportChildren(outfile, level + 1, '', namespacedef_, name_='SetBucketLoggingStatus',
pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_,))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SetBucketLoggingStatus'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='',
namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
name_='SetBucketLoggingStatus', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Bucket is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sBucket>%s</%sBucket>%s' % (
namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
namespaceprefix_, eol_))
if self.AWSAccessKeyId is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
eol_))
if self.Timestamp is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
if self.Signature is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
if self.Credential is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sCredential>%s</%sCredential>%s' % (namespaceprefix_, self.gds_encode(
self.gds_format_string(quote_xml(self.Credential), input_name='Credential')), namespaceprefix_, eol_))
if self.BucketLoggingStatus is not None:
self.BucketLoggingStatus.export(outfile, level, namespaceprefix_, namespacedef_='',
name_='BucketLoggingStatus', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element of SetBucketLoggingStatus into the
        matching member (strings are parsed+validated; Timestamp becomes a
        datetime; BucketLoggingStatus is built recursively)."""
        if nodeName_ == 'Bucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Bucket')
            value_ = self.gds_validate_string(value_, node, 'Bucket')
            self.Bucket = value_
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
        elif nodeName_ == 'Credential':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Credential')
            value_ = self.gds_validate_string(value_, node, 'Credential')
            self.Credential = value_
        elif nodeName_ == 'BucketLoggingStatus':
            obj_ = BucketLoggingStatus.factory(parent_object_=self)
            obj_.build(child_)
            self.BucketLoggingStatus = obj_
            obj_.original_tagname_ = 'BucketLoggingStatus'
# end class SetBucketLoggingStatus
class SetBucketLoggingStatusResponse(GeneratedsSuper):
    """generateDS binding for the S3 ``SetBucketLoggingStatusResponse``
    element. The schema defines no attributes or children, so instances
    always serialize as an empty, self-closing element."""
    subclass = None
    superclass = None
    def __init__(self, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
    def factory(*args_, **kwargs_):
        # Dispatch to a user-registered subclass when one is configured.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, SetBucketLoggingStatusResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if SetBucketLoggingStatusResponse.subclass:
            return SetBucketLoggingStatusResponse.subclass(*args_, **kwargs_)
        else:
            return SetBucketLoggingStatusResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def hasContent_(self):
        # The empty () is falsy, so this always returns False.
        if (
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
               name_='SetBucketLoggingStatusResponse', pretty_print=True):
        """Serialize to ``outfile``; the content branch is unreachable here
        because hasContent_() is always False, so output is self-closing."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('SetBucketLoggingStatusResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_,
                              name_='SetBucketLoggingStatusResponse')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='SetBucketLoggingStatusResponse',
                                pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='SetBucketLoggingStatusResponse'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='SetBucketLoggingStatusResponse', fromsubclass_=False, pretty_print=True):
        pass
    def build(self, node):
        """Populate from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class SetBucketLoggingStatusResponse
class GetObjectAccessControlPolicy(GeneratedsSuper):
    """generateDS binding for the S3 ``GetObjectAccessControlPolicy``
    request element: identifies an object (Bucket/Key) plus the SOAP-style
    authentication fields (AWSAccessKeyId, Timestamp, Signature,
    Credential)."""
    subclass = None
    superclass = None
    def __init__(self, Bucket=None, Key=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None,
                 **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Bucket = Bucket
        self.Key = Key
        self.AWSAccessKeyId = AWSAccessKeyId
        # A string Timestamp is parsed with a fixed format (no fractional
        # seconds / timezone); any other value is stored as-is.
        if isinstance(Timestamp, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
        self.Credential = Credential
    def factory(*args_, **kwargs_):
        # Dispatch to a user-registered subclass when one is configured.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, GetObjectAccessControlPolicy)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if GetObjectAccessControlPolicy.subclass:
            return GetObjectAccessControlPolicy.subclass(*args_, **kwargs_)
        else:
            return GetObjectAccessControlPolicy(*args_, **kwargs_)
    factory = staticmethod(factory)
    # generateDS-style accessors, kept for generated-code API compatibility.
    def get_Bucket(self):
        return self.Bucket
    def set_Bucket(self, Bucket):
        self.Bucket = Bucket
    def get_Key(self):
        return self.Key
    def set_Key(self, Key):
        self.Key = Key
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def get_Credential(self):
        return self.Credential
    def set_Credential(self, Credential):
        self.Credential = Credential
    def hasContent_(self):
        """True when at least one child element would be emitted."""
        if (
            self.Bucket is not None or
            self.Key is not None or
            self.AWSAccessKeyId is not None or
            self.Timestamp is not None or
            self.Signature is not None or
            self.Credential is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
               name_='GetObjectAccessControlPolicy', pretty_print=True):
        """Serialize this element and its children to ``outfile`` as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('GetObjectAccessControlPolicy')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GetObjectAccessControlPolicy')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='GetObjectAccessControlPolicy',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='GetObjectAccessControlPolicy'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='GetObjectAccessControlPolicy', fromsubclass_=False, pretty_print=True):
        """Write each non-None member as an XML-escaped child element."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Bucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBucket>%s</%sBucket>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
                namespaceprefix_, eol_))
        if self.Key is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sKey>%s</%sKey>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Key), input_name='Key')),
                namespaceprefix_, eol_))
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                                                                         eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
        if self.Credential is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCredential>%s</%sCredential>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Credential), input_name='Credential')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element into the matching member."""
        if nodeName_ == 'Bucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Bucket')
            value_ = self.gds_validate_string(value_, node, 'Bucket')
            self.Bucket = value_
        elif nodeName_ == 'Key':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Key')
            value_ = self.gds_validate_string(value_, node, 'Key')
            self.Key = value_
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
        elif nodeName_ == 'Credential':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Credential')
            value_ = self.gds_validate_string(value_, node, 'Credential')
            self.Credential = value_
# end class GetObjectAccessControlPolicy
class GetObjectAccessControlPolicyResponse(GeneratedsSuper):
    """generateDS binding for the response wrapper: holds a single nested
    AccessControlPolicy, stored under the same name as the class."""
    subclass = None
    superclass = None
    def __init__(self, GetObjectAccessControlPolicyResponse_member=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.GetObjectAccessControlPolicyResponse = GetObjectAccessControlPolicyResponse_member
    def factory(*args_, **kwargs_):
        # Dispatch to a user-registered subclass when one is configured.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, GetObjectAccessControlPolicyResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if GetObjectAccessControlPolicyResponse.subclass:
            return GetObjectAccessControlPolicyResponse.subclass(*args_, **kwargs_)
        else:
            return GetObjectAccessControlPolicyResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_GetObjectAccessControlPolicyResponse(self):
        return self.GetObjectAccessControlPolicyResponse
    def set_GetObjectAccessControlPolicyResponse(self, GetObjectAccessControlPolicyResponse):
        self.GetObjectAccessControlPolicyResponse = GetObjectAccessControlPolicyResponse
    def hasContent_(self):
        """True when the nested policy member is present."""
        if (
            self.GetObjectAccessControlPolicyResponse is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
               name_='GetObjectAccessControlPolicyResponse', pretty_print=True):
        """Serialize this element (and the nested policy) to ``outfile``."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('GetObjectAccessControlPolicyResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_,
                              name_='GetObjectAccessControlPolicyResponse')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='GetObjectAccessControlPolicyResponse',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='GetObjectAccessControlPolicyResponse'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='GetObjectAccessControlPolicyResponse', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.GetObjectAccessControlPolicyResponse is not None:
            # Complex member delegates to its own generated export().
            self.GetObjectAccessControlPolicyResponse.export(outfile, level, namespaceprefix_, namespacedef_='',
                                                             name_='GetObjectAccessControlPolicyResponse',
                                                             pretty_print=pretty_print)
    def build(self, node):
        """Populate from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # The nested child is parsed as an AccessControlPolicy instance.
        if nodeName_ == 'GetObjectAccessControlPolicyResponse':
            obj_ = AccessControlPolicy.factory(parent_object_=self)
            obj_.build(child_)
            self.GetObjectAccessControlPolicyResponse = obj_
            obj_.original_tagname_ = 'GetObjectAccessControlPolicyResponse'
# end class GetObjectAccessControlPolicyResponse
class GetBucketAccessControlPolicy(GeneratedsSuper):
    """generateDS binding for the S3 ``GetBucketAccessControlPolicy``
    request element: identifies a bucket plus the SOAP-style authentication
    fields (AWSAccessKeyId, Timestamp, Signature, Credential)."""
    subclass = None
    superclass = None
    def __init__(self, Bucket=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Bucket = Bucket
        self.AWSAccessKeyId = AWSAccessKeyId
        # A string Timestamp is parsed with a fixed format (no fractional
        # seconds / timezone); any other value is stored as-is.
        if isinstance(Timestamp, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
        self.Credential = Credential
    def factory(*args_, **kwargs_):
        # Dispatch to a user-registered subclass when one is configured.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, GetBucketAccessControlPolicy)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if GetBucketAccessControlPolicy.subclass:
            return GetBucketAccessControlPolicy.subclass(*args_, **kwargs_)
        else:
            return GetBucketAccessControlPolicy(*args_, **kwargs_)
    factory = staticmethod(factory)
    # generateDS-style accessors, kept for generated-code API compatibility.
    def get_Bucket(self):
        return self.Bucket
    def set_Bucket(self, Bucket):
        self.Bucket = Bucket
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def get_Credential(self):
        return self.Credential
    def set_Credential(self, Credential):
        self.Credential = Credential
    def hasContent_(self):
        """True when at least one child element would be emitted."""
        if (
            self.Bucket is not None or
            self.AWSAccessKeyId is not None or
            self.Timestamp is not None or
            self.Signature is not None or
            self.Credential is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
               name_='GetBucketAccessControlPolicy', pretty_print=True):
        """Serialize this element and its children to ``outfile`` as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('GetBucketAccessControlPolicy')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GetBucketAccessControlPolicy')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='GetBucketAccessControlPolicy',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='GetBucketAccessControlPolicy'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='GetBucketAccessControlPolicy', fromsubclass_=False, pretty_print=True):
        """Write each non-None member as an XML-escaped child element."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Bucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBucket>%s</%sBucket>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
                namespaceprefix_, eol_))
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                                                                         eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
        if self.Credential is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCredential>%s</%sCredential>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Credential), input_name='Credential')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element into the matching member."""
        if nodeName_ == 'Bucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Bucket')
            value_ = self.gds_validate_string(value_, node, 'Bucket')
            self.Bucket = value_
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
        elif nodeName_ == 'Credential':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Credential')
            value_ = self.gds_validate_string(value_, node, 'Credential')
            self.Credential = value_
# end class GetBucketAccessControlPolicy
class GetBucketAccessControlPolicyResponse(GeneratedsSuper):
    """generateDS binding for the response wrapper: holds a single nested
    AccessControlPolicy, stored under the same name as the class."""
    subclass = None
    superclass = None
    def __init__(self, GetBucketAccessControlPolicyResponse_member=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.GetBucketAccessControlPolicyResponse = GetBucketAccessControlPolicyResponse_member
    def factory(*args_, **kwargs_):
        # Dispatch to a user-registered subclass when one is configured.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, GetBucketAccessControlPolicyResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if GetBucketAccessControlPolicyResponse.subclass:
            return GetBucketAccessControlPolicyResponse.subclass(*args_, **kwargs_)
        else:
            return GetBucketAccessControlPolicyResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_GetBucketAccessControlPolicyResponse(self):
        return self.GetBucketAccessControlPolicyResponse
    def set_GetBucketAccessControlPolicyResponse(self, GetBucketAccessControlPolicyResponse):
        self.GetBucketAccessControlPolicyResponse = GetBucketAccessControlPolicyResponse
    def hasContent_(self):
        """True when the nested policy member is present."""
        if (
            self.GetBucketAccessControlPolicyResponse is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
               name_='GetBucketAccessControlPolicyResponse', pretty_print=True):
        """Serialize this element (and the nested policy) to ``outfile``."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('GetBucketAccessControlPolicyResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_,
                              name_='GetBucketAccessControlPolicyResponse')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='GetBucketAccessControlPolicyResponse',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='GetBucketAccessControlPolicyResponse'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='GetBucketAccessControlPolicyResponse', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.GetBucketAccessControlPolicyResponse is not None:
            # Complex member delegates to its own generated export().
            self.GetBucketAccessControlPolicyResponse.export(outfile, level, namespaceprefix_, namespacedef_='',
                                                             name_='GetBucketAccessControlPolicyResponse',
                                                             pretty_print=pretty_print)
    def build(self, node):
        """Populate from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # The nested child is parsed as an AccessControlPolicy instance.
        if nodeName_ == 'GetBucketAccessControlPolicyResponse':
            obj_ = AccessControlPolicy.factory(parent_object_=self)
            obj_.build(child_)
            self.GetBucketAccessControlPolicyResponse = obj_
            obj_.original_tagname_ = 'GetBucketAccessControlPolicyResponse'
# end class GetBucketAccessControlPolicyResponse
class Grantee(GeneratedsSuper):
    """Abstract generateDS base type for ACL grantees.

    Carries no members of its own; the concrete subtype is tracked via the
    ``xsi:type`` attribute stored in ``extensiontype_`` and is restored on
    parse by buildAttributes().
    """
    subclass = None
    superclass = None
    def __init__(self, extensiontype_=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.extensiontype_ = extensiontype_
    def factory(*args_, **kwargs_):
        # Dispatch to a user-registered subclass when one is configured.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, Grantee)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if Grantee.subclass:
            return Grantee.subclass(*args_, **kwargs_)
        else:
            return Grantee(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_extensiontype_(self):
        return self.extensiontype_
    def set_extensiontype_(self, extensiontype_):
        self.extensiontype_ = extensiontype_
    def hasContent_(self):
        # The empty () is falsy, so a bare Grantee never reports content.
        if (
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='Grantee', pretty_print=True):
        """Serialize to ``outfile``; with hasContent_() always False the
        output is a self-closing element carrying only attributes."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('Grantee')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Grantee')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='Grantee', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Grantee'):
        # Emit xsi:type (with its namespace declaration) so the concrete
        # subtype survives an XML round-trip.
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            if ":" not in self.extensiontype_:
                imported_ns_type_prefix_ = GenerateDSNamespaceTypePrefixes_.get(self.extensiontype_, '')
                outfile.write(' xsi:type="%s%s"' % (imported_ns_type_prefix_, self.extensiontype_))
            else:
                outfile.write(' xsi:type="%s"' % self.extensiontype_)
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='Grantee',
                       fromsubclass_=False, pretty_print=True):
        pass
    def build(self, node):
        """Populate from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # Remember the concrete subtype declared on the element, if any.
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class Grantee
class User(Grantee):
    """Abstract grantee subtype; adds no members of its own and remains
    extensible via ``xsi:type`` (concrete subtypes appear later in this
    file, e.g. AmazonCustomerByEmail / CanonicalUser)."""
    subclass = None
    superclass = Grantee
    def __init__(self, extensiontype_=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        super(User, self).__init__(extensiontype_, **kwargs_)
        self.extensiontype_ = extensiontype_
    def factory(*args_, **kwargs_):
        # Dispatch to a user-registered subclass when one is configured.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, User)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if User.subclass:
            return User.subclass(*args_, **kwargs_)
        else:
            return User(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_extensiontype_(self):
        return self.extensiontype_
    def set_extensiontype_(self, extensiontype_):
        self.extensiontype_ = extensiontype_
    def hasContent_(self):
        # Delegates to Grantee, which contributes no content of its own.
        if (
            super(User, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='User', pretty_print=True):
        """Serialize to ``outfile`` as XML (attributes only unless a
        subclass adds content)."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('User')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='User')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='User', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='User'):
        # Base attributes first, then xsi:type if not already emitted
        # (already_processed guards against writing it twice).
        super(User, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='User')
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            if ":" not in self.extensiontype_:
                imported_ns_type_prefix_ = GenerateDSNamespaceTypePrefixes_.get(self.extensiontype_, '')
                outfile.write(' xsi:type="%s%s"' % (imported_ns_type_prefix_, self.extensiontype_))
            else:
                outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='User',
                       fromsubclass_=False, pretty_print=True):
        super(User, self).exportChildren(outfile, level, namespaceprefix_, namespacedef_, name_, True,
                                         pretty_print=pretty_print)
        pass
    def build(self, node):
        """Populate from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
        super(User, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        super(User, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class User
class AmazonCustomerByEmail(User):
subclass = None
superclass = User
def __init__(self, EmailAddress=None, **kwargs_):
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
super(AmazonCustomerByEmail, self).__init__(**kwargs_)
self.EmailAddress = EmailAddress
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, AmazonCustomerByEmail)
if subclass is not None:
return subclass(*args_, **kwargs_)
if AmazonCustomerByEmail.subclass:
return AmazonCustomerByEmail.subclass(*args_, **kwargs_)
else:
return AmazonCustomerByEmail(*args_, **kwargs_)
factory = staticmethod(factory)
def get_EmailAddress(self):
return self.EmailAddress
def set_EmailAddress(self, EmailAddress):
self.EmailAddress = EmailAddress
def hasContent_(self):
if (
self.EmailAddress is not None or
super(AmazonCustomerByEmail, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='',
namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='AmazonCustomerByEmail',
pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('AmazonCustomerByEmail')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='AmazonCustomerByEmail')
if self.hasContent_():
outfile.write('>%s' % (eol_,))
self.exportChildren(outfile, level + 1, '', namespacedef_, name_='AmazonCustomerByEmail',
pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_,))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='AmazonCustomerByEmail'):
super(AmazonCustomerByEmail, self).exportAttributes(outfile, level, already_processed, namespaceprefix_,
name_='AmazonCustomerByEmail')
def exportChildren(self, outfile, level, namespaceprefix_='',
namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
name_='AmazonCustomerByEmail', fromsubclass_=False, pretty_print=True):
super(AmazonCustomerByEmail, self).exportChildren(outfile, level, namespaceprefix_, namespacedef_, name_, True,
pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.EmailAddress is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sEmailAddress>%s</%sEmailAddress>%s' % (namespaceprefix_, self.gds_encode(
self.gds_format_string(quote_xml(self.EmailAddress), input_name='EmailAddress')), namespaceprefix_,
eol_))
    def build(self, node):
        """Populate this instance from a parsed XML *node*; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace prefix from the tag to get the local name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No attributes of its own; delegate to the superclass.
        super(AmazonCustomerByEmail, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Absorb one child element; unknown names fall through to the superclass."""
        if nodeName_ == 'EmailAddress':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'EmailAddress')
            value_ = self.gds_validate_string(value_, node, 'EmailAddress')
            self.EmailAddress = value_
        super(AmazonCustomerByEmail, self).buildChildren(child_, node, nodeName_, True)
# end class AmazonCustomerByEmail
class CanonicalUser(User):
    """Generated binding for the S3 ``CanonicalUser`` type.

    A ``User`` identified by a canonical ``ID`` plus a human-readable
    ``DisplayName``.
    """
    subclass = None
    superclass = User
    def __init__(self, ID=None, DisplayName=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        super(CanonicalUser, self).__init__(**kwargs_)
        self.ID = ID
        self.DisplayName = DisplayName
    def factory(*args_, **kwargs_):
        # Honor an externally registered subclass module, then the subclass hook.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, CanonicalUser)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if CanonicalUser.subclass:
            return CanonicalUser.subclass(*args_, **kwargs_)
        else:
            return CanonicalUser(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ID(self):
        return self.ID
    def set_ID(self, ID):
        self.ID = ID
    def get_DisplayName(self):
        return self.DisplayName
    def set_DisplayName(self, DisplayName):
        self.DisplayName = DisplayName
    def hasContent_(self):
        # True when any own member or any inherited member is set.
        if (
            self.ID is not None or
            self.DisplayName is not None or
            super(CanonicalUser, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='CanonicalUser',
               pretty_print=True):
        """Serialize this element as XML to *outfile*, indented at *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('CanonicalUser')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='CanonicalUser')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='CanonicalUser', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='CanonicalUser'):
        super(CanonicalUser, self).exportAttributes(outfile, level, already_processed, namespaceprefix_,
                                                    name_='CanonicalUser')
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='CanonicalUser',
                       fromsubclass_=False, pretty_print=True):
        # Inherited children first, then ID and DisplayName.
        super(CanonicalUser, self).exportChildren(outfile, level, namespaceprefix_, namespacedef_, name_, True,
                                                  pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.ID is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sID>%s</%sID>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.ID), input_name='ID')),
                namespaceprefix_, eol_))
        if self.DisplayName is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sDisplayName>%s</%sDisplayName>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.DisplayName), input_name='DisplayName')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate this instance from a parsed XML *node*; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(CanonicalUser, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'ID':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'ID')
            value_ = self.gds_validate_string(value_, node, 'ID')
            self.ID = value_
        elif nodeName_ == 'DisplayName':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'DisplayName')
            value_ = self.gds_validate_string(value_, node, 'DisplayName')
            self.DisplayName = value_
        super(CanonicalUser, self).buildChildren(child_, node, nodeName_, True)
# end class CanonicalUser
class Group(Grantee):
    """Generated binding for the S3 ``Group`` grantee type.

    A ``Grantee`` identified by a group ``URI`` (e.g. a predefined S3 group).
    """
    subclass = None
    superclass = Grantee
    def __init__(self, URI=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        super(Group, self).__init__(**kwargs_)
        self.URI = URI
    def factory(*args_, **kwargs_):
        # Honor an externally registered subclass module, then the subclass hook.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, Group)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if Group.subclass:
            return Group.subclass(*args_, **kwargs_)
        else:
            return Group(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_URI(self):
        return self.URI
    def set_URI(self, URI):
        self.URI = URI
    def hasContent_(self):
        # True when URI or any inherited member is set.
        if (
            self.URI is not None or
            super(Group, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='Group', pretty_print=True):
        """Serialize this element as XML to *outfile*, indented at *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('Group')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Group')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='Group', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Group'):
        super(Group, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Group')
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='Group',
                       fromsubclass_=False, pretty_print=True):
        # Inherited children first, then URI.
        super(Group, self).exportChildren(outfile, level, namespaceprefix_, namespacedef_, name_, True,
                                          pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.URI is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sURI>%s</%sURI>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.URI), input_name='URI')),
                namespaceprefix_, eol_))
    def build(self, node):
        """Populate this instance from a parsed XML *node*; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(Group, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'URI':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'URI')
            value_ = self.gds_validate_string(value_, node, 'URI')
            self.URI = value_
        super(Group, self).buildChildren(child_, node, nodeName_, True)
# end class Group
class Grant(GeneratedsSuper):
    """Generated binding for the S3 ``Grant`` type.

    Pairs a polymorphic ``Grantee`` with a ``Permission``, where the schema
    restricts ``Permission`` to one of READ, WRITE, READ_ACP, WRITE_ACP or
    FULL_CONTROL.
    """
    subclass = None
    superclass = None
    def __init__(self, Grantee=None, Permission=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Grantee = Grantee
        self.Permission = Permission
        self.validate_Permission(self.Permission)
    def factory(*args_, **kwargs_):
        # Honor an externally registered subclass module, then the subclass hook.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, Grant)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if Grant.subclass:
            return Grant.subclass(*args_, **kwargs_)
        else:
            return Grant(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Grantee(self):
        return self.Grantee
    def set_Grantee(self, Grantee):
        self.Grantee = Grantee
    def set_Grantee_with_type(self, value):
        # Record original tag and xsi:type info so export reproduces the subtype.
        self.Grantee = value
        value.original_tagname_ = 'Grantee'
        value.extensiontype_ = value.__class__.__name__
    def get_Permission(self):
        return self.Permission
    def set_Permission(self, Permission):
        self.Permission = Permission
    def validate_Permission(self, value):
        # Validate type Permission, a restriction on xsd:string.
        if value is not None and Validate_simpletypes_:
            value = str(value)
            enumerations = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL']
            # Membership test replaces the generated linear scan.  The former
            # value.encode("utf-8") was dropped: under Python 3 it interpolated
            # a bytes repr (b'...') into the warning text instead of the value.
            if value not in enumerations:
                warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on Permission' % {
                    "value": value})
    def hasContent_(self):
        # True when either member is set.
        if (
            self.Grantee is not None or
            self.Permission is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='Grant', pretty_print=True):
        """Serialize this element as XML to *outfile*, indented at *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('Grant')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Grant')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='Grant', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Grant'):
        # Grant carries no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='Grant',
                       fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Grantee is not None:
            # Grantee is a complex element; it serializes itself.
            self.Grantee.export(outfile, level, namespaceprefix_, namespacedef_='', pretty_print=pretty_print)
        if self.Permission is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sPermission>%s</%sPermission>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Permission), input_name='Permission')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate this instance from a parsed XML *node*; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Grantee':
            # Grantee is polymorphic: resolve the concrete class from the
            # xsi:type attribute (namespaced or plain), then build it.
            type_name_ = child_.attrib.get(
                '{http://www.w3.org/2001/XMLSchema-instance}type')
            if type_name_ is None:
                type_name_ = child_.attrib.get('type')
            if type_name_ is not None:
                type_names_ = type_name_.split(':')
                if len(type_names_) == 1:
                    type_name_ = type_names_[0]
                else:
                    type_name_ = type_names_[1]
                class_ = globals()[type_name_]
                obj_ = class_.factory()
                obj_.build(child_)
            else:
                raise NotImplementedError(
                    'Class not implemented for <Grantee> element')
            self.Grantee = obj_
            obj_.original_tagname_ = 'Grantee'
        elif nodeName_ == 'Permission':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Permission')
            value_ = self.gds_validate_string(value_, node, 'Permission')
            self.Permission = value_
            # validate type Permission
            self.validate_Permission(self.Permission)
# end class Grant
class AccessControlList(GeneratedsSuper):
    """Generated binding for the S3 ``AccessControlList`` type.

    Holds a (possibly empty) list of ``Grant`` elements.
    """
    subclass = None
    superclass = None
    def __init__(self, Grant=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        # Avoid a shared mutable default: each instance gets its own list.
        if Grant is None:
            self.Grant = []
        else:
            self.Grant = Grant
    def factory(*args_, **kwargs_):
        # Honor an externally registered subclass module, then the subclass hook.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, AccessControlList)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if AccessControlList.subclass:
            return AccessControlList.subclass(*args_, **kwargs_)
        else:
            return AccessControlList(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Grant(self):
        return self.Grant
    def set_Grant(self, Grant):
        self.Grant = Grant
    def add_Grant(self, value):
        self.Grant.append(value)
    def insert_Grant_at(self, index, value):
        self.Grant.insert(index, value)
    def replace_Grant_at(self, index, value):
        self.Grant[index] = value
    def hasContent_(self):
        # True when the Grant list is non-empty.
        if (
            self.Grant
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='AccessControlList',
               pretty_print=True):
        """Serialize this element as XML to *outfile*, indented at *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('AccessControlList')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='AccessControlList')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='AccessControlList',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='AccessControlList'):
        # AccessControlList carries no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='AccessControlList',
                       fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Grant_ in self.Grant:
            Grant_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Grant', pretty_print=pretty_print)
    def build(self, node):
        """Populate this instance from a parsed XML *node*; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Grant':
            obj_ = Grant.factory(parent_object_=self)
            obj_.build(child_)
            self.Grant.append(obj_)
            obj_.original_tagname_ = 'Grant'
# end class AccessControlList
class CreateBucketConfiguration(GeneratedsSuper):
    """Generated binding for the S3 ``CreateBucketConfiguration`` type.

    Wraps an optional ``LocationConstraint`` child element.
    """
    subclass = None
    superclass = None
    def __init__(self, LocationConstraint=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.LocationConstraint = LocationConstraint
    def factory(*args_, **kwargs_):
        # Honor an externally registered subclass module, then the subclass hook.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, CreateBucketConfiguration)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if CreateBucketConfiguration.subclass:
            return CreateBucketConfiguration.subclass(*args_, **kwargs_)
        else:
            return CreateBucketConfiguration(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_LocationConstraint(self):
        return self.LocationConstraint
    def set_LocationConstraint(self, LocationConstraint):
        self.LocationConstraint = LocationConstraint
    def hasContent_(self):
        if (
            self.LocationConstraint is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='CreateBucketConfiguration',
               pretty_print=True):
        """Serialize this element as XML to *outfile*, indented at *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('CreateBucketConfiguration')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='CreateBucketConfiguration')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='CreateBucketConfiguration',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='CreateBucketConfiguration'):
        # CreateBucketConfiguration carries no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='CreateBucketConfiguration', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.LocationConstraint is not None:
            self.LocationConstraint.export(outfile, level, namespaceprefix_, namespacedef_='',
                                           name_='LocationConstraint', pretty_print=pretty_print)
    def build(self, node):
        """Populate this instance from a parsed XML *node*; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'LocationConstraint':
            obj_ = LocationConstraint.factory(parent_object_=self)
            obj_.build(child_)
            self.LocationConstraint = obj_
            obj_.original_tagname_ = 'LocationConstraint'
# end class CreateBucketConfiguration
class LocationConstraint(GeneratedsSuper):
    """Generated binding for the S3 ``LocationConstraint`` type.

    A simple-content element: its text value is stored in ``valueOf_`` and
    it has no attributes or child elements.
    """
    subclass = None
    superclass = None
    def __init__(self, valueOf_=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Honor an externally registered subclass module, then the subclass hook.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, LocationConstraint)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if LocationConstraint.subclass:
            return LocationConstraint.subclass(*args_, **kwargs_)
        else:
            return LocationConstraint(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_valueOf_(self):
        return self.valueOf_
    def set_valueOf_(self, valueOf_):
        self.valueOf_ = valueOf_
    def hasContent_(self):
        # Numeric values (including 0) count as content; otherwise truthiness
        # of the stored value decides.
        if (
            (1 if type(self.valueOf_) in [int, float] else self.valueOf_)
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='LocationConstraint',
               pretty_print=True):
        """Serialize this element as XML to *outfile*, indented at *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('LocationConstraint')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='LocationConstraint')
        if self.hasContent_():
            # Simple content: the text value is written inline, with no
            # newline or indentation before the closing tag.
            outfile.write('>')
            outfile.write(self.convert_unicode(self.valueOf_))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='LocationConstraint',
                                pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='LocationConstraint'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='LocationConstraint',
                       fromsubclass_=False, pretty_print=True):
        pass
    def build(self, node):
        """Populate this instance (text content only) from an XML *node*."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class LocationConstraint
class AccessControlPolicy(GeneratedsSuper):
    """Generated binding for the S3 ``AccessControlPolicy`` type.

    Combines an ``Owner`` (a CanonicalUser) with an ``AccessControlList``.
    """
    subclass = None
    superclass = None
    def __init__(self, Owner=None, AccessControlList=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Owner = Owner
        self.AccessControlList = AccessControlList
    def factory(*args_, **kwargs_):
        # Honor an externally registered subclass module, then the subclass hook.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, AccessControlPolicy)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if AccessControlPolicy.subclass:
            return AccessControlPolicy.subclass(*args_, **kwargs_)
        else:
            return AccessControlPolicy(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Owner(self):
        return self.Owner
    def set_Owner(self, Owner):
        self.Owner = Owner
    def get_AccessControlList(self):
        return self.AccessControlList
    def set_AccessControlList(self, AccessControlList):
        self.AccessControlList = AccessControlList
    def hasContent_(self):
        if (
            self.Owner is not None or
            self.AccessControlList is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='AccessControlPolicy',
               pretty_print=True):
        """Serialize this element as XML to *outfile*, indented at *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('AccessControlPolicy')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='AccessControlPolicy')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='AccessControlPolicy',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='AccessControlPolicy'):
        # AccessControlPolicy carries no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='AccessControlPolicy',
                       fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Owner is not None:
            self.Owner.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Owner',
                              pretty_print=pretty_print)
        if self.AccessControlList is not None:
            self.AccessControlList.export(outfile, level, namespaceprefix_, namespacedef_='', name_='AccessControlList',
                                          pretty_print=pretty_print)
    def build(self, node):
        """Populate this instance from a parsed XML *node*; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Owner':
            # Owner is declared as a CanonicalUser in the schema.
            obj_ = CanonicalUser.factory(parent_object_=self)
            obj_.build(child_)
            self.Owner = obj_
            obj_.original_tagname_ = 'Owner'
        elif nodeName_ == 'AccessControlList':
            obj_ = AccessControlList.factory(parent_object_=self)
            obj_.build(child_)
            self.AccessControlList = obj_
            obj_.original_tagname_ = 'AccessControlList'
# end class AccessControlPolicy
class SetObjectAccessControlPolicy(GeneratedsSuper):
    """Generated binding for the S3 ``SetObjectAccessControlPolicy`` request.

    Identifies an object by ``Bucket``/``Key``, carries the new
    ``AccessControlList`` and the request authentication fields
    (``AWSAccessKeyId``, ``Timestamp``, ``Signature``, ``Credential``).
    """
    subclass = None
    superclass = None
    def __init__(self, Bucket=None, Key=None, AccessControlList=None, AWSAccessKeyId=None, Timestamp=None,
                 Signature=None, Credential=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Bucket = Bucket
        self.Key = Key
        self.AccessControlList = AccessControlList
        self.AWSAccessKeyId = AWSAccessKeyId
        # Accept either an ISO-8601 string or a datetime for Timestamp.
        if isinstance(Timestamp, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
        self.Credential = Credential
    def factory(*args_, **kwargs_):
        # Honor an externally registered subclass module, then the subclass hook.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, SetObjectAccessControlPolicy)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if SetObjectAccessControlPolicy.subclass:
            return SetObjectAccessControlPolicy.subclass(*args_, **kwargs_)
        else:
            return SetObjectAccessControlPolicy(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Bucket(self):
        return self.Bucket
    def set_Bucket(self, Bucket):
        self.Bucket = Bucket
    def get_Key(self):
        return self.Key
    def set_Key(self, Key):
        self.Key = Key
    def get_AccessControlList(self):
        return self.AccessControlList
    def set_AccessControlList(self, AccessControlList):
        self.AccessControlList = AccessControlList
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def get_Credential(self):
        return self.Credential
    def set_Credential(self, Credential):
        self.Credential = Credential
    def hasContent_(self):
        # True when any member is set.
        if (
            self.Bucket is not None or
            self.Key is not None or
            self.AccessControlList is not None or
            self.AWSAccessKeyId is not None or
            self.Timestamp is not None or
            self.Signature is not None or
            self.Credential is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
               name_='SetObjectAccessControlPolicy', pretty_print=True):
        """Serialize this element as XML to *outfile*, indented at *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('SetObjectAccessControlPolicy')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SetObjectAccessControlPolicy')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='SetObjectAccessControlPolicy',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='SetObjectAccessControlPolicy'):
        # SetObjectAccessControlPolicy carries no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='SetObjectAccessControlPolicy', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Bucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBucket>%s</%sBucket>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
                namespaceprefix_, eol_))
        if self.Key is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sKey>%s</%sKey>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Key), input_name='Key')),
                namespaceprefix_, eol_))
        if self.AccessControlList is not None:
            self.AccessControlList.export(outfile, level, namespaceprefix_, namespacedef_='', name_='AccessControlList',
                                          pretty_print=pretty_print)
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
        if self.Credential is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCredential>%s</%sCredential>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Credential), input_name='Credential')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate this instance from a parsed XML *node*; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Bucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Bucket')
            value_ = self.gds_validate_string(value_, node, 'Bucket')
            self.Bucket = value_
        elif nodeName_ == 'Key':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Key')
            value_ = self.gds_validate_string(value_, node, 'Key')
            self.Key = value_
        elif nodeName_ == 'AccessControlList':
            obj_ = AccessControlList.factory(parent_object_=self)
            obj_.build(child_)
            self.AccessControlList = obj_
            obj_.original_tagname_ = 'AccessControlList'
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            # Timestamp is stored as a datetime, not a string.
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
        elif nodeName_ == 'Credential':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Credential')
            value_ = self.gds_validate_string(value_, node, 'Credential')
            self.Credential = value_
# end class SetObjectAccessControlPolicy
class SetObjectAccessControlPolicyResponse(GeneratedsSuper):
    """Generated binding for the empty ``SetObjectAccessControlPolicyResponse``
    element.

    The schema defines no attributes and no children, so instances always
    serialize as a self-closing tag.
    """
    subclass = None
    superclass = None
    def __init__(self, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
    def factory(*args_, **kwargs_):
        # Honor an externally registered subclass module, then the subclass hook.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, SetObjectAccessControlPolicyResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if SetObjectAccessControlPolicyResponse.subclass:
            return SetObjectAccessControlPolicyResponse.subclass(*args_, **kwargs_)
        else:
            return SetObjectAccessControlPolicyResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def hasContent_(self):
        # The generator emitted a degenerate "if ():" here -- an always-false
        # empty tuple, since this element has no members.  Return False
        # explicitly; behavior is identical.
        return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
               name_='SetObjectAccessControlPolicyResponse', pretty_print=True):
        """Serialize this element as XML to *outfile*, indented at *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('SetObjectAccessControlPolicyResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_,
                              name_='SetObjectAccessControlPolicyResponse')
        if self.hasContent_():
            # Unreachable while hasContent_() is always False; kept for
            # structural consistency with the other generated classes.
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='SetObjectAccessControlPolicyResponse',
                                pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='SetObjectAccessControlPolicyResponse'):
        # No attributes defined by the schema.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='SetObjectAccessControlPolicyResponse', fromsubclass_=False, pretty_print=True):
        # No children defined by the schema.
        pass
    def build(self, node):
        """Populate this instance from a parsed XML *node*; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class SetObjectAccessControlPolicyResponse
class SetBucketAccessControlPolicy(GeneratedsSuper):
    """Generated binding for the SetBucketAccessControlPolicy request element
    of the S3 2006-03-01 schema: replaces the ACL of a bucket.  Members
    mirror the schema's child elements (Bucket, AccessControlList) plus the
    common AWSAccessKeyId/Timestamp/Signature/Credential auth fields.
    """
    subclass = None
    superclass = None
    def __init__(self, Bucket=None, AccessControlList=None, AWSAccessKeyId=None, Timestamp=None, Signature=None,
                 Credential=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Bucket = Bucket
        self.AccessControlList = AccessControlList
        self.AWSAccessKeyId = AWSAccessKeyId
        # Timestamp accepts either a datetime or an ISO-8601 string.
        if isinstance(Timestamp, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
        self.Credential = Credential
    def factory(*args_, **kwargs_):
        # Prefer an externally-registered subclass module, then the
        # ``subclass`` hook, then this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, SetBucketAccessControlPolicy)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if SetBucketAccessControlPolicy.subclass:
            return SetBucketAccessControlPolicy.subclass(*args_, **kwargs_)
        else:
            return SetBucketAccessControlPolicy(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- generated accessors ---
    def get_Bucket(self):
        return self.Bucket
    def set_Bucket(self, Bucket):
        self.Bucket = Bucket
    def get_AccessControlList(self):
        return self.AccessControlList
    def set_AccessControlList(self, AccessControlList):
        self.AccessControlList = AccessControlList
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def get_Credential(self):
        return self.Credential
    def set_Credential(self, Credential):
        self.Credential = Credential
    def hasContent_(self):
        # True when any child element is set; controls <x/> vs <x>...</x>.
        if (
            self.Bucket is not None or
            self.AccessControlList is not None or
            self.AWSAccessKeyId is not None or
            self.Timestamp is not None or
            self.Signature is not None or
            self.Credential is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
               name_='SetBucketAccessControlPolicy', pretty_print=True):
        """Serialize this element and its children to *outfile* as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('SetBucketAccessControlPolicy')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SetBucketAccessControlPolicy')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='SetBucketAccessControlPolicy',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='SetBucketAccessControlPolicy'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='SetBucketAccessControlPolicy', fromsubclass_=False, pretty_print=True):
        # Emit each non-None member in schema order; None members are omitted.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Bucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBucket>%s</%sBucket>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
                namespaceprefix_, eol_))
        if self.AccessControlList is not None:
            self.AccessControlList.export(outfile, level, namespaceprefix_, namespacedef_='', name_='AccessControlList',
                                          pretty_print=pretty_print)
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
        if self.Credential is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCredential>%s</%sCredential>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Credential), input_name='Credential')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate this instance from a parsed XML *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatch on the child's local tag name; unknown tags are ignored.
        if nodeName_ == 'Bucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Bucket')
            value_ = self.gds_validate_string(value_, node, 'Bucket')
            self.Bucket = value_
        elif nodeName_ == 'AccessControlList':
            obj_ = AccessControlList.factory(parent_object_=self)
            obj_.build(child_)
            self.AccessControlList = obj_
            obj_.original_tagname_ = 'AccessControlList'
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
        elif nodeName_ == 'Credential':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Credential')
            value_ = self.gds_validate_string(value_, node, 'Credential')
            self.Credential = value_
# end class SetBucketAccessControlPolicy
class SetBucketAccessControlPolicyResponse(GeneratedsSuper):
    """Generated binding for the SetBucketAccessControlPolicyResponse element
    of the S3 2006-03-01 schema.  The response is an empty element: no
    attributes, no children; ``hasContent_`` is always False and the
    attribute/child hooks are no-ops.
    """
    subclass = None
    superclass = None
    def __init__(self, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
    def factory(*args_, **kwargs_):
        # Prefer an externally-registered subclass module, then the
        # ``subclass`` hook, then this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, SetBucketAccessControlPolicyResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if SetBucketAccessControlPolicyResponse.subclass:
            return SetBucketAccessControlPolicyResponse.subclass(*args_, **kwargs_)
        else:
            return SetBucketAccessControlPolicyResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def hasContent_(self):
        # No members defined: the empty parenthesized expression below is an
        # empty tuple, which is always falsy, so this always returns False.
        if (
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
               name_='SetBucketAccessControlPolicyResponse', pretty_print=True):
        """Serialize this element to *outfile* as XML (always ``<.../>``)."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('SetBucketAccessControlPolicyResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_,
                              name_='SetBucketAccessControlPolicyResponse')
        if self.hasContent_():
            # Unreachable in practice: hasContent_() is always False here.
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='SetBucketAccessControlPolicyResponse',
                                pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='SetBucketAccessControlPolicyResponse'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='SetBucketAccessControlPolicyResponse', fromsubclass_=False, pretty_print=True):
        pass
    def build(self, node):
        """Populate this instance from a parsed XML *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class SetBucketAccessControlPolicyResponse
class GetObject(GeneratedsSuper):
    """Generated binding for the GetObject request element of the S3
    2006-03-01 schema.  Members mirror the schema's child elements: the
    Bucket/Key target, the GetMetadata/GetData/InlineData boolean flags,
    and the common AWSAccessKeyId/Timestamp/Signature/Credential auth
    fields.
    """
    subclass = None
    superclass = None
    def __init__(self, Bucket=None, Key=None, GetMetadata=None, GetData=None, InlineData=None, AWSAccessKeyId=None,
                 Timestamp=None, Signature=None, Credential=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Bucket = Bucket
        self.Key = Key
        self.GetMetadata = GetMetadata
        self.GetData = GetData
        self.InlineData = InlineData
        self.AWSAccessKeyId = AWSAccessKeyId
        # Timestamp accepts either a datetime or an ISO-8601 string.
        if isinstance(Timestamp, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
        self.Credential = Credential
    def factory(*args_, **kwargs_):
        # Prefer an externally-registered subclass module, then the
        # ``subclass`` hook, then this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, GetObject)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if GetObject.subclass:
            return GetObject.subclass(*args_, **kwargs_)
        else:
            return GetObject(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- generated accessors ---
    def get_Bucket(self):
        return self.Bucket
    def set_Bucket(self, Bucket):
        self.Bucket = Bucket
    def get_Key(self):
        return self.Key
    def set_Key(self, Key):
        self.Key = Key
    def get_GetMetadata(self):
        return self.GetMetadata
    def set_GetMetadata(self, GetMetadata):
        self.GetMetadata = GetMetadata
    def get_GetData(self):
        return self.GetData
    def set_GetData(self, GetData):
        self.GetData = GetData
    def get_InlineData(self):
        return self.InlineData
    def set_InlineData(self, InlineData):
        self.InlineData = InlineData
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def get_Credential(self):
        return self.Credential
    def set_Credential(self, Credential):
        self.Credential = Credential
    def hasContent_(self):
        # True when any child element is set; controls <x/> vs <x>...</x>.
        if (
            self.Bucket is not None or
            self.Key is not None or
            self.GetMetadata is not None or
            self.GetData is not None or
            self.InlineData is not None or
            self.AWSAccessKeyId is not None or
            self.Timestamp is not None or
            self.Signature is not None or
            self.Credential is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='GetObject',
               pretty_print=True):
        """Serialize this element and its children to *outfile* as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('GetObject')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GetObject')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='GetObject', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='GetObject'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='GetObject',
                       fromsubclass_=False, pretty_print=True):
        # Emit each non-None member in schema order; None members are omitted.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Bucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBucket>%s</%sBucket>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
                namespaceprefix_, eol_))
        if self.Key is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sKey>%s</%sKey>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Key), input_name='Key')),
                namespaceprefix_, eol_))
        if self.GetMetadata is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sGetMetadata>%s</%sGetMetadata>%s' % (
                namespaceprefix_, self.gds_format_boolean(self.GetMetadata, input_name='GetMetadata'), namespaceprefix_,
                eol_))
        if self.GetData is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sGetData>%s</%sGetData>%s' % (
                namespaceprefix_, self.gds_format_boolean(self.GetData, input_name='GetData'), namespaceprefix_, eol_))
        if self.InlineData is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sInlineData>%s</%sInlineData>%s' % (
                namespaceprefix_, self.gds_format_boolean(self.InlineData, input_name='InlineData'), namespaceprefix_,
                eol_))
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
        if self.Credential is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCredential>%s</%sCredential>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Credential), input_name='Credential')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate this instance from a parsed XML *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatch on the child's local tag name; unknown tags are ignored.
        if nodeName_ == 'Bucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Bucket')
            value_ = self.gds_validate_string(value_, node, 'Bucket')
            self.Bucket = value_
        elif nodeName_ == 'Key':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Key')
            value_ = self.gds_validate_string(value_, node, 'Key')
            self.Key = value_
        elif nodeName_ == 'GetMetadata':
            sval_ = child_.text
            ival_ = self.gds_parse_boolean(sval_, node, 'GetMetadata')
            ival_ = self.gds_validate_boolean(ival_, node, 'GetMetadata')
            self.GetMetadata = ival_
        elif nodeName_ == 'GetData':
            sval_ = child_.text
            ival_ = self.gds_parse_boolean(sval_, node, 'GetData')
            ival_ = self.gds_validate_boolean(ival_, node, 'GetData')
            self.GetData = ival_
        elif nodeName_ == 'InlineData':
            sval_ = child_.text
            ival_ = self.gds_parse_boolean(sval_, node, 'InlineData')
            ival_ = self.gds_validate_boolean(ival_, node, 'InlineData')
            self.InlineData = ival_
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
        elif nodeName_ == 'Credential':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Credential')
            value_ = self.gds_validate_string(value_, node, 'Credential')
            self.Credential = value_
# end class GetObject
class GetObjectResponse(GeneratedsSuper):
    """Generated binding for the GetObjectResponse wrapper element (S3
    2006-03-01 schema).  Holds a single nested ``GetObjectResponse`` child
    which is built as a GetObjectResult instance (see buildChildren).  The
    constructor parameter is named ``GetObjectResponse_member`` to avoid
    clashing with the class name.
    """
    subclass = None
    superclass = None
    def __init__(self, GetObjectResponse_member=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.GetObjectResponse = GetObjectResponse_member
    def factory(*args_, **kwargs_):
        # Prefer an externally-registered subclass module, then the
        # ``subclass`` hook, then this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, GetObjectResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if GetObjectResponse.subclass:
            return GetObjectResponse.subclass(*args_, **kwargs_)
        else:
            return GetObjectResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_GetObjectResponse(self):
        return self.GetObjectResponse
    def set_GetObjectResponse(self, GetObjectResponse):
        self.GetObjectResponse = GetObjectResponse
    def hasContent_(self):
        # True when the nested result is present.
        if (
            self.GetObjectResponse is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='GetObjectResponse',
               pretty_print=True):
        """Serialize this element and its nested result to *outfile* as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('GetObjectResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GetObjectResponse')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='GetObjectResponse',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='GetObjectResponse'):
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='GetObjectResponse',
                       fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.GetObjectResponse is not None:
            self.GetObjectResponse.export(outfile, level, namespaceprefix_, namespacedef_='', name_='GetObjectResponse',
                                          pretty_print=pretty_print)
    def build(self, node):
        """Populate this instance from a parsed XML *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # The nested element is typed GetObjectResult in the schema even
        # though its tag name is 'GetObjectResponse'.
        if nodeName_ == 'GetObjectResponse':
            obj_ = GetObjectResult.factory(parent_object_=self)
            obj_.build(child_)
            self.GetObjectResponse = obj_
            obj_.original_tagname_ = 'GetObjectResponse'
# end class GetObjectResponse
class GetObjectResult(Result):
    """Generated binding for the GetObjectResult type (S3 2006-03-01
    schema), extending Result (which contributes Status).  Carries the
    repeated Metadata entries, the base64 Data payload, LastModified and
    ETag of the fetched object.
    """
    subclass = None
    superclass = Result
    def __init__(self, Status=None, Metadata=None, Data=None, LastModified=None, ETag=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        super(GetObjectResult, self).__init__(Status, **kwargs_)
        # Metadata is a repeated element; default to a fresh list per
        # instance (never a shared mutable default).
        if Metadata is None:
            self.Metadata = []
        else:
            self.Metadata = Metadata
        self.Data = Data
        # LastModified accepts either a datetime or an ISO-8601 string.
        if isinstance(LastModified, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(LastModified, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = LastModified
        self.LastModified = initvalue_
        self.ETag = ETag
    def factory(*args_, **kwargs_):
        # Prefer an externally-registered subclass module, then the
        # ``subclass`` hook, then this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, GetObjectResult)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if GetObjectResult.subclass:
            return GetObjectResult.subclass(*args_, **kwargs_)
        else:
            return GetObjectResult(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- generated accessors ---
    def get_Metadata(self):
        return self.Metadata
    def set_Metadata(self, Metadata):
        self.Metadata = Metadata
    def add_Metadata(self, value):
        self.Metadata.append(value)
    def insert_Metadata_at(self, index, value):
        self.Metadata.insert(index, value)
    def replace_Metadata_at(self, index, value):
        self.Metadata[index] = value
    def get_Data(self):
        return self.Data
    def set_Data(self, Data):
        self.Data = Data
    def get_LastModified(self):
        return self.LastModified
    def set_LastModified(self, LastModified):
        self.LastModified = LastModified
    def get_ETag(self):
        return self.ETag
    def set_ETag(self, ETag):
        self.ETag = ETag
    def hasContent_(self):
        # True when any local member or any inherited (Result) member is set.
        if (
            self.Metadata or
            self.Data is not None or
            self.LastModified is not None or
            self.ETag is not None or
            super(GetObjectResult, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='GetObjectResult',
               pretty_print=True):
        """Serialize this result (including inherited Result members) as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('GetObjectResult')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GetObjectResult')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='GetObjectResult',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='GetObjectResult'):
        super(GetObjectResult, self).exportAttributes(outfile, level, already_processed, namespaceprefix_,
                                                      name_='GetObjectResult')
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='GetObjectResult',
                       fromsubclass_=False, pretty_print=True):
        # Inherited (Result) children are emitted first, then local members.
        super(GetObjectResult, self).exportChildren(outfile, level, namespaceprefix_, namespacedef_, name_, True,
                                                    pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Metadata_ in self.Metadata:
            Metadata_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Metadata',
                             pretty_print=pretty_print)
        if self.Data is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sData>%s</%sData>%s' % (
                namespaceprefix_, self.gds_format_base64(self.Data, input_name='Data'), namespaceprefix_, eol_))
        if self.LastModified is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sLastModified>%s</%sLastModified>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.LastModified, input_name='LastModified'), namespaceprefix_,
                eol_))
        if self.ETag is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sETag>%s</%sETag>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.ETag), input_name='ETag')),
                namespaceprefix_, eol_))
    def build(self, node):
        """Populate this instance from a parsed XML *node*; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(GetObjectResult, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatch on local tag name; unmatched tags fall through to the
        # superclass (Result) handler at the bottom.
        if nodeName_ == 'Metadata':
            obj_ = MetadataEntry.factory(parent_object_=self)
            obj_.build(child_)
            self.Metadata.append(obj_)
            obj_.original_tagname_ = 'Metadata'
        elif nodeName_ == 'Data':
            # Data is xsd:base64Binary; decode it, raising a parse error on
            # malformed input.
            sval_ = child_.text
            if sval_ is not None:
                try:
                    bval_ = base64.b64decode(sval_)
                except (TypeError, ValueError) as exp:
                    raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
                bval_ = self.gds_validate_base64(bval_, node, 'Data')
            else:
                bval_ = None
            self.Data = bval_
        elif nodeName_ == 'LastModified':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.LastModified = dval_
        elif nodeName_ == 'ETag':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'ETag')
            value_ = self.gds_validate_string(value_, node, 'ETag')
            self.ETag = value_
        super(GetObjectResult, self).buildChildren(child_, node, nodeName_, True)
# end class GetObjectResult
class GetObjectExtended(GeneratedsSuper):
subclass = None
superclass = None
    def __init__(self, Bucket=None, Key=None, GetMetadata=None, GetData=None, InlineData=None, ByteRangeStart=None,
                 ByteRangeEnd=None, IfModifiedSince=None, IfUnmodifiedSince=None, IfMatch=None, IfNoneMatch=None,
                 ReturnCompleteObjectOnConditionFailure=None, AWSAccessKeyId=None, Timestamp=None, Signature=None,
                 Credential=None, **kwargs_):
        """Initialize a GetObjectExtended request (conditional/ranged GET).

        Datetime-valued parameters (IfModifiedSince, IfUnmodifiedSince,
        Timestamp) accept either a datetime or an ISO-8601 string; the
        list-valued IfMatch/IfNoneMatch default to fresh lists.
        """
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Bucket = Bucket
        self.Key = Key
        self.GetMetadata = GetMetadata
        self.GetData = GetData
        self.InlineData = InlineData
        self.ByteRangeStart = ByteRangeStart
        self.ByteRangeEnd = ByteRangeEnd
        if isinstance(IfModifiedSince, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(IfModifiedSince, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = IfModifiedSince
        self.IfModifiedSince = initvalue_
        if isinstance(IfUnmodifiedSince, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(IfUnmodifiedSince, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = IfUnmodifiedSince
        self.IfUnmodifiedSince = initvalue_
        # Repeated elements: never share a mutable default across instances.
        if IfMatch is None:
            self.IfMatch = []
        else:
            self.IfMatch = IfMatch
        if IfNoneMatch is None:
            self.IfNoneMatch = []
        else:
            self.IfNoneMatch = IfNoneMatch
        self.ReturnCompleteObjectOnConditionFailure = ReturnCompleteObjectOnConditionFailure
        self.AWSAccessKeyId = AWSAccessKeyId
        if isinstance(Timestamp, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
        self.Credential = Credential
    def factory(*args_, **kwargs_):
        # Prefer an externally-registered subclass module, then the
        # ``subclass`` hook, then this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, GetObjectExtended)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if GetObjectExtended.subclass:
            return GetObjectExtended.subclass(*args_, **kwargs_)
        else:
            return GetObjectExtended(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- generated accessors (one get_/set_ pair per member; list-valued
    # members also get add_/insert_at/replace_at helpers) ---
    def get_Bucket(self):
        return self.Bucket
    def set_Bucket(self, Bucket):
        self.Bucket = Bucket
    def get_Key(self):
        return self.Key
    def set_Key(self, Key):
        self.Key = Key
    def get_GetMetadata(self):
        return self.GetMetadata
    def set_GetMetadata(self, GetMetadata):
        self.GetMetadata = GetMetadata
    def get_GetData(self):
        return self.GetData
    def set_GetData(self, GetData):
        self.GetData = GetData
    def get_InlineData(self):
        return self.InlineData
    def set_InlineData(self, InlineData):
        self.InlineData = InlineData
    def get_ByteRangeStart(self):
        return self.ByteRangeStart
    def set_ByteRangeStart(self, ByteRangeStart):
        self.ByteRangeStart = ByteRangeStart
    def get_ByteRangeEnd(self):
        return self.ByteRangeEnd
    def set_ByteRangeEnd(self, ByteRangeEnd):
        self.ByteRangeEnd = ByteRangeEnd
    def get_IfModifiedSince(self):
        return self.IfModifiedSince
    def set_IfModifiedSince(self, IfModifiedSince):
        self.IfModifiedSince = IfModifiedSince
    def get_IfUnmodifiedSince(self):
        return self.IfUnmodifiedSince
    def set_IfUnmodifiedSince(self, IfUnmodifiedSince):
        self.IfUnmodifiedSince = IfUnmodifiedSince
    def get_IfMatch(self):
        return self.IfMatch
    def set_IfMatch(self, IfMatch):
        self.IfMatch = IfMatch
    def add_IfMatch(self, value):
        self.IfMatch.append(value)
    def insert_IfMatch_at(self, index, value):
        self.IfMatch.insert(index, value)
    def replace_IfMatch_at(self, index, value):
        self.IfMatch[index] = value
    def get_IfNoneMatch(self):
        return self.IfNoneMatch
    def set_IfNoneMatch(self, IfNoneMatch):
        self.IfNoneMatch = IfNoneMatch
    def add_IfNoneMatch(self, value):
        self.IfNoneMatch.append(value)
    def insert_IfNoneMatch_at(self, index, value):
        self.IfNoneMatch.insert(index, value)
    def replace_IfNoneMatch_at(self, index, value):
        self.IfNoneMatch[index] = value
    def get_ReturnCompleteObjectOnConditionFailure(self):
        return self.ReturnCompleteObjectOnConditionFailure
    def set_ReturnCompleteObjectOnConditionFailure(self, ReturnCompleteObjectOnConditionFailure):
        self.ReturnCompleteObjectOnConditionFailure = ReturnCompleteObjectOnConditionFailure
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def get_Credential(self):
        return self.Credential
    def set_Credential(self, Credential):
        self.Credential = Credential
def hasContent_(self):
if (
self.Bucket is not None or
self.Key is not None or
self.GetMetadata is not None or
self.GetData is not None or
self.InlineData is not None or
self.ByteRangeStart is not None or
self.ByteRangeEnd is not None or
self.IfModifiedSince is not None or
self.IfUnmodifiedSince is not None or
self.IfMatch or
self.IfNoneMatch or
self.ReturnCompleteObjectOnConditionFailure is not None or
self.AWSAccessKeyId is not None or
self.Timestamp is not None or
self.Signature is not None or
self.Credential is not None
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='GetObjectExtended',
               pretty_print=True):
        """Write this element as XML to *outfile* at indent *level*.

        A namespace definition registered in GenerateDSNamespaceDefs_ takes
        precedence over the *namespacedef_* argument, and a tag name captured
        during parsing (original_tagname_) takes precedence over *name_*.
        """
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('GetObjectExtended')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GetObjectExtended')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='GetObjectExtended',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No populated children: emit a self-closing tag instead.
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='GetObjectExtended'):
        # GetObjectExtended defines no XML attributes; nothing to emit.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='GetObjectExtended',
                       fromsubclass_=False, pretty_print=True):
        """Write each populated child element, one per line when pretty-printing.

        Scalar children are emitted only when not None; the list-valued
        IfMatch/IfNoneMatch children are emitted once per entry.
        """
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Bucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBucket>%s</%sBucket>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
                namespaceprefix_, eol_))
        if self.Key is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sKey>%s</%sKey>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Key), input_name='Key')),
                namespaceprefix_, eol_))
        if self.GetMetadata is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sGetMetadata>%s</%sGetMetadata>%s' % (
                namespaceprefix_, self.gds_format_boolean(self.GetMetadata, input_name='GetMetadata'), namespaceprefix_,
                eol_))
        if self.GetData is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sGetData>%s</%sGetData>%s' % (
                namespaceprefix_, self.gds_format_boolean(self.GetData, input_name='GetData'), namespaceprefix_, eol_))
        if self.InlineData is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sInlineData>%s</%sInlineData>%s' % (
                namespaceprefix_, self.gds_format_boolean(self.InlineData, input_name='InlineData'), namespaceprefix_,
                eol_))
        if self.ByteRangeStart is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sByteRangeStart>%s</%sByteRangeStart>%s' % (
                namespaceprefix_, self.gds_format_integer(self.ByteRangeStart, input_name='ByteRangeStart'),
                namespaceprefix_, eol_))
        if self.ByteRangeEnd is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sByteRangeEnd>%s</%sByteRangeEnd>%s' % (
                namespaceprefix_, self.gds_format_integer(self.ByteRangeEnd, input_name='ByteRangeEnd'), namespaceprefix_,
                eol_))
        if self.IfModifiedSince is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sIfModifiedSince>%s</%sIfModifiedSince>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.IfModifiedSince, input_name='IfModifiedSince'),
                namespaceprefix_, eol_))
        if self.IfUnmodifiedSince is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sIfUnmodifiedSince>%s</%sIfUnmodifiedSince>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.IfUnmodifiedSince, input_name='IfUnmodifiedSince'),
                namespaceprefix_, eol_))
        for IfMatch_ in self.IfMatch:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sIfMatch>%s</%sIfMatch>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(IfMatch_), input_name='IfMatch')),
                namespaceprefix_, eol_))
        for IfNoneMatch_ in self.IfNoneMatch:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sIfNoneMatch>%s</%sIfNoneMatch>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(IfNoneMatch_), input_name='IfNoneMatch')), namespaceprefix_, eol_))
        if self.ReturnCompleteObjectOnConditionFailure is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write(
                '<%sReturnCompleteObjectOnConditionFailure>%s</%sReturnCompleteObjectOnConditionFailure>%s' % (
                    namespaceprefix_, self.gds_format_boolean(self.ReturnCompleteObjectOnConditionFailure,
                                                              input_name='ReturnCompleteObjectOnConditionFailure'),
                    namespaceprefix_, eol_))
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
        if self.Credential is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCredential>%s</%sCredential>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Credential), input_name='Credential')), namespaceprefix_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
    def buildAttributes(self, node, attrs, already_processed):
        # GetObjectExtended defines no XML attributes; nothing to parse.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element *child_* (local tag *nodeName_*) into the
        matching attribute, running the generated parse/validate helpers for
        its declared xsd type (string / boolean / integer / dateTime)."""
        if nodeName_ == 'Bucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Bucket')
            value_ = self.gds_validate_string(value_, node, 'Bucket')
            self.Bucket = value_
        elif nodeName_ == 'Key':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Key')
            value_ = self.gds_validate_string(value_, node, 'Key')
            self.Key = value_
        elif nodeName_ == 'GetMetadata':
            sval_ = child_.text
            ival_ = self.gds_parse_boolean(sval_, node, 'GetMetadata')
            ival_ = self.gds_validate_boolean(ival_, node, 'GetMetadata')
            self.GetMetadata = ival_
        elif nodeName_ == 'GetData':
            sval_ = child_.text
            ival_ = self.gds_parse_boolean(sval_, node, 'GetData')
            ival_ = self.gds_validate_boolean(ival_, node, 'GetData')
            self.GetData = ival_
        elif nodeName_ == 'InlineData':
            sval_ = child_.text
            ival_ = self.gds_parse_boolean(sval_, node, 'InlineData')
            ival_ = self.gds_validate_boolean(ival_, node, 'InlineData')
            self.InlineData = ival_
        # Integer children are skipped when the element is present but empty
        # (child_.text is None/''), hence the extra "and child_.text" guard.
        elif nodeName_ == 'ByteRangeStart' and child_.text:
            sval_ = child_.text
            ival_ = self.gds_parse_integer(sval_, node, 'ByteRangeStart')
            ival_ = self.gds_validate_integer(ival_, node, 'ByteRangeStart')
            self.ByteRangeStart = ival_
        elif nodeName_ == 'ByteRangeEnd' and child_.text:
            sval_ = child_.text
            ival_ = self.gds_parse_integer(sval_, node, 'ByteRangeEnd')
            ival_ = self.gds_validate_integer(ival_, node, 'ByteRangeEnd')
            self.ByteRangeEnd = ival_
        elif nodeName_ == 'IfModifiedSince':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.IfModifiedSince = dval_
        elif nodeName_ == 'IfUnmodifiedSince':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.IfUnmodifiedSince = dval_
        elif nodeName_ == 'IfMatch':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'IfMatch')
            value_ = self.gds_validate_string(value_, node, 'IfMatch')
            self.IfMatch.append(value_)
        elif nodeName_ == 'IfNoneMatch':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'IfNoneMatch')
            value_ = self.gds_validate_string(value_, node, 'IfNoneMatch')
            self.IfNoneMatch.append(value_)
        elif nodeName_ == 'ReturnCompleteObjectOnConditionFailure':
            sval_ = child_.text
            ival_ = self.gds_parse_boolean(sval_, node, 'ReturnCompleteObjectOnConditionFailure')
            ival_ = self.gds_validate_boolean(ival_, node, 'ReturnCompleteObjectOnConditionFailure')
            self.ReturnCompleteObjectOnConditionFailure = ival_
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
        elif nodeName_ == 'Credential':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Credential')
            value_ = self.gds_validate_string(value_, node, 'Credential')
            self.Credential = value_
# end class GetObjectExtended
class GetObjectExtendedResponse(GeneratedsSuper):
    """Response wrapper for GetObjectExtended: carries a single
    GetObjectResult child serialized under the tag 'GetObjectResponse'."""
    subclass = None
    superclass = None
    def __init__(self, GetObjectResponse=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.GetObjectResponse = GetObjectResponse
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass module, then a direct
        # subclass assignment, before instantiating this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, GetObjectExtendedResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if GetObjectExtendedResponse.subclass:
            return GetObjectExtendedResponse.subclass(*args_, **kwargs_)
        return GetObjectExtendedResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_GetObjectResponse(self):
        return self.GetObjectResponse
    def set_GetObjectResponse(self, GetObjectResponse):
        self.GetObjectResponse = GetObjectResponse
    def hasContent_(self):
        """Return True when the GetObjectResponse child is populated."""
        return self.GetObjectResponse is not None
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='GetObjectExtendedResponse',
               pretty_print=True):
        """Serialize this element (and its child) as XML to *outfile*."""
        override_ns = GenerateDSNamespaceDefs_.get('GetObjectExtendedResponse')
        if override_ns is not None:
            namespacedef_ = override_ns
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GetObjectExtendedResponse')
        if not self.hasContent_():
            # Self-closing tag when there is nothing to nest.
            outfile.write('/>%s' % (eol_,))
            return
        outfile.write('>%s' % (eol_,))
        self.exportChildren(outfile, level + 1, '', namespacedef_, name_='GetObjectExtendedResponse',
                            pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='GetObjectExtendedResponse'):
        # No XML attributes on this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='GetObjectExtendedResponse', fromsubclass_=False, pretty_print=True):
        # Delegate serialization of the single complex child.
        if self.GetObjectResponse is not None:
            self.GetObjectResponse.export(outfile, level, namespaceprefix_, namespacedef_='', name_='GetObjectResponse',
                                          pretty_print=pretty_print)
    def build(self, node):
        """Populate this element from an ElementTree *node*; return self."""
        seen_attrs = set()
        self.buildAttributes(node, node.attrib, seen_attrs)
        for subnode in node:
            local_name = Tag_pattern_.match(subnode.tag).groups()[-1]
            self.buildChildren(subnode, node, local_name)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'GetObjectResponse':
            obj_ = GetObjectResult.factory(parent_object_=self)
            obj_.build(child_)
            self.GetObjectResponse = obj_
            obj_.original_tagname_ = 'GetObjectResponse'
# end class GetObjectExtendedResponse
class PutObject(GeneratedsSuper):
    """Request element for the S3 SOAP PutObject operation (2006-03-01
    schema): bucket/key target, per-object metadata entries, content length,
    optional ACL and storage class, plus request authentication fields.
    """
    subclass = None
    superclass = None
    def __init__(self, Bucket=None, Key=None, Metadata=None, ContentLength=None, AccessControlList=None,
                 StorageClass=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Bucket = Bucket
        self.Key = Key
        # Fresh list per instance when no Metadata is supplied (avoids the
        # shared-mutable-default pitfall).
        if Metadata is None:
            self.Metadata = []
        else:
            self.Metadata = Metadata
        self.ContentLength = ContentLength
        self.AccessControlList = AccessControlList
        self.StorageClass = StorageClass
        self.validate_StorageClass(self.StorageClass)
        self.AWSAccessKeyId = AWSAccessKeyId
        # Timestamp accepts either a datetime object or an ISO-8601 string.
        if isinstance(Timestamp, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
        self.Credential = Credential
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass module, then a direct
        # subclass assignment, before instantiating this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, PutObject)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if PutObject.subclass:
            return PutObject.subclass(*args_, **kwargs_)
        else:
            return PutObject(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- Generated accessor methods --------------------------------------
    def get_Bucket(self):
        return self.Bucket
    def set_Bucket(self, Bucket):
        self.Bucket = Bucket
    def get_Key(self):
        return self.Key
    def set_Key(self, Key):
        self.Key = Key
    def get_Metadata(self):
        return self.Metadata
    def set_Metadata(self, Metadata):
        self.Metadata = Metadata
    def add_Metadata(self, value):
        self.Metadata.append(value)
    def insert_Metadata_at(self, index, value):
        self.Metadata.insert(index, value)
    def replace_Metadata_at(self, index, value):
        self.Metadata[index] = value
    def get_ContentLength(self):
        return self.ContentLength
    def set_ContentLength(self, ContentLength):
        self.ContentLength = ContentLength
    def get_AccessControlList(self):
        return self.AccessControlList
    def set_AccessControlList(self, AccessControlList):
        self.AccessControlList = AccessControlList
    def get_StorageClass(self):
        return self.StorageClass
    def set_StorageClass(self, StorageClass):
        self.StorageClass = StorageClass
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def get_Credential(self):
        return self.Credential
    def set_Credential(self, Credential):
        self.Credential = Credential
    def validate_StorageClass(self, value):
        # Validate type StorageClass, a restriction on xsd:string.
        # Only warns (never raises) when the value is outside the enumeration.
        # NOTE(review): on Python 3 the .encode("utf-8") below makes the
        # warning message render the value as "b'...'"; dropping the encode
        # would fix the message text — confirm against generateDS upstream.
        if value is not None and Validate_simpletypes_:
            value = str(value)
            enumerations = ['STANDARD', 'REDUCED_REDUNDANCY', 'GLACIER', 'UNKNOWN']
            enumeration_respectee = False
            for enum in enumerations:
                if value == enum:
                    enumeration_respectee = True
                    break
            if not enumeration_respectee:
                warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on StorageClass' % {
                    "value": value.encode("utf-8")})
    def hasContent_(self):
        # True when any scalar child is set or the Metadata list is non-empty.
        if (
                self.Bucket is not None or
                self.Key is not None or
                self.Metadata or
                self.ContentLength is not None or
                self.AccessControlList is not None or
                self.StorageClass is not None or
                self.AWSAccessKeyId is not None or
                self.Timestamp is not None or
                self.Signature is not None or
                self.Credential is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='PutObject',
               pretty_print=True):
        """Write this element as XML to *outfile* at indent *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('PutObject')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PutObject')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='PutObject', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # Self-closing tag when there is nothing to nest.
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PutObject'):
        # PutObject defines no XML attributes; nothing to emit.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='PutObject',
                       fromsubclass_=False, pretty_print=True):
        """Write each populated child element; complex children (Metadata,
        AccessControlList) delegate to their own export()."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Bucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBucket>%s</%sBucket>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
                namespaceprefix_, eol_))
        if self.Key is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sKey>%s</%sKey>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Key), input_name='Key')),
                namespaceprefix_, eol_))
        for Metadata_ in self.Metadata:
            Metadata_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Metadata',
                             pretty_print=pretty_print)
        if self.ContentLength is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sContentLength>%s</%sContentLength>%s' % (
                namespaceprefix_, self.gds_format_integer(self.ContentLength, input_name='ContentLength'), namespaceprefix_,
                eol_))
        if self.AccessControlList is not None:
            self.AccessControlList.export(outfile, level, namespaceprefix_, namespacedef_='', name_='AccessControlList',
                                          pretty_print=pretty_print)
        if self.StorageClass is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sStorageClass>%s</%sStorageClass>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.StorageClass), input_name='StorageClass')), namespaceprefix_,
                eol_))
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
        if self.Credential is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCredential>%s</%sCredential>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Credential), input_name='Credential')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate this element from an ElementTree *node*; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element into the matching attribute; complex
        children (Metadata, AccessControlList) are built recursively."""
        if nodeName_ == 'Bucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Bucket')
            value_ = self.gds_validate_string(value_, node, 'Bucket')
            self.Bucket = value_
        elif nodeName_ == 'Key':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Key')
            value_ = self.gds_validate_string(value_, node, 'Key')
            self.Key = value_
        elif nodeName_ == 'Metadata':
            obj_ = MetadataEntry.factory(parent_object_=self)
            obj_.build(child_)
            self.Metadata.append(obj_)
            obj_.original_tagname_ = 'Metadata'
        # Skipped when the element is present but empty (child_.text falsy).
        elif nodeName_ == 'ContentLength' and child_.text:
            sval_ = child_.text
            ival_ = self.gds_parse_integer(sval_, node, 'ContentLength')
            ival_ = self.gds_validate_integer(ival_, node, 'ContentLength')
            self.ContentLength = ival_
        elif nodeName_ == 'AccessControlList':
            obj_ = AccessControlList.factory(parent_object_=self)
            obj_.build(child_)
            self.AccessControlList = obj_
            obj_.original_tagname_ = 'AccessControlList'
        elif nodeName_ == 'StorageClass':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'StorageClass')
            value_ = self.gds_validate_string(value_, node, 'StorageClass')
            self.StorageClass = value_
            # validate type StorageClass
            self.validate_StorageClass(self.StorageClass)
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
        elif nodeName_ == 'Credential':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Credential')
            value_ = self.gds_validate_string(value_, node, 'Credential')
            self.Credential = value_
# end class PutObject
class PutObjectResponse(GeneratedsSuper):
    """Response wrapper for PutObject: carries a single PutObjectResult
    child serialized under the tag 'PutObjectResponse'.

    The constructor argument is named ``PutObjectResponse_member`` to avoid
    shadowing the class name; the attribute itself is ``PutObjectResponse``.
    """
    subclass = None
    superclass = None
    def __init__(self, PutObjectResponse_member=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.PutObjectResponse = PutObjectResponse_member
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass module, then a direct
        # subclass assignment, before instantiating this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, PutObjectResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if PutObjectResponse.subclass:
            return PutObjectResponse.subclass(*args_, **kwargs_)
        return PutObjectResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_PutObjectResponse(self):
        return self.PutObjectResponse
    def set_PutObjectResponse(self, PutObjectResponse):
        self.PutObjectResponse = PutObjectResponse
    def hasContent_(self):
        """Return True when the PutObjectResponse child is populated."""
        return self.PutObjectResponse is not None
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='PutObjectResponse',
               pretty_print=True):
        """Serialize this element (and its child) as XML to *outfile*."""
        override_ns = GenerateDSNamespaceDefs_.get('PutObjectResponse')
        if override_ns is not None:
            namespacedef_ = override_ns
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PutObjectResponse')
        if not self.hasContent_():
            # Self-closing tag when there is nothing to nest.
            outfile.write('/>%s' % (eol_,))
            return
        outfile.write('>%s' % (eol_,))
        self.exportChildren(outfile, level + 1, '', namespacedef_, name_='PutObjectResponse',
                            pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PutObjectResponse'):
        # No XML attributes on this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='PutObjectResponse',
                       fromsubclass_=False, pretty_print=True):
        # Delegate serialization of the single complex child.
        if self.PutObjectResponse is not None:
            self.PutObjectResponse.export(outfile, level, namespaceprefix_, namespacedef_='', name_='PutObjectResponse',
                                          pretty_print=pretty_print)
    def build(self, node):
        """Populate this element from an ElementTree *node*; return self."""
        seen_attrs = set()
        self.buildAttributes(node, node.attrib, seen_attrs)
        for subnode in node:
            local_name = Tag_pattern_.match(subnode.tag).groups()[-1]
            self.buildChildren(subnode, node, local_name)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'PutObjectResponse':
            obj_ = PutObjectResult.factory(parent_object_=self)
            obj_.build(child_)
            self.PutObjectResponse = obj_
            obj_.original_tagname_ = 'PutObjectResponse'
# end class PutObjectResponse
class PutObjectResult(GeneratedsSuper):
    """Result element for PutObject: the stored object's ETag and its
    LastModified timestamp (S3 SOAP API, 2006-03-01 schema)."""
    subclass = None
    superclass = None
    def __init__(self, ETag=None, LastModified=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ETag = ETag
        # LastModified accepts either a datetime object or an ISO-8601 string.
        if isinstance(LastModified, BaseStrType_):
            self.LastModified = datetime_.datetime.strptime(LastModified, '%Y-%m-%dT%H:%M:%S')
        else:
            self.LastModified = LastModified
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass module, then a direct
        # subclass assignment, before instantiating this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, PutObjectResult)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if PutObjectResult.subclass:
            return PutObjectResult.subclass(*args_, **kwargs_)
        return PutObjectResult(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ETag(self):
        return self.ETag
    def set_ETag(self, ETag):
        self.ETag = ETag
    def get_LastModified(self):
        return self.LastModified
    def set_LastModified(self, LastModified):
        self.LastModified = LastModified
    def hasContent_(self):
        """Return True when ETag or LastModified is set."""
        return self.ETag is not None or self.LastModified is not None
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='PutObjectResult',
               pretty_print=True):
        """Serialize this element (and its children) as XML to *outfile*."""
        override_ns = GenerateDSNamespaceDefs_.get('PutObjectResult')
        if override_ns is not None:
            namespacedef_ = override_ns
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PutObjectResult')
        if not self.hasContent_():
            # Self-closing tag when there is nothing to nest.
            outfile.write('/>%s' % (eol_,))
            return
        outfile.write('>%s' % (eol_,))
        self.exportChildren(outfile, level + 1, '', namespacedef_, name_='PutObjectResult',
                            pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PutObjectResult'):
        # No XML attributes on this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='PutObjectResult',
                       fromsubclass_=False, pretty_print=True):
        eol_ = '\n' if pretty_print else ''
        if self.ETag is not None:
            showIndent(outfile, level, pretty_print)
            etag_text = self.gds_encode(self.gds_format_string(quote_xml(self.ETag), input_name='ETag'))
            outfile.write('<%sETag>%s</%sETag>%s' % (namespaceprefix_, etag_text, namespaceprefix_, eol_))
        if self.LastModified is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sLastModified>%s</%sLastModified>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.LastModified, input_name='LastModified'),
                namespaceprefix_, eol_))
    def build(self, node):
        """Populate this element from an ElementTree *node*; return self."""
        seen_attrs = set()
        self.buildAttributes(node, node.attrib, seen_attrs)
        for subnode in node:
            local_name = Tag_pattern_.match(subnode.tag).groups()[-1]
            self.buildChildren(subnode, node, local_name)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'ETag':
            text = self.gds_parse_string(child_.text, node, 'ETag')
            self.ETag = self.gds_validate_string(text, node, 'ETag')
        elif nodeName_ == 'LastModified':
            self.LastModified = self.gds_parse_datetime(child_.text)
# end class PutObjectResult
class PutObjectInline(GeneratedsSuper):
    """Request element for the S3 SOAP PutObjectInline operation: uploads an
    object whose payload travels inline in the Data field, together with
    optional metadata entries, ACL, storage class, and request
    authentication fields.
    """
    subclass = None
    superclass = None
    def __init__(self, Bucket=None, Key=None, Metadata=None, Data=None, ContentLength=None, AccessControlList=None,
                 StorageClass=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
        """Initialize a PutObjectInline request element.

        Metadata defaults to a fresh list per instance; Timestamp accepts
        either a datetime object or an ISO-8601 string ('%Y-%m-%dT%H:%M:%S').
        StorageClass is validated (warn-only) against the xsd enumeration.
        """
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Bucket = Bucket
        self.Key = Key
        if Metadata is None:
            self.Metadata = []
        else:
            self.Metadata = Metadata
        self.Data = Data
        self.ContentLength = ContentLength
        self.AccessControlList = AccessControlList
        self.StorageClass = StorageClass
        self.validate_StorageClass(self.StorageClass)
        self.AWSAccessKeyId = AWSAccessKeyId
        # Accept either a datetime instance or an ISO-8601 string.
        if isinstance(Timestamp, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
        self.Credential = Credential
    def factory(*args_, **kwargs_):
        # Dispatch to an externally registered subclass (via
        # CurrentSubclassModule_) or a directly assigned subclass, falling
        # back to PutObjectInline itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, PutObjectInline)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if PutObjectInline.subclass:
            return PutObjectInline.subclass(*args_, **kwargs_)
        else:
            return PutObjectInline(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- Generated accessor methods --------------------------------------
    # Plain attribute pass-throughs emitted by generateDS. The list-valued
    # Metadata field additionally gets add/insert/replace helpers that
    # mutate the underlying list in place.
    def get_Bucket(self):
        return self.Bucket
    def set_Bucket(self, Bucket):
        self.Bucket = Bucket
    def get_Key(self):
        return self.Key
    def set_Key(self, Key):
        self.Key = Key
    def get_Metadata(self):
        return self.Metadata
    def set_Metadata(self, Metadata):
        self.Metadata = Metadata
    def add_Metadata(self, value):
        self.Metadata.append(value)
    def insert_Metadata_at(self, index, value):
        self.Metadata.insert(index, value)
    def replace_Metadata_at(self, index, value):
        self.Metadata[index] = value
    def get_Data(self):
        return self.Data
    def set_Data(self, Data):
        self.Data = Data
    def get_ContentLength(self):
        return self.ContentLength
    def set_ContentLength(self, ContentLength):
        self.ContentLength = ContentLength
    def get_AccessControlList(self):
        return self.AccessControlList
    def set_AccessControlList(self, AccessControlList):
        self.AccessControlList = AccessControlList
    def get_StorageClass(self):
        return self.StorageClass
    def set_StorageClass(self, StorageClass):
        self.StorageClass = StorageClass
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def get_Credential(self):
        return self.Credential
    def set_Credential(self, Credential):
        self.Credential = Credential
def validate_StorageClass(self, value):
# Validate type StorageClass, a restriction on xsd:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['STANDARD', 'REDUCED_REDUNDANCY', 'GLACIER', 'UNKNOWN']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on StorageClass' % {
"value": value.encode("utf-8")})
def hasContent_(self):
if (
self.Bucket is not None or
self.Key is not None or
self.Metadata or
self.Data is not None or
self.ContentLength is not None or
self.AccessControlList is not None or
self.StorageClass is not None or
self.AWSAccessKeyId is not None or
self.Timestamp is not None or
self.Signature is not None or
self.Credential is not None
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='PutObjectInline',
               pretty_print=True):
        """Serialize this object as an XML element written to *outfile*.

        *level* controls indentation depth; *namespaceprefix_* /
        *namespacedef_* customize the namespace; *name_* is the tag name.
        """
        # A namespace definition registered for this class overrides the default.
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('PutObjectInline')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Prefer the tag name this object was originally parsed under, if any.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PutObjectInline')
        if self.hasContent_():
            # Non-empty: open tag, children at level + 1, then closing tag.
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='PutObjectInline',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # Empty element: emit a self-closing tag.
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PutObjectInline'):
        # PutObjectInline defines no XML attributes, so there is nothing to write.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='PutObjectInline',
                       fromsubclass_=False, pretty_print=True):
        """Write each set (non-None / non-empty) child element to *outfile*.

        Children are emitted in the order coded below: Bucket, Key,
        Metadata (repeated), Data, ContentLength, AccessControlList,
        StorageClass, AWSAccessKeyId, Timestamp, Signature, Credential.
        """
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Bucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBucket>%s</%sBucket>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
                namespaceprefix_, eol_))
        if self.Key is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sKey>%s</%sKey>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Key), input_name='Key')),
                namespaceprefix_, eol_))
        # Metadata entries delegate to their own export method.
        for Metadata_ in self.Metadata:
            Metadata_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Metadata',
                             pretty_print=pretty_print)
        if self.Data is not None:
            # Data is emitted base64-encoded.
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sData>%s</%sData>%s' % (
                namespaceprefix_, self.gds_format_base64(self.Data, input_name='Data'), namespaceprefix_, eol_))
        if self.ContentLength is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sContentLength>%s</%sContentLength>%s' % (
                namespaceprefix_, self.gds_format_integer(self.ContentLength, input_name='ContentLength'), namespaceprefix_,
                eol_))
        if self.AccessControlList is not None:
            self.AccessControlList.export(outfile, level, namespaceprefix_, namespacedef_='', name_='AccessControlList',
                                          pretty_print=pretty_print)
        if self.StorageClass is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sStorageClass>%s</%sStorageClass>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.StorageClass), input_name='StorageClass')), namespaceprefix_,
                eol_))
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
        if self.Credential is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCredential>%s</%sCredential>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Credential), input_name='Credential')), namespaceprefix_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes are defined for this element; nothing to parse.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element into the matching member, dispatching on
        its (namespace-stripped) tag name. Unrecognized tags are ignored."""
        if nodeName_ == 'Bucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Bucket')
            value_ = self.gds_validate_string(value_, node, 'Bucket')
            self.Bucket = value_
        elif nodeName_ == 'Key':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Key')
            value_ = self.gds_validate_string(value_, node, 'Key')
            self.Key = value_
        elif nodeName_ == 'Metadata':
            # Repeated element: each occurrence is appended to the list.
            obj_ = MetadataEntry.factory(parent_object_=self)
            obj_.build(child_)
            self.Metadata.append(obj_)
            obj_.original_tagname_ = 'Metadata'
        elif nodeName_ == 'Data':
            # Data arrives base64-encoded; invalid input raises a parse error.
            sval_ = child_.text
            if sval_ is not None:
                try:
                    bval_ = base64.b64decode(sval_)
                except (TypeError, ValueError) as exp:
                    raise_parse_error(child_, 'requires base64 encoded string: %s' % exp)
                bval_ = self.gds_validate_base64(bval_, node, 'Data')
            else:
                bval_ = None
            self.Data = bval_
        elif nodeName_ == 'ContentLength' and child_.text:
            sval_ = child_.text
            ival_ = self.gds_parse_integer(sval_, node, 'ContentLength')
            ival_ = self.gds_validate_integer(ival_, node, 'ContentLength')
            self.ContentLength = ival_
        elif nodeName_ == 'AccessControlList':
            obj_ = AccessControlList.factory(parent_object_=self)
            obj_.build(child_)
            self.AccessControlList = obj_
            obj_.original_tagname_ = 'AccessControlList'
        elif nodeName_ == 'StorageClass':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'StorageClass')
            value_ = self.gds_validate_string(value_, node, 'StorageClass')
            self.StorageClass = value_
            # validate type StorageClass (warns on out-of-enumeration values)
            self.validate_StorageClass(self.StorageClass)
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
        elif nodeName_ == 'Credential':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Credential')
            value_ = self.gds_validate_string(value_, node, 'Credential')
            self.Credential = value_
# end class PutObjectInline
class PutObjectInlineResponse(GeneratedsSuper):
    """Wrapper element for the S3 PutObjectInline response.

    Holds a single child member, ``PutObjectInlineResponse``, which is
    built as a PutObjectResult when parsed (see buildChildren).
    """
    subclass = None
    superclass = None
    def __init__(self, PutObjectInlineResponse_member=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.PutObjectInlineResponse = PutObjectInlineResponse_member
    def factory(*args_, **kwargs_):
        """Instantiate this class or a registered subclass override."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, PutObjectInlineResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if PutObjectInlineResponse.subclass:
            return PutObjectInlineResponse.subclass(*args_, **kwargs_)
        else:
            return PutObjectInlineResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_PutObjectInlineResponse(self):
        return self.PutObjectInlineResponse
    def set_PutObjectInlineResponse(self, PutObjectInlineResponse):
        self.PutObjectInlineResponse = PutObjectInlineResponse
    def hasContent_(self):
        """Return True when the single child member is set."""
        # Simplified from the original if/else returning literal True/False.
        return self.PutObjectInlineResponse is not None
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='PutObjectInlineResponse',
               pretty_print=True):
        """Serialize this object as an XML element written to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('PutObjectInlineResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PutObjectInlineResponse')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='PutObjectInlineResponse',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PutObjectInlineResponse'):
        # No XML attributes are defined for this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='PutObjectInlineResponse', fromsubclass_=False, pretty_print=True):
        """Delegate serialization of the single child member, if set."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.PutObjectInlineResponse is not None:
            self.PutObjectInlineResponse.export(outfile, level, namespaceprefix_, namespacedef_='',
                                                name_='PutObjectInlineResponse', pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from the element-tree *node* and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes are defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse the single expected child element (a PutObjectResult)."""
        if nodeName_ == 'PutObjectInlineResponse':
            obj_ = PutObjectResult.factory(parent_object_=self)
            obj_.build(child_)
            self.PutObjectInlineResponse = obj_
            obj_.original_tagname_ = 'PutObjectInlineResponse'
# end class PutObjectInlineResponse
class DeleteObject(GeneratedsSuper):
    """Request element for the S3 DeleteObject SOAP operation.

    Members: Bucket, Key (the target), plus the request-signing fields
    AWSAccessKeyId, Timestamp, Signature, Credential.
    """
    subclass = None
    superclass = None
    def __init__(self, Bucket=None, Key=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None,
                 **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Bucket = Bucket
        self.Key = Key
        self.AWSAccessKeyId = AWSAccessKeyId
        # A string Timestamp is parsed eagerly; otherwise it is stored as-is.
        # NOTE(review): this strptime format rejects fractional seconds and
        # timezone suffixes -- confirm callers never pass those.
        if isinstance(Timestamp, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
        self.Credential = Credential
    def factory(*args_, **kwargs_):
        """Instantiate this class or a registered subclass override."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, DeleteObject)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if DeleteObject.subclass:
            return DeleteObject.subclass(*args_, **kwargs_)
        else:
            return DeleteObject(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- Generated accessor methods (generateDS get_/set_ convention). ---
    def get_Bucket(self):
        return self.Bucket
    def set_Bucket(self, Bucket):
        self.Bucket = Bucket
    def get_Key(self):
        return self.Key
    def set_Key(self, Key):
        self.Key = Key
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def get_Credential(self):
        return self.Credential
    def set_Credential(self, Credential):
        self.Credential = Credential
    def hasContent_(self):
        """Return True when any child element is set."""
        # Simplified from the original if/else returning literal True/False.
        return bool(
            self.Bucket is not None or
            self.Key is not None or
            self.AWSAccessKeyId is not None or
            self.Timestamp is not None or
            self.Signature is not None or
            self.Credential is not None
        )
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='DeleteObject',
               pretty_print=True):
        """Serialize this object as an XML element written to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('DeleteObject')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='DeleteObject')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='DeleteObject', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='DeleteObject'):
        # No XML attributes are defined for this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='DeleteObject',
                       fromsubclass_=False, pretty_print=True):
        """Write each set child element in schema order."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Bucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBucket>%s</%sBucket>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
                namespaceprefix_, eol_))
        if self.Key is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sKey>%s</%sKey>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Key), input_name='Key')),
                namespaceprefix_, eol_))
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
        if self.Credential is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCredential>%s</%sCredential>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Credential), input_name='Credential')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate this object from the element-tree *node* and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes are defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element into the matching member by tag name."""
        if nodeName_ == 'Bucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Bucket')
            value_ = self.gds_validate_string(value_, node, 'Bucket')
            self.Bucket = value_
        elif nodeName_ == 'Key':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Key')
            value_ = self.gds_validate_string(value_, node, 'Key')
            self.Key = value_
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
        elif nodeName_ == 'Credential':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Credential')
            value_ = self.gds_validate_string(value_, node, 'Credential')
            self.Credential = value_
# end class DeleteObject
class DeleteObjectResponse(GeneratedsSuper):
    """Wrapper element for the S3 DeleteObject response.

    Holds a single child member, ``DeleteObjectResponse``, which is built
    as a Status object when parsed (see buildChildren).
    """
    subclass = None
    superclass = None
    def __init__(self, DeleteObjectResponse_member=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.DeleteObjectResponse = DeleteObjectResponse_member
    def factory(*args_, **kwargs_):
        """Instantiate this class or a registered subclass override."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, DeleteObjectResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if DeleteObjectResponse.subclass:
            return DeleteObjectResponse.subclass(*args_, **kwargs_)
        else:
            return DeleteObjectResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_DeleteObjectResponse(self):
        return self.DeleteObjectResponse
    def set_DeleteObjectResponse(self, DeleteObjectResponse):
        self.DeleteObjectResponse = DeleteObjectResponse
    def hasContent_(self):
        """Return True when the single child member is set."""
        # Simplified from the original if/else returning literal True/False.
        return self.DeleteObjectResponse is not None
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='DeleteObjectResponse',
               pretty_print=True):
        """Serialize this object as an XML element written to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('DeleteObjectResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='DeleteObjectResponse')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='DeleteObjectResponse',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='DeleteObjectResponse'):
        # No XML attributes are defined for this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='DeleteObjectResponse', fromsubclass_=False, pretty_print=True):
        """Delegate serialization of the single child member, if set."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.DeleteObjectResponse is not None:
            self.DeleteObjectResponse.export(outfile, level, namespaceprefix_, namespacedef_='',
                                             name_='DeleteObjectResponse', pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from the element-tree *node* and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes are defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse the single expected child element (a Status)."""
        if nodeName_ == 'DeleteObjectResponse':
            obj_ = Status.factory(parent_object_=self)
            obj_.build(child_)
            self.DeleteObjectResponse = obj_
            obj_.original_tagname_ = 'DeleteObjectResponse'
# end class DeleteObjectResponse
class ListBucket(GeneratedsSuper):
    """Request element for the S3 ListBucket SOAP operation.

    Members: Bucket plus the listing controls Prefix, Marker, MaxKeys
    (integer), Delimiter, and the request-signing fields AWSAccessKeyId,
    Timestamp, Signature, Credential.
    """
    subclass = None
    superclass = None
    def __init__(self, Bucket=None, Prefix=None, Marker=None, MaxKeys=None, Delimiter=None, AWSAccessKeyId=None,
                 Timestamp=None, Signature=None, Credential=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Bucket = Bucket
        self.Prefix = Prefix
        self.Marker = Marker
        self.MaxKeys = MaxKeys
        self.Delimiter = Delimiter
        self.AWSAccessKeyId = AWSAccessKeyId
        # A string Timestamp is parsed eagerly; otherwise it is stored as-is.
        # NOTE(review): this strptime format rejects fractional seconds and
        # timezone suffixes -- confirm callers never pass those.
        if isinstance(Timestamp, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
        self.Credential = Credential
    def factory(*args_, **kwargs_):
        """Instantiate this class or a registered subclass override."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ListBucket)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ListBucket.subclass:
            return ListBucket.subclass(*args_, **kwargs_)
        else:
            return ListBucket(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- Generated accessor methods (generateDS get_/set_ convention). ---
    def get_Bucket(self):
        return self.Bucket
    def set_Bucket(self, Bucket):
        self.Bucket = Bucket
    def get_Prefix(self):
        return self.Prefix
    def set_Prefix(self, Prefix):
        self.Prefix = Prefix
    def get_Marker(self):
        return self.Marker
    def set_Marker(self, Marker):
        self.Marker = Marker
    def get_MaxKeys(self):
        return self.MaxKeys
    def set_MaxKeys(self, MaxKeys):
        self.MaxKeys = MaxKeys
    def get_Delimiter(self):
        return self.Delimiter
    def set_Delimiter(self, Delimiter):
        self.Delimiter = Delimiter
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def get_Credential(self):
        return self.Credential
    def set_Credential(self, Credential):
        self.Credential = Credential
    def hasContent_(self):
        """Return True when any child element is set."""
        # Simplified from the original if/else returning literal True/False.
        return bool(
            self.Bucket is not None or
            self.Prefix is not None or
            self.Marker is not None or
            self.MaxKeys is not None or
            self.Delimiter is not None or
            self.AWSAccessKeyId is not None or
            self.Timestamp is not None or
            self.Signature is not None or
            self.Credential is not None
        )
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListBucket',
               pretty_print=True):
        """Serialize this object as an XML element written to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ListBucket')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ListBucket')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='ListBucket', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ListBucket'):
        # No XML attributes are defined for this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListBucket',
                       fromsubclass_=False, pretty_print=True):
        """Write each set child element in schema order."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Bucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sBucket>%s</%sBucket>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
                namespaceprefix_, eol_))
        if self.Prefix is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sPrefix>%s</%sPrefix>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Prefix), input_name='Prefix')),
                namespaceprefix_, eol_))
        if self.Marker is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sMarker>%s</%sMarker>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Marker), input_name='Marker')),
                namespaceprefix_, eol_))
        if self.MaxKeys is not None:
            # MaxKeys is the only integer-valued child.
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sMaxKeys>%s</%sMaxKeys>%s' % (
                namespaceprefix_, self.gds_format_integer(self.MaxKeys, input_name='MaxKeys'), namespaceprefix_, eol_))
        if self.Delimiter is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sDelimiter>%s</%sDelimiter>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Delimiter), input_name='Delimiter')), namespaceprefix_, eol_))
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
        if self.Credential is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCredential>%s</%sCredential>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Credential), input_name='Credential')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate this object from the element-tree *node* and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes are defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element into the matching member by tag name."""
        if nodeName_ == 'Bucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Bucket')
            value_ = self.gds_validate_string(value_, node, 'Bucket')
            self.Bucket = value_
        elif nodeName_ == 'Prefix':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Prefix')
            value_ = self.gds_validate_string(value_, node, 'Prefix')
            self.Prefix = value_
        elif nodeName_ == 'Marker':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Marker')
            value_ = self.gds_validate_string(value_, node, 'Marker')
            self.Marker = value_
        elif nodeName_ == 'MaxKeys' and child_.text:
            sval_ = child_.text
            ival_ = self.gds_parse_integer(sval_, node, 'MaxKeys')
            ival_ = self.gds_validate_integer(ival_, node, 'MaxKeys')
            self.MaxKeys = ival_
        elif nodeName_ == 'Delimiter':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Delimiter')
            value_ = self.gds_validate_string(value_, node, 'Delimiter')
            self.Delimiter = value_
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
        elif nodeName_ == 'Credential':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Credential')
            value_ = self.gds_validate_string(value_, node, 'Credential')
            self.Credential = value_
# end class ListBucket
class ListBucketResponse(GeneratedsSuper):
    """Wrapper element for the S3 ListBucket response.

    Holds a single child member, ``ListBucketResponse``, which is built
    as a ListBucketResult when parsed (see buildChildren).
    """
    subclass = None
    superclass = None
    def __init__(self, ListBucketResponse_member=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ListBucketResponse = ListBucketResponse_member
    def factory(*args_, **kwargs_):
        """Instantiate this class or a registered subclass override."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ListBucketResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ListBucketResponse.subclass:
            return ListBucketResponse.subclass(*args_, **kwargs_)
        else:
            return ListBucketResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ListBucketResponse(self):
        return self.ListBucketResponse
    def set_ListBucketResponse(self, ListBucketResponse):
        self.ListBucketResponse = ListBucketResponse
    def hasContent_(self):
        """Return True when the single child member is set."""
        # Simplified from the original if/else returning literal True/False.
        return self.ListBucketResponse is not None
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListBucketResponse',
               pretty_print=True):
        """Serialize this object as an XML element written to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ListBucketResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ListBucketResponse')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='ListBucketResponse',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ListBucketResponse'):
        # No XML attributes are defined for this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListBucketResponse',
                       fromsubclass_=False, pretty_print=True):
        """Delegate serialization of the single child member, if set."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.ListBucketResponse is not None:
            self.ListBucketResponse.export(outfile, level, namespaceprefix_, namespacedef_='',
                                           name_='ListBucketResponse', pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from the element-tree *node* and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes are defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse the single expected child element (a ListBucketResult)."""
        if nodeName_ == 'ListBucketResponse':
            obj_ = ListBucketResult.factory(parent_object_=self)
            obj_.build(child_)
            self.ListBucketResponse = obj_
            obj_.original_tagname_ = 'ListBucketResponse'
# end class ListBucketResponse
class ListVersionsResponse(GeneratedsSuper):
    """Wrapper element for the S3 ListVersions response.

    Holds a single child member, ``ListVersionsResponse``, which is built
    as a ListVersionsResult when parsed (see buildChildren).
    """
    subclass = None
    superclass = None
    def __init__(self, ListVersionsResponse_member=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ListVersionsResponse = ListVersionsResponse_member
    def factory(*args_, **kwargs_):
        """Instantiate this class or a registered subclass override."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ListVersionsResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ListVersionsResponse.subclass:
            return ListVersionsResponse.subclass(*args_, **kwargs_)
        else:
            return ListVersionsResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ListVersionsResponse(self):
        return self.ListVersionsResponse
    def set_ListVersionsResponse(self, ListVersionsResponse):
        self.ListVersionsResponse = ListVersionsResponse
    def hasContent_(self):
        """Return True when the single child member is set."""
        # Simplified from the original if/else returning literal True/False.
        return self.ListVersionsResponse is not None
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListVersionsResponse',
               pretty_print=True):
        """Serialize this object as an XML element written to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ListVersionsResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ListVersionsResponse')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='ListVersionsResponse',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ListVersionsResponse'):
        # No XML attributes are defined for this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='ListVersionsResponse', fromsubclass_=False, pretty_print=True):
        """Delegate serialization of the single child member, if set."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.ListVersionsResponse is not None:
            self.ListVersionsResponse.export(outfile, level, namespaceprefix_, namespacedef_='',
                                             name_='ListVersionsResponse', pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from the element-tree *node* and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes are defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse the single expected child element (a ListVersionsResult)."""
        if nodeName_ == 'ListVersionsResponse':
            obj_ = ListVersionsResult.factory(parent_object_=self)
            obj_.build(child_)
            self.ListVersionsResponse = obj_
            obj_.original_tagname_ = 'ListVersionsResponse'
# end class ListVersionsResponse
class ListEntry(GeneratedsSuper):
    """Generated binding for one <Contents> entry of a ListBucketResult.

    Describes a single stored object: Key, LastModified, ETag, Size,
    Owner (a CanonicalUser) and StorageClass. Generated by generateDS.
    """
    subclass = None
    superclass = None
    def __init__(self, Key=None, LastModified=None, ETag=None, Size=None, Owner=None, StorageClass=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Key = Key
        # LastModified may be passed as a datetime or as a string.
        # NOTE(review): the format has no fractional seconds or timezone, so a
        # value such as '2006-03-01T12:00:00.000Z' would raise ValueError here
        # — confirm callers normalize the string before constructing directly.
        if isinstance(LastModified, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(LastModified, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = LastModified
        self.LastModified = initvalue_
        self.ETag = ETag
        self.Size = Size
        self.Owner = Owner
        self.StorageClass = StorageClass
        self.validate_StorageClass(self.StorageClass)
    def factory(*args_, **kwargs_):
        # Prefer a registered subclass before instantiating this class.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ListEntry)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ListEntry.subclass:
            return ListEntry.subclass(*args_, **kwargs_)
        else:
            return ListEntry(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- generated accessors ---
    def get_Key(self):
        return self.Key
    def set_Key(self, Key):
        self.Key = Key
    def get_LastModified(self):
        return self.LastModified
    def set_LastModified(self, LastModified):
        self.LastModified = LastModified
    def get_ETag(self):
        return self.ETag
    def set_ETag(self, ETag):
        self.ETag = ETag
    def get_Size(self):
        return self.Size
    def set_Size(self, Size):
        self.Size = Size
    def get_Owner(self):
        return self.Owner
    def set_Owner(self, Owner):
        self.Owner = Owner
    def get_StorageClass(self):
        return self.StorageClass
    def set_StorageClass(self, StorageClass):
        self.StorageClass = StorageClass
    def validate_StorageClass(self, value):
        # Validate type StorageClass, a restriction on xsd:string.
        # Emits a warning (does not raise) when the value is outside the
        # schema enumeration.
        # NOTE(review): under Python 3, value.encode("utf-8") makes the
        # warning text read b'...'; cosmetic only.
        if value is not None and Validate_simpletypes_:
            value = str(value)
            enumerations = ['STANDARD', 'REDUCED_REDUNDANCY', 'GLACIER', 'UNKNOWN']
            enumeration_respectee = False
            for enum in enumerations:
                if value == enum:
                    enumeration_respectee = True
                    break
            if not enumeration_respectee:
                warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on StorageClass' % {
                    "value": value.encode("utf-8")})
    def hasContent_(self):
        # True when any child element is set.
        if (
            self.Key is not None or
            self.LastModified is not None or
            self.ETag is not None or
            self.Size is not None or
            self.Owner is not None or
            self.StorageClass is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListEntry',
               pretty_print=True):
        """Write this element (open tag, attributes, children, close tag) as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ListEntry')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ListEntry')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='ListEntry', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ListEntry'):
        # This element declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListEntry',
                       fromsubclass_=False, pretty_print=True):
        """Serialize each non-None child element in schema order."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Key is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sKey>%s</%sKey>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Key), input_name='Key')),
                namespaceprefix_, eol_))
        if self.LastModified is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sLastModified>%s</%sLastModified>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.LastModified, input_name='LastModified'), namespaceprefix_,
                eol_))
        if self.ETag is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sETag>%s</%sETag>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.ETag), input_name='ETag')),
                namespaceprefix_, eol_))
        if self.Size is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSize>%s</%sSize>%s' % (
                namespaceprefix_, self.gds_format_integer(self.Size, input_name='Size'), namespaceprefix_, eol_))
        if self.Owner is not None:
            # Owner is a nested complex type; delegate to its own export.
            self.Owner.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Owner',
                              pretty_print=pretty_print)
        if self.StorageClass is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sStorageClass>%s</%sStorageClass>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.StorageClass), input_name='StorageClass')), namespaceprefix_,
                eol_))
    def build(self, node):
        """Populate this object from an ElementTree node; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element into the matching attribute, with type
        conversion and validation per the schema."""
        if nodeName_ == 'Key':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Key')
            value_ = self.gds_validate_string(value_, node, 'Key')
            self.Key = value_
        elif nodeName_ == 'LastModified':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.LastModified = dval_
        elif nodeName_ == 'ETag':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'ETag')
            value_ = self.gds_validate_string(value_, node, 'ETag')
            self.ETag = value_
        elif nodeName_ == 'Size' and child_.text:
            sval_ = child_.text
            ival_ = self.gds_parse_integer(sval_, node, 'Size')
            ival_ = self.gds_validate_integer(ival_, node, 'Size')
            self.Size = ival_
        elif nodeName_ == 'Owner':
            obj_ = CanonicalUser.factory(parent_object_=self)
            obj_.build(child_)
            self.Owner = obj_
            obj_.original_tagname_ = 'Owner'
        elif nodeName_ == 'StorageClass':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'StorageClass')
            value_ = self.gds_validate_string(value_, node, 'StorageClass')
            self.StorageClass = value_
            # validate type StorageClass
            self.validate_StorageClass(self.StorageClass)
# end class ListEntry
class VersionEntry(GeneratedsSuper):
    """Generated binding for one <Version> entry of a ListVersionsResult.

    Like ListEntry plus versioning fields: VersionId and IsLatest.
    Generated by generateDS.
    """
    subclass = None
    superclass = None
    def __init__(self, Key=None, VersionId=None, IsLatest=None, LastModified=None, ETag=None, Size=None, Owner=None,
                 StorageClass=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Key = Key
        self.VersionId = VersionId
        self.IsLatest = IsLatest
        # LastModified may be passed as a datetime or as a string.
        # NOTE(review): format has no fractional seconds / timezone — a value
        # like '2006-03-01T12:00:00.000Z' would raise ValueError here.
        if isinstance(LastModified, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(LastModified, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = LastModified
        self.LastModified = initvalue_
        self.ETag = ETag
        self.Size = Size
        self.Owner = Owner
        self.StorageClass = StorageClass
        self.validate_StorageClass(self.StorageClass)
    def factory(*args_, **kwargs_):
        # Prefer a registered subclass before instantiating this class.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, VersionEntry)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if VersionEntry.subclass:
            return VersionEntry.subclass(*args_, **kwargs_)
        else:
            return VersionEntry(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- generated accessors ---
    def get_Key(self):
        return self.Key
    def set_Key(self, Key):
        self.Key = Key
    def get_VersionId(self):
        return self.VersionId
    def set_VersionId(self, VersionId):
        self.VersionId = VersionId
    def get_IsLatest(self):
        return self.IsLatest
    def set_IsLatest(self, IsLatest):
        self.IsLatest = IsLatest
    def get_LastModified(self):
        return self.LastModified
    def set_LastModified(self, LastModified):
        self.LastModified = LastModified
    def get_ETag(self):
        return self.ETag
    def set_ETag(self, ETag):
        self.ETag = ETag
    def get_Size(self):
        return self.Size
    def set_Size(self, Size):
        self.Size = Size
    def get_Owner(self):
        return self.Owner
    def set_Owner(self, Owner):
        self.Owner = Owner
    def get_StorageClass(self):
        return self.StorageClass
    def set_StorageClass(self, StorageClass):
        self.StorageClass = StorageClass
    def validate_StorageClass(self, value):
        # Validate type StorageClass, a restriction on xsd:string.
        # Warns (does not raise) when outside the schema enumeration.
        if value is not None and Validate_simpletypes_:
            value = str(value)
            enumerations = ['STANDARD', 'REDUCED_REDUNDANCY', 'GLACIER', 'UNKNOWN']
            enumeration_respectee = False
            for enum in enumerations:
                if value == enum:
                    enumeration_respectee = True
                    break
            if not enumeration_respectee:
                warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on StorageClass' % {
                    "value": value.encode("utf-8")})
    def hasContent_(self):
        # True when any child element is set.
        if (
            self.Key is not None or
            self.VersionId is not None or
            self.IsLatest is not None or
            self.LastModified is not None or
            self.ETag is not None or
            self.Size is not None or
            self.Owner is not None or
            self.StorageClass is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='VersionEntry',
               pretty_print=True):
        """Write this element (open tag, attributes, children, close tag) as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('VersionEntry')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='VersionEntry')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='VersionEntry', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='VersionEntry'):
        # This element declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='VersionEntry',
                       fromsubclass_=False, pretty_print=True):
        """Serialize each non-None child element in schema order."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Key is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sKey>%s</%sKey>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Key), input_name='Key')),
                namespaceprefix_, eol_))
        if self.VersionId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sVersionId>%s</%sVersionId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.VersionId), input_name='VersionId')), namespaceprefix_, eol_))
        if self.IsLatest is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sIsLatest>%s</%sIsLatest>%s' % (
                namespaceprefix_, self.gds_format_boolean(self.IsLatest, input_name='IsLatest'), namespaceprefix_, eol_))
        if self.LastModified is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sLastModified>%s</%sLastModified>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.LastModified, input_name='LastModified'), namespaceprefix_,
                eol_))
        if self.ETag is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sETag>%s</%sETag>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.ETag), input_name='ETag')),
                namespaceprefix_, eol_))
        if self.Size is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSize>%s</%sSize>%s' % (
                namespaceprefix_, self.gds_format_integer(self.Size, input_name='Size'), namespaceprefix_, eol_))
        if self.Owner is not None:
            # Owner is a nested complex type; delegate to its own export.
            self.Owner.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Owner',
                              pretty_print=pretty_print)
        if self.StorageClass is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sStorageClass>%s</%sStorageClass>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.StorageClass), input_name='StorageClass')), namespaceprefix_,
                eol_))
    def build(self, node):
        """Populate this object from an ElementTree node; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element into the matching attribute, with type
        conversion and validation per the schema."""
        if nodeName_ == 'Key':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Key')
            value_ = self.gds_validate_string(value_, node, 'Key')
            self.Key = value_
        elif nodeName_ == 'VersionId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'VersionId')
            value_ = self.gds_validate_string(value_, node, 'VersionId')
            self.VersionId = value_
        elif nodeName_ == 'IsLatest':
            sval_ = child_.text
            ival_ = self.gds_parse_boolean(sval_, node, 'IsLatest')
            ival_ = self.gds_validate_boolean(ival_, node, 'IsLatest')
            self.IsLatest = ival_
        elif nodeName_ == 'LastModified':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.LastModified = dval_
        elif nodeName_ == 'ETag':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'ETag')
            value_ = self.gds_validate_string(value_, node, 'ETag')
            self.ETag = value_
        elif nodeName_ == 'Size' and child_.text:
            sval_ = child_.text
            ival_ = self.gds_parse_integer(sval_, node, 'Size')
            ival_ = self.gds_validate_integer(ival_, node, 'Size')
            self.Size = ival_
        elif nodeName_ == 'Owner':
            obj_ = CanonicalUser.factory(parent_object_=self)
            obj_.build(child_)
            self.Owner = obj_
            obj_.original_tagname_ = 'Owner'
        elif nodeName_ == 'StorageClass':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'StorageClass')
            value_ = self.gds_validate_string(value_, node, 'StorageClass')
            self.StorageClass = value_
            # validate type StorageClass
            self.validate_StorageClass(self.StorageClass)
# end class VersionEntry
class DeleteMarkerEntry(GeneratedsSuper):
    """Generated binding for one <DeleteMarker> entry of a ListVersionsResult.

    A delete marker carries Key, VersionId, IsLatest, LastModified and Owner
    but no ETag/Size/StorageClass. Generated by generateDS.
    """
    subclass = None
    superclass = None
    def __init__(self, Key=None, VersionId=None, IsLatest=None, LastModified=None, Owner=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Key = Key
        self.VersionId = VersionId
        self.IsLatest = IsLatest
        # LastModified may be passed as a datetime or as a string.
        # NOTE(review): format has no fractional seconds / timezone — a value
        # like '2006-03-01T12:00:00.000Z' would raise ValueError here.
        if isinstance(LastModified, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(LastModified, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = LastModified
        self.LastModified = initvalue_
        self.Owner = Owner
    def factory(*args_, **kwargs_):
        # Prefer a registered subclass before instantiating this class.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, DeleteMarkerEntry)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if DeleteMarkerEntry.subclass:
            return DeleteMarkerEntry.subclass(*args_, **kwargs_)
        else:
            return DeleteMarkerEntry(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- generated accessors ---
    def get_Key(self):
        return self.Key
    def set_Key(self, Key):
        self.Key = Key
    def get_VersionId(self):
        return self.VersionId
    def set_VersionId(self, VersionId):
        self.VersionId = VersionId
    def get_IsLatest(self):
        return self.IsLatest
    def set_IsLatest(self, IsLatest):
        self.IsLatest = IsLatest
    def get_LastModified(self):
        return self.LastModified
    def set_LastModified(self, LastModified):
        self.LastModified = LastModified
    def get_Owner(self):
        return self.Owner
    def set_Owner(self, Owner):
        self.Owner = Owner
    def hasContent_(self):
        # True when any child element is set.
        if (
            self.Key is not None or
            self.VersionId is not None or
            self.IsLatest is not None or
            self.LastModified is not None or
            self.Owner is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='DeleteMarkerEntry',
               pretty_print=True):
        """Write this element (open tag, attributes, children, close tag) as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('DeleteMarkerEntry')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='DeleteMarkerEntry')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='DeleteMarkerEntry',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='DeleteMarkerEntry'):
        # This element declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='DeleteMarkerEntry',
                       fromsubclass_=False, pretty_print=True):
        """Serialize each non-None child element in schema order."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Key is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sKey>%s</%sKey>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Key), input_name='Key')),
                namespaceprefix_, eol_))
        if self.VersionId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sVersionId>%s</%sVersionId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.VersionId), input_name='VersionId')), namespaceprefix_, eol_))
        if self.IsLatest is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sIsLatest>%s</%sIsLatest>%s' % (
                namespaceprefix_, self.gds_format_boolean(self.IsLatest, input_name='IsLatest'), namespaceprefix_, eol_))
        if self.LastModified is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sLastModified>%s</%sLastModified>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.LastModified, input_name='LastModified'), namespaceprefix_,
                eol_))
        if self.Owner is not None:
            # Owner is a nested complex type; delegate to its own export.
            self.Owner.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Owner',
                              pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an ElementTree node; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element into the matching attribute."""
        if nodeName_ == 'Key':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Key')
            value_ = self.gds_validate_string(value_, node, 'Key')
            self.Key = value_
        elif nodeName_ == 'VersionId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'VersionId')
            value_ = self.gds_validate_string(value_, node, 'VersionId')
            self.VersionId = value_
        elif nodeName_ == 'IsLatest':
            sval_ = child_.text
            ival_ = self.gds_parse_boolean(sval_, node, 'IsLatest')
            ival_ = self.gds_validate_boolean(ival_, node, 'IsLatest')
            self.IsLatest = ival_
        elif nodeName_ == 'LastModified':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.LastModified = dval_
        elif nodeName_ == 'Owner':
            obj_ = CanonicalUser.factory(parent_object_=self)
            obj_.build(child_)
            self.Owner = obj_
            obj_.original_tagname_ = 'Owner'
# end class DeleteMarkerEntry
class PrefixEntry(GeneratedsSuper):
    """Binding for a single <Prefix> element (a CommonPrefixes entry).

    Holds exactly one optional string field, Prefix.
    """
    subclass = None
    superclass = None
    def __init__(self, Prefix=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Prefix = Prefix
    def factory(*args_, **kwargs_):
        # A subclass registered in CurrentSubclassModule_ wins, then the
        # class-level `subclass` hook, then PrefixEntry itself.
        if CurrentSubclassModule_ is not None:
            override_cls = getSubclassFromModule_(
                CurrentSubclassModule_, PrefixEntry)
            if override_cls is not None:
                return override_cls(*args_, **kwargs_)
        target_cls = PrefixEntry.subclass if PrefixEntry.subclass else PrefixEntry
        return target_cls(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Prefix(self):
        return self.Prefix
    def set_Prefix(self, Prefix):
        self.Prefix = Prefix
    def hasContent_(self):
        # Content exists iff the single Prefix field is set.
        return self.Prefix is not None
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='PrefixEntry',
               pretty_print=True):
        """Write this element (open tag, attributes, children, close tag) as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('PrefixEntry')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PrefixEntry')
        if not self.hasContent_():
            # Empty element: self-closing tag.
            outfile.write('/>%s' % (eol_,))
            return
        outfile.write('>%s' % (eol_,))
        self.exportChildren(outfile, level + 1, '', namespacedef_, name_='PrefixEntry', pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PrefixEntry'):
        # This element declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='PrefixEntry',
                       fromsubclass_=False, pretty_print=True):
        """Serialize the Prefix child element, if present."""
        eol_ = '\n' if pretty_print else ''
        if self.Prefix is None:
            return
        showIndent(outfile, level, pretty_print)
        outfile.write('<%sPrefix>%s</%sPrefix>%s' % (
            namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Prefix), input_name='Prefix')),
            namespaceprefix_, eol_))
    def build(self, node):
        """Populate this object from an ElementTree node; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for subnode in node:
            tag_name = Tag_pattern_.match(subnode.tag).groups()[-1]
            self.buildChildren(subnode, node, tag_name)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse the Prefix child element into self.Prefix."""
        if nodeName_ != 'Prefix':
            return
        text_ = child_.text
        text_ = self.gds_parse_string(text_, node, 'Prefix')
        text_ = self.gds_validate_string(text_, node, 'Prefix')
        self.Prefix = text_
# end class PrefixEntry
class ListBucketResult(GeneratedsSuper):
    """Generated binding for the <ListBucketResult> element (bucket listing).

    Carries the request echo fields (Name, Prefix, Marker, MaxKeys,
    Delimiter), paging state (NextMarker, IsTruncated) and the repeated
    children Metadata, Contents (ListEntry) and CommonPrefixes (PrefixEntry).
    Generated by generateDS.
    """
    subclass = None
    superclass = None
    def __init__(self, Metadata=None, Name=None, Prefix=None, Marker=None, NextMarker=None, MaxKeys=None,
                 Delimiter=None, IsTruncated=None, Contents=None, CommonPrefixes=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        # Repeated children default to fresh lists (never a shared default).
        if Metadata is None:
            self.Metadata = []
        else:
            self.Metadata = Metadata
        self.Name = Name
        self.Prefix = Prefix
        self.Marker = Marker
        self.NextMarker = NextMarker
        self.MaxKeys = MaxKeys
        self.Delimiter = Delimiter
        self.IsTruncated = IsTruncated
        if Contents is None:
            self.Contents = []
        else:
            self.Contents = Contents
        if CommonPrefixes is None:
            self.CommonPrefixes = []
        else:
            self.CommonPrefixes = CommonPrefixes
    def factory(*args_, **kwargs_):
        # Prefer a registered subclass before instantiating this class.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ListBucketResult)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ListBucketResult.subclass:
            return ListBucketResult.subclass(*args_, **kwargs_)
        else:
            return ListBucketResult(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- generated accessors (list fields also get add/insert/replace) ---
    def get_Metadata(self):
        return self.Metadata
    def set_Metadata(self, Metadata):
        self.Metadata = Metadata
    def add_Metadata(self, value):
        self.Metadata.append(value)
    def insert_Metadata_at(self, index, value):
        self.Metadata.insert(index, value)
    def replace_Metadata_at(self, index, value):
        self.Metadata[index] = value
    def get_Name(self):
        return self.Name
    def set_Name(self, Name):
        self.Name = Name
    def get_Prefix(self):
        return self.Prefix
    def set_Prefix(self, Prefix):
        self.Prefix = Prefix
    def get_Marker(self):
        return self.Marker
    def set_Marker(self, Marker):
        self.Marker = Marker
    def get_NextMarker(self):
        return self.NextMarker
    def set_NextMarker(self, NextMarker):
        self.NextMarker = NextMarker
    def get_MaxKeys(self):
        return self.MaxKeys
    def set_MaxKeys(self, MaxKeys):
        self.MaxKeys = MaxKeys
    def get_Delimiter(self):
        return self.Delimiter
    def set_Delimiter(self, Delimiter):
        self.Delimiter = Delimiter
    def get_IsTruncated(self):
        return self.IsTruncated
    def set_IsTruncated(self, IsTruncated):
        self.IsTruncated = IsTruncated
    def get_Contents(self):
        return self.Contents
    def set_Contents(self, Contents):
        self.Contents = Contents
    def add_Contents(self, value):
        self.Contents.append(value)
    def insert_Contents_at(self, index, value):
        self.Contents.insert(index, value)
    def replace_Contents_at(self, index, value):
        self.Contents[index] = value
    def get_CommonPrefixes(self):
        return self.CommonPrefixes
    def set_CommonPrefixes(self, CommonPrefixes):
        self.CommonPrefixes = CommonPrefixes
    def add_CommonPrefixes(self, value):
        self.CommonPrefixes.append(value)
    def insert_CommonPrefixes_at(self, index, value):
        self.CommonPrefixes.insert(index, value)
    def replace_CommonPrefixes_at(self, index, value):
        self.CommonPrefixes[index] = value
    def hasContent_(self):
        # True when any scalar field is set or any list field is non-empty.
        if (
            self.Metadata or
            self.Name is not None or
            self.Prefix is not None or
            self.Marker is not None or
            self.NextMarker is not None or
            self.MaxKeys is not None or
            self.Delimiter is not None or
            self.IsTruncated is not None or
            self.Contents or
            self.CommonPrefixes
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListBucketResult',
               pretty_print=True):
        """Write this element (open tag, attributes, children, close tag) as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ListBucketResult')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ListBucketResult')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='ListBucketResult',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ListBucketResult'):
        # This element declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListBucketResult',
                       fromsubclass_=False, pretty_print=True):
        """Serialize children in schema order; list fields emit one element each."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Metadata_ in self.Metadata:
            Metadata_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Metadata',
                             pretty_print=pretty_print)
        if self.Name is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sName>%s</%sName>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Name), input_name='Name')),
                namespaceprefix_, eol_))
        if self.Prefix is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sPrefix>%s</%sPrefix>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Prefix), input_name='Prefix')),
                namespaceprefix_, eol_))
        if self.Marker is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sMarker>%s</%sMarker>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Marker), input_name='Marker')),
                namespaceprefix_, eol_))
        if self.NextMarker is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sNextMarker>%s</%sNextMarker>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.NextMarker), input_name='NextMarker')), namespaceprefix_, eol_))
        if self.MaxKeys is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sMaxKeys>%s</%sMaxKeys>%s' % (
                namespaceprefix_, self.gds_format_integer(self.MaxKeys, input_name='MaxKeys'), namespaceprefix_, eol_))
        if self.Delimiter is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sDelimiter>%s</%sDelimiter>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Delimiter), input_name='Delimiter')), namespaceprefix_, eol_))
        if self.IsTruncated is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sIsTruncated>%s</%sIsTruncated>%s' % (
                namespaceprefix_, self.gds_format_boolean(self.IsTruncated, input_name='IsTruncated'), namespaceprefix_,
                eol_))
        for Contents_ in self.Contents:
            Contents_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Contents',
                             pretty_print=pretty_print)
        for CommonPrefixes_ in self.CommonPrefixes:
            CommonPrefixes_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='CommonPrefixes',
                                   pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an ElementTree node; return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element; repeated elements append to list fields."""
        if nodeName_ == 'Metadata':
            obj_ = MetadataEntry.factory(parent_object_=self)
            obj_.build(child_)
            self.Metadata.append(obj_)
            obj_.original_tagname_ = 'Metadata'
        elif nodeName_ == 'Name':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Name')
            value_ = self.gds_validate_string(value_, node, 'Name')
            self.Name = value_
        elif nodeName_ == 'Prefix':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Prefix')
            value_ = self.gds_validate_string(value_, node, 'Prefix')
            self.Prefix = value_
        elif nodeName_ == 'Marker':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Marker')
            value_ = self.gds_validate_string(value_, node, 'Marker')
            self.Marker = value_
        elif nodeName_ == 'NextMarker':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'NextMarker')
            value_ = self.gds_validate_string(value_, node, 'NextMarker')
            self.NextMarker = value_
        elif nodeName_ == 'MaxKeys' and child_.text:
            sval_ = child_.text
            ival_ = self.gds_parse_integer(sval_, node, 'MaxKeys')
            ival_ = self.gds_validate_integer(ival_, node, 'MaxKeys')
            self.MaxKeys = ival_
        elif nodeName_ == 'Delimiter':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Delimiter')
            value_ = self.gds_validate_string(value_, node, 'Delimiter')
            self.Delimiter = value_
        elif nodeName_ == 'IsTruncated':
            sval_ = child_.text
            ival_ = self.gds_parse_boolean(sval_, node, 'IsTruncated')
            ival_ = self.gds_validate_boolean(ival_, node, 'IsTruncated')
            self.IsTruncated = ival_
        elif nodeName_ == 'Contents':
            obj_ = ListEntry.factory(parent_object_=self)
            obj_.build(child_)
            self.Contents.append(obj_)
            obj_.original_tagname_ = 'Contents'
        elif nodeName_ == 'CommonPrefixes':
            obj_ = PrefixEntry.factory(parent_object_=self)
            obj_.build(child_)
            self.CommonPrefixes.append(obj_)
            obj_.original_tagname_ = 'CommonPrefixes'
# end class ListBucketResult
class ListVersionsResult(GeneratedsSuper):
    """Binding for the S3 ListVersionsResult document (GET Bucket versions).

    Holds the paging fields (KeyMarker/VersionIdMarker and their Next*
    counterparts, MaxKeys, IsTruncated), the filter fields (Prefix,
    Delimiter) and the repeatable Version / DeleteMarker / CommonPrefixes
    entries.  Boilerplate follows the generateDS-style pattern used
    throughout this module: factory(), get/set accessors, XML export and
    build (parse) helpers.
    """
    subclass = None
    superclass = None
    def __init__(self, Metadata=None, Name=None, Prefix=None, KeyMarker=None, VersionIdMarker=None, NextKeyMarker=None,
                 NextVersionIdMarker=None, MaxKeys=None, Delimiter=None, IsTruncated=None, Version=None,
                 DeleteMarker=None, CommonPrefixes=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        # Repeatable children default to fresh lists here (never a shared
        # mutable default argument).
        if Metadata is None:
            self.Metadata = []
        else:
            self.Metadata = Metadata
        self.Name = Name
        self.Prefix = Prefix
        self.KeyMarker = KeyMarker
        self.VersionIdMarker = VersionIdMarker
        self.NextKeyMarker = NextKeyMarker
        self.NextVersionIdMarker = NextVersionIdMarker
        self.MaxKeys = MaxKeys
        self.Delimiter = Delimiter
        self.IsTruncated = IsTruncated
        if Version is None:
            self.Version = []
        else:
            self.Version = Version
        if DeleteMarker is None:
            self.DeleteMarker = []
        else:
            self.DeleteMarker = DeleteMarker
        if CommonPrefixes is None:
            self.CommonPrefixes = []
        else:
            self.CommonPrefixes = CommonPrefixes
    def factory(*args_, **kwargs_):
        """Instantiate ListVersionsResult, honouring any registered subclass."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ListVersionsResult)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ListVersionsResult.subclass:
            return ListVersionsResult.subclass(*args_, **kwargs_)
        else:
            return ListVersionsResult(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- Generated accessors (get_X/set_X, plus add/insert/replace helpers
    # --- for the repeatable list members) ---
    def get_Metadata(self):
        return self.Metadata
    def set_Metadata(self, Metadata):
        self.Metadata = Metadata
    def add_Metadata(self, value):
        self.Metadata.append(value)
    def insert_Metadata_at(self, index, value):
        self.Metadata.insert(index, value)
    def replace_Metadata_at(self, index, value):
        self.Metadata[index] = value
    def get_Name(self):
        return self.Name
    def set_Name(self, Name):
        self.Name = Name
    def get_Prefix(self):
        return self.Prefix
    def set_Prefix(self, Prefix):
        self.Prefix = Prefix
    def get_KeyMarker(self):
        return self.KeyMarker
    def set_KeyMarker(self, KeyMarker):
        self.KeyMarker = KeyMarker
    def get_VersionIdMarker(self):
        return self.VersionIdMarker
    def set_VersionIdMarker(self, VersionIdMarker):
        self.VersionIdMarker = VersionIdMarker
    def get_NextKeyMarker(self):
        return self.NextKeyMarker
    def set_NextKeyMarker(self, NextKeyMarker):
        self.NextKeyMarker = NextKeyMarker
    def get_NextVersionIdMarker(self):
        return self.NextVersionIdMarker
    def set_NextVersionIdMarker(self, NextVersionIdMarker):
        self.NextVersionIdMarker = NextVersionIdMarker
    def get_MaxKeys(self):
        return self.MaxKeys
    def set_MaxKeys(self, MaxKeys):
        self.MaxKeys = MaxKeys
    def get_Delimiter(self):
        return self.Delimiter
    def set_Delimiter(self, Delimiter):
        self.Delimiter = Delimiter
    def get_IsTruncated(self):
        return self.IsTruncated
    def set_IsTruncated(self, IsTruncated):
        self.IsTruncated = IsTruncated
    def get_Version(self):
        return self.Version
    def set_Version(self, Version):
        self.Version = Version
    def add_Version(self, value):
        self.Version.append(value)
    def insert_Version_at(self, index, value):
        self.Version.insert(index, value)
    def replace_Version_at(self, index, value):
        self.Version[index] = value
    def get_DeleteMarker(self):
        return self.DeleteMarker
    def set_DeleteMarker(self, DeleteMarker):
        self.DeleteMarker = DeleteMarker
    def add_DeleteMarker(self, value):
        self.DeleteMarker.append(value)
    def insert_DeleteMarker_at(self, index, value):
        self.DeleteMarker.insert(index, value)
    def replace_DeleteMarker_at(self, index, value):
        self.DeleteMarker[index] = value
    def get_CommonPrefixes(self):
        return self.CommonPrefixes
    def set_CommonPrefixes(self, CommonPrefixes):
        self.CommonPrefixes = CommonPrefixes
    def add_CommonPrefixes(self, value):
        self.CommonPrefixes.append(value)
    def insert_CommonPrefixes_at(self, index, value):
        self.CommonPrefixes.insert(index, value)
    def replace_CommonPrefixes_at(self, index, value):
        self.CommonPrefixes[index] = value
    def hasContent_(self):
        """Return True when at least one child element is populated."""
        if (
            self.Metadata or
            self.Name is not None or
            self.Prefix is not None or
            self.KeyMarker is not None or
            self.VersionIdMarker is not None or
            self.NextKeyMarker is not None or
            self.NextVersionIdMarker is not None or
            self.MaxKeys is not None or
            self.Delimiter is not None or
            self.IsTruncated is not None or
            self.Version or
            self.DeleteMarker or
            self.CommonPrefixes
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListVersionsResult',
               pretty_print=True):
        """Serialize this element (and all children) as XML to outfile."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ListVersionsResult')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Round-trip fidelity: re-emit under the tag the element was
        # originally parsed from, if any.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ListVersionsResult')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='ListVersionsResult',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # Empty element: self-closing form.
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ListVersionsResult'):
        # No XML attributes defined for this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListVersionsResult',
                       fromsubclass_=False, pretty_print=True):
        """Write the child elements in schema order (simple values escaped
        via quote_xml, repeatable members delegated to their own export)."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Metadata_ in self.Metadata:
            Metadata_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Metadata',
                             pretty_print=pretty_print)
        if self.Name is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sName>%s</%sName>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Name), input_name='Name')),
                namespaceprefix_, eol_))
        if self.Prefix is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sPrefix>%s</%sPrefix>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Prefix), input_name='Prefix')),
                namespaceprefix_, eol_))
        if self.KeyMarker is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sKeyMarker>%s</%sKeyMarker>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.KeyMarker), input_name='KeyMarker')), namespaceprefix_, eol_))
        if self.VersionIdMarker is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sVersionIdMarker>%s</%sVersionIdMarker>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.VersionIdMarker), input_name='VersionIdMarker')),
                namespaceprefix_, eol_))
        if self.NextKeyMarker is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sNextKeyMarker>%s</%sNextKeyMarker>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.NextKeyMarker), input_name='NextKeyMarker')), namespaceprefix_,
                eol_))
        if self.NextVersionIdMarker is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sNextVersionIdMarker>%s</%sNextVersionIdMarker>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.NextVersionIdMarker), input_name='NextVersionIdMarker')),
                namespaceprefix_, eol_))
        if self.MaxKeys is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sMaxKeys>%s</%sMaxKeys>%s' % (
                namespaceprefix_, self.gds_format_integer(self.MaxKeys, input_name='MaxKeys'), namespaceprefix_, eol_))
        if self.Delimiter is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sDelimiter>%s</%sDelimiter>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Delimiter), input_name='Delimiter')), namespaceprefix_, eol_))
        if self.IsTruncated is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sIsTruncated>%s</%sIsTruncated>%s' % (
                namespaceprefix_, self.gds_format_boolean(self.IsTruncated, input_name='IsTruncated'), namespaceprefix_,
                eol_))
        for Version_ in self.Version:
            Version_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Version',
                            pretty_print=pretty_print)
        for DeleteMarker_ in self.DeleteMarker:
            DeleteMarker_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='DeleteMarker',
                                 pretty_print=pretty_print)
        for CommonPrefixes_ in self.CommonPrefixes:
            CommonPrefixes_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='CommonPrefixes',
                                   pretty_print=pretty_print)
    def build(self, node):
        """Populate self from an ElementTree/lxml node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Populate one field from a child element, dispatching on its
        local tag name (simple values parsed/validated, repeatable members
        appended).  Unknown tags are silently ignored."""
        if nodeName_ == 'Metadata':
            obj_ = MetadataEntry.factory(parent_object_=self)
            obj_.build(child_)
            self.Metadata.append(obj_)
            obj_.original_tagname_ = 'Metadata'
        elif nodeName_ == 'Name':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Name')
            value_ = self.gds_validate_string(value_, node, 'Name')
            self.Name = value_
        elif nodeName_ == 'Prefix':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Prefix')
            value_ = self.gds_validate_string(value_, node, 'Prefix')
            self.Prefix = value_
        elif nodeName_ == 'KeyMarker':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'KeyMarker')
            value_ = self.gds_validate_string(value_, node, 'KeyMarker')
            self.KeyMarker = value_
        elif nodeName_ == 'VersionIdMarker':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'VersionIdMarker')
            value_ = self.gds_validate_string(value_, node, 'VersionIdMarker')
            self.VersionIdMarker = value_
        elif nodeName_ == 'NextKeyMarker':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'NextKeyMarker')
            value_ = self.gds_validate_string(value_, node, 'NextKeyMarker')
            self.NextKeyMarker = value_
        elif nodeName_ == 'NextVersionIdMarker':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'NextVersionIdMarker')
            value_ = self.gds_validate_string(value_, node, 'NextVersionIdMarker')
            self.NextVersionIdMarker = value_
        # The "and child_.text" guard keeps gds_parse_integer from being
        # handed an empty element.
        elif nodeName_ == 'MaxKeys' and child_.text:
            sval_ = child_.text
            ival_ = self.gds_parse_integer(sval_, node, 'MaxKeys')
            ival_ = self.gds_validate_integer(ival_, node, 'MaxKeys')
            self.MaxKeys = ival_
        elif nodeName_ == 'Delimiter':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Delimiter')
            value_ = self.gds_validate_string(value_, node, 'Delimiter')
            self.Delimiter = value_
        elif nodeName_ == 'IsTruncated':
            sval_ = child_.text
            ival_ = self.gds_parse_boolean(sval_, node, 'IsTruncated')
            ival_ = self.gds_validate_boolean(ival_, node, 'IsTruncated')
            self.IsTruncated = ival_
        elif nodeName_ == 'Version':
            obj_ = VersionEntry.factory(parent_object_=self)
            obj_.build(child_)
            self.Version.append(obj_)
            obj_.original_tagname_ = 'Version'
        elif nodeName_ == 'DeleteMarker':
            obj_ = DeleteMarkerEntry.factory(parent_object_=self)
            obj_.build(child_)
            self.DeleteMarker.append(obj_)
            obj_.original_tagname_ = 'DeleteMarker'
        elif nodeName_ == 'CommonPrefixes':
            obj_ = PrefixEntry.factory(parent_object_=self)
            obj_.build(child_)
            self.CommonPrefixes.append(obj_)
            obj_.original_tagname_ = 'CommonPrefixes'
# end class ListVersionsResult
class ListAllMyBuckets(GeneratedsSuper):
    """Request payload for the S3 'list all my buckets' (GET Service) call.

    Carries the caller's AWSAccessKeyId, the request Timestamp and the
    request Signature.  Follows the generateDS-style binding pattern used
    throughout this module: factory(), get/set accessors, XML export and
    build (parse) helpers.
    """
    subclass = None
    superclass = None
    def __init__(self, AWSAccessKeyId=None, Timestamp=None, Signature=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.AWSAccessKeyId = AWSAccessKeyId
        if isinstance(Timestamp, BaseStrType_):
            # Parse string timestamps with the shared gds_parse_datetime
            # helper -- the same routine buildChildren() uses below --
            # instead of a bare strptime('%Y-%m-%dT%H:%M:%S').  The
            # fixed-format strptime raised ValueError for ISO-8601 strings
            # carrying fractional seconds and/or a timezone designator
            # (e.g. '2006-03-01T12:00:00.000Z'); plain
            # '%Y-%m-%dT%H:%M:%S' strings still parse as before.
            initvalue_ = self.gds_parse_datetime(Timestamp)
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
    def factory(*args_, **kwargs_):
        """Instantiate ListAllMyBuckets, honouring any registered subclass."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ListAllMyBuckets)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ListAllMyBuckets.subclass:
            return ListAllMyBuckets.subclass(*args_, **kwargs_)
        else:
            return ListAllMyBuckets(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- Generated accessors ---
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def hasContent_(self):
        """Return True when at least one child element is populated."""
        if (
            self.AWSAccessKeyId is not None or
            self.Timestamp is not None or
            self.Signature is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListAllMyBuckets',
               pretty_print=True):
        """Serialize this element (and children) as XML to outfile."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ListAllMyBuckets')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Round-trip fidelity: re-emit under the originally-parsed tag.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ListAllMyBuckets')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='ListAllMyBuckets',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ListAllMyBuckets'):
        # No XML attributes defined for this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListAllMyBuckets',
                       fromsubclass_=False, pretty_print=True):
        """Write the child elements in schema order."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate self from an ElementTree/lxml node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Populate one field from a child element, dispatching on tag name."""
        if nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
# end class ListAllMyBuckets
class ListAllMyBucketsResponse(GeneratedsSuper):
    """Wrapper element around the S3 list-buckets result.

    Holds a single inner member, also tagged 'ListAllMyBucketsResponse'
    in the document (the __init__ parameter is suffixed '_member' to
    avoid clashing with the class name, generated-binding convention).
    """
    subclass = None
    superclass = None
    def __init__(self, ListAllMyBucketsResponse_member=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ListAllMyBucketsResponse = ListAllMyBucketsResponse_member
    def factory(*args_, **kwargs_):
        """Instantiate ListAllMyBucketsResponse, honouring any registered subclass."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ListAllMyBucketsResponse)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ListAllMyBucketsResponse.subclass:
            return ListAllMyBucketsResponse.subclass(*args_, **kwargs_)
        else:
            return ListAllMyBucketsResponse(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- Generated accessors ---
    def get_ListAllMyBucketsResponse(self):
        return self.ListAllMyBucketsResponse
    def set_ListAllMyBucketsResponse(self, ListAllMyBucketsResponse):
        self.ListAllMyBucketsResponse = ListAllMyBucketsResponse
    def hasContent_(self):
        """Return True when the inner member is populated."""
        if (
            self.ListAllMyBucketsResponse is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListAllMyBucketsResponse',
               pretty_print=True):
        """Serialize this element (and the inner member) as XML to outfile."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ListAllMyBucketsResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ListAllMyBucketsResponse')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='ListAllMyBucketsResponse',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='ListAllMyBucketsResponse'):
        # No XML attributes defined for this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='ListAllMyBucketsResponse', fromsubclass_=False, pretty_print=True):
        """Delegate serialization of the inner member, if present."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.ListAllMyBucketsResponse is not None:
            self.ListAllMyBucketsResponse.export(outfile, level, namespaceprefix_, namespacedef_='',
                                                 name_='ListAllMyBucketsResponse', pretty_print=pretty_print)
    def build(self, node):
        """Populate self from an ElementTree/lxml node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # NOTE(review): the inner element is built as a
        # ListAllMyBucketsResult even though its tag is
        # 'ListAllMyBucketsResponse' -- this mirrors the S3 WSDL's wrapper
        # typing; confirm against the schema before changing.
        if nodeName_ == 'ListAllMyBucketsResponse':
            obj_ = ListAllMyBucketsResult.factory(parent_object_=self)
            obj_.build(child_)
            self.ListAllMyBucketsResponse = obj_
            obj_.original_tagname_ = 'ListAllMyBucketsResponse'
# end class ListAllMyBucketsResponse
class ListAllMyBucketsEntry(GeneratedsSuper):
    """One bucket entry in the list-buckets response: Name + CreationDate.

    Follows the generateDS-style binding pattern used throughout this
    module: factory(), get/set accessors, XML export and build helpers.
    """
    subclass = None
    superclass = None
    def __init__(self, Name=None, CreationDate=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Name = Name
        if isinstance(CreationDate, BaseStrType_):
            # Parse string dates with the shared gds_parse_datetime helper
            # -- the same routine buildChildren() uses below -- instead of
            # a bare strptime('%Y-%m-%dT%H:%M:%S').  The fixed-format
            # strptime raised ValueError for ISO-8601 strings carrying
            # fractional seconds and/or a timezone designator (e.g.
            # '2006-03-01T12:00:00.000Z'); plain '%Y-%m-%dT%H:%M:%S'
            # strings still parse as before.
            initvalue_ = self.gds_parse_datetime(CreationDate)
        else:
            initvalue_ = CreationDate
        self.CreationDate = initvalue_
    def factory(*args_, **kwargs_):
        """Instantiate ListAllMyBucketsEntry, honouring any registered subclass."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ListAllMyBucketsEntry)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ListAllMyBucketsEntry.subclass:
            return ListAllMyBucketsEntry.subclass(*args_, **kwargs_)
        else:
            return ListAllMyBucketsEntry(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- Generated accessors ---
    def get_Name(self):
        return self.Name
    def set_Name(self, Name):
        self.Name = Name
    def get_CreationDate(self):
        return self.CreationDate
    def set_CreationDate(self, CreationDate):
        self.CreationDate = CreationDate
    def hasContent_(self):
        """Return True when at least one child element is populated."""
        if (
            self.Name is not None or
            self.CreationDate is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListAllMyBucketsEntry',
               pretty_print=True):
        """Serialize this element (and children) as XML to outfile."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ListAllMyBucketsEntry')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ListAllMyBucketsEntry')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='ListAllMyBucketsEntry',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ListAllMyBucketsEntry'):
        # No XML attributes defined for this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='ListAllMyBucketsEntry', fromsubclass_=False, pretty_print=True):
        """Write the child elements in schema order."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Name is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sName>%s</%sName>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Name), input_name='Name')),
                namespaceprefix_, eol_))
        if self.CreationDate is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCreationDate>%s</%sCreationDate>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.CreationDate, input_name='CreationDate'), namespaceprefix_,
                eol_))
    def build(self, node):
        """Populate self from an ElementTree/lxml node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Populate one field from a child element, dispatching on tag name."""
        if nodeName_ == 'Name':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Name')
            value_ = self.gds_validate_string(value_, node, 'Name')
            self.Name = value_
        elif nodeName_ == 'CreationDate':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.CreationDate = dval_
# end class ListAllMyBucketsEntry
class ListAllMyBucketsResult(GeneratedsSuper):
    """Top-level list-buckets result: an Owner plus the Buckets container.

    Owner is built as a CanonicalUser and Buckets as a
    ListAllMyBucketsList (see buildChildren).  Follows the
    generateDS-style binding pattern used throughout this module.
    """
    subclass = None
    superclass = None
    def __init__(self, Owner=None, Buckets=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Owner = Owner
        self.Buckets = Buckets
    def factory(*args_, **kwargs_):
        """Instantiate ListAllMyBucketsResult, honouring any registered subclass."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ListAllMyBucketsResult)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ListAllMyBucketsResult.subclass:
            return ListAllMyBucketsResult.subclass(*args_, **kwargs_)
        else:
            return ListAllMyBucketsResult(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- Generated accessors ---
    def get_Owner(self):
        return self.Owner
    def set_Owner(self, Owner):
        self.Owner = Owner
    def get_Buckets(self):
        return self.Buckets
    def set_Buckets(self, Buckets):
        self.Buckets = Buckets
    def hasContent_(self):
        """Return True when at least one child element is populated."""
        if (
            self.Owner is not None or
            self.Buckets is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListAllMyBucketsResult',
               pretty_print=True):
        """Serialize this element (and children) as XML to outfile."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ListAllMyBucketsResult')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ListAllMyBucketsResult')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='ListAllMyBucketsResult',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ListAllMyBucketsResult'):
        # No XML attributes defined for this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='ListAllMyBucketsResult', fromsubclass_=False, pretty_print=True):
        """Delegate serialization of Owner and Buckets, when present."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Owner is not None:
            self.Owner.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Owner',
                              pretty_print=pretty_print)
        if self.Buckets is not None:
            self.Buckets.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Buckets',
                                pretty_print=pretty_print)
    def build(self, node):
        """Populate self from an ElementTree/lxml node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Populate Owner/Buckets from their child elements."""
        if nodeName_ == 'Owner':
            obj_ = CanonicalUser.factory(parent_object_=self)
            obj_.build(child_)
            self.Owner = obj_
            obj_.original_tagname_ = 'Owner'
        elif nodeName_ == 'Buckets':
            obj_ = ListAllMyBucketsList.factory(parent_object_=self)
            obj_.build(child_)
            self.Buckets = obj_
            obj_.original_tagname_ = 'Buckets'
# end class ListAllMyBucketsResult
class ListAllMyBucketsList(GeneratedsSuper):
    """Container for the repeatable Bucket entries of a list-buckets result.

    Each Bucket child is built as a ListAllMyBucketsEntry.  Follows the
    generateDS-style binding pattern used throughout this module.
    """
    subclass = None
    superclass = None
    def __init__(self, Bucket=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        # Repeatable child defaults to a fresh list (never a shared
        # mutable default argument).
        if Bucket is None:
            self.Bucket = []
        else:
            self.Bucket = Bucket
    def factory(*args_, **kwargs_):
        """Instantiate ListAllMyBucketsList, honouring any registered subclass."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ListAllMyBucketsList)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ListAllMyBucketsList.subclass:
            return ListAllMyBucketsList.subclass(*args_, **kwargs_)
        else:
            return ListAllMyBucketsList(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- Generated accessors (plus add/insert/replace list helpers) ---
    def get_Bucket(self):
        return self.Bucket
    def set_Bucket(self, Bucket):
        self.Bucket = Bucket
    def add_Bucket(self, value):
        self.Bucket.append(value)
    def insert_Bucket_at(self, index, value):
        self.Bucket.insert(index, value)
    def replace_Bucket_at(self, index, value):
        self.Bucket[index] = value
    def hasContent_(self):
        """Return True when at least one Bucket entry is present."""
        if (
            self.Bucket
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='ListAllMyBucketsList',
               pretty_print=True):
        """Serialize this element (and its Bucket entries) as XML to outfile."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ListAllMyBucketsList')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ListAllMyBucketsList')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='ListAllMyBucketsList',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ListAllMyBucketsList'):
        # No XML attributes defined for this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='ListAllMyBucketsList', fromsubclass_=False, pretty_print=True):
        """Delegate serialization of each Bucket entry."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Bucket_ in self.Bucket:
            Bucket_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Bucket',
                           pretty_print=pretty_print)
    def build(self, node):
        """Populate self from an ElementTree/lxml node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Append one ListAllMyBucketsEntry per Bucket child element."""
        if nodeName_ == 'Bucket':
            obj_ = ListAllMyBucketsEntry.factory(parent_object_=self)
            obj_.build(child_)
            self.Bucket.append(obj_)
            obj_.original_tagname_ = 'Bucket'
# end class ListAllMyBucketsList
class PostResponse(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Location=None, Bucket=None, Key=None, ETag=None, **kwargs_):
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.Location = Location
self.Bucket = Bucket
self.Key = Key
self.ETag = ETag
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, PostResponse)
if subclass is not None:
return subclass(*args_, **kwargs_)
if PostResponse.subclass:
return PostResponse.subclass(*args_, **kwargs_)
else:
return PostResponse(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Location(self):
return self.Location
def set_Location(self, Location):
self.Location = Location
def get_Bucket(self):
return self.Bucket
def set_Bucket(self, Bucket):
self.Bucket = Bucket
def get_Key(self):
return self.Key
def set_Key(self, Key):
self.Key = Key
def get_ETag(self):
return self.ETag
def set_ETag(self, ETag):
self.ETag = ETag
def hasContent_(self):
if (
self.Location is not None or
self.Bucket is not None or
self.Key is not None or
self.ETag is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='',
namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='PostResponse',
pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('PostResponse')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PostResponse')
if self.hasContent_():
outfile.write('>%s' % (eol_,))
self.exportChildren(outfile, level + 1, '', namespacedef_, name_='PostResponse', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_,))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PostResponse'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='',
namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='PostResponse',
fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Location is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sLocation>%s</%sLocation>%s' % (
namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Location), input_name='Location')),
namespaceprefix_, eol_))
if self.Bucket is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sBucket>%s</%sBucket>%s' % (
namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Bucket), input_name='Bucket')),
namespaceprefix_, eol_))
if self.Key is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sKey>%s</%sKey>%s' % (
namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Key), input_name='Key')),
namespaceprefix_, eol_))
if self.ETag is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sETag>%s</%sETag>%s' % (
namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.ETag), input_name='ETag')),
namespaceprefix_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
    def buildAttributes(self, node, attrs, already_processed):
        """Parse XML attributes for PostResponse; the schema defines none, so this is a no-op."""
        pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Location':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'Location')
value_ = self.gds_validate_string(value_, node, 'Location')
self.Location = value_
elif nodeName_ == 'Bucket':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'Bucket')
value_ = self.gds_validate_string(value_, node, 'Bucket')
self.Bucket = value_
elif nodeName_ == 'Key':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'Key')
value_ = self.gds_validate_string(value_, node, 'Key')
self.Key = value_
elif nodeName_ == 'ETag':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'ETag')
value_ = self.gds_validate_string(value_, node, 'ETag')
self.ETag = value_
# end class PostResponse
class CopyObject(GeneratedsSuper):
    """Request model for the S3 ``CopyObject`` SOAP/XML operation.

    Carries the source/destination bucket and key, optional copy
    pre-conditions (If-Modified-Since, ETag match lists), metadata
    handling, storage class, and request signing fields
    (AWSAccessKeyId, Timestamp, Signature, Credential).
    """
    subclass = None
    superclass = None
    def __init__(self, SourceBucket=None, SourceKey=None, DestinationBucket=None, DestinationKey=None,
                 MetadataDirective=None, Metadata=None, AccessControlList=None, CopySourceIfModifiedSince=None,
                 CopySourceIfUnmodifiedSince=None, CopySourceIfMatch=None, CopySourceIfNoneMatch=None,
                 StorageClass=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
        """Initialize all fields; string timestamps are parsed to datetime.

        List-valued fields (Metadata, CopySourceIfMatch, CopySourceIfNoneMatch)
        default to a fresh list per instance.
        """
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.SourceBucket = SourceBucket
        self.SourceKey = SourceKey
        self.DestinationBucket = DestinationBucket
        self.DestinationKey = DestinationKey
        self.MetadataDirective = MetadataDirective
        self.validate_MetadataDirective(self.MetadataDirective)
        if Metadata is None:
            self.Metadata = []
        else:
            self.Metadata = Metadata
        self.AccessControlList = AccessControlList
        # Accept either a preparsed datetime or an ISO-8601 string.
        if isinstance(CopySourceIfModifiedSince, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(CopySourceIfModifiedSince, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = CopySourceIfModifiedSince
        self.CopySourceIfModifiedSince = initvalue_
        if isinstance(CopySourceIfUnmodifiedSince, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(CopySourceIfUnmodifiedSince, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = CopySourceIfUnmodifiedSince
        self.CopySourceIfUnmodifiedSince = initvalue_
        if CopySourceIfMatch is None:
            self.CopySourceIfMatch = []
        else:
            self.CopySourceIfMatch = CopySourceIfMatch
        if CopySourceIfNoneMatch is None:
            self.CopySourceIfNoneMatch = []
        else:
            self.CopySourceIfNoneMatch = CopySourceIfNoneMatch
        self.StorageClass = StorageClass
        self.validate_StorageClass(self.StorageClass)
        self.AWSAccessKeyId = AWSAccessKeyId
        if isinstance(Timestamp, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(Timestamp, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = Timestamp
        self.Timestamp = initvalue_
        self.Signature = Signature
        self.Credential = Credential
    def factory(*args_, **kwargs_):
        """Create an instance, honoring any registered subclass hook first."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, CopyObject)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if CopyObject.subclass:
            return CopyObject.subclass(*args_, **kwargs_)
        else:
            return CopyObject(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_SourceBucket(self):
        return self.SourceBucket
    def set_SourceBucket(self, SourceBucket):
        self.SourceBucket = SourceBucket
    def get_SourceKey(self):
        return self.SourceKey
    def set_SourceKey(self, SourceKey):
        self.SourceKey = SourceKey
    def get_DestinationBucket(self):
        return self.DestinationBucket
    def set_DestinationBucket(self, DestinationBucket):
        self.DestinationBucket = DestinationBucket
    def get_DestinationKey(self):
        return self.DestinationKey
    def set_DestinationKey(self, DestinationKey):
        self.DestinationKey = DestinationKey
    def get_MetadataDirective(self):
        return self.MetadataDirective
    def set_MetadataDirective(self, MetadataDirective):
        self.MetadataDirective = MetadataDirective
    def get_Metadata(self):
        return self.Metadata
    def set_Metadata(self, Metadata):
        self.Metadata = Metadata
    def add_Metadata(self, value):
        self.Metadata.append(value)
    def insert_Metadata_at(self, index, value):
        self.Metadata.insert(index, value)
    def replace_Metadata_at(self, index, value):
        self.Metadata[index] = value
    def get_AccessControlList(self):
        return self.AccessControlList
    def set_AccessControlList(self, AccessControlList):
        self.AccessControlList = AccessControlList
    def get_CopySourceIfModifiedSince(self):
        return self.CopySourceIfModifiedSince
    def set_CopySourceIfModifiedSince(self, CopySourceIfModifiedSince):
        self.CopySourceIfModifiedSince = CopySourceIfModifiedSince
    def get_CopySourceIfUnmodifiedSince(self):
        return self.CopySourceIfUnmodifiedSince
    def set_CopySourceIfUnmodifiedSince(self, CopySourceIfUnmodifiedSince):
        self.CopySourceIfUnmodifiedSince = CopySourceIfUnmodifiedSince
    def get_CopySourceIfMatch(self):
        return self.CopySourceIfMatch
    def set_CopySourceIfMatch(self, CopySourceIfMatch):
        self.CopySourceIfMatch = CopySourceIfMatch
    def add_CopySourceIfMatch(self, value):
        self.CopySourceIfMatch.append(value)
    def insert_CopySourceIfMatch_at(self, index, value):
        self.CopySourceIfMatch.insert(index, value)
    def replace_CopySourceIfMatch_at(self, index, value):
        self.CopySourceIfMatch[index] = value
    def get_CopySourceIfNoneMatch(self):
        return self.CopySourceIfNoneMatch
    def set_CopySourceIfNoneMatch(self, CopySourceIfNoneMatch):
        self.CopySourceIfNoneMatch = CopySourceIfNoneMatch
    def add_CopySourceIfNoneMatch(self, value):
        self.CopySourceIfNoneMatch.append(value)
    def insert_CopySourceIfNoneMatch_at(self, index, value):
        self.CopySourceIfNoneMatch.insert(index, value)
    def replace_CopySourceIfNoneMatch_at(self, index, value):
        self.CopySourceIfNoneMatch[index] = value
    def get_StorageClass(self):
        return self.StorageClass
    def set_StorageClass(self, StorageClass):
        self.StorageClass = StorageClass
    def get_AWSAccessKeyId(self):
        return self.AWSAccessKeyId
    def set_AWSAccessKeyId(self, AWSAccessKeyId):
        self.AWSAccessKeyId = AWSAccessKeyId
    def get_Timestamp(self):
        return self.Timestamp
    def set_Timestamp(self, Timestamp):
        self.Timestamp = Timestamp
    def get_Signature(self):
        return self.Signature
    def set_Signature(self, Signature):
        self.Signature = Signature
    def get_Credential(self):
        return self.Credential
    def set_Credential(self, Credential):
        self.Credential = Credential
    def validate_MetadataDirective(self, value):
        """Warn (not raise) if value is not one of the COPY/REPLACE enum values."""
        # Validate type MetadataDirective, a restriction on xsd:string.
        if value is not None and Validate_simpletypes_:
            value = str(value)
            enumerations = ['COPY', 'REPLACE']
            enumeration_respectee = False
            for enum in enumerations:
                if value == enum:
                    enumeration_respectee = True
                    break
            if not enumeration_respectee:
                warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on MetadataDirective' % {
                    "value": value.encode("utf-8")})
    def validate_StorageClass(self, value):
        """Warn (not raise) if value is not a recognized StorageClass enum value."""
        # Validate type StorageClass, a restriction on xsd:string.
        if value is not None and Validate_simpletypes_:
            value = str(value)
            enumerations = ['STANDARD', 'REDUCED_REDUNDANCY', 'GLACIER', 'UNKNOWN']
            enumeration_respectee = False
            for enum in enumerations:
                if value == enum:
                    enumeration_respectee = True
                    break
            if not enumeration_respectee:
                warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on StorageClass' % {
                    "value": value.encode("utf-8")})
    def hasContent_(self):
        """Return True when any child element would be emitted by export()."""
        if (
            self.SourceBucket is not None or
            self.SourceKey is not None or
            self.DestinationBucket is not None or
            self.DestinationKey is not None or
            self.MetadataDirective is not None or
            self.Metadata or
            self.AccessControlList is not None or
            self.CopySourceIfModifiedSince is not None or
            self.CopySourceIfUnmodifiedSince is not None or
            self.CopySourceIfMatch or
            self.CopySourceIfNoneMatch or
            self.StorageClass is not None or
            self.AWSAccessKeyId is not None or
            self.Timestamp is not None or
            self.Signature is not None or
            self.Credential is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='CopyObject',
               pretty_print=True):
        """Serialize this element (and its children) as XML onto ``outfile``."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('CopyObject')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='CopyObject')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='CopyObject', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing element.
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='CopyObject'):
        """Emit XML attributes; the schema defines none, so this is a no-op."""
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='CopyObject',
                       fromsubclass_=False, pretty_print=True):
        """Write every non-None/non-empty child element of CopyObject as XML.

        Strings are XML-quoted/encoded, datetimes are formatted via
        ``gds_format_datetime``, and complex children (Metadata,
        AccessControlList) delegate to their own ``export`` methods.
        """
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.SourceBucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSourceBucket>%s</%sSourceBucket>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.SourceBucket), input_name='SourceBucket')), namespaceprefix_,
                eol_))
        if self.SourceKey is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSourceKey>%s</%sSourceKey>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.SourceKey), input_name='SourceKey')), namespaceprefix_, eol_))
        if self.DestinationBucket is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sDestinationBucket>%s</%sDestinationBucket>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.DestinationBucket), input_name='DestinationBucket')),
                namespaceprefix_, eol_))
        if self.DestinationKey is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sDestinationKey>%s</%sDestinationKey>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.DestinationKey), input_name='DestinationKey')), namespaceprefix_,
                eol_))
        if self.MetadataDirective is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sMetadataDirective>%s</%sMetadataDirective>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.MetadataDirective), input_name='MetadataDirective')),
                namespaceprefix_, eol_))
        for Metadata_ in self.Metadata:
            Metadata_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Metadata',
                             pretty_print=pretty_print)
        if self.AccessControlList is not None:
            self.AccessControlList.export(outfile, level, namespaceprefix_, namespacedef_='', name_='AccessControlList',
                                          pretty_print=pretty_print)
        if self.CopySourceIfModifiedSince is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCopySourceIfModifiedSince>%s</%sCopySourceIfModifiedSince>%s' % (namespaceprefix_,
                self.gds_format_datetime(
                    self.CopySourceIfModifiedSince,
                    input_name='CopySourceIfModifiedSince'),
                namespaceprefix_, eol_))
        if self.CopySourceIfUnmodifiedSince is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCopySourceIfUnmodifiedSince>%s</%sCopySourceIfUnmodifiedSince>%s' % (namespaceprefix_,
                self.gds_format_datetime(
                    self.CopySourceIfUnmodifiedSince,
                    input_name='CopySourceIfUnmodifiedSince'),
                namespaceprefix_,
                eol_))
        for CopySourceIfMatch_ in self.CopySourceIfMatch:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCopySourceIfMatch>%s</%sCopySourceIfMatch>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(CopySourceIfMatch_), input_name='CopySourceIfMatch')),
                namespaceprefix_, eol_))
        for CopySourceIfNoneMatch_ in self.CopySourceIfNoneMatch:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCopySourceIfNoneMatch>%s</%sCopySourceIfNoneMatch>%s' % (namespaceprefix_,
                self.gds_encode(
                    self.gds_format_string(
                        quote_xml(
                            CopySourceIfNoneMatch_),
                        input_name='CopySourceIfNoneMatch')),
                namespaceprefix_, eol_))
        if self.StorageClass is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sStorageClass>%s</%sStorageClass>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.StorageClass), input_name='StorageClass')), namespaceprefix_,
                eol_))
        if self.AWSAccessKeyId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAWSAccessKeyId>%s</%sAWSAccessKeyId>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.AWSAccessKeyId), input_name='AWSAccessKeyId')), namespaceprefix_,
                eol_))
        if self.Timestamp is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTimestamp>%s</%sTimestamp>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.Timestamp, input_name='Timestamp'), namespaceprefix_, eol_))
        if self.Signature is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sSignature>%s</%sSignature>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Signature), input_name='Signature')), namespaceprefix_, eol_))
        if self.Credential is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCredential>%s</%sCredential>%s' % (namespaceprefix_, self.gds_encode(
                self.gds_format_string(quote_xml(self.Credential), input_name='Credential')), namespaceprefix_, eol_))
    def build(self, node):
        """Populate this CopyObject from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Parse XML attributes; the schema defines none, so this is a no-op."""
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Dispatch one parsed child element onto the matching field of self."""
        if nodeName_ == 'SourceBucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'SourceBucket')
            value_ = self.gds_validate_string(value_, node, 'SourceBucket')
            self.SourceBucket = value_
        elif nodeName_ == 'SourceKey':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'SourceKey')
            value_ = self.gds_validate_string(value_, node, 'SourceKey')
            self.SourceKey = value_
        elif nodeName_ == 'DestinationBucket':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'DestinationBucket')
            value_ = self.gds_validate_string(value_, node, 'DestinationBucket')
            self.DestinationBucket = value_
        elif nodeName_ == 'DestinationKey':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'DestinationKey')
            value_ = self.gds_validate_string(value_, node, 'DestinationKey')
            self.DestinationKey = value_
        elif nodeName_ == 'MetadataDirective':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'MetadataDirective')
            value_ = self.gds_validate_string(value_, node, 'MetadataDirective')
            self.MetadataDirective = value_
            # validate type MetadataDirective
            self.validate_MetadataDirective(self.MetadataDirective)
        elif nodeName_ == 'Metadata':
            obj_ = MetadataEntry.factory(parent_object_=self)
            obj_.build(child_)
            self.Metadata.append(obj_)
            obj_.original_tagname_ = 'Metadata'
        elif nodeName_ == 'AccessControlList':
            obj_ = AccessControlList.factory(parent_object_=self)
            obj_.build(child_)
            self.AccessControlList = obj_
            obj_.original_tagname_ = 'AccessControlList'
        elif nodeName_ == 'CopySourceIfModifiedSince':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.CopySourceIfModifiedSince = dval_
        elif nodeName_ == 'CopySourceIfUnmodifiedSince':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.CopySourceIfUnmodifiedSince = dval_
        elif nodeName_ == 'CopySourceIfMatch':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'CopySourceIfMatch')
            value_ = self.gds_validate_string(value_, node, 'CopySourceIfMatch')
            self.CopySourceIfMatch.append(value_)
        elif nodeName_ == 'CopySourceIfNoneMatch':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'CopySourceIfNoneMatch')
            value_ = self.gds_validate_string(value_, node, 'CopySourceIfNoneMatch')
            self.CopySourceIfNoneMatch.append(value_)
        elif nodeName_ == 'StorageClass':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'StorageClass')
            value_ = self.gds_validate_string(value_, node, 'StorageClass')
            self.StorageClass = value_
            # validate type StorageClass
            self.validate_StorageClass(self.StorageClass)
        elif nodeName_ == 'AWSAccessKeyId':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'AWSAccessKeyId')
            value_ = self.gds_validate_string(value_, node, 'AWSAccessKeyId')
            self.AWSAccessKeyId = value_
        elif nodeName_ == 'Timestamp':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.Timestamp = dval_
        elif nodeName_ == 'Signature':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Signature')
            value_ = self.gds_validate_string(value_, node, 'Signature')
            self.Signature = value_
        elif nodeName_ == 'Credential':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Credential')
            value_ = self.gds_validate_string(value_, node, 'Credential')
            self.Credential = value_
# end class CopyObject
class CopyObjectResponse(GeneratedsSuper):
    """Response wrapper for the S3 ``CopyObject`` operation.

    Holds at most one ``CopyObjectResult`` child element.
    """
    subclass = None
    superclass = None
    def __init__(self, CopyObjectResult=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.CopyObjectResult = CopyObjectResult
    def factory(*args_, **kwargs_):
        # Prefer a subclass registered in an external module, then the
        # class-level subclass hook, and finally this class itself.
        if CurrentSubclassModule_ is not None:
            registered = getSubclassFromModule_(
                CurrentSubclassModule_, CopyObjectResponse)
            if registered is not None:
                return registered(*args_, **kwargs_)
        target = CopyObjectResponse.subclass or CopyObjectResponse
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_CopyObjectResult(self):
        return self.CopyObjectResult
    def set_CopyObjectResult(self, CopyObjectResult):
        self.CopyObjectResult = CopyObjectResult
    def hasContent_(self):
        # Non-empty exactly when the single child element is present.
        return self.CopyObjectResult is not None
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='CopyObjectResponse',
               pretty_print=True):
        """Write this element as XML to ``outfile`` at the given indent level."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('CopyObjectResponse')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        eol_ = '\n' if pretty_print else ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='CopyObjectResponse')
        if not self.hasContent_():
            # Empty element: emit the self-closing form and stop.
            outfile.write('/>%s' % (eol_,))
            return
        outfile.write('>%s' % (eol_,))
        self.exportChildren(outfile, level + 1, '', namespacedef_, name_='CopyObjectResponse',
                            pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='CopyObjectResponse'):
        # No XML attributes are defined for this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='CopyObjectResponse',
                       fromsubclass_=False, pretty_print=True):
        if self.CopyObjectResult is not None:
            self.CopyObjectResult.export(outfile, level, namespaceprefix_, namespacedef_='', name_='CopyObjectResult',
                                         pretty_print=pretty_print)
    def build(self, node):
        """Populate this instance from an ElementTree node and return self."""
        seen_attrs = set()
        self.buildAttributes(node, node.attrib, seen_attrs)
        for element in node:
            # Strip any namespace prefix from the child tag before dispatch.
            tagname = Tag_pattern_.match(element.tag).groups()[-1]
            self.buildChildren(element, node, tagname)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes are defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'CopyObjectResult':
            result = CopyObjectResult.factory(parent_object_=self)
            result.build(child_)
            result.original_tagname_ = 'CopyObjectResult'
            self.CopyObjectResult = result
# end class CopyObjectResponse
class CopyObjectResult(GeneratedsSuper):
    """Result element of the S3 ``CopyObject`` operation.

    Holds the new object's ``LastModified`` timestamp and ``ETag``.
    """
    subclass = None
    superclass = None
    def __init__(self, LastModified=None, ETag=None, **kwargs_):
        """Initialize fields; a string LastModified is parsed as ISO-8601."""
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        if isinstance(LastModified, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(LastModified, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = LastModified
        self.LastModified = initvalue_
        self.ETag = ETag
    def factory(*args_, **kwargs_):
        """Create an instance, honoring any registered subclass hook first."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, CopyObjectResult)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if CopyObjectResult.subclass:
            return CopyObjectResult.subclass(*args_, **kwargs_)
        else:
            return CopyObjectResult(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_LastModified(self):
        return self.LastModified
    def set_LastModified(self, LastModified):
        self.LastModified = LastModified
    def get_ETag(self):
        return self.ETag
    def set_ETag(self, ETag):
        self.ETag = ETag
    def hasContent_(self):
        """Return True when any child element would be emitted by export()."""
        if (
            self.LastModified is not None or
            self.ETag is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='CopyObjectResult',
               pretty_print=True):
        """Serialize this element (and its children) as XML onto ``outfile``."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('CopyObjectResult')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='CopyObjectResult')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='CopyObjectResult',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing element.
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='CopyObjectResult'):
        """Emit XML attributes; the schema defines none, so this is a no-op."""
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='CopyObjectResult',
                       fromsubclass_=False, pretty_print=True):
        """Write the LastModified (datetime) and ETag (string) children."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.LastModified is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sLastModified>%s</%sLastModified>%s' % (
                namespaceprefix_, self.gds_format_datetime(self.LastModified, input_name='LastModified'), namespaceprefix_,
                eol_))
        if self.ETag is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sETag>%s</%sETag>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.ETag), input_name='ETag')),
                namespaceprefix_, eol_))
    def build(self, node):
        """Populate this instance from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Parse XML attributes; the schema defines none, so this is a no-op."""
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Assign one parsed child (LastModified as datetime, ETag as string)."""
        if nodeName_ == 'LastModified':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.LastModified = dval_
        elif nodeName_ == 'ETag':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'ETag')
            value_ = self.gds_validate_string(value_, node, 'ETag')
            self.ETag = value_
# end class CopyObjectResult
class RequestPaymentConfiguration(GeneratedsSuper):
    """S3 request-payment configuration element.

    Holds a single ``Payer`` value ('BucketOwner' or 'Requester').
    """
    subclass = None
    superclass = None
    def __init__(self, Payer=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Payer = Payer
        self.validate_Payer(self.Payer)
    def factory(*args_, **kwargs_):
        """Create an instance, honoring any registered subclass hook first."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, RequestPaymentConfiguration)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if RequestPaymentConfiguration.subclass:
            return RequestPaymentConfiguration.subclass(*args_, **kwargs_)
        else:
            return RequestPaymentConfiguration(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Payer(self):
        return self.Payer
    def set_Payer(self, Payer):
        self.Payer = Payer
    def validate_Payer(self, value):
        """Warn (not raise) if value is not a valid Payer enum value."""
        # Validate type Payer, a restriction on xsd:string.
        if value is not None and Validate_simpletypes_:
            value = str(value)
            enumerations = ['BucketOwner', 'Requester']
            enumeration_respectee = False
            for enum in enumerations:
                if value == enum:
                    enumeration_respectee = True
                    break
            if not enumeration_respectee:
                warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on Payer' % {
                    "value": value.encode("utf-8")})
    def hasContent_(self):
        """Return True when any child element would be emitted by export()."""
        if (
            self.Payer is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='RequestPaymentConfiguration',
               pretty_print=True):
        """Serialize this element (and its children) as XML onto ``outfile``."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('RequestPaymentConfiguration')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='RequestPaymentConfiguration')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='RequestPaymentConfiguration',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing element.
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='RequestPaymentConfiguration'):
        """Emit XML attributes; the schema defines none, so this is a no-op."""
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='RequestPaymentConfiguration', fromsubclass_=False, pretty_print=True):
        """Write the Payer child element when present."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Payer is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sPayer>%s</%sPayer>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Payer), input_name='Payer')),
                namespaceprefix_, eol_))
    def build(self, node):
        """Populate this instance from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Parse XML attributes; the schema defines none, so this is a no-op."""
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Assign the parsed Payer child and re-run its enum validation."""
        if nodeName_ == 'Payer':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Payer')
            value_ = self.gds_validate_string(value_, node, 'Payer')
            self.Payer = value_
            # validate type Payer
            self.validate_Payer(self.Payer)
# end class RequestPaymentConfiguration
class VersioningConfiguration(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Status=None, MfaDelete=None, **kwargs_):
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.Status = Status
self.validate_VersioningStatus(self.Status)
self.MfaDelete = MfaDelete
self.validate_MfaDeleteStatus(self.MfaDelete)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, VersioningConfiguration)
if subclass is not None:
return subclass(*args_, **kwargs_)
if VersioningConfiguration.subclass:
return VersioningConfiguration.subclass(*args_, **kwargs_)
else:
return VersioningConfiguration(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Status(self):
return self.Status
def set_Status(self, Status):
self.Status = Status
def get_MfaDelete(self):
return self.MfaDelete
def set_MfaDelete(self, MfaDelete):
self.MfaDelete = MfaDelete
def validate_VersioningStatus(self, value):
# Validate type VersioningStatus, a restriction on xsd:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['Enabled', 'Suspended']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on VersioningStatus' % {
"value": value.encode("utf-8")})
def validate_MfaDeleteStatus(self, value):
# Validate type MfaDeleteStatus, a restriction on xsd:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['Enabled', 'Disabled']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on MfaDeleteStatus' % {
"value": value.encode("utf-8")})
def hasContent_(self):
if (
self.Status is not None or
self.MfaDelete is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='',
namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='VersioningConfiguration',
pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('VersioningConfiguration')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='VersioningConfiguration')
if self.hasContent_():
outfile.write('>%s' % (eol_,))
self.exportChildren(outfile, level + 1, '', namespacedef_, name_='VersioningConfiguration',
pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_,))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='VersioningConfiguration'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='',
namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
name_='VersioningConfiguration', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Status is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sStatus>%s</%sStatus>%s' % (
namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Status), input_name='Status')),
namespaceprefix_, eol_))
if self.MfaDelete is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sMfaDelete>%s</%sMfaDelete>%s' % (namespaceprefix_, self.gds_encode(
self.gds_format_string(quote_xml(self.MfaDelete), input_name='MfaDelete')), namespaceprefix_, eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element (Status or MfaDelete) into this object."""
        if nodeName_ == 'Status':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Status')
            value_ = self.gds_validate_string(value_, node, 'Status')
            self.Status = value_
            # validate type VersioningStatus
            self.validate_VersioningStatus(self.Status)
        elif nodeName_ == 'MfaDelete':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'MfaDelete')
            value_ = self.gds_validate_string(value_, node, 'MfaDelete')
            self.MfaDelete = value_
            # validate type MfaDeleteStatus
            self.validate_MfaDeleteStatus(self.MfaDelete)
# end class VersioningConfiguration
class NotificationConfiguration(GeneratedsSuper):
    """Generated binding for the S3 NotificationConfiguration element.

    Holds a repeatable list of TopicConfiguration children.  This class is
    machine-generated (generateDS); edits should normally go into the
    schema, not this file.
    """
    subclass = None
    superclass = None
    def __init__(self, TopicConfiguration=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        if TopicConfiguration is None:
            self.TopicConfiguration = []
        else:
            self.TopicConfiguration = TopicConfiguration
    def factory(*args_, **kwargs_):
        # Honors the generateDS subclass-override hook before falling back
        # to this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, NotificationConfiguration)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if NotificationConfiguration.subclass:
            return NotificationConfiguration.subclass(*args_, **kwargs_)
        else:
            return NotificationConfiguration(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_TopicConfiguration(self):
        return self.TopicConfiguration
    def set_TopicConfiguration(self, TopicConfiguration):
        self.TopicConfiguration = TopicConfiguration
    def add_TopicConfiguration(self, value):
        self.TopicConfiguration.append(value)
    def insert_TopicConfiguration_at(self, index, value):
        self.TopicConfiguration.insert(index, value)
    def replace_TopicConfiguration_at(self, index, value):
        self.TopicConfiguration[index] = value
    def hasContent_(self):
        # True when there is at least one TopicConfiguration child.
        if (
            self.TopicConfiguration
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='NotificationConfiguration',
               pretty_print=True):
        """Serialize this object (opening tag, children, closing tag) to outfile."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('NotificationConfiguration')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='NotificationConfiguration')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='NotificationConfiguration',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='',
                         name_='NotificationConfiguration'):
        # Generated stub: NotificationConfiguration defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
                       name_='NotificationConfiguration', fromsubclass_=False, pretty_print=True):
        """Delegate serialization to each TopicConfiguration child."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for TopicConfiguration_ in self.TopicConfiguration:
            TopicConfiguration_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='TopicConfiguration',
                                       pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # Generated stub: no attributes to parse.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one TopicConfiguration child element and append it."""
        if nodeName_ == 'TopicConfiguration':
            obj_ = TopicConfiguration.factory(parent_object_=self)
            obj_.build(child_)
            self.TopicConfiguration.append(obj_)
            obj_.original_tagname_ = 'TopicConfiguration'
# end class NotificationConfiguration
class TopicConfiguration(GeneratedsSuper):
    """Generated binding for the S3 TopicConfiguration element.

    Holds a single Topic string plus a repeatable list of Event strings.
    Machine-generated (generateDS); edits should normally go into the
    schema, not this file.
    """
    subclass = None
    superclass = None
    def __init__(self, Topic=None, Event=None, **kwargs_):
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.Topic = Topic
        if Event is None:
            self.Event = []
        else:
            self.Event = Event
    def factory(*args_, **kwargs_):
        # Honors the generateDS subclass-override hook before falling back
        # to this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, TopicConfiguration)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if TopicConfiguration.subclass:
            return TopicConfiguration.subclass(*args_, **kwargs_)
        else:
            return TopicConfiguration(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Topic(self):
        return self.Topic
    def set_Topic(self, Topic):
        self.Topic = Topic
    def get_Event(self):
        return self.Event
    def set_Event(self, Event):
        self.Event = Event
    def add_Event(self, value):
        self.Event.append(value)
    def insert_Event_at(self, index, value):
        self.Event.insert(index, value)
    def replace_Event_at(self, index, value):
        self.Event[index] = value
    def hasContent_(self):
        # True when there is a Topic or at least one Event.
        if (
            self.Topic is not None or
            self.Event
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='',
               namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='TopicConfiguration',
               pretty_print=True):
        """Serialize this object (opening tag, children, closing tag) to outfile."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('TopicConfiguration')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '',))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='TopicConfiguration')
        if self.hasContent_():
            outfile.write('>%s' % (eol_,))
            self.exportChildren(outfile, level + 1, '', namespacedef_, name_='TopicConfiguration',
                                pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_,))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='TopicConfiguration'):
        # Generated stub: TopicConfiguration defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='',
                       namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"', name_='TopicConfiguration',
                       fromsubclass_=False, pretty_print=True):
        """Write the optional Topic element and every Event element as XML."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Topic is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTopic>%s</%sTopic>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(self.Topic), input_name='Topic')),
                namespaceprefix_, eol_))
        for Event_ in self.Event:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sEvent>%s</%sEvent>%s' % (
                namespaceprefix_, self.gds_encode(self.gds_format_string(quote_xml(Event_), input_name='Event')),
                namespaceprefix_, eol_))
    def build(self, node):
        """Populate this object from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # Generated stub: no attributes to parse.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element: Topic replaces, Event accumulates."""
        if nodeName_ == 'Topic':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Topic')
            value_ = self.gds_validate_string(value_, node, 'Topic')
            self.Topic = value_
        elif nodeName_ == 'Event':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Event')
            value_ = self.gds_validate_string(value_, node, 'Event')
            self.Event.append(value_)
# end class TopicConfiguration
GDSClassesMapping = {
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    """Print the usage banner, then terminate with exit status 1."""
    print(USAGE_TEXT)
    raise SystemExit(1)
def get_root_tag(node):
    """Map the root element's local tag name to its generated binding class.

    Returns (tag, root_class); root_class is None when the tag is neither
    in GDSClassesMapping nor a module-level class of the same name.
    """
    tag = Tag_pattern_.match(node.tag).groups()[-1]
    root_class = GDSClassesMapping.get(tag)
    if root_class is None:
        root_class = globals().get(tag)
    return tag, root_class
def parse(inFileName, silence=False):
    """Parse an XML file, build the object tree, and return the root object.

    Falls back to CreateBucket as the root class when the root tag is not
    recognized.  Unless *silence* is true, re-exports the tree as XML to
    stdout.
    """
    parser = None
    doc = parsexml_(inFileName, parser)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'CreateBucket'
        rootClass = CreateBucket
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        rootObj.export(
            sys.stdout, 0, name_=rootTag,
            namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
            pretty_print=True)
    return rootObj
def parseEtree(inFileName, silence=False):
    """Parse an XML file and also build an etree plus node mappings.

    Returns (rootObj, rootElement, mapping, reverse_mapping) where mapping
    links generated objects to etree elements and reverse_mapping inverts it.
    """
    parser = None
    doc = parsexml_(inFileName, parser)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'CreateBucket'
        rootClass = CreateBucket
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    mapping = {}
    rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
    reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
    if not silence:
        # NOTE(review): with encoding="utf-8" tostring() returns bytes; on
        # Python 3, writing bytes to the text-mode sys.stdout would raise
        # TypeError — confirm intended Python version before relying on this.
        content = etree_.tostring(
            rootElement, pretty_print=True,
            xml_declaration=True, encoding="utf-8")
        sys.stdout.write(content)
        sys.stdout.write('\n')
    return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
    '''Parse a string, create the object tree, and export it.

    Arguments:
    - inString -- A string.  This XML fragment should not start
      with an XML declaration containing an encoding.
    - silence -- A boolean.  If False, export the object.

    Returns -- The root object in the tree.
    '''
    parser = None
    rootNode = parsexmlstring_(inString, parser)
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Fall back to CreateBucket when the root tag is unrecognized.
        rootTag = 'CreateBucket'
        rootClass = CreateBucket
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        rootObj.export(
            sys.stdout, 0, name_=rootTag,
            namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"')
    return rootObj
def parseLiteral(inFileName, silence=False):
    """Parse an XML file and emit the tree as a Python literal constructor.

    Unless *silence* is true, writes executable Python source to stdout
    that rebuilds the parsed object via the s3_api module.
    """
    parser = None
    doc = parsexml_(inFileName, parser)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'CreateBucket'
        rootClass = CreateBucket
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    if not silence:
        sys.stdout.write('#from s3_api import *\n\n')
        sys.stdout.write('import s3_api as model_\n\n')
        sys.stdout.write('rootObj = model_.rootClass(\n')
        rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
        sys.stdout.write(')\n')
    return rootObj
def main():
    """CLI entry point: parse the single XML file named on the command line."""
    argv = sys.argv[1:]
    if len(argv) != 1:
        usage()
    else:
        parse(argv[0])
if __name__ == '__main__':
# import pdb; pdb.set_trace()
main()
RenameMappings_ = {
}
__all__ = [
"AccessControlList",
"AccessControlPolicy",
"AmazonCustomerByEmail",
"BucketLoggingStatus",
"CanonicalUser",
"CopyObject",
"CopyObjectResponse",
"CopyObjectResult",
"CreateBucket",
"CreateBucketConfiguration",
"CreateBucketResponse",
"CreateBucketResult",
"DeleteBucket",
"DeleteBucketResponse",
"DeleteMarkerEntry",
"DeleteObject",
"DeleteObjectResponse",
"GetBucketAccessControlPolicy",
"GetBucketAccessControlPolicyResponse",
"GetBucketLoggingStatus",
"GetBucketLoggingStatusResponse",
"GetObject",
"GetObjectAccessControlPolicy",
"GetObjectAccessControlPolicyResponse",
"GetObjectExtended",
"GetObjectExtendedResponse",
"GetObjectResponse",
"GetObjectResult",
"Grant",
"Grantee",
"Group",
"ListAllMyBuckets",
"ListAllMyBucketsEntry",
"ListAllMyBucketsList",
"ListAllMyBucketsResponse",
"ListAllMyBucketsResult",
"ListBucket",
"ListBucketResponse",
"ListBucketResult",
"ListEntry",
"ListVersionsResponse",
"ListVersionsResult",
"LocationConstraint",
"LoggingSettings",
"MetadataEntry",
"NotificationConfiguration",
"PostResponse",
"PrefixEntry",
"PutObject",
"PutObjectInline",
"PutObjectInlineResponse",
"PutObjectResponse",
"PutObjectResult",
"RequestPaymentConfiguration",
"Result",
"SetBucketAccessControlPolicy",
"SetBucketAccessControlPolicyResponse",
"SetBucketLoggingStatus",
"SetBucketLoggingStatusResponse",
"SetObjectAccessControlPolicy",
"SetObjectAccessControlPolicyResponse",
"Status",
"TopicConfiguration",
"User",
"VersionEntry",
"VersioningConfiguration"
]
| 2.171875 | 2 |
asynced/asyncio_utils.py | jorenham/asynced | 1 | 12757739 | <reponame>jorenham/asynced
from __future__ import annotations
import functools
import inspect
__all__ = (
'amap_iter',
'race',
'resume',
'get_event_loop',
'call_soon',
'call_soon_task',
'create_future',
'create_task',
)
import asyncio
from typing import (
Any,
AsyncIterable,
AsyncIterator,
Awaitable,
Callable,
cast,
Coroutine,
Literal,
overload,
TypeVar,
)
from typing_extensions import ParamSpec
from asynced._typing import Maybe, Nothing, NothingType
from asynced.compat import anext, aiter
_T = TypeVar('_T')
_T_co = TypeVar('_T_co', covariant=True)
_P = ParamSpec('_P')
_R = TypeVar('_R')
@overload
async def resume() -> None: ...


@overload
async def resume(result: _T = ...) -> _T: ...


async def resume(result: _T | None = None) -> _T | None:
    """Pass control back to the event loop for one cycle, then return *result*."""
    return await asyncio.sleep(0, result)
def amap_iter(
        function: Callable[[_T], _R] | Callable[[_T], Awaitable[_R]],
        iterable: AsyncIterable[_T],
) -> AsyncIterable[_R]:
    """Apply *function* (sync or async) to each item of an async iterable.

    Whether results need awaiting is decided once: up front for coroutine
    functions, otherwise from the first value *function* produces.
    """
    async def _mapped() -> AsyncIterable[_R]:
        awaits_results = True if asyncio.iscoroutinefunction(function) else None
        async for item in iterable:
            produced = function(item)
            if awaits_results is None:
                awaits_results = inspect.isawaitable(produced)
            if awaits_results:
                yield await produced  # type: ignore[misc]
            else:
                yield produced  # type: ignore[misc]

    return _mapped()
@overload
def race(
        *args: AsyncIterable[_T],
        yield_exceptions: Literal[True],
) -> AsyncIterator[tuple[int, _T | BaseException]]:
    ...


@overload
def race(
        *args: AsyncIterable[_T],
        yield_exceptions: Literal[False] = ...,
) -> AsyncIterator[tuple[int, _T]]:
    ...


async def race(
        *args: AsyncIterable[_T],
        yield_exceptions: bool = False,
) -> AsyncIterator[tuple[int, _T | BaseException]]:
    """Yields the argument index and the item of each first next of the
    iterators.

    One ``__anext__`` task is kept in flight per iterator; whichever
    finishes first is yielded as ``(arg_index, item)`` and immediately
    replaced by the iterator's next ``__anext__`` task.  When
    *yield_exceptions* is true, exceptions (including StopAsyncIteration
    and CancelledError) are yielded instead of raised; the iterator that
    raised is not scheduled again.
    """
    if not args:
        raise TypeError('race() must have at least one argument.')

    itrs = [aiter(arg) for arg in args]

    # Pending __anext__ tasks, keyed by task name -> (argument index, task).
    tasks: dict[str, tuple[int, asyncio.Task[_T]]] = {}
    for i, itr in enumerate(itrs):
        task = asyncio.create_task(
            cast(Coroutine[Any, None, _T], itr.__anext__())
        )
        tasks[task.get_name()] = i, task

    while tasks:
        done, pending = await asyncio.wait(
            [task for i, task in tasks.values()],
            return_when=asyncio.FIRST_COMPLETED
        )
        if not done:
            # no done; exception might be raised in the pending ones
            for task in pending:
                if task.done():
                    task.result()

            assert False, 'no tasks completed and no exceptions were raised'
            break  # noqa

        for task in done:
            assert task.done()

            name = task.get_name()
            i = tasks.pop(name)[0]

            try:
                yield i, task.result()
            except (StopAsyncIteration, asyncio.CancelledError) as exc:
                # Exhausted/cancelled iterators drop out of the race; the
                # `continue` skips rescheduling them below.
                if yield_exceptions:
                    yield i, exc
                continue
            except (SystemExit, KeyboardInterrupt):
                # an exceptional exception to the "yield exceptions" exception
                # for these exit exceptions
                raise
            except BaseException as exc:
                if not yield_exceptions:
                    raise
                yield i, exc

            # we create the next next task next:
            itr = itrs[i]
            task = asyncio.create_task(
                cast(Coroutine[Any, None, _T], itr.__anext__())
            )
            name = task.get_name()
            tasks[name] = i, task
def call_soon(
        callback: Callable[_P, Any],
        *args: _P.args,
        **kwargs: _P.kwargs
) -> asyncio.Handle:
    """Like asyncio.get_event_loop().call_soon(), but accepts keyword args.

    Keyword arguments are bound via functools.partial because the raw
    ``loop.call_soon()`` only forwards positional arguments.
    """
    return get_event_loop().call_soon(
        functools.partial(callback, *args, **kwargs)
    )


def call_soon_task(
        callback: Callable[_P, _R],
        *args: _P.args,
        **kwargs: _P.kwargs
) -> asyncio.Task[_R]:
    """Like asyncio.get_event_loop().call_soon(), but returns an asyncio.Task
    instead of asyncio.Handle.

    The returned task resolves with the callback's return value, fails with
    its exception, or is cancelled when the callback or the underlying
    handle is cancelled.
    """
    loop = get_event_loop()
    future = loop.create_future()

    def callback_wrapper():
        try:
            result = callback(*args, **kwargs)
            future.set_result(result)
        except asyncio.CancelledError:
            # Propagate cancellation to both the future and the handle.
            future.cancel()
            handle.cancel()
            raise
        except BaseException as exc:
            future.set_exception(exc)
            raise
        return result

    async def handle_watcher():
        # Poll until the future resolves or the handle is cancelled; the
        # shielded 0.1s wait keeps handle cancellation observable without
        # cancelling the shared future.
        while not handle.cancelled():
            if future.done():
                return future.result()
            try:
                return await asyncio.wait_for(asyncio.shield(future), 0.1)
            except asyncio.TimeoutError:
                await asyncio.sleep(0)

        assert handle.cancelled()
        raise asyncio.CancelledError()

    handle = loop.call_soon(callback_wrapper)
    return asyncio.create_task(handle_watcher())


# PEP 484 requires @overload stubs to immediately precede the implementation
# they describe; they previously sat above call_soon()/call_soon_task(),
# which breaks the association for type checkers.
@overload
def create_future(
        result: _T, exception: NothingType = ...
) -> asyncio.Future[_T]: ...


@overload
def create_future(
        result: NothingType = ..., exception: NothingType = ...
) -> asyncio.Future[Any]: ...


def create_future(
        result: Maybe[_T] = Nothing,
        exception: Maybe[BaseException] = Nothing
) -> asyncio.Future[_T] | asyncio.Future[Any]:
    """Shorthand for asyncio.get_event_loop().create_future().

    Optionally pre-resolves the future with *result* or *exception*.
    NOTE(review): supplying both raises InvalidStateError from
    set_exception(); callers are expected to pass at most one.
    """
    fut = get_event_loop().create_future()
    if result is not Nothing:
        fut.set_result(result)
    if exception is not Nothing:
        fut.set_exception(exception)
    return fut
def create_task(
        coro: Coroutine[Any, Any, _T],
        *,
        name: str | None = None
) -> asyncio.Task[_T]:
    """Wrap *coro* in an asyncio.Task; idempotent for tasks.

    A task passed in is returned unchanged, except that a still-pending
    task carrying its auto-generated ``Task-N`` name is renamed to *name*.

    Raises:
        TypeError: if *coro* is neither a coroutine nor a task.
    """
    if isinstance(coro, asyncio.Task):
        if name and not coro.done() and coro.get_name().startswith('Task-'):
            # only replace name if not done and it has no custom name
            coro.set_name(name)
        return coro

    # Validate early; the previous version bound the coroutine to an unused
    # local (`_coro`) here.
    if not asyncio.iscoroutine(coro):
        raise TypeError(f'a coroutine was expected, got {coro!r}')

    return get_event_loop().create_task(coro, name=name)
def get_event_loop() -> asyncio.AbstractEventLoop:
    """Return the running event loop if exists, otherwise creates a new one."""
    policy = asyncio.get_event_loop_policy()
    return policy.get_event_loop()
| 2.4375 | 2 |
pipeline/sklearn-pipeline-linear/script.py | VijaySingh-GSLab/aws-ml | 0 | 12757740 | <filename>pipeline/sklearn-pipeline-linear/script.py
import argparse
import joblib
import os
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
# inference functions ---------------
def model_fn(model_dir):
    """SageMaker inference hook: load the fitted estimator from *model_dir*."""
    model_path = os.path.join(model_dir, "model.joblib")
    return joblib.load(model_path)
if __name__ =='__main__':

    print('extracting arguments')
    parser = argparse.ArgumentParser()

    # hyperparameters sent by the client are passed as command-line arguments to the script.
    # to simplify the demo we don't use all sklearn RandomForest hyperparameters
    parser.add_argument('--n-estimators', type=int, default=10)
    parser.add_argument('--min-samples-leaf', type=int, default=3)

    # Data, model, and output directories
    parser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
    parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
    parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))
    # Kept for CLI backward compatibility; the script now infers the target
    # from the column layout (first column), so these two are unused.
    parser.add_argument('--features', type=str)  # in this script we ask user to explicitly name features
    parser.add_argument('--target', type=str)  # in this script we ask user to explicitly name the target

    args, _ = parser.parse_known_args()
    print("args.train", args.train)
    print("args.test", args.test)

    def _read_channel(channel_dir, channel_name):
        """Read and concatenate every headerless CSV file in a channel dir.

        Raises ValueError with a SageMaker-style diagnostic when the
        directory is empty (mis-specified channel or missing permissions).
        """
        input_files = [os.path.join(channel_dir, file) for file in os.listdir(channel_dir)]
        if len(input_files) == 0:
            raise ValueError(('There are no files in {}.\n' +
                              'This usually indicates that the channel ({}) was incorrectly specified,\n' +
                              'the data specification in S3 was incorrectly specified or the role specified\n' +
                              'does not have permission to access the data.').format(channel_dir, channel_name))
        raw_data = [pd.read_csv(file, header=None, engine="python") for file in input_files]
        return pd.concat(raw_data)

    print('reading train data')
    print("args.train : ", args.train)
    train_df = _read_channel(args.train, "train")
    print(train_df.shape)

    print('reading test data')
    print("args.test : ", args.test)
    # Bug fix: the empty-channel error for the test channel previously
    # reported args.train / "train"; _read_channel now names the right one.
    test_df = _read_channel(args.test, "test")
    print(test_df.shape)

    print('building training and testing datasets')
    # The first column of the headerless CSVs is the regression target;
    # every remaining column is a feature.
    print(train_df.columns.values)
    col_to_predict = train_df.columns.values[0]
    print("col_to_predict : {}, arg_type : {}".format(col_to_predict, type(col_to_predict)))
    X_train = train_df.drop(columns=[col_to_predict])
    X_test = test_df.drop(columns=[col_to_predict])
    y_train = train_df[col_to_predict]
    y_test = test_df[col_to_predict]

    # train
    print('training model')
    model = RandomForestRegressor(
        n_estimators=args.n_estimators,
        min_samples_leaf=args.min_samples_leaf,
        n_jobs=-1)

    print("-" * 100)
    print("X_train.shape : ", X_train.shape)
    print("model training on num features : ", X_train.shape[1])
    print("sample data : \n", X_train.head(1).values)
    model.fit(X_train, y_train)

    # report absolute-error percentiles on the held-out test channel
    print('validating model')
    abs_err = np.abs(model.predict(X_test) - y_test)
    for q in [10, 50, 90]:
        print('AE-at-' + str(q) + 'th-percentile: '
              + str(np.percentile(a=abs_err, q=q)))

    # persist model for the model_fn inference hook
    path = os.path.join(args.model_dir, "model.joblib")
    joblib.dump(model, path)
    print('model persisted at ' + path)
    print(args.min_samples_leaf)
| 2.734375 | 3 |
hallo/test/modules/permission_control/test_find_permission_mask.py | joshcoales/Hallo | 1 | 12757741 | <reponame>joshcoales/Hallo<filename>hallo/test/modules/permission_control/test_find_permission_mask.py
import hallo.modules.permission_control
from hallo.hallo import Hallo
from hallo.modules.permission_control import Permissions
from hallo.permission_mask import PermissionMask
from hallo.test.server_mock import ServerMock
from hallo.user_group import UserGroup
def test_3_fail(hallo_getter):
    """find_permission_mask must reject more than two filter arguments."""
    test_hallo = hallo_getter({"permission_control"})
    perm_cont = Permissions()
    try:
        perm_cont.find_permission_mask(["a", "b", "c"], test_hallo.test_user, test_hallo.test_chan)
        assert False, "Exception should be thrown if more than 2 arguments passed."
    except hallo.modules.permission_control.PermissionControlException as e:
        assert "error" in str(e).lower()
        assert "too many filters" in str(e).lower()
def test_2_no_server(hallo_getter):
    """Two filters without a server= filter must raise an error."""
    test_hallo = hallo_getter({"permission_control"})
    perm_cont = Permissions()
    try:
        perm_cont.find_permission_mask(
            ["channel=chan1", "user=user1"], test_hallo.test_user, test_hallo.test_chan
        )
        assert False, "Exception should be thrown if 2 arguments and neither is server."
    except hallo.modules.permission_control.PermissionControlException as e:
        assert "error" in str(e).lower()
        assert "no server name found" in str(e).lower()
def test_2_no_server_by_name(hallo_getter):
    """A server= filter naming an unknown server must raise an error."""
    test_hallo = hallo_getter({"permission_control"})
    perm_cont = Permissions()
    try:
        # NOTE(review): the third argument here is test_user rather than
        # test_chan as in the sibling tests — confirm that is intentional.
        perm_cont.find_permission_mask(
            ["server=no_server_by_name", "chan=test_chan1"], test_hallo.test_user, test_hallo.test_user
        )
        assert False, "Exception should be thrown if server does not exist."
    except hallo.modules.permission_control.PermissionControlException as e:
        assert "error" in str(e).lower()
        assert "no server exists by that name" in str(e).lower()
def test_2_server_chan(hallo_getter):
    """server= plus channel= filters resolve to that channel's permission mask."""
    test_hallo = hallo_getter({"permission_control"})
    perm_cont = Permissions()
    # Set up a test server and channel
    serv1 = ServerMock(test_hallo)
    serv1.name = "test_serv1"
    test_hallo.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    perm1 = PermissionMask()
    chan1.permission_mask = perm1
    # Get permission mask of given channel
    data = perm_cont.find_permission_mask(
        ["server=test_serv1", "channel=test_chan1"], test_hallo.test_user, test_hallo.test_chan
    )
    assert perm1 == data, "Did not find the correct permission mask."
def test_2_server_user(hallo_getter):
    """server= plus user= filters resolve to that user's permission mask."""
    test_hallo = hallo_getter({"permission_control"})
    perm_cont = Permissions()
    # Set up a test server and user
    serv1 = ServerMock(test_hallo)
    serv1.name = "test_serv1"
    test_hallo.add_server(serv1)
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    perm1 = PermissionMask()
    user1.permission_mask = perm1
    # Get permission mask of given channel
    data = perm_cont.find_permission_mask(
        ["server=test_serv1", "user=test_user1"], test_hallo.test_user, test_hallo.test_chan
    )
    assert perm1 == data, "Did not find the correct permission mask."
def test_2_server_no_chan_user(hallo_getter):
    """server= plus an unrecognized second filter must raise an error."""
    test_hallo = hallo_getter({"permission_control"})
    perm_cont = Permissions()
    # Set up a test server and channel and user
    serv1 = ServerMock(test_hallo)
    serv1.name = "test_serv1"
    test_hallo.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    perm1 = PermissionMask()
    chan1.permission_mask = perm1
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    perm2 = PermissionMask()
    user1.permission_mask = perm2
    # Get permission mask of given channel
    try:
        perm_cont.find_permission_mask(["server=test_serv1", "core"], user1, chan1)
        assert False, "Should have failed to find any permission mask."
    except hallo.modules.permission_control.PermissionControlException as e:
        assert "error" in str(e).lower()
        assert "server but not channel or user" in str(e).lower()
def test_1_hallo():
    """The single filter "hallo" resolves to the global hallo permission mask."""
    perm_cont = Permissions()
    # Set up a test hallo and server and channel and user
    hallo1 = Hallo()
    perm3 = PermissionMask()
    hallo1.permission_mask = perm3
    serv1 = ServerMock(hallo1)
    serv1.name = "test_serv1"
    perm0 = PermissionMask()
    serv1.permission_mask = perm0
    hallo1.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    perm1 = PermissionMask()
    chan1.permission_mask = perm1
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    perm2 = PermissionMask()
    user1.permission_mask = perm2
    # Get permission of hallo
    data = perm_cont.find_permission_mask(["hallo"], user1, chan1)
    assert data == perm3, "Did not find the correct permission mask."
def test_1_server():
    """The single filter "server" resolves to the current server's mask."""
    perm_cont = Permissions()
    # Set up a test server and channel and user
    hallo1 = Hallo()
    perm3 = PermissionMask()
    hallo1.permission_mask = perm3
    serv1 = ServerMock(hallo1)
    serv1.name = "test_serv1"
    perm0 = PermissionMask()
    serv1.permission_mask = perm0
    hallo1.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    perm1 = PermissionMask()
    chan1.permission_mask = perm1
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    perm2 = PermissionMask()
    user1.permission_mask = perm2
    # Get permissions of current server
    data = perm_cont.find_permission_mask(["server"], user1, chan1)
    assert data == perm0, "Did not find the correct permission mask."
def test_1_server_no_name():
    """A server= filter naming a non-existent server must raise an error."""
    perm_cont = Permissions()
    # Set up a test server and channel and user
    hallo1 = Hallo()
    perm3 = PermissionMask()
    hallo1.permission_mask = perm3
    serv1 = ServerMock(hallo1)
    serv1.name = "test_serv1"
    perm0 = PermissionMask()
    serv1.permission_mask = perm0
    hallo1.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    perm1 = PermissionMask()
    chan1.permission_mask = perm1
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    perm2 = PermissionMask()
    user1.permission_mask = perm2
    # Get permissions of current server
    try:
        perm_cont.find_permission_mask(["server=test_serv2"], user1, chan1)
        assert False, "Find permission mask should have failed."
    except hallo.modules.permission_control.PermissionControlException as e:
        assert "error" in str(e).lower()
        assert "no server exists by that name" in str(e).lower()
def test_1_server_name(hallo_getter):
    """A server=<name> filter resolves to that server's permission mask."""
    # NOTE(review): the hallo_getter fixture result is unused; the test
    # builds its own Hallo instance below — confirm the fixture is needed.
    test_hallo = hallo_getter({"permission_control"})
    perm_cont = Permissions()
    # Set up a test server and channel and user
    hallo1 = Hallo()
    perm3 = PermissionMask()
    hallo1.permission_mask = perm3
    serv1 = ServerMock(hallo1)
    serv1.name = "test_serv1"
    perm0 = PermissionMask()
    serv1.permission_mask = perm0
    hallo1.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    perm1 = PermissionMask()
    chan1.permission_mask = perm1
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    perm2 = PermissionMask()
    user1.permission_mask = perm2
    # Get permissions of current server
    data = perm_cont.find_permission_mask(["server=test_serv1"], user1, chan1)
    assert data == perm0, "Did not find correct permission mask"
def test_1_channel():
    """The single filter "channel" resolves to the current channel's mask."""
    perm_cont = Permissions()
    # Set up a test server and channel and user
    hallo1 = Hallo()
    perm3 = PermissionMask()
    hallo1.permission_mask = perm3
    serv1 = ServerMock(hallo1)
    serv1.name = "test_serv1"
    perm0 = PermissionMask()
    serv1.permission_mask = perm0
    hallo1.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    perm1 = PermissionMask()
    chan1.permission_mask = perm1
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    perm2 = PermissionMask()
    user1.permission_mask = perm2
    # Get permissions of current channel
    data = perm_cont.find_permission_mask(["channel"], user1, chan1)
    assert data == perm1, "Did not find the correct permission mask."
def test_1_channel_privmsg():
    """The "channel" filter must fail when used from a privmsg (no channel)."""
    perm_cont = Permissions()
    # Set up a test server and channel and user
    hallo1 = Hallo()
    perm3 = PermissionMask()
    hallo1.permission_mask = perm3
    serv1 = ServerMock(hallo1)
    serv1.name = "test_serv1"
    perm0 = PermissionMask()
    serv1.permission_mask = perm0
    hallo1.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    perm1 = PermissionMask()
    chan1.permission_mask = perm1
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    perm2 = PermissionMask()
    user1.permission_mask = perm2
    # Try to get permissions of current channel from a privmsg
    try:
        perm_cont.find_permission_mask(["channel"], user1, None)
        assert False, "Should not have managed to get permission mask."
    except hallo.modules.permission_control.PermissionControlException as e:
        assert "error" in str(e).lower()
        assert "can't set generic channel permissions in a privmsg" in str(e).lower()
def test_1_channel_name():
    """A channel=<name> filter resolves to that named channel's mask."""
    perm_cont = Permissions()
    # Set up a test server and channel and user
    hallo1 = Hallo()
    perm3 = PermissionMask()
    hallo1.permission_mask = perm3
    serv1 = ServerMock(hallo1)
    serv1.name = "test_serv1"
    perm0 = PermissionMask()
    serv1.permission_mask = perm0
    hallo1.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    perm1 = PermissionMask()
    chan1.permission_mask = perm1
    chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
    perm4 = PermissionMask()
    chan2.permission_mask = perm4
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    perm2 = PermissionMask()
    user1.permission_mask = perm2
    # Get permissions of current channel
    data = perm_cont.find_permission_mask(["channel=test_chan2"], user1, chan1)
    assert data == perm4, "Did not find the correct permission mask."
def test_1_user_group_no_name():
    """A user_group= filter naming an unknown group must raise an error."""
    perm_cont = Permissions()
    # Set up a test server and channel and user
    hallo1 = Hallo()
    perm3 = PermissionMask()
    hallo1.permission_mask = perm3
    serv1 = ServerMock(hallo1)
    serv1.name = "test_serv1"
    perm0 = PermissionMask()
    serv1.permission_mask = perm0
    hallo1.add_server(serv1)
    chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
    perm1 = PermissionMask()
    chan1.permission_mask = perm1
    user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
    perm2 = PermissionMask()
    user1.permission_mask = perm2
    group1 = UserGroup("test_group1", hallo1)
    perm4 = PermissionMask()
    group1.permission_mask = perm4
    hallo1.add_user_group(group1)
    # Try to get permissions of non-existent user group
    try:
        perm_cont.find_permission_mask(["user_group=test_group2"], user1, chan1)
        assert False, "Find permission mask should have failed."
    except hallo.modules.permission_control.PermissionControlException as e:
        assert "error" in str(e).lower()
        assert "no user group exists by that name" in str(e).lower()
def test_1_user_group_name():
    """find_permission_mask with "user_group=<name>" returns that group's mask."""
    perm_cont = Permissions()
    # Build a mock hallo instance with a server, channel, user and one group.
    hallo_obj = Hallo()
    hallo_obj.permission_mask = PermissionMask()
    server = ServerMock(hallo_obj)
    server.name = "test_serv1"
    server.permission_mask = PermissionMask()
    hallo_obj.add_server(server)
    chan_one = server.get_channel_by_address("test_chan1".lower(), "test_chan1")
    chan_one.permission_mask = PermissionMask()
    user = server.get_user_by_address("test_user1".lower(), "test_user1")
    user.permission_mask = PermissionMask()
    group = UserGroup("test_group1", hallo_obj)
    group_mask = PermissionMask()
    group.permission_mask = group_mask
    hallo_obj.add_user_group(group)
    # Look the group up by its registered name.
    data = perm_cont.find_permission_mask(["user_group=test_group1"], user, chan_one)
    assert data == group_mask, "Did not find the correct permission mask."
def test_1_user_name():
    """find_permission_mask with "user=<name>" returns that user's mask."""
    perm_cont = Permissions()
    # Build a mock hallo instance with a server, a channel and two users.
    hallo_obj = Hallo()
    hallo_obj.permission_mask = PermissionMask()
    server = ServerMock(hallo_obj)
    server.name = "test_serv1"
    server.permission_mask = PermissionMask()
    hallo_obj.add_server(server)
    chan_one = server.get_channel_by_address("test_chan1".lower(), "test_chan1")
    chan_one.permission_mask = PermissionMask()
    user_one = server.get_user_by_address("test_user1".lower(), "test_user1")
    user_one.permission_mask = PermissionMask()
    user_two = server.get_user_by_address("test_user2".lower(), "test_user2")
    user_two_mask = PermissionMask()
    user_two.permission_mask = user_two_mask
    # Ask for the second user's permissions by name.
    data = perm_cont.find_permission_mask(["user=test_user2"], user_one, chan_one)
    assert data == user_two_mask, "Did not find the correct permission mask."
def test_1_user_just_name():
    """A bare name resolves to the mask of that user in the current channel."""
    perm_cont = Permissions()
    # Build a mock hallo instance with a server, a channel and two users,
    # both of whom are members of the channel.
    hallo_obj = Hallo()
    hallo_obj.permission_mask = PermissionMask()
    server = ServerMock(hallo_obj)
    server.name = "test_serv1"
    server.permission_mask = PermissionMask()
    hallo_obj.add_server(server)
    chan_one = server.get_channel_by_address("test_chan1".lower(), "test_chan1")
    chan_one.permission_mask = PermissionMask()
    user_one = server.get_user_by_address("test_user1".lower(), "test_user1")
    user_one.permission_mask = PermissionMask()
    chan_one.add_user(user_one)
    user_two = server.get_user_by_address("test_user2".lower(), "test_user2")
    user_two_mask = PermissionMask()
    user_two.permission_mask = user_two_mask
    chan_one.add_user(user_two)
    # Look the second user up by bare name within the channel.
    data = perm_cont.find_permission_mask(["test_user2"], user_one, chan_one)
    assert data == user_two_mask, "Did not find the correct permission mask."
def test_1_user_just_name_not_in_channel():
    """A bare name fails to resolve if that user is not in the channel."""
    perm_cont = Permissions()
    # Build a mock hallo instance; only the first user joins the channel.
    hallo_obj = Hallo()
    hallo_obj.permission_mask = PermissionMask()
    server = ServerMock(hallo_obj)
    server.name = "test_serv1"
    server.permission_mask = PermissionMask()
    hallo_obj.add_server(server)
    chan_one = server.get_channel_by_address("test_chan1".lower(), "test_chan1")
    chan_one.permission_mask = PermissionMask()
    user_one = server.get_user_by_address("test_user1".lower(), "test_user1")
    user_one.permission_mask = PermissionMask()
    chan_one.add_user(user_one)
    user_two = server.get_user_by_address("test_user2".lower(), "test_user2")
    user_two.permission_mask = PermissionMask()
    # test_user2 is not a channel member, so the bare-name lookup must fail.
    try:
        perm_cont.find_permission_mask(["test_user2"], user_one, chan_one)
    except hallo.modules.permission_control.PermissionControlException as e:
        message = str(e).lower()
        assert "error" in message
        assert "i can't find that permission mask" in message
    else:
        assert False, "Find permission mask should have failed."
def test_1_user_just_name_privmsg():
    """A bare name cannot be resolved when there is no current channel."""
    perm_cont = Permissions()
    # Build a mock hallo instance with a server, a channel and two users.
    hallo_obj = Hallo()
    hallo_obj.permission_mask = PermissionMask()
    server = ServerMock(hallo_obj)
    server.name = "test_serv1"
    server.permission_mask = PermissionMask()
    hallo_obj.add_server(server)
    chan_one = server.get_channel_by_address("test_chan1".lower(), "test_chan1")
    chan_one.permission_mask = PermissionMask()
    user_one = server.get_user_by_address("test_user1".lower(), "test_user1")
    user_one.permission_mask = PermissionMask()
    chan_one.add_user(user_one)
    user_two = server.get_user_by_address("test_user2".lower(), "test_user2")
    user_two.permission_mask = PermissionMask()
    # With a None destination (privmsg) the bare-name lookup must fail.
    try:
        perm_cont.find_permission_mask(["test_user2"], user_one, None)
    except hallo.modules.permission_control.PermissionControlException as e:
        message = str(e).lower()
        assert "error" in message
        assert "i can't find that permission mask" in message
    else:
        assert False, "Find permission mask should have failed."
| 2.53125 | 3 |
util/export_users.py | MBRUN138/open-contest | 0 | 12757742 | #!/usr/bin/python3
import sys
import json
import os.path
# Export all users from the JSON user database as "username,password" CSV lines.
if len(sys.argv) != 2:
    print("Usage: python3 export_users.py <db-path>")
    sys.exit(1)

db_path = sys.argv[1]

# Parse directly from the file object instead of reading it into memory first.
with open(os.path.join(db_path, 'users.json')) as f:
    users = json.load(f)

for user in users:
    print(f"{user['username']},{user['password']}")
leet/linkedlists/addTwoNumbers.py | Rahul-k25/python-cp-cheatsheet | 1 | 12757743 | <filename>leet/linkedlists/addTwoNumbers.py<gh_stars>1-10
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
"""
5
5
"""
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two non-negative integers stored as reversed-digit linked lists.

        Each node holds one decimal digit, least significant first; the result
        is returned in the same representation.
        """
        dummy = ListNode(0)
        tail = dummy
        carry = 0
        digit_sum = 0
        # Keep consuming digits while either list has nodes or a carry remains.
        while l1 or l2 or carry > 0:
            if l1:
                digit_sum += l1.val
                l1 = l1.next
            if l2:
                digit_sum += l2.val
                l2 = l2.next
            total = digit_sum + carry
            tail.next = ListNode(total % 10)
            tail = tail.next
            carry = total // 10
            digit_sum = 0
        return dummy.next
| 3.625 | 4 |
pyro/poutine/runtime.py | futurewarning/pyro | 0 | 12757744 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import functools
from pyro.params.param_store import ( # noqa: F401
_MODULE_NAMESPACE_DIVIDER,
ParamStoreDict,
)
# The global pyro effect-handler stack; the innermost active Messenger is last.
# apply_stack() walks it in reverse for processing and forward for postprocessing.
_PYRO_STACK = []

# The global ParamStore shared by all of pyro's parameter handling.
_PYRO_PARAM_STORE = ParamStoreDict()
class _DimAllocator:
"""
Dimension allocator for internal use by :class:`plate`.
There is a single global instance.
Note that dimensions are indexed from the right, e.g. -1, -2.
"""
def __init__(self):
self._stack = [] # in reverse orientation of log_prob.shape
def allocate(self, name, dim):
"""
Allocate a dimension to an :class:`plate` with given name.
Dim should be either None for automatic allocation or a negative
integer for manual allocation.
"""
if name in self._stack:
raise ValueError('duplicate plate "{}"'.format(name))
if dim is None:
# Automatically designate the rightmost available dim for allocation.
dim = -1
while -dim <= len(self._stack) and self._stack[-1 - dim] is not None:
dim -= 1
elif dim >= 0:
raise ValueError('Expected dim < 0 to index from the right, actual {}'.format(dim))
# Allocate the requested dimension.
while dim < -len(self._stack):
self._stack.append(None)
if self._stack[-1 - dim] is not None:
raise ValueError('\n'.join([
'at plates "{}" and "{}", collide at dim={}'.format(name, self._stack[-1 - dim], dim),
'\nTry moving the dim of one plate to the left, e.g. dim={}'.format(dim - 1)]))
self._stack[-1 - dim] = name
return dim
def free(self, name, dim):
"""
Free a dimension.
"""
free_idx = -1 - dim # stack index to free
assert self._stack[free_idx] == name
self._stack[free_idx] = None
while self._stack and self._stack[-1] is None:
self._stack.pop()
# Handles placement of plate dimensions; single global instance shared by all plates.
_DIM_ALLOCATOR = _DimAllocator()
class _EnumAllocator:
"""
Dimension allocator for internal use by :func:`~pyro.poutine.markov`.
There is a single global instance.
Note that dimensions are indexed from the right, e.g. -1, -2.
Note that ids are simply nonnegative integers here.
"""
def set_first_available_dim(self, first_available_dim):
"""
Set the first available dim, which should be to the left of all
:class:`plate` dimensions, e.g. ``-1 - max_plate_nesting``. This should
be called once per program. In SVI this should be called only once per
(guide,model) pair.
"""
assert first_available_dim < 0, first_available_dim
self.next_available_dim = first_available_dim
self.next_available_id = 0
self.dim_to_id = {} # only the global ids
def allocate(self, scope_dims=None):
"""
Allocate a new recyclable dim and a unique id.
If ``scope_dims`` is None, this allocates a global enumeration dim
that will never be recycled. If ``scope_dims`` is specified, this
allocates a local enumeration dim that can be reused by at any other
local site whose scope excludes this site.
:param set scope_dims: An optional set of (negative integer)
local enumeration dims to avoid when allocating this dim.
:return: A pair ``(dim, id)``, where ``dim`` is a negative integer
and ``id`` is a nonnegative integer.
:rtype: tuple
"""
id_ = self.next_available_id
self.next_available_id += 1
dim = self.next_available_dim
if dim == -float('inf'):
raise ValueError("max_plate_nesting must be set to a finite value for parallel enumeration")
if scope_dims is None:
# allocate a new global dimension
self.next_available_dim -= 1
self.dim_to_id[dim] = id_
else:
# allocate a new local dimension
while dim in scope_dims:
dim -= 1
return dim, id_
# Handles placement of enumeration dimensions; single global instance.
_ENUM_ALLOCATOR = _EnumAllocator()
class NonlocalExit(Exception):
    """
    Exception for exiting nonlocally from poutine execution.

    Used by poutine.EscapeMessenger to return site information.
    """
    def __init__(self, site, *args, **kwargs):
        """
        :param site: message at a pyro site constructor.
            Just stores the input site.
        """
        super().__init__(*args, **kwargs)
        self.site = site

    def reset_stack(self):
        """
        Reset the state of the frames remaining in the stack.
        Necessary for multiple re-executions in poutine.queue.
        """
        for messenger in reversed(_PYRO_STACK):
            messenger._reset()
            is_block = type(messenger).__name__ == "BlockMessenger"
            if is_block and messenger.hide_fn(self.site):
                break
def default_process_message(msg):
    """
    Default method for processing messages in inference.

    If the site is already done, observed, or carries a value, it is only
    marked done; otherwise the site's function is called with the stored
    args/kwargs and the result cached in ``msg["value"]``.

    :param msg: a message to be processed
    :returns: the processed message (previously only the early-exit branch
        returned it; both paths now return ``msg`` for consistency)
    """
    if msg["done"] or msg["is_observed"] or msg["value"] is not None:
        msg["done"] = True
        return msg

    msg["value"] = msg["fn"](*msg["args"], **msg["kwargs"])

    # after fn has been called, update msg to prevent it from being called again.
    msg["done"] = True
    return msg
def apply_stack(initial_msg):
    """
    Execute the effect stack at a single site according to the following scheme:

        1. For each ``Messenger`` in the stack from bottom to top,
           execute ``Messenger._process_message`` with the message;
           if the message field "stop" is True, stop;
           otherwise, continue
        2. Apply default behavior (``default_process_message``) to finish remaining site execution
        3. For each ``Messenger`` in the stack from top to bottom,
           execute ``_postprocess_message`` to update the message and internal messenger state with the site results
        4. If the message field "continuation" is not ``None``, call it with the message

    :param dict initial_msg: the starting version of the trace site
    :returns: ``None``
    """
    stack = _PYRO_STACK
    # TODO check at runtime if stack is valid

    # msg is used to pass information up and down the stack
    msg = initial_msg

    # Walk the stack from the innermost messenger outward until one asks to stop.
    frames_seen = 0
    for frame in reversed(stack):
        frames_seen += 1
        frame._process_message(msg)
        if msg["stop"]:
            break

    default_process_message(msg)

    # Unwind: only the frames that processed the message get to postprocess it.
    for frame in stack[-frames_seen:]:
        frame._postprocess_message(msg)

    cont = msg["continuation"]
    if cont is not None:
        cont(msg)

    return None
def am_i_wrapped():
    """
    Checks whether the current computation is wrapped in a poutine.

    :returns: bool
    """
    return bool(_PYRO_STACK)
def effectful(fn=None, type=None):
    """
    :param fn: function or callable that performs an effectful computation
    :param str type: the type label of the operation, e.g. `"sample"`

    Wrapper for calling :func:`~pyro.poutine.runtime.apply_stack` to apply any active effects.
    """
    # Support usage as a decorator factory: @effectful(type="sample").
    if fn is None:
        return functools.partial(effectful, type=type)
    # Idempotent: never double-wrap an already-effectful function.
    if getattr(fn, "_is_effectful", None):
        return fn
    assert type is not None, "must provide a type label for operation {}".format(fn)
    assert type != "message", "cannot use 'message' as keyword"

    @functools.wraps(fn)
    def _fn(*args, **kwargs):

        # Pull effect-specific keywords out of kwargs before calling fn.
        name = kwargs.pop("name", None)
        infer = kwargs.pop("infer", {})

        # An "obs" keyword marks the site as observed with that value.
        value = kwargs.pop("obs", None)
        is_observed = value is not None

        if not am_i_wrapped():
            # Fast path: no active messengers, call the function directly.
            return fn(*args, **kwargs)
        else:
            # Build the message dict consumed by the messengers on the stack;
            # downstream handlers rely on exactly these keys being present.
            msg = {
                "type": type,
                "name": name,
                "fn": fn,
                "is_observed": is_observed,
                "args": args,
                "kwargs": kwargs,
                "value": value,
                "scale": 1.0,
                "mask": None,
                "cond_indep_stack": (),
                "done": False,
                "stop": False,
                "continuation": None,
                "infer": infer,
            }
            # apply the stack and return its return value
            apply_stack(msg)
            return msg["value"]

    _fn._is_effectful = True

    return _fn
def _inspect():
    """
    EXPERIMENTAL Inspect the Pyro stack.

    .. warning:: The format of the returned message may change at any time and
        does not guarantee backwards compatibility.

    :returns: A message with all effects applied.
    :rtype: dict
    """
    # A no-op site: the lambda does nothing, but pushing the message through
    # apply_stack lets every active messenger stamp its state (scale, mask,
    # cond_indep_stack, ...) onto it.
    msg = {
        "type": "inspect",
        "name": "_pyro_inspect",
        "fn": lambda: True,
        "is_observed": False,
        "args": (),
        "kwargs": {},
        "value": None,
        # _do_not_trace keeps this synthetic site out of recorded traces.
        "infer": {"_do_not_trace": True},
        "scale": 1.0,
        "mask": None,
        "cond_indep_stack": (),
        "done": False,
        "stop": False,
        "continuation": None,
    }
    apply_stack(msg)
    return msg
def get_mask():
    """
    Records the effects of enclosing ``poutine.mask`` handlers.
    This is useful for avoiding expensive ``pyro.factor()`` computations during
    prediction, when the log density need not be computed, e.g.::

        def model():
            # ...
            if poutine.get_mask() is not False:
                log_density = my_expensive_computation()
                pyro.factor("foo", log_density)
            # ...

    :returns: The mask.
    :rtype: None, bool, or torch.Tensor
    """
    msg = _inspect()
    return msg["mask"]
| 2.296875 | 2 |
FusionIIIT/applications/hostel_management/models.py | suyash-code/Fusion-op | 0 | 12757745 | import datetime
from django.db import models
from django.contrib.auth.models import User
from applications.globals.models import ExtraInfo, Staff, Faculty
from applications.academic_information.models import Student
from django.utils import timezone
class HostelManagementConstants:
    """Choice tuples shared by the hostel-management models below."""
    # Lifecycle of a guest room (see GuestRoomDetail.room_status).
    ROOM_STATUS = (
        ('Booked', 'Booked'),
        ('CheckedIn', 'Checked In'),
        ('Available', 'Available'),
        ('UnderMaintenance', 'Under Maintenance'),
    )
    # Weekday indices for staff schedules (0 = Monday, matching datetime.weekday()).
    DAYS_OF_WEEK = (
        (0, 'Monday'),
        (1, 'Tuesday'),
        (2, 'Wednesday'),
        (3, 'Thursday'),
        (4, 'Friday'),
        (5, 'Saturday'),
        (6, 'Sunday')
    )
    # Lifecycle of a guest-room booking (see GuestRoomBooking.status).
    BOOKING_STATUS = (
        ("Confirmed" , 'Confirmed'),
        ("Pending" , 'Pending'),
        ("Rejected" , 'Rejected'),
        ("Canceled" , 'Canceled'),
        ("CancelRequested" , 'Cancel Requested'),
        ("CheckedIn" , 'Checked In'),
        ("Complete", 'Complete'),
        ("Forward", 'Forward')
    )
class Hall(models.Model):
    """A hostel hall, with its capacity and current student count."""
    hall_id = models.CharField(max_length=10)
    hall_name = models.CharField(max_length=50)
    max_accomodation = models.IntegerField(default=0)
    number_students = models.PositiveIntegerField(default=0)

    def __str__(self):
        return self.hall_id
class HallCaretaker(models.Model):
    """Assignment of a staff member as caretaker of a hall."""
    hall = models.ForeignKey(Hall, on_delete=models.CASCADE)
    staff = models.ForeignKey(Staff, on_delete=models.CASCADE)

    def __str__(self):
        # Bug fix: the original returned ``self.hall + self.staff``, which
        # adds two model instances and raises a TypeError at runtime.
        return "%s %s" % (self.hall, self.staff)
class HallWarden(models.Model):
    """Assignment of a faculty member as warden of a hall."""
    hall = models.ForeignKey(Hall, on_delete=models.CASCADE)
    faculty = models.ForeignKey(Faculty, on_delete=models.CASCADE)

    def __str__(self):
        # Bug fix: the original returned ``self.hall + self.faculty``, which
        # adds two model instances and raises a TypeError at runtime.
        return "%s %s" % (self.hall, self.faculty)
class GuestRoomDetail(models.Model):
    """A single guest room within a hall and its availability status."""
    hall = models.ForeignKey(Hall, on_delete=models.CASCADE)
    room_no = models.CharField(max_length=4, unique=True)
    # One of HostelManagementConstants.ROOM_STATUS; new rooms start Available.
    room_status = models.CharField(max_length=20, choices=HostelManagementConstants.ROOM_STATUS, default='Available')

    def __str__(self):
        return self.room_no
class GuestRoomBooking(models.Model):
    """A request (and its lifecycle) to book guest rooms in a hall.

    ``intender`` is the member of the institute who files the request on
    behalf of the guest described by the ``guest_*`` fields.
    """
    hall = models.ForeignKey(Hall, on_delete=models.CASCADE)
    intender = models.ForeignKey(ExtraInfo, on_delete=models.CASCADE)
    guest_name = models.CharField(max_length=100)
    guest_phone = models.CharField(max_length=15)
    guest_email = models.CharField(max_length=40, blank=True)
    guest_address = models.TextField(blank=True)
    rooms_required = models.IntegerField(default=1,null=True,blank=True)
    guest_room_id = models.ManyToManyField(GuestRoomDetail)
    total_guest = models.IntegerField(default=1)
    purpose = models.TextField()
    arrival_date = models.DateField(auto_now_add=False, auto_now=False)
    arrival_time = models.TimeField(auto_now_add=False, auto_now=False)
    departure_date = models.DateField(auto_now_add=False, auto_now=False)
    departure_time = models.TimeField(auto_now_add=False, auto_now=False)
    # One of HostelManagementConstants.BOOKING_STATUS; requests start Pending.
    status = models.CharField(max_length=15, choices=HostelManagementConstants.BOOKING_STATUS ,default ="Pending")
    booking_date = models.DateField(auto_now_add=False, auto_now=False, default=timezone.now)
    nationality = models.CharField(max_length=20, blank=True)

    def __str__(self):
        # Bug fix: the original referenced ``self.guest_id``, which is not a
        # field on this model and raised AttributeError; use the guest's name.
        return '%s ----> %s - %s' % (self.id, self.guest_name, self.status)
class StaffSchedule(models.Model):
    """A weekly duty slot for a staff member in a hall."""
    hall = models.ForeignKey(Hall, on_delete=models.CASCADE)
    # Bug fix: on_delete was ``models.ForeignKey`` (not a deletion behavior),
    # which Django rejects; cascade like the other relations in this module.
    staff_id = models.ForeignKey(Staff, on_delete=models.CASCADE)
    day = models.IntegerField(choices=HostelManagementConstants.DAYS_OF_WEEK)
    start_time = models.TimeField(null=True,blank=True)
    end_time = models.TimeField(null=True,blank=True)

    def __str__(self):
        return str(self.staff_id) + str(self.start_time) + '->' + str(self.end_time)
class HostelNoticeBoard(models.Model):
    """A notice posted to a hall's notice board, optionally with a file."""
    hall = models.ForeignKey(Hall, on_delete=models.CASCADE)
    # Bug fix: on_delete was ``models.ForeignKey`` (not a deletion behavior),
    # which Django rejects; cascade like the other relations in this module.
    posted_by = models.ForeignKey(ExtraInfo, on_delete=models.CASCADE)
    head_line = models.CharField(max_length=100)
    content = models.FileField(upload_to='hostel_management/', blank=True, null=True)
    description = models.TextField(blank=True)

    def __str__(self):
        return self.head_line
class HostelStudentAttendence(models.Model):
    """One attendance record: whether a student was present in a hall on a date."""
    hall = models.ForeignKey(Hall, on_delete=models.CASCADE)
    student_id = models.ForeignKey(Student, on_delete=models.CASCADE)
    date = models.DateField()
    present = models.BooleanField()

    def __str__(self):
        return str(self.student_id) + '->' + str(self.date) + '-' + str(self.present)
| 2 | 2 |
test/test_job.py | harshgits/cluster_submission | 0 | 12757746 | <filename>test/test_job.py
from time import sleep
from random import random
from numpy import savetxt, array
import os, sys
def main():
    """Toy cluster job: sleep to simulate work, then write one random result.

    The result is a single random float saved as ``<script>_data/<job-id>.csv``,
    where the job id is the last command-line argument.
    """
    sleep(15)  # simulate 15 seconds of work

    # saving a random number as the result
    out_dir = str(__file__) + '_data'
    # exist_ok avoids the check-then-create race when many jobs start at once.
    os.makedirs(out_dir, exist_ok=True)
    out_path = os.path.join(out_dir, str(sys.argv[-1]) + '.csv')
    savetxt(fname=out_path, X=array([[random()]]), header='result',
            delimiter=',', fmt='%s', comments='')


if __name__ == '__main__':
    main()
class_0612/class_homework.py | diaosi168/Test-demo | 0 | 12757747 | <filename>class_0612/class_homework.py
#1: API URL: http://v.juhe.cn/laohuangli/d
#   Request parameters: {'date':'2018-09-11','key':'a8f2732319cf0ad3cce8ec6ef7aa4f33'}
#2: Based on the lesson, write at least 5 test cases for the "laohuangli" (Chinese almanac) API;
#   the data given above is a valid request.
#3: Put the test-case data into an Excel file, then read each row into a sub-list and collect
#   all the row lists into one outer list.
#4: Implement the data-reading functionality as a class.
#from unittest.test import test_case
from openpyxl import load_workbook
class ExcelCase:
    """Reads test-case rows from an Excel worksheet via openpyxl."""

    def __init__(self, work_book, sheet):
        """
        :param work_book: path to the .xlsx workbook file
        :param sheet: name of the worksheet holding the test cases
        """
        self.work_book = load_workbook(work_book)
        self.sheet = self.work_book[sheet]
        self.list_2 = []

    def read_testcase(self):
        """Return all data rows (row 2 onward) as a list of row-value lists.

        Bug fix: the original appended into ``self.list_2`` on every call, so
        calling this method twice returned duplicated rows. The result is now
        rebuilt from scratch each call (and still cached on ``self.list_2``).
        """
        rows = [
            [self.sheet.cell(row, col).value
             for col in range(1, self.sheet.max_column + 1)]
            for row in range(2, self.sheet.max_row + 1)
        ]
        self.list_2 = rows
        return rows
if __name__ == '__main__':
    # Smoke test: read the homework workbook and print all rows.
    #work_book=load_workbook('test_case_2.xlsx')
    #sheet=work_book['test']
    t=ExcelCase('test_case_2.xlsx','test')
    print(t.read_testcase())

# NOTE: the triple-quoted string below is dead code — an alternative
# implementation kept from the assignment. It is never executed.
'''class ExcelCase:
    def __init__(self, work_excel, sheet):
        self.work_excel = work_excel
        self.sheet = sheet
    def test_date(self):
        wd = load_workbook(self.work_excel)
        sheet = wd[self.sheet]
        lsit_data = []
        for row in range(2, sheet.max_row + 1):
            list_1 = []
            for i in range(1, sheet.max_column + 1):
                list_1.append(sheet.cell(row, i).value)
            lsit_data.append(list_1)
        return lsit_data
if __name__ == '__main__':
    t = ExcelCase('test_case_2.xlsx', 'test')
    print(t.test_date())'''
naked/funcs/sklearn/__init__.py | MaxHalford/naked | 26 | 12757748 | <filename>naked/funcs/sklearn/__init__.py
from . import feature_extraction
from . import linear_model
from . import preprocessing
| 1.015625 | 1 |
core/simulation.py | lconaboy/seren3 | 1 | 12757749 | <gh_stars>1-10
import numpy as np
from seren3.core.pymses_snapshot import PymsesSnapshot
def load(name):
    """Load a previously saved Simulation by name from the configured store."""
    import json, os
    from seren3 import config

    store_dir = config.get("data", "sim_dir")
    # Guard clause: fail fast if the store directory is missing.
    if not os.path.isdir(store_dir):
        raise IOError("Cannot locate simulation store directory.")

    fname = "%s/%s.json" % (store_dir, name)
    with open(fname, "rb") as f:
        data = json.load(f)
    return Simulation(data["path"])
class Simulation(object):
    '''
    Object to encapsulate a simulation directory and offer snapshot access
    '''
    def __init__(self, path):
        import glob
        if len(glob.glob("%s/output_*" % path)) == 0:
            raise Exception("No outputs found in %s" % path)
        self.path = path
        self.store = {}

    def __str__(self):
        return "Simulation: %s" % self.path

    def __repr__(self):
        return self.__str__()

    def __getitem__(self, ioutput):
        # sim[ioutput] -> snapshot for that output number
        return self.snapshot(ioutput)

    def __len__(self):
        return len(self.numbered_outputs)

    def __iter__(self):
        for ioutput in self.numbered_outputs:
            yield self[ioutput]

    def to_JSON(self):
        '''
        Serialize this object's attributes to a JSON string.
        '''
        import json
        return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)

    def save(self, name):
        '''
        Write to json file
        '''
        import os
        from seren3 import config

        store_dir = config.get("data", "sim_dir")
        if (os.path.isdir(store_dir) is False):
            raise IOError("Cannot locate simulation store directory.")
        else:
            fname = "%s/%s.json" % (store_dir, name)
            if (os.path.isfile(fname)):
                # Bug fix: the original format string had no %s placeholder,
                # so this line raised TypeError instead of the intended IOError.
                raise IOError("Refusing to overwrite file: %s" % fname)

            with open(fname, "w") as f:
                f.write(self.to_JSON())

    def snapshot(self, ioutput, **kwargs):
        '''
        Return the PymsesSnapshot for output number ``ioutput``.
        '''
        return PymsesSnapshot(self.path, ioutput, **kwargs)

    def redshift(self, z):
        '''
        Return ioutput of snapshot closest to this redshift
        '''
        idx = (np.abs(self.redshifts - z)).argmin()
        outputs = self.outputs
        iout = int(outputs[idx][-5:])
        return iout

    @property
    def redshifts(self):
        '''
        Returns a list of available redshifts
        '''
        redshifts = []
        for iout in self.numbered_outputs:
            info = "%s/output_%05i/info_%05i.txt" % (self.path, iout, iout)
            # Bug fix: the file handle was never closed; use a context manager.
            with open(info, 'r') as f:
                for nline in range(1, 11):
                    line = f.readline()
                    if nline == 10:
                        # aexp is on line 10 of the RAMSES info file.
                        aexp = np.float32(line.split("=")[1])
            redshift = 1.0 / aexp - 1.0
            redshifts.append(float(redshift))

        return np.array(redshifts)

    @property
    def numbered_outputs(self):
        '''
        Sorted integer output numbers, parsed from the output_* dir names.
        '''
        import re
        outputs = self.outputs
        numbered = np.zeros(len(outputs))
        for i in range(len(outputs)):
            result = re.findall(r'\d+', outputs[i])[0]
            ioutput = int(result)
            numbered[i] = ioutput
        return np.array(sorted(numbered), dtype=np.int32)

    @property
    def outputs(self):
        '''
        Naturally sorted output_* directory names (basenames only).
        '''
        import glob
        from seren3.utils import string_utils

        outputs = glob.glob("%s/output_*" % self.path)
        outputs.sort(key=string_utils.natural_keys)
        result = []
        for o in outputs:
            if '/' in o:
                result.append( o.split('/')[-1] )
            else:
                result.append(o)
        return result

    @property
    def age(self):
        '''
        Returns age of simulation at last snapshot
        '''
        last_iout = self.numbered_outputs[-1]
        return self[last_iout].age

    @property
    def initial_redshift(self):
        '''
        Returns redshift of first output
        '''
        ifirst = self.numbered_outputs[0]
        info = '%s/output_%05i/info_%05i.txt' % (self.path, ifirst, ifirst)
        with open(info, 'r') as f:
            nline = 1
            while nline <= 10:
                line = f.readline()
                if(nline == 10):
                    aexp = np.float32(line.split("=")[1])
                nline += 1
        return (1. / aexp) - 1.

    def redshift_func(self, zmax=20.0, zmin=0.0, zstep=0.001):
        '''
        Returns an interpolation function that gives redshift as a function
        of age
        '''
        import cosmolopy.distance as cd

        init_z = self.initial_redshift
        cosmo = self[self.redshift(init_z)].cosmo
        # cosmolopy rejects these non-cosmological keys.
        del cosmo['z'], cosmo['aexp']

        func = cd.quick_redshift_age_function(zmax, zmin, zstep, **cosmo)
        return lambda age: func(age.in_units("s"))

    def age_func(self, zmax=20.0, zmin=0.0, zstep=0.001, return_inverse=False):
        '''
        Returns an interpolation function that gives age as a function
        of redshift
        '''
        import cosmolopy.distance as cd
        from seren3.array import SimArray

        init_z = self.initial_redshift
        cosmo = self[self.redshift(init_z)].cosmo
        del cosmo['z'], cosmo['aexp']

        func = cd.quick_age_function(zmax, zmin, zstep, return_inverse, **cosmo)
        return lambda z: SimArray(func(z), "s").in_units("Gyr")

    def z_reion(self, thresh=0.999, return_vw_z=False):
        '''
        Return the redshift of reionization, if the xHII_reion_history.p
        table exists
        '''
        import os
        if os.path.isfile("%s/xHII_reion_history.p" % self.path):
            from seren3.scripts.mpi import reion_history
            from seren3.utils import first_above

            table = reion_history.load_xHII_table(self.path)
            vw = np.zeros(len(table))
            z = np.zeros(len(table))

            for i in range(len(table)):
                vw[i] = table[i+1]["volume_weighted"]
                z[i] = table[i+1]["z"]

            # First output where the volume-weighted xHII exceeds the threshold.
            eor_idx = first_above(thresh, vw)
            if return_vw_z:
                return z[eor_idx], z, vw
            return z[eor_idx]

    def write_rockstar_info(self, out_path=None):
        '''
        If a rockstar directory exists, writes a rockstar_info.txt file
        with out_list numbers against aexp
        '''
        import os
        from seren3 import config
        rockstar_base = config.get("halo", "rockstar_base")

        if out_path is None: out_path = "%s/%s/" % (self.path, rockstar_base)
        if os.path.isdir("%s/" % (out_path)):
            import glob
            import re
            info = {}

            files = glob.glob("%s/out_*.list" % (out_path))
            for fname in files:
                # Bug fix: int(filter(...)) fails on Python 3 (filter returns
                # an iterator) and could also pick up digits from the path;
                # parse the number from the basename instead.
                out_num = int(re.findall(r'\d+', os.path.basename(fname))[0])
                with open(fname, "r") as f:
                    while True:
                        line = f.readline()
                        if line.startswith('#a'):
                            spl = line.split('=')
                            aexp = float(spl[1])
                            info[out_num] = aexp
                            break
            # Write to file
            keys = sorted(info.keys())
            with open("%s/info_rockstar.txt" % (out_path), "w") as f:
                for i in keys:
                    line = "%i\t%f\n" % (i, info[i])
                    f.write(line)
            return info
        else:
            raise IOError("Could not locate Rockstar directory")
| 2.390625 | 2 |
src/preprocess/preprocess_midi.py | symphonynet/SymphonyNet | 0 | 12757750 | from pprint import pprint
import numpy as np
from collections import Counter
import itertools, copy
from more_itertools import split_before
import os, json, traceback, time, warnings, shutil, sys
import multiprocessing
from miditoolkit.midi.parser import MidiFile
from miditoolkit.midi.containers import Instrument
from miditoolkit.midi.containers import Note as mtkNote
from chorder import Dechorder
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from encoding import pit2str, pos2str, bom2str, dur2str, trk2str, ins2str, pit2alphabet
WORKERS=32
def measure_calc_chord(evt_seq):
    """Infer a chord symbol for one measure of note events.

    :param evt_seq: events for one measure. ``evt_seq[0]`` must be the BOM
        marker ``[tick, 'BOM', ..., time_signature]``; the remaining entries
        (except the terminal marker) are note-on events of the form
        ``[tick, 'ON', pitch, program, is_drum, track_idx, dur]``.
    :returns: a chord event ``[bom_tick, 'CHR', None, None, None, None, name]``
        where ``name`` is the detected chord (e.g. ``'CM'``, with dominant
        sevenths spelled ``'D7'``) or ``'NA'`` when detection fails.
    """
    assert evt_seq[0][1] == 'BOM', "wrong measure for chord"
    bom_tick = evt_seq[0][0]
    # Chord detection window is capped at 8 positions regardless of time signature.
    ts = min(evt_seq[0][-1], 8)

    mtknotes = []
    for evt in evt_seq[1:-1]:
        assert evt[1] == 'ON', "wrong measure for chord: " + evt[1] + evt_seq[-1][1]
        if evt[3] == 128:  # exclude drums
            continue
        o, p, d = evt[0] - bom_tick, evt[2], evt[-1]
        if p < 21 or p > 108:  # exclude unusual pitch
            continue
        if o < 8:
            # Velocity is irrelevant for chord quality; use a fixed 60.
            note = mtkNote(60, p, o, o + d if o > 0 else 8)
            mtknotes.append(note)
        else:
            # Events are time-ordered, so everything past the window can be skipped.
            break

    chord, score = Dechorder.get_chord_quality(mtknotes, start=0, end=ts)
    if score < 0:
        return [bom_tick, 'CHR', None, None, None, None, 'NA']
    return [bom_tick, 'CHR', None, None, None, None,
            pit2alphabet[chord.root_pc] + (chord.quality if chord.quality != '7' else 'D7')]
def merge_drums(p_midi):
    """Collapse every percussion track of ``p_midi`` into one drum track.

    Tracks with no notes are dropped entirely; all drum notes are gathered,
    sorted by onset, de-duplicated and stored on a single new instrument.
    """
    drum_notes = []
    kept_instruments = []
    for instrument in p_midi.instruments:
        if not instrument.notes:
            continue
        if instrument.is_drum:
            drum_notes.extend(instrument.notes)
        else:
            kept_instruments.append(instrument)

    if drum_notes:
        drum_notes.sort(key=lambda x: x.start)
        # remove duplicate notes (groupby over the sorted list)
        drum_notes = [k for k, _ in itertools.groupby(drum_notes)]
        merged = Instrument(program=0, is_drum=True, name="drum")
        merged.notes = drum_notes
        kept_instruments.append(merged)

    p_midi.instruments = kept_instruments
def merge_sparse_track(p_midi, CANDI_THRES=50, MIN_THRES=5):
    """Merge note-poor tracks into a matching dense track, or drop tiny ones.

    Tracks with fewer than ``CANDI_THRES`` notes are merge candidates: their
    notes are appended to the first dense track sharing the same (program,
    is_drum) key. A candidate with no match survives only if it still has
    more than ``MIN_THRES`` notes.
    """
    dense, sparse, dense_keys = [], [], []
    for instrument in p_midi.instruments:
        if len(instrument.notes) < CANDI_THRES:
            sparse.append(instrument)
        else:
            dense.append(instrument)
            dense_keys.append((instrument.program, instrument.is_drum))

    for candidate in sparse:
        key = (candidate.program, candidate.is_drum)
        if key in dense_keys:
            # merge into the first dense track with the same program/drum flag
            for target in dense:
                if (target.program, target.is_drum) == key:
                    target.notes.extend(candidate.notes)
                    break
        elif len(candidate.notes) > MIN_THRES:
            # no merge target, but enough notes to keep as its own track
            dense.append(candidate)

    p_midi.instruments = dense
def limit_max_track(p_midi, MAX_TRACK=40): # merge track with least notes and limit the maximum amount of track to 40
    """Cap the number of tracks at ``MAX_TRACK``.

    Tracks are sorted so the drum track / most-note tracks come first; tracks
    beyond the cap are merged into a same-program kept track where possible,
    otherwise silently dropped. Finally any remaining drum track is re-labelled
    as program 128 with ``is_drum`` cleared.
    """
    good_instruments = p_midi.instruments
    good_instruments.sort(
        key=lambda x: (not x.is_drum, -len(x.notes)))  # place drum track or the most note track at first
    # Sanity check: after sorting, index 0 is the drum track or the densest track.
    assert good_instruments[0].is_drum == True or len(good_instruments[0].notes) >= len(good_instruments[1].notes), tuple(len(x.notes) for x in good_instruments[:3])
    #assert good_instruments[0].is_drum == False, (, len(good_instruments[2]))

    track_idx_lst = list(range(len(good_instruments)))

    if len(good_instruments) > MAX_TRACK:
        # Keep the first MAX_TRACK tracks; try to fold each overflow track into them.
        new_good_instruments = copy.deepcopy(good_instruments[:MAX_TRACK])
        #print(midi_file_path)
        for id in track_idx_lst[MAX_TRACK:]:
            cur_ins = good_instruments[id]
            merged = False
            # Prefer merging into the sparsest kept track with a matching key.
            new_good_instruments.sort(key=lambda x: len(x.notes))
            for nid, ins in enumerate(new_good_instruments):
                if cur_ins.program == ins.program and cur_ins.is_drum == ins.is_drum:
                    new_good_instruments[nid].notes.extend(cur_ins.notes)
                    merged = True
                    break
            if not merged:
                pass#print('Track {:d} deprecated, program {:d}, note count {:d}'.format(id, cur_ins.program, len(cur_ins.notes)))
        good_instruments = new_good_instruments
        #print(trks, probs, chosen)

    assert len(good_instruments) <= MAX_TRACK, len(good_instruments)
    for idx, good_instrument in enumerate(good_instruments):
        if good_instrument.is_drum:
            # Re-encode the drum track as ordinary program 128 for the tokenizer.
            good_instruments[idx].program = 128
            good_instruments[idx].is_drum = False
    # for i, note in enumerate(good_instruments.notes):
    #     good_instruments.notes[i].pitch += 128
    p_midi.instruments = good_instruments
def get_init_note_events(p_midi):
    """Flatten all tracks of ``p_midi`` into sorted, de-duplicated ON events.

    Notes longer than four beats are split into consecutive chunks of at most
    ``4 * ticks_per_beat`` ticks.

    :returns: ``(note_events, note_on_ticks, note_dur_lst)`` where each event
        is ``[start, "ON", pitch, program, is_drum, track_idx, dur]`` and the
        two auxiliary lists collect onsets and durations for beat estimation.
    """
    note_events, note_on_ticks, note_dur_lst = [], [], []
    max_dur = 4 * p_midi.ticks_per_beat
    for track_idx, instrument in enumerate(p_midi.instruments):
        for note in instrument.notes:
            note_dur = note.end - note.start
            if note_dur / max_dur > 1:
                # special case: split an over-long note into max_dur chunks
                for start in range(note.start, note.end, max_dur):
                    dur = min(max_dur, note.end - start)
                    note_events.append([start, "ON", note.pitch, instrument.program,
                                        instrument.is_drum, track_idx, dur])
                    note_on_ticks.append(start)
                    note_dur_lst.append(dur)
            else:
                note_events.append([note.start, "ON", note.pitch, instrument.program,
                                    instrument.is_drum, track_idx, note_dur])
                # for score analysis and beat estimating when score has no time signature
                note_on_ticks.append(note.start)
                note_dur_lst.append(note.end - note.start)

    # sort, then drop exact duplicate events
    note_events.sort(key=lambda x: (x[0], x[1] == "ON", x[5], x[4], x[3], x[2], x[-1]))
    note_events = [k for k, _ in itertools.groupby(note_events)]
    return note_events, note_on_ticks, note_dur_lst
def calculate_measure(p_midi, first_event_tick, last_event_tick):
    """Build BOM/EOM (begin/end-of-measure) events covering the whole score.

    Measures are laid out from the MIDI time-signature changes; the final
    measure of each signature region is clipped at the next signature's start.

    Raises AssertionError when the file has no time signature, when the first
    signature starts after the first note, or when a signature yields a
    non-integer measure length in ticks.
    """
    measure_events = []
    time_signature_changes = p_midi.time_signature_changes
    if not time_signature_changes:
        raise AssertionError("No time_signature_changes")
    if time_signature_changes[0].time != 0 and \
            time_signature_changes[0].time > first_event_tick:
        raise AssertionError("First time signature start with None zero tick")
    # Collapse consecutive duplicate signatures so each region is maximal.
    deduped = [time_signature_changes[0]]
    for sig in time_signature_changes[1:]:
        last = deduped[-1]
        if last.numerator != sig.numerator or last.denominator != sig.denominator:
            deduped.append(sig)
    time_signature_changes = deduped
    for idx, sig in enumerate(time_signature_changes):
        # Ticks in one measure under this signature.
        ticks_per_measure = p_midi.ticks_per_beat * (4 / sig.denominator) * sig.numerator
        assert ticks_per_measure.is_integer(), "ticks_per_measure Error"
        step = int(ticks_per_measure)
        region_start = sig.time
        if idx + 1 < len(time_signature_changes):
            region_end = time_signature_changes[idx + 1].time
        else:
            # Last region: extend one full measure past the final event.
            region_end = last_event_tick + step
        for measure_start_tick in range(region_start, region_end, step):
            measure_events.append([measure_start_tick, "BOM", None, None, None, None, 0])
            # Clip the last measure of the region at the next signature change.
            measure_events.append([min(measure_start_tick + step, region_end),
                                   "EOM", None, None, None, None, 0])
    return measure_events
def quantize_by_nth(nth_tick, note_events):
    """Snap all ticks in a measure-bracketed event list to a grid of ``nth_tick``.

    ``note_events`` must be flat and bracketed: BOM, ON..., EOM for each measure,
    with consecutive measures tiling the timeline (each BOM tick equals the
    previous EOM tick).  Measure lengths are rounded to a whole number of grid
    steps (minimum 1); note onsets are rounded relative to their measure start
    and clamped inside the measure; note durations are rounded with a minimum
    of 1.  All ticks in the returned events are expressed in grid units.
    """
    # Eg. Quantize by 32th note
    half = nth_tick / 2
    split_score = list(split_before(note_events, lambda x: x[1] == "BOM"))
    measure_durs = []
    eom_tick = 0
    # Pass 1: quantize measure lengths and in-measure note onsets.
    for measure_id, measure in enumerate(split_score):
        bom_tick = measure[0][0]
        # Bug fix: the message was a plain string, so the ticks never showed.
        assert bom_tick == eom_tick, f'measure time error {bom_tick} {eom_tick}'
        eom_tick = measure[-1][0]
        mea_dur = eom_tick - bom_tick
        if mea_dur < nth_tick:  # too short for even one step: round up to 1
            measure_durs.append(1)
        else:
            if mea_dur % nth_tick < half:  # quantize to left
                measure_durs.append(mea_dur // nth_tick)
            else:
                measure_durs.append(mea_dur // nth_tick + 1)
        for evt in measure[1:-1]:
            assert evt[1] == 'ON', f'measure structure error {evt[1]}'
            rel_tick = evt[0] - bom_tick
            # Round the onset to the grid, clamped to stay inside the measure.
            if rel_tick % nth_tick <= half:
                rel_tick = min(rel_tick // nth_tick, measure_durs[-1] - 1)
            else:
                rel_tick = min(rel_tick // nth_tick + 1, measure_durs[-1] - 1)
            evt[0] = rel_tick
    # Pass 2: rebase measures back-to-back in grid units; quantize durations.
    final_events = []
    lasteom = 0
    for measure_id, measure in enumerate(split_score):
        measure[0][0] = lasteom
        measure[-1][0] = measure[0][0] + measure_durs[measure_id]
        lasteom = measure[-1][0]
        for event in measure[1:-1]:
            event[0] += measure[0][0]
            if event[-1] < nth_tick:  # duration too short, quantize to 1
                event[-1] = 1
            else:
                if event[-1] % nth_tick <= half:
                    event[-1] = event[-1] // nth_tick
                else:
                    event[-1] = event[-1] // nth_tick + 1
        final_events.extend(measure)
    return final_events
def prettify(note_events, ticks_per_beat):
    """Trim empty edge measures, zero-base ticks, quantize to a 32nd-note grid,
    and return the score as a list of per-measure event lists.

    Raises AssertionError("Measure duration error") when a quantized measure is
    empty or longer than 100 grid steps, and structural AssertionErrors when
    the first/last notes are not properly bracketed by BOM/EOM events.
    """
    # Locate the first and last real notes; measures outside are padding.
    first_on = next(i for i, e in enumerate(note_events) if e[1] == "ON")
    last_on = next(i for i in reversed(range(len(note_events))) if note_events[i][1] == "ON")
    assert note_events[first_on - 1][1] == "BOM", "measure_start Error"
    assert note_events[last_on + 1][1] == "EOM", "measure_end Error"
    # Remove invalid measures on both sides, then re-check the bracketing.
    note_events = note_events[first_on - 1:last_on + 2]
    assert note_events[0][1] == "BOM", "measure_start Error"
    assert note_events[-1][1] == "EOM", "measure_end Error"
    # Shift the whole score so it starts at tick zero.
    offset = note_events[0][0]
    if offset != 0:
        for event in note_events:
            event[0] -= offset
    from fractions import Fraction
    # 32nd-note grid kept exact as a Fraction (ticks_per_beat need not be /8).
    note_events = quantize_by_nth(Fraction(ticks_per_beat, 8), note_events)
    sort_key = lambda x: (x[0], x[1] == "ON", x[1] == "BOM", x[1] == "EOM",
                          x[5], x[4], x[3], x[2], x[-1])
    # Sort, drop exact duplicates, and sort once more for a stable order.
    note_events.sort(key=sort_key)
    note_events = [k for k, _ in itertools.groupby(note_events)]
    note_events.sort(key=sort_key)
    split_score = list(split_before(note_events, lambda x: x[1] == "BOM"))
    for measure_idx, measure in enumerate(split_score):
        measure_dur = measure[-1][0] - measure[0][0]
        if measure_dur > 100:
            raise AssertionError("Measure duration error")
        # Stash the (grid-unit) measure length on its BOM event.
        split_score[measure_idx][0][-1] = measure_dur
        # A zero-length measure is also invalid.
        if measure_dur == 0:
            raise AssertionError("Measure duration error")
    return split_score
def get_pos_and_cc(split_score):
    """Flatten per-measure events, inserting CC (track header) and POS (onset
    position) marker events for every track of every measure.

    Each output measure keeps its BOM and chord events first, followed by one
    block per track: a CC event carrying the absolute track id, then POS/ON
    events in time order.  EOM events are intentionally dropped.  Measures in
    ``split_score`` are sorted in place.
    """
    new_event_seq = []
    for measure in split_score:
        # Order: BOM, CHR, ON-events grouped by track id, EOM last.
        measure.sort(key=lambda x: (x[1] == "EOM", x[1] == "ON", x[1] == 'CHR',
                                    x[1] == "BOM", x[-2]))
        bom_tick = measure[0][0]
        # Group the note events (between chord and EOM) by absolute track id.
        track_ids = {evt[-2] for evt in measure[2:-1]}
        tracks = [[evt for evt in measure if evt[-2] == tid] for tid in track_ids]
        for track in tracks:
            onsets = set()
            trk_abs_num = -1
            for event in track:
                if event[1] == "ON":
                    assert trk_abs_num in (-1, event[-2]), \
                        "Error: found inconsistent trackid within same track"
                    trk_abs_num = event[-2]
                    # Onset position relative to the measure start.
                    onsets.add(event[0] - bom_tick)
            for pos in onsets:
                track.append([pos + bom_tick, "POS", None, None, None, None, pos])
            track.insert(0, [bom_tick, "CC", None, None, None, None, trk_abs_num])
            track.sort(key=lambda x: (x[0], x[1] == "ON", x[1] == "POS",
                                      x[1] == "CC", x[5], x[4], x[3], x[2]))
        new_event_seq.append(measure[0])
        new_event_seq.append(measure[1])
        for track in tracks:
            new_event_seq.extend(track)
    return new_event_seq
def event_seq_to_str(new_event_seq):
    """Serialize the marker/note event sequence into 4-token-per-event strings.

    Each event yields exactly four tokens.  ON events produce
    (pitch, duration, track, instrument); every other event type produces its
    own type token followed by the 'RZ'/'TZ'/'YZ' placeholder triple.
    Also returns a Counter of the chord symbols seen.
    """
    char_events = []
    chord_cnt = Counter()
    padding = ['RZ', 'TZ', 'YZ']
    for evt in new_event_seq:
        kind = evt[1]
        if kind == 'ON':
            char_events.append(pit2str(evt[2]))   # pitch token
            char_events.append(dur2str(evt[-1]))  # duration token
            char_events.append(trk2str(evt[-2]))  # track token
            char_events.append(ins2str(evt[3]))   # instrument token
        elif kind == 'POS':
            char_events.append(pos2str(evt[-1]))  # in-measure onset position
            char_events.extend(padding)
        elif kind == 'BOM':
            char_events.append(bom2str(evt[-1]))  # measure length
            char_events.extend(padding)
        elif kind == 'CC':
            char_events.append('NT')              # new-track marker
            char_events.extend(padding)
        elif kind == 'CHR':
            chord_cnt[evt[-1]] += 1
            char_events.append('H' + evt[-1])     # chord symbol token
            char_events.extend(padding)
        else:
            assert False, ("evt type error", kind)
    return char_events, chord_cnt
# abs_pos type pitch program is_drum track_id duration/rela_pos
def midi_to_event_seq_str(midi_file_path, readonly=False):
    """Convert one MIDI file into its string-token event sequence.

    Pipeline: parse the file, drop invalid notes, merge drum tracks, then
    (unless ``readonly``) merge sparse tracks and cap the track count; extract
    note events, bracket them with measure (BOM/EOM) events, quantize and clean
    them, compute one chord per measure, add CC/POS marker events, and finally
    render everything as string tokens.

    Returns ``(char_events, chord_cnt)``.  May raise AssertionError for scores
    the pipeline rejects (e.g. "No time_signature_changes",
    "Measure duration error").
    """
    p_midi = MidiFile(midi_file_path)
    for ins in p_midi.instruments:
        ins.remove_invalid_notes(verbose=False)
    merge_drums(p_midi)
    if not readonly:
        # Destructive track clean-ups are skipped in read-only mode.
        merge_sparse_track(p_midi)
        limit_max_track(p_midi)
    note_events, note_on_ticks, _ = get_init_note_events(p_midi)
    # Measure boundaries span from the first to the last note onset.
    measure_events = calculate_measure(p_midi, min(note_on_ticks), max(note_on_ticks))
    note_events.extend(measure_events)
    note_events.sort(key=lambda x: (x[0], x[1] == "ON", x[1] == "BOM", x[1] == "EOM",
                                    x[5], x[4], x[3], x[2]))
    split_score = prettify(note_events, p_midi.ticks_per_beat)
    for measure_idx, measure in enumerate(split_score):  # calculate chord for every measure
        chord_evt = measure_calc_chord(measure)
        # Chord event goes right after the measure's BOM event.
        split_score[measure_idx].insert(1, chord_evt)
    new_event_seq = get_pos_and_cc(split_score)
    # new_event_seq[0:0] = [[0, "BOS", None, None, None, None, 0]]
    # new_event_seq.append([new_event_seq[-1][0], "EOS", None, None, None, None, 0])
    char_events, chord_cnt = event_seq_to_str(new_event_seq)
    return char_events, chord_cnt
def mp_worker(file_path):
    """Convert one MIDI file; return (char_events, chord_cnt) on success or the
    string sentinel "error" on any failure.

    All known failure modes (unreadable files, missing time signature, bad
    measure durations, anything unexpected) are logged and swallowed so one
    bad file cannot kill the multiprocessing pool.  The original returned a
    second sentinel "errortouch" for I/O-ish failures, which the consumer did
    not recognize and crashed on; every failure now maps to "error".
    """
    try:
        return midi_to_event_seq_str(file_path)
    except (OSError, EOFError, ValueError, KeyError):
        # Unreadable or structurally broken MIDI file.
        print(file_path)
        traceback.print_exc(limit=0)
        print()
        return "error"
    except AssertionError as e:
        if str(e) == "No time_signature_changes":
            print("Moving no time sig to folder..........")
            return "error"
        elif str(e) == "Measure duration error":
            return "error"
        else:
            print("Other Assertion Error", str(e), file_path)
            return "error"
    except Exception:
        # Anything else: log the file and keep going.
        print(file_path)
        traceback.print_exc(limit=0)
        print()
        return "error"
def mp_handler(file_paths):
    """Convert all MIDI files in parallel and write the token corpus to disk.

    Successful conversions are concatenated (one piece per line, tokens
    space-separated) into ../../data/preprocessed/linear_4096_chord.txt.
    Failures are only counted.

    Bug fix: any string return from a worker is now treated as a failure.
    Previously only the exact value "error" was recognized; other strings
    (e.g. the legacy "errortouch" sentinel) fell into the unpacking path,
    the bare except only printed, and the following ``len(event_seq)`` then
    raised UnboundLocalError, killing the whole run.
    """
    start = time.time()
    broken_counter = 0
    good_counter = 0
    event_seq_res = []
    chord_cnter = Counter()
    print(f'starts processing midis with {WORKERS} processes')
    with multiprocessing.Pool(WORKERS) as p:
        for ret in p.imap(mp_worker, file_paths):
            # Workers signal failure with a string sentinel.
            if isinstance(ret, str):
                broken_counter += 1
                continue
            event_seq, chord_cnt = ret
            if len(event_seq) > 0:
                event_seq_res.append(event_seq)
                chord_cnter += chord_cnt
                good_counter += 1
    print("Process data takes: ", time.time() - start)
    print(good_counter, broken_counter)
    pprint(chord_cnter)
    # ----------------------------------------------------------------------------------
    txt_start = time.time()
    if not os.path.exists('../../data/preprocessed/'):
        os.makedirs('../../data/preprocessed/')
    with open("../../data/preprocessed/linear_4096_chord.txt", "w", encoding="utf-8") as f:
        for piece in event_seq_res:
            f.write(' '.join(piece) + '\n')
    print("Create txt file takes: ", time.time() - txt_start)
# ----------------------------------------------------------------------------------
if __name__ == '__main__':
    # Suppress library warnings (e.g. from the MIDI parser) during bulk runs.
    warnings.filterwarnings('ignore')
    pop_folder_path = "../../data/midis"
    # Folders holding files rejected by earlier runs are skipped entirely.
    skip_dirs = {"invalid", "notimesig", "too_many_tracks", "measure_error", "delete"}
    file_paths = []
    for path, directories, files in os.walk(pop_folder_path):
        if path.split("/")[-1] in skip_dirs:
            continue
        # NOTE(review): a disabled numeric-filename filter was removed here;
        # it set the same flag in both its try and except branches, so it
        # never skipped anything.
        for file in files:
            if file.endswith((".mid", ".MID")):
                file_paths.append(path + "/" + file)
    # run multi-processing midi extractor
    mp_handler(file_paths[:])