File: neurokit2/ecg/ecg_delineate.py (repo: danibene/NeuroKit, commit df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a, license: MIT, 43,864 bytes, Python)
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.signal
from ..epochs import epochs_create, epochs_to_df
from ..signal import (
signal_findpeaks,
signal_formatpeaks,
signal_rate,
signal_resample,
signal_smooth,
signal_zerocrossings,
)
from ..stats import standardize
from .ecg_peaks import ecg_peaks
from .ecg_segment import ecg_segment
def ecg_delineate(
ecg_cleaned,
rpeaks=None,
sampling_rate=1000,
method="dwt",
show=False,
show_type="peaks",
check=False,
**kwargs
):
"""**Delineate QRS complex**
    Function to delineate the QRS complex, i.e., the different waves of the cardiac cycle. A
    typical ECG heartbeat consists of a P wave, a QRS complex and a T wave. The P wave represents
    the wave of depolarization that spreads from the SA node throughout the atria. The QRS complex
    reflects the rapid depolarization of the right and left ventricles. Since the ventricles are
    the largest part of the heart in terms of mass, the QRS complex usually has a much larger
    amplitude than the P wave. The T wave represents the repolarization of the ventricles. On rare
    occasions, a U wave can be seen following the T wave. The U wave is believed to be related to
    the last remnants of ventricular repolarization.
Parameters
----------
ecg_cleaned : Union[list, np.array, pd.Series]
The cleaned ECG channel as returned by ``ecg_clean()``.
rpeaks : Union[list, np.array, pd.Series]
The samples at which R-peaks occur. Accessible with the key "ECG_R_Peaks" in the info
dictionary returned by ``ecg_findpeaks()``.
sampling_rate : int
The sampling frequency of ``ecg_signal`` (in Hz, i.e., samples/second). Defaults to 1000.
method : str
Can be one of ``"peak"`` for a peak-based method, ``"cwt"`` for continuous wavelet transform
or ``"dwt"`` (default) for discrete wavelet transform.
show : bool
        If ``True``, returns a plot visualizing the delineated waves information.
show_type: str
        The type of delineated waves information shown in the plot.
Can be ``"peaks"``, ``"bounds_R"``, ``"bounds_T"``, ``"bounds_P"`` or ``"all"``.
check : bool
Defaults to ``False``. If ``True``, replaces the delineated features with ``np.nan`` if its
standardized distance from R-peaks is more than 3.
**kwargs
Other optional arguments.
Returns
-------
waves : dict
A dictionary containing additional information.
        For the peak (derivative) method, the dictionary contains the samples at which P-peaks,
        Q-peaks, S-peaks, T-peaks, P-onsets and T-offsets occur, accessible with the keys
        ``"ECG_P_Peaks"``, ``"ECG_Q_Peaks"``, ``"ECG_S_Peaks"``, ``"ECG_T_Peaks"``,
        ``"ECG_P_Onsets"`` and ``"ECG_T_Offsets"``, respectively.
        For the wavelet methods, the dictionary additionally contains the samples at which
        P-offsets, T-onsets, QRS-onsets and QRS-offsets occur, so that the full set of keys is
        ``"ECG_P_Peaks"``, ``"ECG_P_Onsets"``, ``"ECG_P_Offsets"``, ``"ECG_Q_Peaks"``,
        ``"ECG_R_Onsets"``, ``"ECG_R_Offsets"``, ``"ECG_S_Peaks"``, ``"ECG_T_Peaks"``,
        ``"ECG_T_Onsets"`` and ``"ECG_T_Offsets"``.
signals : DataFrame
        A DataFrame of the same length as the input signal in which occurrences of
        peaks, onsets and offsets are marked as "1" in a list of zeros.
See Also
--------
ecg_clean, .signal_fixpeaks, ecg_peaks, .signal_rate, ecg_process, ecg_plot
Examples
--------
* Step 1. Delineate
.. ipython:: python
import neurokit2 as nk
# Simulate ECG signal
ecg = nk.ecg_simulate(duration=10, sampling_rate=1000)
# Get R-peaks location
_, rpeaks = nk.ecg_peaks(ecg, sampling_rate=1000)
# Delineate cardiac cycle
signals, waves = nk.ecg_delineate(ecg, rpeaks, sampling_rate=1000)
* Step 2. Plot P-Peaks and T-Peaks
.. ipython:: python
@savefig p_ecg_delineate1.png scale=100%
nk.events_plot([waves["ECG_P_Peaks"], waves["ECG_T_Peaks"]], ecg)
@suppress
plt.close()
References
--------------
- Martínez, J. P., Almeida, R., Olmos, S., Rocha, A. P., & Laguna, P. (2004). A wavelet-based
ECG delineator: evaluation on standard databases. IEEE Transactions on biomedical engineering,
51(4), 570-581.
"""
# Sanitize input for ecg_cleaned
if isinstance(ecg_cleaned, pd.DataFrame):
cols = [col for col in ecg_cleaned.columns if "ECG_Clean" in col]
if cols:
ecg_cleaned = ecg_cleaned[cols[0]].values
else:
raise ValueError(
"NeuroKit error: ecg_delineate(): Wrong input, we couldn't extract"
"cleaned signal."
)
elif isinstance(ecg_cleaned, dict):
for i in ecg_cleaned:
cols = [col for col in ecg_cleaned[i].columns if "ECG_Clean" in col]
if cols:
signals = epochs_to_df(ecg_cleaned)
ecg_cleaned = signals[cols[0]].values
else:
raise ValueError(
"NeuroKit error: ecg_delineate(): Wrong input, we couldn't extract"
"cleaned signal."
)
# Sanitize input for rpeaks
if rpeaks is None:
_, rpeaks = ecg_peaks(ecg_cleaned, sampling_rate=sampling_rate)
rpeaks = rpeaks["ECG_R_Peaks"]
if isinstance(rpeaks, dict):
rpeaks = rpeaks["ECG_R_Peaks"]
method = method.lower() # remove capitalised letters
if method in ["peak", "peaks", "derivative", "gradient"]:
waves = _ecg_delineator_peak(ecg_cleaned, rpeaks=rpeaks, sampling_rate=sampling_rate)
elif method in ["cwt", "continuous wavelet transform"]:
waves = _ecg_delineator_cwt(ecg_cleaned, rpeaks=rpeaks, sampling_rate=sampling_rate)
elif method in ["dwt", "discrete wavelet transform"]:
waves = _dwt_ecg_delineator(ecg_cleaned, rpeaks, sampling_rate=sampling_rate)
else:
raise ValueError(
"NeuroKit error: ecg_delineate(): 'method' should be one of 'peak'," "'cwt' or 'dwt'."
)
# Remove NaN in Peaks, Onsets, and Offsets
waves_noNA = waves.copy()
for feature in waves_noNA.keys():
waves_noNA[feature] = [int(x) for x in waves_noNA[feature] if ~np.isnan(x) and x > 0]
instant_peaks = signal_formatpeaks(waves_noNA, desired_length=len(ecg_cleaned))
signals = instant_peaks
waves_sanitized = {}
for feature, values in waves.items():
waves_sanitized[feature] = [x for x in values if x > 0 or x is np.nan]
if show is True:
_ecg_delineate_plot(
ecg_cleaned,
rpeaks=rpeaks,
signals=signals,
signal_features_type=show_type,
sampling_rate=sampling_rate,
**kwargs
)
if check is True:
waves_sanitized = _ecg_delineate_check(waves_sanitized, rpeaks)
return signals, waves_sanitized
# =============================================================================
# WAVELET METHOD (DWT)
# =============================================================================
def _dwt_resample_points(peaks, sampling_rate, desired_sampling_rate):
"""Resample given points to a different sampling rate."""
if isinstance(peaks, np.ndarray): # peaks are passed in from previous processing steps
# Prevent overflow by converting to np.int64 (peaks might be passed in containing np.int32).
peaks = peaks.astype(dtype=np.int64)
elif isinstance(peaks, list): # peaks returned from internal functions
# Cannot be converted to int since list might contain np.nan. Automatically cast to np.float64 if list contains np.nan.
peaks = np.array(peaks)
peaks_resample = peaks * desired_sampling_rate / sampling_rate
peaks_resample = [np.nan if np.isnan(x) else int(x) for x in peaks_resample.tolist()]
return peaks_resample
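# Worked example for the resampling above (hypothetical numbers, not from the library's docs):
# R-peak indices [100, 250] recorded at 1000 Hz become [200, 500] at a 2000 Hz analysis rate,
# since each index is scaled by desired_sampling_rate / sampling_rate; np.nan entries pass
# through unchanged.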
def _dwt_ecg_delineator(ecg, rpeaks, sampling_rate, analysis_sampling_rate=2000):
"""Delinate ecg signal using discrete wavelet transforms.
Parameters
----------
ecg : Union[list, np.array, pd.Series]
The cleaned ECG channel as returned by `ecg_clean()`.
rpeaks : Union[list, np.array, pd.Series]
The samples at which R-peaks occur. Accessible with the key "ECG_R_Peaks" in the info dictionary
returned by `ecg_findpeaks()`.
sampling_rate : int
The sampling frequency of `ecg_signal` (in Hz, i.e., samples/second).
analysis_sampling_rate : int
The sampling frequency for analysis (in Hz, i.e., samples/second).
Returns
--------
dict
Dictionary of the points.
"""
    # No DWT-based method is defined for the Q and S peaks,
    # so the manual approach from the "peak" method is adopted
qpeaks = []
speaks = []
heartbeats = ecg_segment(ecg, rpeaks, sampling_rate=sampling_rate)
for i, rpeak in enumerate(rpeaks):
heartbeat = heartbeats[str(i + 1)]
# Get index of R peaks
R = heartbeat.index.get_loc(np.min(heartbeat.index.values[heartbeat.index.values > 0]))
# Q wave
Q_index, Q = _ecg_delineator_peak_Q(rpeak, heartbeat, R)
qpeaks.append(Q_index)
# S wave
S_index, S = _ecg_delineator_peak_S(rpeak, heartbeat)
speaks.append(S_index)
    # DWT to delineate T/P waves, their onsets and offsets, and the QRS onsets and offsets
ecg = signal_resample(
ecg, sampling_rate=sampling_rate, desired_sampling_rate=analysis_sampling_rate
)
dwtmatr = _dwt_compute_multiscales(ecg, 9)
# # only for debugging
# for idx in [0, 1, 2, 3]:
# plt.plot(dwtmatr[idx + 3], label=f'W[{idx}]')
# plt.plot(ecg, '--')
# plt.legend()
# plt.grid(True)
# plt.show()
rpeaks_resampled = _dwt_resample_points(rpeaks, sampling_rate, analysis_sampling_rate)
tpeaks, ppeaks = _dwt_delineate_tp_peaks(
ecg, rpeaks_resampled, dwtmatr, sampling_rate=analysis_sampling_rate
)
qrs_onsets, qrs_offsets = _dwt_delineate_qrs_bounds(
rpeaks_resampled, dwtmatr, ppeaks, tpeaks, sampling_rate=analysis_sampling_rate
)
ponsets, poffsets = _dwt_delineate_tp_onsets_offsets(
ppeaks, rpeaks_resampled, dwtmatr, sampling_rate=analysis_sampling_rate
)
tonsets, toffsets = _dwt_delineate_tp_onsets_offsets(
tpeaks,
rpeaks_resampled,
dwtmatr,
sampling_rate=analysis_sampling_rate,
onset_weight=0.6,
duration_onset=0.6,
)
return dict(
ECG_P_Peaks=_dwt_resample_points(
ppeaks, analysis_sampling_rate, desired_sampling_rate=sampling_rate
),
ECG_P_Onsets=_dwt_resample_points(
ponsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate
),
ECG_P_Offsets=_dwt_resample_points(
poffsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate
),
ECG_Q_Peaks=qpeaks,
ECG_R_Onsets=_dwt_resample_points(
qrs_onsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate
),
ECG_R_Offsets=_dwt_resample_points(
qrs_offsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate
),
ECG_S_Peaks=speaks,
ECG_T_Peaks=_dwt_resample_points(
tpeaks, analysis_sampling_rate, desired_sampling_rate=sampling_rate
),
ECG_T_Onsets=_dwt_resample_points(
tonsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate
),
ECG_T_Offsets=_dwt_resample_points(
toffsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate
),
)
def _dwt_adjust_parameters(rpeaks, sampling_rate, duration=None, target=None):
average_rate = np.median(signal_rate(peaks=rpeaks, sampling_rate=sampling_rate))
if target == "degree":
        # adjust degree of dwt by sampling_rate and HR
scale_factor = (sampling_rate / 250) / (average_rate / 60)
return int(np.log2(scale_factor))
elif target == "duration":
# adjust duration of search by HR
return np.round(duration * (60 / average_rate), 3)
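# Worked example for the adjustment above (hypothetical numbers): at sampling_rate=2000 Hz and a
# median heart rate of 60 bpm, scale_factor = (2000 / 250) / (60 / 60) = 8, so the returned
# degree offset is int(log2(8)) = 3; a search duration of 0.25 s stays
# 0.25 * (60 / 60) = 0.25 s at that heart rate.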
def _dwt_delineate_tp_peaks(
ecg,
rpeaks,
dwtmatr,
sampling_rate=250,
qrs_width=0.13,
p2r_duration=0.2,
rt_duration=0.25,
degree_tpeak=3,
degree_ppeak=2,
epsilon_T_weight=0.25,
epsilon_P_weight=0.02,
):
"""
Parameters
----------
ecg : Union[list, np.array, pd.Series]
The cleaned ECG channel as returned by `ecg_clean()`.
rpeaks : Union[list, np.array, pd.Series]
The samples at which R-peaks occur. Accessible with the key "ECG_R_Peaks" in the info dictionary
returned by `ecg_findpeaks()`.
dwtmatr : np.array
Output of `_dwt_compute_multiscales()`. Multiscales of wavelet transform.
sampling_rate : int
The sampling frequency of `ecg_signal` (in Hz, i.e., samples/second).
    qrs_width : float
        Approximate duration of the QRS complex in seconds. Defaults to 0.13 seconds.
    p2r_duration : float
        Approximate duration from P-peaks to R-peaks in seconds.
    rt_duration : float
        Approximate duration from R-peaks to T-peaks in seconds.
    degree_tpeak : int
        Degree of the wavelet transform used for T-peaks (scale 2**3).
    degree_ppeak : int
        Degree of the wavelet transform used for P-peaks (scale 2**2).
    epsilon_T_weight : float
        Epsilon weight applied to the RMS value of the wavelet transform. Appendix (A.3).
    epsilon_P_weight : float
        Epsilon weight applied to the RMS value of the wavelet transform. Appendix (A.4).
"""
srch_bndry = int(0.5 * qrs_width * sampling_rate)
degree_add = _dwt_adjust_parameters(rpeaks, sampling_rate, target="degree")
# sanitize search duration by HR
p2r_duration = _dwt_adjust_parameters(
rpeaks, sampling_rate, duration=p2r_duration, target="duration"
)
rt_duration = _dwt_adjust_parameters(
rpeaks, sampling_rate, duration=rt_duration, target="duration"
)
tpeaks = []
for rpeak_ in rpeaks:
if np.isnan(rpeak_):
tpeaks.append(np.nan)
continue
# search for T peaks from R peaks
srch_idx_start = rpeak_ + srch_bndry
srch_idx_end = rpeak_ + 2 * int(rt_duration * sampling_rate)
dwt_local = dwtmatr[degree_tpeak + degree_add, srch_idx_start:srch_idx_end]
height = epsilon_T_weight * np.sqrt(np.mean(np.square(dwt_local)))
if len(dwt_local) == 0:
tpeaks.append(np.nan)
continue
ecg_local = ecg[srch_idx_start:srch_idx_end]
peaks, __ = scipy.signal.find_peaks(np.abs(dwt_local), height=height)
peaks = list(
filter(lambda p: np.abs(dwt_local[p]) > 0.025 * max(dwt_local), peaks)
) # pylint: disable=W0640
if dwt_local[0] > 0: # just append
peaks = [0] + peaks
# detect morphology
candidate_peaks = []
candidate_peaks_scores = []
for idx_peak, idx_peak_nxt in zip(peaks[:-1], peaks[1:]):
correct_sign = (
dwt_local[idx_peak] > 0 and dwt_local[idx_peak_nxt] < 0
) # pylint: disable=R1716
if correct_sign:
idx_zero = (
signal_zerocrossings(dwt_local[idx_peak : idx_peak_nxt + 1])[0] + idx_peak
)
# This is the score assigned to each peak. The peak with the highest score will be
# selected.
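                # (It rewards ECG amplitude at the zero-crossing and penalizes candidates
                # that fall later than the expected R-to-T delay.)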
score = ecg_local[idx_zero] - (
float(idx_zero) / sampling_rate - (rt_duration - 0.5 * qrs_width)
)
candidate_peaks.append(idx_zero)
candidate_peaks_scores.append(score)
if not candidate_peaks:
tpeaks.append(np.nan)
continue
tpeaks.append(candidate_peaks[np.argmax(candidate_peaks_scores)] + srch_idx_start)
ppeaks = []
for rpeak in rpeaks:
if np.isnan(rpeak):
ppeaks.append(np.nan)
continue
        # search for P peaks from R peaks
srch_idx_start = rpeak - 2 * int(p2r_duration * sampling_rate)
srch_idx_end = rpeak - srch_bndry
dwt_local = dwtmatr[degree_ppeak + degree_add, srch_idx_start:srch_idx_end]
height = epsilon_P_weight * np.sqrt(np.mean(np.square(dwt_local)))
if len(dwt_local) == 0:
ppeaks.append(np.nan)
continue
ecg_local = ecg[srch_idx_start:srch_idx_end]
peaks, __ = scipy.signal.find_peaks(np.abs(dwt_local), height=height)
peaks = list(filter(lambda p: np.abs(dwt_local[p]) > 0.025 * max(dwt_local), peaks))
if dwt_local[0] > 0: # just append
peaks = [0] + peaks
# detect morphology
candidate_peaks = []
candidate_peaks_scores = []
for idx_peak, idx_peak_nxt in zip(peaks[:-1], peaks[1:]):
correct_sign = (
dwt_local[idx_peak] > 0 and dwt_local[idx_peak_nxt] < 0
) # pylint: disable=R1716
if correct_sign:
idx_zero = (
signal_zerocrossings(dwt_local[idx_peak : idx_peak_nxt + 1])[0] + idx_peak
)
# This is the score assigned to each peak. The peak with the highest score will be
# selected.
score = ecg_local[idx_zero] - abs(
float(idx_zero) / sampling_rate - p2r_duration
) # Minus p2r because of the srch_idx_start
candidate_peaks.append(idx_zero)
candidate_peaks_scores.append(score)
if not candidate_peaks:
ppeaks.append(np.nan)
continue
ppeaks.append(candidate_peaks[np.argmax(candidate_peaks_scores)] + srch_idx_start)
return tpeaks, ppeaks
def _dwt_delineate_tp_onsets_offsets(
peaks,
rpeaks,
dwtmatr,
sampling_rate=250,
duration_onset=0.3,
duration_offset=0.3,
onset_weight=0.4,
offset_weight=0.4,
degree_onset=2,
degree_offset=2,
):
# sanitize search duration by HR
duration_onset = _dwt_adjust_parameters(
rpeaks, sampling_rate, duration=duration_onset, target="duration"
)
duration_offset = _dwt_adjust_parameters(
rpeaks, sampling_rate, duration=duration_offset, target="duration"
)
degree = _dwt_adjust_parameters(rpeaks, sampling_rate, target="degree")
onsets = []
offsets = []
for i in range(len(peaks)): # pylint: disable=C0200
# look for onsets
srch_idx_start = peaks[i] - int(duration_onset * sampling_rate)
srch_idx_end = peaks[i]
if srch_idx_start is np.nan or srch_idx_end is np.nan:
onsets.append(np.nan)
continue
dwt_local = dwtmatr[degree_onset + degree, srch_idx_start:srch_idx_end]
onset_slope_peaks, __ = scipy.signal.find_peaks(dwt_local)
if len(onset_slope_peaks) == 0:
onsets.append(np.nan)
continue
epsilon_onset = onset_weight * dwt_local[onset_slope_peaks[-1]]
if not (dwt_local[: onset_slope_peaks[-1]] < epsilon_onset).any():
onsets.append(np.nan)
continue
candidate_onsets = np.where(dwt_local[: onset_slope_peaks[-1]] < epsilon_onset)[0]
onsets.append(candidate_onsets[-1] + srch_idx_start)
# # only for debugging
# events_plot([candidate_onsets, onset_slope_peaks], dwt_local)
# plt.plot(ecg[srch_idx_start: srch_idx_end], '--', label='ecg')
# plt.show()
for i in range(len(peaks)): # pylint: disable=C0200
# look for offset
srch_idx_start = peaks[i]
srch_idx_end = peaks[i] + int(duration_offset * sampling_rate)
if srch_idx_start is np.nan or srch_idx_end is np.nan:
offsets.append(np.nan)
continue
dwt_local = dwtmatr[degree_offset + degree, srch_idx_start:srch_idx_end]
offset_slope_peaks, __ = scipy.signal.find_peaks(-dwt_local)
if len(offset_slope_peaks) == 0:
offsets.append(np.nan)
continue
epsilon_offset = -offset_weight * dwt_local[offset_slope_peaks[0]]
if not (-dwt_local[offset_slope_peaks[0] :] < epsilon_offset).any():
offsets.append(np.nan)
continue
candidate_offsets = (
np.where(-dwt_local[offset_slope_peaks[0] :] < epsilon_offset)[0]
+ offset_slope_peaks[0]
)
offsets.append(candidate_offsets[0] + srch_idx_start)
# # only for debugging
# events_plot([candidate_offsets, offset_slope_peaks], dwt_local)
# plt.plot(ecg[srch_idx_start: srch_idx_end], '--', label='ecg')
# plt.show()
return onsets, offsets
def _dwt_delineate_qrs_bounds(rpeaks, dwtmatr, ppeaks, tpeaks, sampling_rate=250):
degree = _dwt_adjust_parameters(rpeaks, sampling_rate, target="degree")
onsets = []
for i in range(len(rpeaks)): # pylint: disable=C0200
# look for onsets
srch_idx_start = ppeaks[i]
srch_idx_end = rpeaks[i]
if srch_idx_start is np.nan or srch_idx_end is np.nan:
onsets.append(np.nan)
continue
dwt_local = dwtmatr[2 + degree, srch_idx_start:srch_idx_end]
onset_slope_peaks, __ = scipy.signal.find_peaks(-dwt_local)
if len(onset_slope_peaks) == 0:
onsets.append(np.nan)
continue
epsilon_onset = 0.5 * -dwt_local[onset_slope_peaks[-1]]
if not (-dwt_local[: onset_slope_peaks[-1]] < epsilon_onset).any():
onsets.append(np.nan)
continue
candidate_onsets = np.where(-dwt_local[: onset_slope_peaks[-1]] < epsilon_onset)[0]
onsets.append(candidate_onsets[-1] + srch_idx_start)
# only for debugging
# import neurokit as nk
# events_plot(candidate_onsets, -dwt_local)
# plt.plot(ecg[srch_idx_start: srch_idx_end], '--', label='ecg')
# plt.legend()
# plt.show()
offsets = []
for i in range(len(rpeaks)): # pylint: disable=C0200
# look for offsets
srch_idx_start = rpeaks[i]
srch_idx_end = tpeaks[i]
if srch_idx_start is np.nan or srch_idx_end is np.nan:
offsets.append(np.nan)
continue
dwt_local = dwtmatr[2 + degree, srch_idx_start:srch_idx_end]
onset_slope_peaks, __ = scipy.signal.find_peaks(dwt_local)
if len(onset_slope_peaks) == 0:
offsets.append(np.nan)
continue
epsilon_offset = 0.5 * dwt_local[onset_slope_peaks[0]]
if not (dwt_local[onset_slope_peaks[0] :] < epsilon_offset).any():
offsets.append(np.nan)
continue
candidate_offsets = (
np.where(dwt_local[onset_slope_peaks[0] :] < epsilon_offset)[0] + onset_slope_peaks[0]
)
offsets.append(candidate_offsets[0] + srch_idx_start)
# # only for debugging
# events_plot(candidate_offsets, dwt_local)
# plt.plot(ecg[srch_idx_start: srch_idx_end], '--', label='ecg')
# plt.legend()
# plt.show()
return onsets, offsets
def _dwt_compute_multiscales(ecg: np.ndarray, max_degree):
"""Return multiscales wavelet transforms."""
def _apply_H_filter(signal_i, power=0):
zeros = np.zeros(2 ** power - 1)
timedelay = 2 ** power
banks = np.r_[
1.0 / 8,
zeros,
3.0 / 8,
zeros,
3.0 / 8,
zeros,
1.0 / 8,
]
signal_f = scipy.signal.convolve(signal_i, banks, mode="full")
signal_f[:-timedelay] = signal_f[timedelay:] # timeshift: 2 steps
return signal_f
def _apply_G_filter(signal_i, power=0):
zeros = np.zeros(2 ** power - 1)
timedelay = 2 ** power
banks = np.r_[2, zeros, -2]
signal_f = scipy.signal.convolve(signal_i, banks, mode="full")
signal_f[:-timedelay] = signal_f[timedelay:] # timeshift: 1 step
return signal_f
dwtmatr = []
intermediate_ret = np.array(ecg)
for deg in range(max_degree):
S_deg = _apply_G_filter(intermediate_ret, power=deg)
T_deg = _apply_H_filter(intermediate_ret, power=deg)
dwtmatr.append(S_deg)
intermediate_ret = np.array(T_deg)
dwtmatr = [arr[: len(ecg)] for arr in dwtmatr] # rescale transforms to the same length
return np.array(dwtmatr)
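# Note on the transform above (informal summary, not from the library's docs): row `deg` of
# dwtmatr holds the detail signal at scale 2**(deg + 1), obtained by applying the G (high-pass)
# filter bank to the running low-pass output, while the H filter output feeds the next
# iteration; rows are trimmed to len(ecg) so they stay sample-aligned with the input.
# Minimal usage sketch with a hypothetical signal:
#
#     sig = np.sin(np.linspace(0, 10 * np.pi, 2000))
#     mats = _dwt_compute_multiscales(sig, 5)  # shape (5, 2000)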
# =============================================================================
# WAVELET METHOD (CWT)
# =============================================================================
def _ecg_delineator_cwt(ecg, rpeaks=None, sampling_rate=1000):
# P-Peaks and T-Peaks
tpeaks, ppeaks = _peaks_delineator(ecg, rpeaks, sampling_rate=sampling_rate)
# qrs onsets and offsets
qrs_onsets, qrs_offsets = _onset_offset_delineator(
ecg, rpeaks, peak_type="rpeaks", sampling_rate=sampling_rate
)
# ppeaks onsets and offsets
p_onsets, p_offsets = _onset_offset_delineator(
ecg, ppeaks, peak_type="ppeaks", sampling_rate=sampling_rate
)
# tpeaks onsets and offsets
t_onsets, t_offsets = _onset_offset_delineator(
ecg, tpeaks, peak_type="tpeaks", sampling_rate=sampling_rate
)
# No dwt defined method for Q and S peak
# Adopting manual method from "peak" method
q_peaks = []
s_peaks = []
heartbeats = ecg_segment(ecg, rpeaks, sampling_rate=sampling_rate)
for i, rpeak in enumerate(rpeaks):
heartbeat = heartbeats[str(i + 1)]
# Get index of R peaks
R = heartbeat.index.get_loc(np.min(heartbeat.index.values[heartbeat.index.values > 0]))
# Q wave
Q_index, Q = _ecg_delineator_peak_Q(rpeak, heartbeat, R)
q_peaks.append(Q_index)
# S wave
S_index, S = _ecg_delineator_peak_S(rpeak, heartbeat)
s_peaks.append(S_index)
# Return info dictionary
return {
"ECG_P_Onsets": p_onsets,
"ECG_P_Peaks": ppeaks,
"ECG_P_Offsets": p_offsets,
"ECG_Q_Peaks": q_peaks,
"ECG_R_Onsets": qrs_onsets,
"ECG_R_Offsets": qrs_offsets,
"ECG_S_Peaks": s_peaks,
"ECG_T_Onsets": t_onsets,
"ECG_T_Peaks": tpeaks,
"ECG_T_Offsets": t_offsets,
}
# Internals
# ---------------------
def _onset_offset_delineator(ecg, peaks, peak_type="rpeaks", sampling_rate=1000):
# Try loading pywt
try:
import pywt
except ImportError:
raise ImportError(
"NeuroKit error: ecg_delineator(): the 'PyWavelets' module is required for this method to run. ",
"Please install it first (`pip install PyWavelets`).",
)
    # 'gaus1' wavelet: first derivative of a Gaussian
scales = np.array([1, 2, 4, 8, 16])
cwtmatr, __ = pywt.cwt(ecg, scales, "gaus1", sampling_period=1.0 / sampling_rate)
half_wave_width = int(0.1 * sampling_rate) # NEED TO CHECK
onsets = []
offsets = []
for index_peak in peaks:
# find onset
if np.isnan(index_peak):
onsets.append(np.nan)
offsets.append(np.nan)
continue
if peak_type == "rpeaks":
search_window = cwtmatr[2, index_peak - half_wave_width : index_peak]
prominence = 0.20 * max(search_window)
height = 0.0
wt_peaks, wt_peaks_data = scipy.signal.find_peaks(
search_window, height=height, prominence=prominence
)
elif peak_type in ["tpeaks", "ppeaks"]:
search_window = -cwtmatr[4, index_peak - half_wave_width : index_peak]
prominence = 0.10 * max(search_window)
height = 0.0
wt_peaks, wt_peaks_data = scipy.signal.find_peaks(
search_window, height=height, prominence=prominence
)
if len(wt_peaks) == 0:
# print("Fail to find onset at index: %d", index_peak)
onsets.append(np.nan)
else:
# The last peak is nfirst in (Martinez, 2004)
nfirst = wt_peaks[-1] + index_peak - half_wave_width
if peak_type == "rpeaks":
if wt_peaks_data["peak_heights"][-1] > 0:
epsilon_onset = 0.05 * wt_peaks_data["peak_heights"][-1]
elif peak_type == "ppeaks":
epsilon_onset = 0.50 * wt_peaks_data["peak_heights"][-1]
elif peak_type == "tpeaks":
epsilon_onset = 0.25 * wt_peaks_data["peak_heights"][-1]
leftbase = wt_peaks_data["left_bases"][-1] + index_peak - half_wave_width
if peak_type == "rpeaks":
candidate_onsets = (
np.where(cwtmatr[2, nfirst - 100 : nfirst] < epsilon_onset)[0] + nfirst - 100
)
elif peak_type in ["tpeaks", "ppeaks"]:
candidate_onsets = (
np.where(-cwtmatr[4, nfirst - 100 : nfirst] < epsilon_onset)[0] + nfirst - 100
)
candidate_onsets = candidate_onsets.tolist() + [leftbase]
if len(candidate_onsets) == 0:
onsets.append(np.nan)
else:
onsets.append(max(candidate_onsets))
# find offset
if peak_type == "rpeaks":
search_window = -cwtmatr[2, index_peak : index_peak + half_wave_width]
prominence = 0.50 * max(search_window)
wt_peaks, wt_peaks_data = scipy.signal.find_peaks(
search_window, height=height, prominence=prominence
)
elif peak_type in ["tpeaks", "ppeaks"]:
search_window = cwtmatr[4, index_peak : index_peak + half_wave_width]
prominence = 0.10 * max(search_window)
wt_peaks, wt_peaks_data = scipy.signal.find_peaks(
search_window, height=height, prominence=prominence
)
if len(wt_peaks) == 0:
# print("Fail to find offsets at index: %d", index_peak)
offsets.append(np.nan)
else:
nlast = wt_peaks[0] + index_peak
if peak_type == "rpeaks":
if wt_peaks_data["peak_heights"][0] > 0:
epsilon_offset = 0.125 * wt_peaks_data["peak_heights"][0]
elif peak_type == "ppeaks":
epsilon_offset = 0.9 * wt_peaks_data["peak_heights"][0]
elif peak_type == "tpeaks":
epsilon_offset = 0.4 * wt_peaks_data["peak_heights"][0]
rightbase = wt_peaks_data["right_bases"][0] + index_peak
if peak_type == "rpeaks":
candidate_offsets = (
np.where((-cwtmatr[2, nlast : nlast + 100]) < epsilon_offset)[0] + nlast
)
elif peak_type in ["tpeaks", "ppeaks"]:
candidate_offsets = (
np.where((cwtmatr[4, nlast : nlast + 100]) < epsilon_offset)[0] + nlast
)
candidate_offsets = candidate_offsets.tolist() + [rightbase]
if len(candidate_offsets) == 0:
offsets.append(np.nan)
else:
offsets.append(min(candidate_offsets))
onsets = np.array(onsets, dtype="object")
offsets = np.array(offsets, dtype="object")
return onsets, offsets
def _peaks_delineator(ecg, rpeaks, sampling_rate=1000):
# Try loading pywt
try:
import pywt
except ImportError:
raise ImportError(
"NeuroKit error: ecg_delineator(): the 'PyWavelets' module is required for this method to run. ",
"Please install it first (`pip install PyWavelets`).",
)
    # 'gaus1' wavelet: first derivative of a Gaussian
scales = np.array([1, 2, 4, 8, 16])
cwtmatr, __ = pywt.cwt(ecg, scales, "gaus1", sampling_period=1.0 / sampling_rate)
qrs_duration = 0.1
search_boundary = int(0.9 * qrs_duration * sampling_rate / 2)
significant_peaks_groups = []
for i in range(len(rpeaks) - 1):
# search for T peaks and P peaks from R peaks
start = rpeaks[i] + search_boundary
end = rpeaks[i + 1] - search_boundary
search_window = cwtmatr[4, start:end]
height = 0.25 * np.sqrt(np.mean(np.square(search_window)))
peaks_tp, heights_tp = scipy.signal.find_peaks(np.abs(search_window), height=height)
peaks_tp = peaks_tp + rpeaks[i] + search_boundary
# set threshold for heights of peaks to find significant peaks in wavelet
threshold = 0.125 * max(search_window)
significant_peaks_tp = []
significant_peaks_tp = [
peaks_tp[j] for j in range(len(peaks_tp)) if heights_tp["peak_heights"][j] > threshold
]
significant_peaks_groups.append(
_find_tppeaks(ecg, significant_peaks_tp, sampling_rate=sampling_rate)
)
tpeaks, ppeaks = zip(*[(g[0], g[-1]) for g in significant_peaks_groups])
tpeaks = np.array(tpeaks, dtype="object")
ppeaks = np.array(ppeaks, dtype="object")
return tpeaks, ppeaks
def _find_tppeaks(ecg, keep_tp, sampling_rate=1000):
# Try loading pywt
try:
import pywt
except ImportError:
raise ImportError(
"NeuroKit error: ecg_delineator(): the 'PyWavelets' module is required for this method to run. ",
"Please install it first (`pip install PyWavelets`).",
)
    # 'gaus1' wavelet: first derivative of a Gaussian
scales = np.array([1, 2, 4, 8, 16])
cwtmatr, __ = pywt.cwt(ecg, scales, "gaus1", sampling_period=1.0 / sampling_rate)
max_search_duration = 0.05
tppeaks = []
for index_cur, index_next in zip(keep_tp[:-1], keep_tp[1:]):
# limit 1
correct_sign = (
cwtmatr[4, :][index_cur] < 0 and cwtmatr[4, :][index_next] > 0
) # pylint: disable=R1716
# near = (index_next - index_cur) < max_wv_peak_dist #limit 2
# if near and correct_sign:
if correct_sign:
index_zero_cr = (
signal_zerocrossings(cwtmatr[4, :][index_cur : index_next + 1])[0] + index_cur
)
nb_idx = int(max_search_duration * sampling_rate)
index_max = np.argmax(ecg[index_zero_cr - nb_idx : index_zero_cr + nb_idx]) + (
index_zero_cr - nb_idx
)
tppeaks.append(index_max)
if len(tppeaks) == 0:
tppeaks = [np.nan]
return tppeaks
# =============================================================================
# PEAK METHOD
# =============================================================================
def _ecg_delineator_peak(ecg, rpeaks=None, sampling_rate=1000):
# Initialize
heartbeats = ecg_segment(ecg, rpeaks, sampling_rate)
Q_list = []
P_list = []
S_list = []
T_list = []
P_onsets = []
T_offsets = []
for i, rpeak in enumerate(rpeaks):
heartbeat = heartbeats[str(i + 1)]
# Get index of heartbeat
R = heartbeat.index.get_loc(np.min(heartbeat.index.values[heartbeat.index.values > 0]))
# Peaks ------
# Q wave
Q_index, Q = _ecg_delineator_peak_Q(rpeak, heartbeat, R)
Q_list.append(Q_index)
# P wave
P_index, P = _ecg_delineator_peak_P(rpeak, heartbeat, R, Q)
P_list.append(P_index)
# S wave
S_index, S = _ecg_delineator_peak_S(rpeak, heartbeat)
S_list.append(S_index)
# T wave
T_index, T = _ecg_delineator_peak_T(rpeak, heartbeat, R, S)
T_list.append(T_index)
# Onsets/Offsets ------
P_onsets.append(_ecg_delineator_peak_P_onset(rpeak, heartbeat, R, P))
T_offsets.append(_ecg_delineator_peak_T_offset(rpeak, heartbeat, R, T))
# Manual fix for T_offsets
if T_offsets[-1] >= len(ecg):
T_offsets[-1] = np.nan
# Return info dictionary
return {
"ECG_P_Peaks": P_list,
"ECG_Q_Peaks": Q_list,
"ECG_S_Peaks": S_list,
"ECG_T_Peaks": T_list,
"ECG_P_Onsets": P_onsets,
"ECG_T_Offsets": T_offsets,
}
# Internal
# --------------------------
def _ecg_delineator_peak_Q(rpeak, heartbeat, R):
segment = heartbeat[:0] # Select left hand side
Q = signal_findpeaks(
-1 * segment["Signal"],
height_min=0.05 * (segment["Signal"].max() - segment["Signal"].min()),
)
if len(Q["Peaks"]) == 0:
return np.nan, None
Q = Q["Peaks"][-1] # Select most right-hand side
from_R = R - Q # Relative to R
return rpeak - from_R, Q
def _ecg_delineator_peak_P(rpeak, heartbeat, R, Q):
if Q is None:
return np.nan, None
segment = heartbeat.iloc[:Q] # Select left of Q wave
P = signal_findpeaks(
segment["Signal"], height_min=0.05 * (segment["Signal"].max() - segment["Signal"].min())
)
if len(P["Peaks"]) == 0:
return np.nan, None
P = P["Peaks"][np.argmax(P["Height"])] # Select heighest
from_R = R - P # Relative to R
return rpeak - from_R, P
def _ecg_delineator_peak_S(rpeak, heartbeat):
segment = heartbeat[0:] # Select right hand side
S = signal_findpeaks(
-segment["Signal"], height_min=0.05 * (segment["Signal"].max() - segment["Signal"].min())
)
if len(S["Peaks"]) == 0:
return np.nan, None
S = S["Peaks"][0] # Select most left-hand side
return rpeak + S, S
def _ecg_delineator_peak_T(rpeak, heartbeat, R, S):
if S is None:
return np.nan, None
segment = heartbeat.iloc[R + S :] # Select right of S wave
T = signal_findpeaks(
segment["Signal"], height_min=0.05 * (segment["Signal"].max() - segment["Signal"].min())
)
if len(T["Peaks"]) == 0:
return np.nan, None
T = S + T["Peaks"][np.argmax(T["Height"])] # Select heighest
return rpeak + T, T
def _ecg_delineator_peak_P_onset(rpeak, heartbeat, R, P):
if P is None:
return np.nan
segment = heartbeat.iloc[:P] # Select left of P wave
try:
signal = signal_smooth(segment["Signal"].values, size=R / 10)
except TypeError:
signal = segment["Signal"]
if len(signal) < 2:
return np.nan
signal = np.gradient(np.gradient(signal))
P_onset = np.argmax(signal)
from_R = R - P_onset # Relative to R
return rpeak - from_R
def _ecg_delineator_peak_T_offset(rpeak, heartbeat, R, T):
if T is None:
return np.nan
segment = heartbeat.iloc[R + T :] # Select right of T wave
try:
signal = signal_smooth(segment["Signal"].values, size=R / 10)
except TypeError:
signal = segment["Signal"]
if len(signal) < 2:
return np.nan
signal = np.gradient(np.gradient(signal))
T_offset = np.argmax(signal)
return rpeak + T + T_offset
# =============================================================================
# Internals
# =============================================================================
def _ecg_delineate_plot(
ecg_signal,
rpeaks=None,
signals=None,
signal_features_type="all",
sampling_rate=1000,
window_start=-0.35,
window_end=0.55,
):
"""
import neurokit2 as nk
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
ecg_signal = nk.data("ecg_100hz")
# Extract R-peaks locations
_, rpeaks = nk.ecg_peaks(ecg_signal, sampling_rate=1000)
# Delineate the ECG signal with ecg_delineate()
signals, waves = nk.ecg_delineate(ecg_signal, rpeaks, sampling_rate=1000)
# Plot the ECG signal with markings on ECG peaks
_ecg_delineate_plot(ecg_signal, rpeaks=rpeaks, signals=signals,
signal_features_type='peaks', sampling_rate=1000)
    # Plot the ECG signal with markings on boundaries of R peaks
    _ecg_delineate_plot(ecg_signal, rpeaks=rpeaks, signals=signals,
                        signal_features_type='bounds_R', sampling_rate=1000)
    # Plot the ECG signal with markings on boundaries of P peaks
    _ecg_delineate_plot(ecg_signal, rpeaks=rpeaks, signals=signals,
                        signal_features_type='bounds_P', sampling_rate=1000)
    # Plot the ECG signal with markings on boundaries of T peaks
    _ecg_delineate_plot(ecg_signal, rpeaks=rpeaks, signals=signals,
                        signal_features_type='bounds_T', sampling_rate=1000)
# Plot the ECG signal with markings on all peaks and boundaries
_ecg_delineate_plot(ecg_signal, rpeaks=rpeaks, signals=signals,
signal_features_type='all', sampling_rate=1000)
"""
data = pd.DataFrame({"Signal": list(ecg_signal)})
data = pd.concat([data, signals], axis=1)
# Try retrieving right column
if isinstance(rpeaks, dict):
rpeaks = rpeaks["ECG_R_Peaks"]
# Segment the signal around the R-peaks
epochs = epochs_create(
data,
events=rpeaks,
sampling_rate=sampling_rate,
epochs_start=window_start,
epochs_end=window_end,
)
data = epochs_to_df(epochs)
data_cols = data.columns.values
dfs = []
for feature in data_cols:
if signal_features_type == "peaks":
if any(x in str(feature) for x in ["Peak"]):
df = data[feature]
dfs.append(df)
elif signal_features_type == "bounds_R":
if any(x in str(feature) for x in ["ECG_R_Onsets", "ECG_R_Offsets"]):
df = data[feature]
dfs.append(df)
elif signal_features_type == "bounds_T":
if any(x in str(feature) for x in ["ECG_T_Onsets", "ECG_T_Offsets"]):
df = data[feature]
dfs.append(df)
elif signal_features_type == "bounds_P":
if any(x in str(feature) for x in ["ECG_P_Onsets", "ECG_P_Offsets"]):
df = data[feature]
dfs.append(df)
elif signal_features_type == "all":
if any(x in str(feature) for x in ["Peak", "Onset", "Offset"]):
df = data[feature]
dfs.append(df)
features = pd.concat(dfs, axis=1)
fig, ax = plt.subplots()
data.Label = data.Label.astype(int)
for label in data.Label.unique():
epoch_data = data[data.Label == label]
ax.plot(epoch_data.Time, epoch_data.Signal, color="grey", alpha=0.2)
for i, feature_type in enumerate(features.columns.values): # pylint: disable=W0612
event_data = data[data[feature_type] == 1.0]
ax.scatter(event_data.Time, event_data.Signal, label=feature_type, alpha=0.5, s=200)
ax.legend()
return fig
def _ecg_delineate_check(waves, rpeaks):
"""This function replaces the delineated features with np.nan if its standardized distance from R-peaks is more than
3."""
df = pd.DataFrame.from_dict(waves)
features_columns = df.columns
df = pd.concat([df, pd.DataFrame({"ECG_R_Peaks": rpeaks})], axis=1)
# loop through all columns to calculate the z distance
for column in features_columns: # pylint: disable=W0612
df = _calculate_abs_z(df, features_columns)
# Replace with nan if distance > 3
for col in features_columns:
for i in range(len(df)):
if df["Dist_R_" + col][i] > 3:
df[col][i] = np.nan
# Return df without distance columns
df = df[features_columns]
waves = df.to_dict("list")
return waves
def _calculate_abs_z(df, columns):
"""This function helps to calculate the absolute standardized distance between R-peaks and other delineated waves
features by `ecg_delineate()`"""
for column in columns:
df["Dist_R_" + column] = np.abs(standardize(df[column].sub(df["ECG_R_Peaks"], axis=0)))
return df
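# Illustrative reading of the check above (hypothetical scenario): if, for beat i, the absolute
# standardized distance between ECG_T_Peaks[i] and ECG_R_Peaks[i] exceeds 3 (i.e., that T-peak
# sits more than three standard deviations away from the typical R-to-T offset across beats),
# _ecg_delineate_check() replaces it with np.nan.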
File: tests/sitemaps_tests/base.py (repo: Yoann-Vie/esgi-hearthstone, commit 115d03426c7e8e80d89883b78ac72114c29bed12, licenses: PSF-2.0, BSD-3-Clause, 1,086 bytes, Python)
from django.apps import apps
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.test import TestCase, modify_settings, override_settings
from .models import I18nTestModel, TestModel
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sitemaps'})
@override_settings(ROOT_URLCONF='sitemaps_tests.urls.http')
class SitemapTestsBase(TestCase):
protocol = 'http'
sites_installed = apps.is_installed('django.contrib.sites')
domain = 'example.com' if sites_installed else 'testserver'
def setUp(self):
self.base_url = '%s://%s' % (self.protocol, self.domain)
cache.clear()
# Create an object for sitemap content.
TestModel.objects.create(name='Test Object')
self.i18n_model = I18nTestModel.objects.create(name='Test Object')
@classmethod
def setUpClass(cls):
super().setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
File: homeassistant/components/lcn/cover.py (repo: dummys/home-assistant, commit dd908caebade15adf061fade686355b94ed2f43a, license: Apache-2.0, 9,865 bytes, Python)
"""Support for LCN covers."""
from __future__ import annotations
from typing import Any
import pypck
from homeassistant.components.cover import DOMAIN as DOMAIN_COVER, CoverEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ADDRESS, CONF_DOMAIN, CONF_ENTITIES
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import LcnEntity
from .const import CONF_DOMAIN_DATA, CONF_MOTOR, CONF_REVERSE_TIME
from .helpers import DeviceConnectionType, InputType, get_device_connection
PARALLEL_UPDATES = 0
def create_lcn_cover_entity(
hass: HomeAssistantType, entity_config: ConfigType, config_entry: ConfigEntry
) -> LcnEntity:
"""Set up an entity for this domain."""
device_connection = get_device_connection(
hass, entity_config[CONF_ADDRESS], config_entry
)
if entity_config[CONF_DOMAIN_DATA][CONF_MOTOR] in "OUTPUTS":
return LcnOutputsCover(entity_config, config_entry.entry_id, device_connection)
# in RELAYS
return LcnRelayCover(entity_config, config_entry.entry_id, device_connection)
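# Illustrative note (values hypothetical): an entity_config whose CONF_DOMAIN_DATA[CONF_MOTOR]
# is "OUTPUTS" yields an LcnOutputsCover driven by the OUTPUTUP/OUTPUTDOWN output ports, while
# any relay motor port value (e.g. "MOTOR1", if defined in pypck) falls through to LcnRelayCover.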
async def async_setup_entry(
hass: HomeAssistantType,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up LCN cover entities from a config entry."""
entities = []
for entity_config in config_entry.data[CONF_ENTITIES]:
if entity_config[CONF_DOMAIN] == DOMAIN_COVER:
entities.append(create_lcn_cover_entity(hass, entity_config, config_entry))
async_add_entities(entities)
class LcnOutputsCover(LcnEntity, CoverEntity):
"""Representation of a LCN cover connected to output ports."""
def __init__(
self, config: ConfigType, entry_id: str, device_connection: DeviceConnectionType
) -> None:
"""Initialize the LCN cover."""
super().__init__(config, entry_id, device_connection)
self.output_ids = [
pypck.lcn_defs.OutputPort["OUTPUTUP"].value,
pypck.lcn_defs.OutputPort["OUTPUTDOWN"].value,
]
if CONF_REVERSE_TIME in config[CONF_DOMAIN_DATA]:
self.reverse_time = pypck.lcn_defs.MotorReverseTime[
config[CONF_DOMAIN_DATA][CONF_REVERSE_TIME]
]
else:
self.reverse_time = None
self._is_closed = False
self._is_closing = False
self._is_opening = False
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
if not self.device_connection.is_group:
await self.device_connection.activate_status_request_handler(
pypck.lcn_defs.OutputPort["OUTPUTUP"]
)
await self.device_connection.activate_status_request_handler(
pypck.lcn_defs.OutputPort["OUTPUTDOWN"]
)
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass."""
await super().async_will_remove_from_hass()
if not self.device_connection.is_group:
await self.device_connection.cancel_status_request_handler(
pypck.lcn_defs.OutputPort["OUTPUTUP"]
)
await self.device_connection.cancel_status_request_handler(
pypck.lcn_defs.OutputPort["OUTPUTDOWN"]
)
@property
def is_closed(self) -> bool:
"""Return if the cover is closed."""
return self._is_closed
@property
def is_opening(self) -> bool:
"""Return if the cover is opening or not."""
return self._is_opening
@property
def is_closing(self) -> bool:
"""Return if the cover is closing or not."""
return self._is_closing
@property
def assumed_state(self) -> bool:
"""Return True if unable to access real state of the entity."""
return True
async def async_close_cover(self, **kwargs: Any) -> None:
"""Close the cover."""
state = pypck.lcn_defs.MotorStateModifier.DOWN
if not await self.device_connection.control_motors_outputs(
state, self.reverse_time
):
return
self._is_opening = False
self._is_closing = True
self.async_write_ha_state()
async def async_open_cover(self, **kwargs: Any) -> None:
"""Open the cover."""
state = pypck.lcn_defs.MotorStateModifier.UP
if not await self.device_connection.control_motors_outputs(
state, self.reverse_time
):
return
self._is_closed = False
self._is_opening = True
self._is_closing = False
self.async_write_ha_state()
async def async_stop_cover(self, **kwargs: Any) -> None:
"""Stop the cover."""
state = pypck.lcn_defs.MotorStateModifier.STOP
if not await self.device_connection.control_motors_outputs(state):
return
self._is_closing = False
self._is_opening = False
self.async_write_ha_state()
def input_received(self, input_obj: InputType) -> None:
"""Set cover states when LCN input object (command) is received."""
if (
not isinstance(input_obj, pypck.inputs.ModStatusOutput)
or input_obj.get_output_id() not in self.output_ids
):
return
if input_obj.get_percent() > 0: # motor is on
if input_obj.get_output_id() == self.output_ids[0]:
self._is_opening = True
self._is_closing = False
else: # self.output_ids[1]
self._is_opening = False
self._is_closing = True
self._is_closed = self._is_closing
else: # motor is off
# cover is assumed to be closed if we were in closing state before
self._is_closed = self._is_closing
self._is_closing = False
self._is_opening = False
self.async_write_ha_state()
class LcnRelayCover(LcnEntity, CoverEntity):
"""Representation of a LCN cover connected to relays."""
def __init__(
self, config: ConfigType, entry_id: str, device_connection: DeviceConnectionType
) -> None:
"""Initialize the LCN cover."""
super().__init__(config, entry_id, device_connection)
self.motor = pypck.lcn_defs.MotorPort[config[CONF_DOMAIN_DATA][CONF_MOTOR]]
self.motor_port_onoff = self.motor.value * 2
self.motor_port_updown = self.motor_port_onoff + 1
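        # Each motor occupies a pair of consecutive relays: the first motor port (value 0)
        # maps to relay 0 (on/off) and relay 1 (up/down), the second to relays 2 and 3,
        # and so on.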
self._is_closed = False
self._is_closing = False
self._is_opening = False
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
if not self.device_connection.is_group:
await self.device_connection.activate_status_request_handler(self.motor)
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass."""
await super().async_will_remove_from_hass()
if not self.device_connection.is_group:
await self.device_connection.cancel_status_request_handler(self.motor)
@property
def is_closed(self) -> bool:
"""Return if the cover is closed."""
return self._is_closed
@property
def is_opening(self) -> bool:
"""Return if the cover is opening or not."""
return self._is_opening
@property
def is_closing(self) -> bool:
"""Return if the cover is closing or not."""
return self._is_closing
@property
def assumed_state(self) -> bool:
"""Return True if unable to access real state of the entity."""
return True
async def async_close_cover(self, **kwargs: Any) -> None:
"""Close the cover."""
states = [pypck.lcn_defs.MotorStateModifier.NOCHANGE] * 4
states[self.motor.value] = pypck.lcn_defs.MotorStateModifier.DOWN
if not await self.device_connection.control_motors_relays(states):
return
self._is_opening = False
self._is_closing = True
self.async_write_ha_state()
async def async_open_cover(self, **kwargs: Any) -> None:
"""Open the cover."""
states = [pypck.lcn_defs.MotorStateModifier.NOCHANGE] * 4
states[self.motor.value] = pypck.lcn_defs.MotorStateModifier.UP
if not await self.device_connection.control_motors_relays(states):
return
self._is_closed = False
self._is_opening = True
self._is_closing = False
self.async_write_ha_state()
async def async_stop_cover(self, **kwargs: Any) -> None:
"""Stop the cover."""
states = [pypck.lcn_defs.MotorStateModifier.NOCHANGE] * 4
states[self.motor.value] = pypck.lcn_defs.MotorStateModifier.STOP
if not await self.device_connection.control_motors_relays(states):
return
self._is_closing = False
self._is_opening = False
self.async_write_ha_state()
def input_received(self, input_obj: InputType) -> None:
"""Set cover states when LCN input object (command) is received."""
if not isinstance(input_obj, pypck.inputs.ModStatusRelays):
return
states = input_obj.states # list of boolean values (relay on/off)
if states[self.motor_port_onoff]: # motor is on
self._is_opening = not states[self.motor_port_updown] # set direction
self._is_closing = states[self.motor_port_updown] # set direction
else: # motor is off
self._is_opening = False
self._is_closing = False
self._is_closed = states[self.motor_port_updown]
self.async_write_ha_state()
File: linetools/guis/tests/test_guis.py (repo: marijana777/linetools, commit 73720a2f6df42b7dde1f35055cd40ad970200f7f, license: BSD-3-Clause, 3,454 bytes, Python)
# Module to run tests on Generating a LineList
# Also tests some simple functionality
from __future__ import print_function, absolute_import, division, unicode_literals
# TEST_UNICODE_LITERALS
import os, sys
import pytest
import numpy as np
from astropy import units as u
from PyQt5.QtWidgets import QApplication
from linetools.guis import xspecgui, xabssysgui
from linetools.guis import utils as ltgu
from linetools.spectra import io as lsio
from linetools.isgm.abssystem import GenericAbsSystem
app = QApplication(sys.argv)
# Set of Input lines
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), '../../spectra/tests/files')
return os.path.join(data_dir, filename)
def test_navigate():
# Init
nav_dict = dict(nav=ltgu.navigate(0,0,init=True))
assert isinstance(nav_dict['nav'], list)
nav_dict['x_minmax'] = [0., 1]
nav_dict['y_minmax'] = [0., 1]
nav_dict['sv_xy_minmax'] = [[0,1], [0,1]]
nav_dict['tmp_xy'] = None
# Usage
o = type(str('Dummy'), (object,), {})
o.xdata = 22.
o.ydata = 1.
for key in nav_dict['nav']:
o.key = key
if key == 's':
# test two options
nav_dict['tmp_xy'] = [0, 0.5] # is not None
ltgu.navigate(nav_dict, o)
nav_dict['tmp_xy'] = None # is None again
ltgu.navigate(nav_dict, o)
elif key == 'y':
# test two options
ltgu.navigate(nav_dict, o, wave = np.linspace(0,1,100), flux = np.ones(100))
ltgu.navigate(nav_dict, o)
else:
ltgu.navigate(nav_dict, o)
# test wrong key event
o.xdata = 'this_is_not_float'
out = ltgu.navigate(nav_dict, o)
assert out == 0
def test_doublet():
o = type(str('Dummy'), (object,), {})
o.xdata = 5000.
i = type(str('Dummy2'), (object,), {})
for key in ['C','M','4','X','8','B']:
o.key = key
_ = ltgu.set_doublet(i, o)
def test_llist():
# Init
idict = ltgu.set_llist('Strong')
idict = ltgu.set_llist([1215.670*u.AA])
assert idict['List'] == 'input.lst'
idict = ltgu.set_llist('None')
idict = ltgu.set_llist('OVI')
# wrong format
with pytest.raises(IOError):
idict = ltgu.set_llist((1,2)) # input is a tuple, so it is wrong.
def test_rdspec():
spec, spec_fil = ltgu.read_spec(data_path('UM184_nF.fits'))
#
ispec = lsio.readspec(data_path('UM184_nF.fits'))
spec, spec_fil = ltgu.read_spec(ispec)
# as tuple without units
ispec = (np.ones(10), np.ones(10), np.ones(10))
spec, spec_fil = ltgu.read_spec(ispec)
assert spec_fil == 'none'
# as list of files
ispec = [data_path('UM184_nF.fits')]*2
spec, spec_fil = ltgu.read_spec(ispec)
spec, spec_fil = ltgu.read_spec(ispec, exten=[1,1]) # is iterable exten
# wrong format
with pytest.raises(ValueError):
spec, spec_fil = ltgu.read_spec(dict(a='dummy')) # input is a dict
# normalize
spec, spec_fil = ltgu.read_spec(data_path('UM184_nF.fits'))
spec.co = spec.flux
spec, spec_fil = ltgu.read_spec(spec)
assert spec.normed
def test_xspecgui():
# Init
spec_fil = data_path('UM184_nF.fits')
xsgui = xspecgui.XSpecGui(spec_fil, unit_test=True)
def test_xabsgui():
# Init
spec_fil = data_path('UM184_nF.fits')
abs_sys = GenericAbsSystem((0.,0.), 3., [-500,500]*u.km/u.s)
xabsgui = xabssysgui.XAbsSysGui(spec_fil, abs_sys)
File: ansible-ipi-install/roles/node-prep/library/nmcli.py (repo: dlminvestments/baremetal-deploy, commit f35b87b4445ccd0e43237a5cc657f365446d9b5d, license: Apache-2.0, 55,850 bytes, Python)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Chris Long <alcamie@gmail.com> <chlong@redhat.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: nmcli
author: "Chris Long (@alcamie101)"
short_description: Manage Networking
requirements: [ nmcli, dbus, NetworkManager-libnm ]
version_added: "2.0"
description:
    - Manage the network devices. Create, modify and manage various connection and device types, e.g. ethernet, team, bond, vlan, etc.
- "On CentOS and Fedora like systems, install dependencies as 'yum/dnf install -y python-gobject NetworkManager-libnm'"
- "On Ubuntu and Debian like systems, install dependencies as 'apt-get install -y libnm-glib-dev'"
options:
state:
description:
- Whether the device should exist or not, taking action if the state is different from what is stated.
required: True
choices: [ present, absent ]
autoconnect:
description:
- Whether the connection should start on boot.
- Whether the connection profile can be automatically activated
type: bool
default: True
conn_name:
description:
            - 'Where conn_name will be the name used to call the connection. When not provided, a default name is generated: <type>[-<ifname>][-<num>]'
required: True
ifname:
description:
- Where IFNAME will be the what we call the interface name.
- interface to bind the connection to. The connection will only be applicable to this interface name.
- A special value of "*" can be used for interface-independent connections.
- The ifname argument is mandatory for all connection types except bond, team, bridge and vlan.
default: conn_name
type:
description:
- This is the type of device or network connection that you wish to create or modify.
- "type C(generic) is added in version 2.5."
choices: [ ethernet, team, team-slave, bond, bond-slave, bridge, bridge-slave, vlan, vxlan, ipip, generic ]
mode:
description:
- This is the type of device or network connection that you wish to create for a bond, team or bridge.
choices: [ "balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb" ]
default: balance-rr
master:
description:
            - The master (ifname, connection UUID or conn_name) of the bridge, team or bond master connection profile.
ip4:
description:
- 'The IPv4 address to this interface using this format ie: "192.0.2.24/24"'
ip4_method:
description:
- 'The IPv4 method to this interface using this format ie: "manual"'
version_added: 2.6
gw4:
description:
- 'The IPv4 gateway for this interface using this format ie: "192.0.2.1"'
dns4:
description:
            - 'A list of up to 3 DNS servers, in IPv4 format, e.g. to add two IPv4 DNS server addresses: "192.0.2.53 198.51.100.53"'
dns4_search:
description:
- 'A list of DNS search domains.'
version_added: 2.5
ip6:
description:
- 'The IPv6 address to this interface using this format ie: "abbe::cafe"'
ip6_method:
description:
- 'The IPv6 method to this interface using this format ie: "manual"'
version_added: 2.6
gw6:
description:
- 'The IPv6 gateway for this interface using this format ie: "2001:db8::1"'
dns6:
description:
            - 'A list of up to 3 DNS servers, in IPv6 format, e.g. to add two IPv6 DNS server addresses: "2001:4860:4860::8888 2001:4860:4860::8844"'
dns6_search:
description:
- 'A list of DNS search domains.'
version_added: 2.5
mtu:
description:
- The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
- Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband)
default: 1500
dhcp_client_id:
description:
- DHCP Client Identifier sent to the DHCP server.
version_added: "2.5"
ip6_dhcp_duid:
description:
- 'A string containing the DHCPv6 Unique Identifier (DUID) used by the dhcp client to identify itself to DHCPv6 servers (RFC 3315).'
- 'The special values "llt" and "ll" generate a DUID of type LLT or LL (see RFC 3315) based on the current MAC address of the device.'
version_added: 2.9
primary:
description:
            - This is only used with bond and is the primary interface name (for "active-backup" mode); this is usually the 'ifname'.
id:
        description:
            - This allows changing the bridge slave device name (connection.id) to a preferred name.
        version_added: 2.9
miimon:
description:
- This is only used with bond - miimon
default: 100
downdelay:
description:
- This is only used with bond - downdelay
updelay:
description:
- This is only used with bond - updelay
arp_interval:
description:
- This is only used with bond - ARP interval
arp_ip_target:
description:
- This is only used with bond - ARP IP target
stp:
description:
- This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge
type: bool
priority:
description:
- This is only used with 'bridge' - sets STP priority
default: 128
forwarddelay:
description:
- This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds
default: 15
hellotime:
description:
- This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds
default: 2
maxage:
description:
- This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds
default: 20
ageingtime:
description:
- This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds
default: 300
mac:
description:
- >
This is only used with bridge - MAC address of the bridge
(note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel)
slavepriority:
description:
- This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave
default: 32
path_cost:
description:
- This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave
default: 100
hairpin:
description:
- This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the
frame was received on.
type: bool
default: 'yes'
vlanid:
description:
- This is only used with VLAN - VLAN ID in range <0-4095>
vlandev:
description:
- This is only used with VLAN - parent device this VLAN is on, can use ifname
flags:
description:
- This is only used with VLAN - flags
ingress:
description:
- This is only used with VLAN - VLAN ingress priority mapping
egress:
description:
- This is only used with VLAN - VLAN egress priority mapping
vxlan_id:
description:
- This is only used with VXLAN - VXLAN ID.
version_added: "2.8"
vxlan_remote:
description:
- This is only used with VXLAN - VXLAN destination IP address.
version_added: "2.8"
vxlan_local:
description:
- This is only used with VXLAN - VXLAN local IP address.
version_added: "2.8"
ip_tunnel_dev:
description:
- This is only used with IPIP - parent device this IPIP tunnel, can use ifname.
version_added: "2.8"
ip_tunnel_remote:
description:
- This is only used with IPIP - IPIP destination IP address.
version_added: "2.8"
ip_tunnel_local:
description:
- This is only used with IPIP - IPIP local IP address.
version_added: "2.8"
"""
EXAMPLES = """
# These examples are using the following inventory:
#
# ## Directory layout:
#
# ```
# |_/inventory/cloud-hosts
# | /group_vars/openstack-stage.yml
# | /host_vars/controller-01.openstack.host.com
# | /host_vars/controller-02.openstack.host.com
# |_/playbook/library/nmcli.py
# | /playbook-add.yml
# | /playbook-del.yml
# ```
#
# ## inventory examples
# ### groups_vars
# ```yml
# ---
# #devops_os_define_network
# storage_gw: "192.0.2.254"
# external_gw: "198.51.100.254"
# tenant_gw: "203.0.113.254"
#
# #Team vars
# nmcli_team:
# - conn_name: tenant
# ip4: '{{ tenant_ip }}'
# gw4: '{{ tenant_gw }}'
# - conn_name: external
# ip4: '{{ external_ip }}'
# gw4: '{{ external_gw }}'
# - conn_name: storage
# ip4: '{{ storage_ip }}'
# gw4: '{{ storage_gw }}'
# nmcli_team_slave:
# - conn_name: em1
# ifname: em1
# master: tenant
# - conn_name: em2
# ifname: em2
# master: tenant
# - conn_name: p2p1
# ifname: p2p1
# master: storage
# - conn_name: p2p2
# ifname: p2p2
# master: external
#
# #bond vars
# nmcli_bond:
# - conn_name: tenant
# ip4: '{{ tenant_ip }}'
# gw4: ''
# mode: balance-rr
# - conn_name: external
# ip4: '{{ external_ip }}'
# gw4: ''
# mode: balance-rr
# - conn_name: storage
# ip4: '{{ storage_ip }}'
# gw4: '{{ storage_gw }}'
# mode: balance-rr
# nmcli_bond_slave:
# - conn_name: em1
# ifname: em1
# master: tenant
# - conn_name: em2
# ifname: em2
# master: tenant
# - conn_name: p2p1
# ifname: p2p1
# master: storage
# - conn_name: p2p2
# ifname: p2p2
# master: external
#
# #ethernet vars
# nmcli_ethernet:
# - conn_name: em1
# ifname: em1
# ip4: '{{ tenant_ip }}'
# gw4: '{{ tenant_gw }}'
# - conn_name: em2
# ifname: em2
# ip4: '{{ tenant_ip1 }}'
# gw4: '{{ tenant_gw }}'
# - conn_name: p2p1
# ifname: p2p1
# ip4: '{{ storage_ip }}'
# gw4: '{{ storage_gw }}'
# - conn_name: p2p2
# ifname: p2p2
# ip4: '{{ external_ip }}'
# gw4: '{{ external_gw }}'
# ```
#
# ### host_vars
# ```yml
# ---
# storage_ip: "192.0.2.91/23"
# external_ip: "198.51.100.23/21"
# tenant_ip: "203.0.113.77/23"
# ```
## playbook-add.yml example
---
- hosts: openstack-stage
remote_user: root
tasks:
- name: install needed network manager libs
yum:
name: '{{ item }}'
state: installed
with_items:
- NetworkManager-libnm
- libnm-qt-devel.x86_64
- nm-connection-editor.x86_64
- libsemanage-python
- policycoreutils-python
##### Working with all cloud nodes - Teaming
- name: try nmcli add team - conn_name only & ip4 gw4
nmcli:
type: team
conn_name: '{{ item.conn_name }}'
ip4: '{{ item.ip4 }}'
gw4: '{{ item.gw4 }}'
state: present
with_items:
- '{{ nmcli_team }}'
- name: try nmcli add teams-slave
nmcli:
type: team-slave
conn_name: '{{ item.conn_name }}'
ifname: '{{ item.ifname }}'
master: '{{ item.master }}'
state: present
with_items:
- '{{ nmcli_team_slave }}'
###### Working with all cloud nodes - Bonding
- name: try nmcli add bond - conn_name only & ip4 gw4 mode
nmcli:
type: bond
conn_name: '{{ item.conn_name }}'
ip4: '{{ item.ip4 }}'
gw4: '{{ item.gw4 }}'
mode: '{{ item.mode }}'
state: present
with_items:
- '{{ nmcli_bond }}'
- name: try nmcli add bond-slave
nmcli:
type: bond-slave
conn_name: '{{ item.conn_name }}'
ifname: '{{ item.ifname }}'
master: '{{ item.master }}'
state: present
with_items:
- '{{ nmcli_bond_slave }}'
##### Working with all cloud nodes - Ethernet
- name: nmcli add Ethernet - conn_name only & ip4 gw4
nmcli:
type: ethernet
conn_name: '{{ item.conn_name }}'
ip4: '{{ item.ip4 }}'
gw4: '{{ item.gw4 }}'
state: present
with_items:
- '{{ nmcli_ethernet }}'
## playbook-del.yml example
- hosts: openstack-stage
remote_user: root
tasks:
- name: try nmcli del team - multiple
nmcli:
conn_name: '{{ item.conn_name }}'
state: absent
with_items:
- conn_name: em1
- conn_name: em2
- conn_name: p1p1
- conn_name: p1p2
- conn_name: p2p1
- conn_name: p2p2
- conn_name: tenant
- conn_name: storage
- conn_name: external
- conn_name: team-em1
- conn_name: team-em2
- conn_name: team-p1p1
- conn_name: team-p1p2
- conn_name: team-p2p1
- conn_name: team-p2p2
# To add an Ethernet connection with static IP configuration, issue a command as follows
- nmcli:
conn_name: my-eth1
ifname: eth1
type: ethernet
ip4: 192.0.2.100/24
gw4: 192.0.2.1
state: present
# To add a Team connection with static IP configuration, issue a command as follows
- nmcli:
conn_name: my-team1
ifname: my-team1
type: team
ip4: 192.0.2.100/24
gw4: 192.0.2.1
state: present
autoconnect: yes
# Optionally, at the same time specify IPv6 addresses for the device as follows:
- nmcli:
conn_name: my-eth1
ifname: eth1
type: ethernet
ip4: 192.0.2.100/24
gw4: 192.0.2.1
ip6: '2001:db8::cafe'
gw6: '2001:db8::1'
state: present
# To add two IPv4 DNS server addresses:
- nmcli:
conn_name: my-eth1
type: ethernet
dns4:
- 192.0.2.53
- 198.51.100.53
state: present
# To make a profile usable for all compatible Ethernet interfaces, issue a command as follows
- nmcli:
    type: ethernet
    conn_name: my-eth1
ifname: '*'
state: present
# To change the property of a setting e.g. MTU, issue a command as follows:
- nmcli:
conn_name: my-eth1
mtu: 9000
type: ethernet
state: present
# To add VxLan, issue a command as follows:
- nmcli:
type: vxlan
conn_name: vxlan_test1
vxlan_id: 16
vxlan_local: 192.168.1.2
vxlan_remote: 192.168.1.5
# To add ipip, issue a command as follows:
- nmcli:
type: ipip
conn_name: ipip_test1
ip_tunnel_dev: eth0
ip_tunnel_local: 192.168.1.2
ip_tunnel_remote: 192.168.1.5
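# To add a VLAN, a minimal sketch follows (the parent device, VLAN ID and
# addresses below are illustrative assumptions, not values taken from this module):
- nmcli:
    type: vlan
    conn_name: vlan_test1
    vlandev: eth0
    vlanid: 10
    ip4: 192.0.2.50/24
    gw4: 192.0.2.1
    state: present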
# nmcli exits with status 0 if it succeeds and exits with a status greater
# than zero when there is a failure. The following list of status codes may be
# returned:
#
# - 0 Success - indicates the operation succeeded
# - 1 Unknown or unspecified error
# - 2 Invalid user input, wrong nmcli invocation
# - 3 Timeout expired (see --wait option)
# - 4 Connection activation failed
# - 5 Connection deactivation failed
# - 6 Disconnecting device failed
# - 7 Connection deletion failed
# - 8 NetworkManager is not running
# - 9 nmcli and NetworkManager versions mismatch
# - 10 Connection, device, or access point does not exist.
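# The module's result includes nmcli's stdout/stderr whenever they are non-empty,
# so a run can be inspected by registering the result. This is a sketch only; the
# register variable name and connection values are illustrative assumptions:
- nmcli:
    conn_name: my-eth1
    type: ethernet
    mtu: 9000
    state: present
  register: nmcli_result
- debug:
    var: nmcli_result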
"""
RETURN = r"""#
"""
try:
import dbus
HAVE_DBUS = True
except ImportError:
HAVE_DBUS = False
try:
import gi
gi.require_version("NM", "1.0")
HAVE_NM_CLIENT = True
except (ImportError, ValueError):
HAVE_NM_CLIENT = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class Nmcli:
"""
This is the generic nmcli manipulation class that is subclassed based on platform.
A subclass may wish to override the following action methods:-
- create_connection()
- delete_connection()
- modify_connection()
- show_connection()
- up_connection()
- down_connection()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = "Generic"
distribution = None
if HAVE_DBUS:
bus = dbus.SystemBus()
# The following is going to be used in dbus code
DEVTYPES = {
1: "Ethernet",
2: "Wi-Fi",
5: "Bluetooth",
6: "OLPC",
7: "WiMAX",
8: "Modem",
9: "InfiniBand",
10: "Bond",
11: "VLAN",
12: "ADSL",
13: "Bridge",
14: "Generic",
15: "Team",
16: "VxLan",
17: "ipip",
}
STATES = {
0: "Unknown",
10: "Unmanaged",
20: "Unavailable",
30: "Disconnected",
40: "Prepare",
50: "Config",
60: "Need Auth",
70: "IP Config",
80: "IP Check",
90: "Secondaries",
100: "Activated",
110: "Deactivating",
120: "Failed",
}
def __init__(self, module):
self.module = module
self.state = module.params["state"]
self.autoconnect = module.params["autoconnect"]
self.conn_name = module.params["conn_name"]
self.master = module.params["master"]
self.ifname = module.params["ifname"]
self.type = module.params["type"]
self.ip4 = module.params["ip4"]
self.ip4_method = module.params["ip4_method"]
self.gw4 = module.params["gw4"]
self.dns4 = (
" ".join(module.params["dns4"]) if module.params.get("dns4") else None
)
self.dns4_search = (
" ".join(module.params["dns4_search"])
if module.params.get("dns4_search")
else None
)
self.ip6 = module.params["ip6"]
self.ip6_method = module.params["ip6_method"]
self.ip6_dhcp_duid = module.params["ip6_dhcp_duid"]
self.gw6 = module.params["gw6"]
self.dns6 = module.params["dns6"]
self.dns6_search = (
" ".join(module.params["dns6_search"])
if module.params.get("dns6_search")
else None
)
self.mtu = module.params["mtu"]
self.stp = module.params["stp"]
self.priority = module.params["priority"]
self.mode = module.params["mode"]
self.miimon = module.params["miimon"]
self.primary = module.params["primary"]
self.id = module.params["id"]
self.downdelay = module.params["downdelay"]
self.updelay = module.params["updelay"]
self.arp_interval = module.params["arp_interval"]
self.arp_ip_target = module.params["arp_ip_target"]
self.slavepriority = module.params["slavepriority"]
self.forwarddelay = module.params["forwarddelay"]
self.hellotime = module.params["hellotime"]
self.maxage = module.params["maxage"]
self.ageingtime = module.params["ageingtime"]
self.hairpin = module.params["hairpin"]
self.path_cost = module.params["path_cost"]
self.mac = module.params["mac"]
self.vlanid = module.params["vlanid"]
self.vlandev = module.params["vlandev"]
self.flags = module.params["flags"]
self.ingress = module.params["ingress"]
self.egress = module.params["egress"]
self.vxlan_id = module.params["vxlan_id"]
self.vxlan_local = module.params["vxlan_local"]
self.vxlan_remote = module.params["vxlan_remote"]
self.ip_tunnel_dev = module.params["ip_tunnel_dev"]
self.ip_tunnel_local = module.params["ip_tunnel_local"]
self.ip_tunnel_remote = module.params["ip_tunnel_remote"]
self.nmcli_bin = self.module.get_bin_path("nmcli", True)
self.dhcp_client_id = module.params["dhcp_client_id"]
def execute_command(self, cmd, use_unsafe_shell=False, data=None):
return self.module.run_command(
cmd, use_unsafe_shell=use_unsafe_shell, data=data
)
@staticmethod
def merge_secrets(proxy, config, setting_name):
try:
# returns a dict of dicts mapping name::setting, where setting is a dict
# mapping key::value. Each member of the 'setting' dict is a secret
secrets = proxy.GetSecrets(setting_name)
# Copy the secrets into our connection config
for setting in secrets:
for key in secrets[setting]:
config[setting_name][key] = secrets[setting][key]
        except Exception:
            # Secrets may be absent or not apply to this setting; ignore and continue.
            pass
def dict_to_string(self, d):
# Try to trivially translate a dictionary's elements into nice string
# formatting.
dstr = ""
for key in d:
val = d[key]
str_val = ""
add_string = True
if isinstance(val, dbus.Array):
for elt in val:
if isinstance(elt, dbus.Byte):
str_val += "%s " % int(elt)
elif isinstance(elt, dbus.String):
str_val += "%s" % elt
elif isinstance(val, dbus.Dictionary):
dstr += self.dict_to_string(val)
add_string = False
else:
str_val = val
if add_string:
dstr += "%s: %s\n" % (key, str_val)
return dstr
def connection_to_string(self, config):
# dump a connection configuration to use in list_connection_info
setting_list = []
for setting_name in config:
setting_list.append(self.dict_to_string(config[setting_name]))
return setting_list
@staticmethod
def bool_to_string(boolean):
if boolean:
return "yes"
else:
return "no"
def list_connection_info(self):
# Ask the settings service for the list of connections it provides
bus = dbus.SystemBus()
service_name = "org.freedesktop.NetworkManager"
settings = None
try:
proxy = bus.get_object(
service_name, "/org/freedesktop/NetworkManager/Settings"
)
settings = dbus.Interface(proxy, "org.freedesktop.NetworkManager.Settings")
except dbus.exceptions.DBusException as e:
self.module.fail_json(
msg="Unable to read Network Manager settings from DBus system bus: %s"
% to_native(e),
details="Please check if NetworkManager is installed and"
" service network-manager is started.",
)
connection_paths = settings.ListConnections()
connection_list = []
# List each connection's name, UUID, and type
for path in connection_paths:
con_proxy = bus.get_object(service_name, path)
settings_connection = dbus.Interface(
con_proxy, "org.freedesktop.NetworkManager.Settings.Connection"
)
config = settings_connection.GetSettings()
# Now get secrets too; we grab the secrets for each type of connection
# (since there isn't a "get all secrets" call because most of the time
# you only need 'wifi' secrets or '802.1x' secrets, not everything) and
# merge that into the configuration data - To use at a later stage
self.merge_secrets(settings_connection, config, "802-11-wireless")
self.merge_secrets(settings_connection, config, "802-11-wireless-security")
self.merge_secrets(settings_connection, config, "802-1x")
self.merge_secrets(settings_connection, config, "gsm")
self.merge_secrets(settings_connection, config, "cdma")
self.merge_secrets(settings_connection, config, "ppp")
# Get the details of the 'connection' setting
s_con = config["connection"]
connection_list.append(s_con["id"])
connection_list.append(s_con["uuid"])
connection_list.append(s_con["type"])
connection_list.append(self.connection_to_string(config))
return connection_list
def connection_exists(self):
# we are going to use name and type in this instance to find if that connection exists and is of type x
connections = self.list_connection_info()
for con_item in connections:
if self.conn_name == con_item:
return True
def down_connection(self):
cmd = [self.nmcli_bin, "con", "down", self.conn_name]
return self.execute_command(cmd)
def up_connection(self):
cmd = [self.nmcli_bin, "con", "up", self.conn_name]
return self.execute_command(cmd)
def create_connection_team(self):
cmd = [self.nmcli_bin, "con", "add", "type", "team", "con-name"]
# format for creating team interface
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append("ifname")
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
options = {
"ip4": self.ip4,
"ipv4.method": self.ip4_method,
"gw4": self.gw4,
"ip6": self.ip6,
"ipv6.method": self.ip6_method,
"ipv6.dhcp-duid": self.ip6_dhcp_duid,
"gw6": self.gw6,
"autoconnect": self.bool_to_string(self.autoconnect),
"ipv4.dns-search": self.dns4_search,
"ipv6.dns-search": self.dns6_search,
"ipv4.dhcp-client-id": self.dhcp_client_id,
}
for key, value in options.items():
if value is not None:
cmd.extend([key, value])
return cmd
def modify_connection_team(self):
cmd = [self.nmcli_bin, "con", "mod", self.conn_name]
options = {
"ipv4.address": self.ip4,
"ipv4.method": self.ip4_method,
"ipv4.gateway": self.gw4,
"ipv4.dns": self.dns4,
"ipv6.address": self.ip6,
"ipv6.method": self.ip6_method,
"ipv6.dhcp-duid": self.ip6_dhcp_duid,
"ipv6.gateway": self.gw6,
"ipv6.dns": self.dns6,
"autoconnect": self.bool_to_string(self.autoconnect),
"ipv4.dns-search": self.dns4_search,
"ipv6.dns-search": self.dns6_search,
"ipv4.dhcp-client-id": self.dhcp_client_id,
}
for key, value in options.items():
if value is not None:
cmd.extend([key, value])
return cmd
def create_connection_team_slave(self):
cmd = [self.nmcli_bin, "connection", "add", "type", self.type, "con-name"]
# format for creating team-slave interface
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append("ifname")
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
cmd.append("master")
        if self.master is not None:
cmd.append(self.master)
return cmd
def modify_connection_team_slave(self):
cmd = [
self.nmcli_bin,
"con",
"mod",
self.conn_name,
"connection.master",
self.master,
]
# format for modifying team-slave interface
if self.mtu is not None:
cmd.append("802-3-ethernet.mtu")
cmd.append(self.mtu)
return cmd
def create_connection_bond(self):
cmd = [self.nmcli_bin, "con", "add", "type", "bond", "con-name"]
# format for creating bond interface
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append("ifname")
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
options = {
"mode": self.mode,
"ip4": self.ip4,
"ipv4.method": self.ip4_method,
"gw4": self.gw4,
"ip6": self.ip6,
"ipv6.method": self.ip6_method,
"ipv6.dhcp-duid": self.ip6_dhcp_duid,
"gw6": self.gw6,
"autoconnect": self.bool_to_string(self.autoconnect),
"ipv4.dns-search": self.dns4_search,
"ipv6.dns-search": self.dns6_search,
"miimon": self.miimon,
"downdelay": self.downdelay,
"updelay": self.updelay,
"arp-interval": self.arp_interval,
"arp-ip-target": self.arp_ip_target,
"primary": self.primary,
"ipv4.dhcp-client-id": self.dhcp_client_id,
}
for key, value in options.items():
if value is not None:
cmd.extend([key, value])
return cmd
def modify_connection_bond(self):
cmd = [self.nmcli_bin, "con", "mod", self.conn_name]
# format for modifying bond interface
options = {
"ipv4.address": self.ip4,
"ipv4.method": self.ip4_method,
"ipv4.gateway": self.gw4,
"ipv4.dns": self.dns4,
"ipv6.address": self.ip6,
"ipv6.method": self.ip6_method,
"ipv6.dhcp-duid": self.ip6_dhcp_duid,
"ipv6.gateway": self.gw6,
"ipv6.dns": self.dns6,
"autoconnect": self.bool_to_string(self.autoconnect),
"ipv4.dns-search": self.dns4_search,
"ipv6.dns-search": self.dns6_search,
"miimon": self.miimon,
"downdelay": self.downdelay,
"updelay": self.updelay,
"arp-interval": self.arp_interval,
"arp-ip-target": self.arp_ip_target,
"ipv4.dhcp-client-id": self.dhcp_client_id,
}
for key, value in options.items():
if value is not None:
cmd.extend([key, value])
return cmd
def create_connection_bond_slave(self):
cmd = [self.nmcli_bin, "connection", "add", "type", "bond-slave", "con-name"]
# format for creating bond-slave interface
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append("ifname")
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
cmd.append("master")
        if self.master is not None:
cmd.append(self.master)
return cmd
def modify_connection_bond_slave(self):
cmd = [
self.nmcli_bin,
"con",
"mod",
self.conn_name,
"connection.master",
self.master,
]
# format for modifying bond-slave interface
return cmd
def create_connection_ethernet(self, conn_type="ethernet"):
# format for creating ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
# nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
cmd = [self.nmcli_bin, "con", "add", "type"]
if conn_type == "ethernet":
cmd.append("ethernet")
elif conn_type == "generic":
cmd.append("generic")
cmd.append("con-name")
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append("ifname")
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
options = {
"ip4": self.ip4,
"ipv4.method": self.ip4_method,
"gw4": self.gw4,
"ip6": self.ip6,
"ipv6.method": self.ip6_method,
"ipv6.dhcp-duid": self.ip6_dhcp_duid,
"gw6": self.gw6,
"autoconnect": self.bool_to_string(self.autoconnect),
"ipv4.dns-search": self.dns4_search,
"ipv6.dns-search": self.dns6_search,
"ipv4.dhcp-client-id": self.dhcp_client_id,
}
for key, value in options.items():
if value is not None:
cmd.extend([key, value])
return cmd
def modify_connection_ethernet(self, conn_type="ethernet"):
cmd = [self.nmcli_bin, "con", "mod", self.conn_name]
# format for modifying ethernet interface
# To modify an Ethernet connection with static IP configuration, issue a command as follows
# - nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
# nmcli con mod con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
options = {
"ipv4.address": self.ip4,
"ipv4.method": self.ip4_method,
"ipv4.gateway": self.gw4,
"ipv4.dns": self.dns4,
"ipv6.address": self.ip6,
"ipv6.method": self.ip6_method,
"connection.id": self.id,
"ipv6.dhcp-duid": self.ip6_dhcp_duid,
"ipv6.gateway": self.gw6,
"ipv6.dns": self.dns6,
"autoconnect": self.bool_to_string(self.autoconnect),
"ipv4.dns-search": self.dns4_search,
"ipv6.dns-search": self.dns6_search,
"802-3-ethernet.mtu": self.mtu,
"ipv4.dhcp-client-id": self.dhcp_client_id,
}
for key, value in options.items():
if value is not None:
if key == "802-3-ethernet.mtu" and conn_type != "ethernet":
continue
cmd.extend([key, value])
return cmd
def create_connection_bridge(self):
# format for creating bridge interface
        # To add a Bridge connection with static IP configuration, issue a command as follows
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=bridge ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
# nmcli con add con-name my-eth1 ifname eth1 type bridge ip4 192.0.2.100/24 gw4 192.0.2.1
cmd = [self.nmcli_bin, "con", "add", "type", "bridge", "con-name"]
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append("ifname")
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
options = {
"ipv4.addresses": self.ip4,
"ipv4.method": self.ip4_method,
"ipv4.gateway": self.gw4,
"ipv6.addresses": self.ip6,
"ipv6.method": self.ip6_method,
"ipv6.dhcp-duid": self.ip6_dhcp_duid,
"ipv6.gateway": self.gw6,
"autoconnect": self.bool_to_string(self.autoconnect),
"bridge.ageing-time": self.ageingtime,
"bridge.forward-delay": self.forwarddelay,
"bridge.hello-time": self.hellotime,
"bridge.mac-address": self.mac,
"bridge.max-age": self.maxage,
"bridge.priority": self.priority,
"bridge.stp": self.bool_to_string(self.stp),
}
for key, value in options.items():
if value is not None:
cmd.extend([key, value])
return cmd
def modify_connection_bridge(self):
# format for modifying bridge interface
        # To modify a Bridge connection with static IP configuration, issue a command as follows
# - nmcli: name=mod conn_name=my-eth1 ifname=eth1 type=bridge ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
# nmcli con mod my-eth1 ifname eth1 type bridge ip4 192.0.2.100/24 gw4 192.0.2.1
cmd = [self.nmcli_bin, "con", "mod", self.conn_name]
options = {
"ipv4.addresses": self.ip4,
"ipv4.method": self.ip4_method,
"ipv4.gateway": self.gw4,
"ipv6.addresses": self.ip6,
"ipv6.method": self.ip6_method,
"ipv6.dhcp-duid": self.ip6_dhcp_duid,
"ipv6.gateway": self.gw6,
"autoconnect": self.bool_to_string(self.autoconnect),
"bridge.ageing-time": self.ageingtime,
"bridge.forward-delay": self.forwarddelay,
"bridge.hello-time": self.hellotime,
"bridge.mac-address": self.mac,
"bridge.max-age": self.maxage,
"bridge.priority": self.priority,
"bridge.stp": self.bool_to_string(self.stp),
}
for key, value in options.items():
if value is not None:
cmd.extend([key, value])
return cmd
def create_connection_bridge_slave(self):
        # format for creating bridge-slave interface
cmd = [self.nmcli_bin, "con", "add", "type", "bridge-slave", "con-name"]
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
cmd.append("ifname")
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
options = {
"master": self.master,
"connection.id": self.id,
"bridge-port.path-cost": self.path_cost,
"bridge-port.hairpin": self.bool_to_string(self.hairpin),
"bridge-port.priority": self.slavepriority,
}
for key, value in options.items():
if value is not None:
cmd.extend([key, value])
return cmd
def modify_connection_bridge_slave(self):
        # format for modifying bridge-slave interface
cmd = [self.nmcli_bin, "con", "mod", self.conn_name]
options = {
"master": self.master,
"connection.id": self.id,
"bridge-port.path-cost": self.path_cost,
"bridge-port.hairpin": self.bool_to_string(self.hairpin),
"bridge-port.priority": self.slavepriority,
}
for key, value in options.items():
if value is not None:
cmd.extend([key, value])
return cmd
def create_connection_vlan(self):
cmd = [self.nmcli_bin]
cmd.append("con")
cmd.append("add")
cmd.append("type")
cmd.append("vlan")
cmd.append("con-name")
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
else:
cmd.append("vlan%s" % self.vlanid)
cmd.append("ifname")
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
else:
cmd.append("vlan%s" % self.vlanid)
params = {
"dev": self.vlandev,
"id": self.vlanid,
"ip4": self.ip4 or "",
"ip4.method": self.ip4_method or "",
"gw4": self.gw4 or "",
"ip6": self.ip6 or "",
"ip6.method": self.ip6_method or "",
"ip6.dhcp-duid": self.ip6_dhcp_duid or "",
"gw6": self.gw6 or "",
"autoconnect": self.bool_to_string(self.autoconnect),
}
for k, v in params.items():
cmd.extend([k, v])
return cmd
def modify_connection_vlan(self):
cmd = [self.nmcli_bin]
cmd.append("con")
cmd.append("mod")
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
else:
cmd.append("vlan%s" % self.vlanid)
params = {
"vlan.parent": self.vlandev,
"vlan.id": self.vlanid,
"ipv4.address": self.ip4 or "",
"ipv4.method": self.ip4_method or "",
"ipv4.gateway": self.gw4 or "",
"ipv4.dns": self.dns4 or "",
"ipv6.address": self.ip6 or "",
"ipv6.method": self.ip6_method or "",
"ipv6.dhcp-duid": self.ip6_dhcp_duid or "",
"ipv6.gateway": self.gw6 or "",
"ipv6.dns": self.dns6 or "",
"autoconnect": self.bool_to_string(self.autoconnect),
}
for k, v in params.items():
cmd.extend([k, v])
return cmd
def create_connection_vxlan(self):
cmd = [self.nmcli_bin, "con", "add", "type", "vxlan", "con-name"]
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
else:
cmd.append("vxlan%s" % self.vxlanid)
cmd.append("ifname")
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
else:
cmd.append("vxan%s" % self.vxlanid)
params = {
"vxlan.id": self.vxlan_id,
"vxlan.local": self.vxlan_local,
"vxlan.remote": self.vxlan_remote,
"autoconnect": self.bool_to_string(self.autoconnect),
}
for k, v in params.items():
cmd.extend([k, v])
return cmd
def modify_connection_vxlan(self):
cmd = [self.nmcli_bin, "con", "mod"]
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
else:
cmd.append("vxlan%s" % self.vxlanid)
params = {
"vxlan.id": self.vxlan_id,
"vxlan.local": self.vxlan_local,
"vxlan.remote": self.vxlan_remote,
"autoconnect": self.bool_to_string(self.autoconnect),
}
for k, v in params.items():
cmd.extend([k, v])
return cmd
def create_connection_ipip(self):
cmd = [
self.nmcli_bin,
"con",
"add",
"type",
"ip-tunnel",
"mode",
"ipip",
"con-name",
]
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
elif self.ip_tunnel_dev is not None:
cmd.append("ipip%s" % self.ip_tunnel_dev)
cmd.append("ifname")
if self.ifname is not None:
cmd.append(self.ifname)
elif self.conn_name is not None:
cmd.append(self.conn_name)
else:
cmd.append("ipip%s" % self.ipip_dev)
if self.ip_tunnel_dev is not None:
cmd.append("dev")
cmd.append(self.ip_tunnel_dev)
params = {
"ip-tunnel.local": self.ip_tunnel_local,
"ip-tunnel.remote": self.ip_tunnel_remote,
"autoconnect": self.bool_to_string(self.autoconnect),
}
for k, v in params.items():
cmd.extend([k, v])
return cmd
def modify_connection_ipip(self):
cmd = [self.nmcli_bin, "con", "mod"]
if self.conn_name is not None:
cmd.append(self.conn_name)
elif self.ifname is not None:
cmd.append(self.ifname)
elif self.ip_tunnel_dev is not None:
cmd.append("ipip%s" % self.ip_tunnel_dev)
params = {
"ip-tunnel.local": self.ip_tunnel_local,
"ip-tunnel.remote": self.ip_tunnel_remote,
"autoconnect": self.bool_to_string(self.autoconnect),
}
for k, v in params.items():
cmd.extend([k, v])
return cmd
def create_connection(self):
cmd = []
if self.type == "team":
if (self.dns4 is not None) or (self.dns6 is not None):
cmd = self.create_connection_team()
self.execute_command(cmd)
cmd = self.modify_connection_team()
self.execute_command(cmd)
return self.up_connection()
elif (self.dns4 is None) or (self.dns6 is None):
cmd = self.create_connection_team()
elif self.type == "team-slave":
if self.mtu is not None:
cmd = self.create_connection_team_slave()
self.execute_command(cmd)
cmd = self.modify_connection_team_slave()
return self.execute_command(cmd)
else:
cmd = self.create_connection_team_slave()
elif self.type == "bond":
if (
(self.mtu is not None)
or (self.dns4 is not None)
or (self.dns6 is not None)
):
cmd = self.create_connection_bond()
self.execute_command(cmd)
cmd = self.modify_connection_bond()
self.execute_command(cmd)
return self.up_connection()
else:
cmd = self.create_connection_bond()
elif self.type == "bond-slave":
cmd = self.create_connection_bond_slave()
elif self.type == "ethernet":
if (
(self.mtu is not None)
or (self.dns4 is not None)
or (self.dns6 is not None)
):
cmd = self.create_connection_ethernet()
self.execute_command(cmd)
cmd = self.modify_connection_ethernet()
self.execute_command(cmd)
return self.up_connection()
else:
cmd = self.create_connection_ethernet()
elif self.type == "bridge":
cmd = self.create_connection_bridge()
elif self.type == "bridge-slave":
cmd = self.create_connection_bridge_slave()
elif self.type == "vlan":
cmd = self.create_connection_vlan()
elif self.type == "vxlan":
cmd = self.create_connection_vxlan()
elif self.type == "ipip":
cmd = self.create_connection_ipip()
elif self.type == "generic":
cmd = self.create_connection_ethernet(conn_type="generic")
if cmd:
return self.execute_command(cmd)
else:
self.module.fail_json(
msg="Type of device or network connection is required "
"while performing 'create' operation. Please specify 'type' as an argument."
)
def remove_connection(self):
# self.down_connection()
cmd = [self.nmcli_bin, "con", "del", self.conn_name]
return self.execute_command(cmd)
def modify_connection(self):
cmd = []
if self.type == "team":
cmd = self.modify_connection_team()
elif self.type == "team-slave":
cmd = self.modify_connection_team_slave()
elif self.type == "bond":
cmd = self.modify_connection_bond()
elif self.type == "bond-slave":
cmd = self.modify_connection_bond_slave()
elif self.type == "ethernet":
cmd = self.modify_connection_ethernet()
elif self.type == "bridge":
cmd = self.modify_connection_bridge()
elif self.type == "bridge-slave":
cmd = self.modify_connection_bridge_slave()
elif self.type == "vlan":
cmd = self.modify_connection_vlan()
elif self.type == "vxlan":
cmd = self.modify_connection_vxlan()
elif self.type == "ipip":
cmd = self.modify_connection_ipip()
elif self.type == "generic":
cmd = self.modify_connection_ethernet(conn_type="generic")
if cmd:
return self.execute_command(cmd)
else:
self.module.fail_json(
msg="Type of device or network connection is required "
"while performing 'modify' operation. Please specify 'type' as an argument."
)
def main():
# Parsing argument file
module = AnsibleModule(
argument_spec=dict(
autoconnect=dict(required=False, default=True, type="bool"),
state=dict(required=True, choices=["present", "absent"], type="str"),
conn_name=dict(required=True, type="str"),
master=dict(required=False, default=None, type="str"),
id=dict(required=False, default=None, type="str"),
ifname=dict(required=False, default=None, type="str"),
type=dict(
required=False,
default=None,
choices=[
"ethernet",
"team",
"team-slave",
"bond",
"bond-slave",
"bridge",
"bridge-slave",
"vlan",
"vxlan",
"ipip",
"generic",
],
type="str",
),
ip4=dict(required=False, default=None, type="str"),
ip4_method=dict(required=False, default=None, type="str"),
gw4=dict(required=False, default=None, type="str"),
dns4=dict(required=False, default=None, type="list"),
dns4_search=dict(type="list"),
dhcp_client_id=dict(required=False, default=None, type="str"),
ip6=dict(required=False, default=None, type="str"),
ip6_method=dict(required=False, default=None, type="str"),
ip6_dhcp_duid=dict(required=False, default=None, type="str"),
gw6=dict(required=False, default=None, type="str"),
dns6=dict(required=False, default=None, type="str"),
dns6_search=dict(type="list"),
# Bond Specific vars
mode=dict(
                required=False,
default="balance-rr",
type="str",
choices=[
"balance-rr",
"active-backup",
"balance-xor",
"broadcast",
"802.3ad",
"balance-tlb",
"balance-alb",
],
),
miimon=dict(required=False, default=None, type="str"),
downdelay=dict(required=False, default=None, type="str"),
updelay=dict(required=False, default=None, type="str"),
arp_interval=dict(required=False, default=None, type="str"),
arp_ip_target=dict(required=False, default=None, type="str"),
primary=dict(required=False, default=None, type="str"),
# general usage
mtu=dict(required=False, default=None, type="str"),
mac=dict(required=False, default=None, type="str"),
# bridge specific vars
stp=dict(required=False, default=True, type="bool"),
priority=dict(required=False, default="128", type="str"),
slavepriority=dict(required=False, default="32", type="str"),
forwarddelay=dict(required=False, default="15", type="str"),
hellotime=dict(required=False, default="2", type="str"),
maxage=dict(required=False, default="20", type="str"),
ageingtime=dict(required=False, default="300", type="str"),
hairpin=dict(required=False, default=True, type="bool"),
path_cost=dict(required=False, default="100", type="str"),
# vlan specific vars
vlanid=dict(required=False, default=None, type="str"),
vlandev=dict(required=False, default=None, type="str"),
flags=dict(required=False, default=None, type="str"),
ingress=dict(required=False, default=None, type="str"),
egress=dict(required=False, default=None, type="str"),
# vxlan specific vars
vxlan_id=dict(required=False, default=None, type="str"),
vxlan_local=dict(required=False, default=None, type="str"),
vxlan_remote=dict(required=False, default=None, type="str"),
# ip-tunnel specific vars
ip_tunnel_dev=dict(required=False, default=None, type="str"),
ip_tunnel_local=dict(required=False, default=None, type="str"),
ip_tunnel_remote=dict(required=False, default=None, type="str"),
),
supports_check_mode=True,
)
if not HAVE_DBUS:
module.fail_json(msg="This module requires dbus python bindings")
if not HAVE_NM_CLIENT:
module.fail_json(msg="This module requires NetworkManager glib API")
nmcli = Nmcli(module)
(rc, out, err) = (None, "", "")
result = {"conn_name": nmcli.conn_name, "state": nmcli.state}
# check for issues
if nmcli.conn_name is None:
nmcli.module.fail_json(msg="Please specify a name for the connection")
# team-slave checks
if nmcli.type == "team-slave" and nmcli.master is None:
nmcli.module.fail_json(msg="Please specify a name for the master")
if nmcli.type == "team-slave" and nmcli.ifname is None:
nmcli.module.fail_json(
msg="Please specify an interface name for the connection"
)
if nmcli.state == "absent":
if nmcli.connection_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = nmcli.down_connection()
(rc, out, err) = nmcli.remove_connection()
if rc != 0:
module.fail_json(
name=("No Connection named %s exists" % nmcli.conn_name),
msg=err,
rc=rc,
)
elif nmcli.state == "present":
if nmcli.connection_exists():
# modify connection (note: this function is check mode aware)
# result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type))
result["Exists"] = "Connections do exist so we are modifying them"
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = nmcli.modify_connection()
if not nmcli.connection_exists():
result["Connection"] = "Connection %s of Type %s is being added" % (
nmcli.conn_name,
nmcli.type,
)
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = nmcli.create_connection()
if rc is not None and rc != 0:
module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)
if rc is None:
result["changed"] = False
else:
result["changed"] = True
if out:
result["stdout"] = out
if err:
result["stderr"] = err
module.exit_json(**result)
if __name__ == "__main__":
main()
| 35.170025
| 150
| 0.571746
|
20aae5f3f51233527aabb1aad0271ead8957b768
| 695
|
py
|
Python
|
Unidad_2/buscar_precios.py
|
bloisejuli/curso_python_UNSAM
|
cfb6e6a8368ce239b5ff0ba0236dbf8c79772374
|
[
"MIT"
] | null | null | null |
Unidad_2/buscar_precios.py
|
bloisejuli/curso_python_UNSAM
|
cfb6e6a8368ce239b5ff0ba0236dbf8c79772374
|
[
"MIT"
] | null | null | null |
Unidad_2/buscar_precios.py
|
bloisejuli/curso_python_UNSAM
|
cfb6e6a8368ce239b5ff0ba0236dbf8c79772374
|
[
"MIT"
] | null | null | null |
# buscar_precios.py
# Student: Julieta Bloise
# Exercise 2.7:
# Building on what you did in Exercise 2.3, write a function buscar_precio(fruta) that looks up the price of a given
# fruit (or vegetable) in the file ../Data/precios.csv and prints it to the screen. If the fruit does not appear in the price list, it must print a message saying so.
def buscar_precio(fruta):
    f = open('../Data/precios.csv', 'rt')
    encontrada = False
    for line in f:
        row = line.split(',')
        if row[0] == fruta:
            print("El precio del cajón de", fruta, "es:", row[1])
            encontrada = True
    f.close()
    # Report a missing fruit only once, after scanning the whole file.
    if not encontrada:
        print(fruta, "no figura en el listado de precios.")
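# Example usage (a sketch: it assumes ../Data/precios.csv exists and that
# 'Frambuesa' is one of the products listed there):
# buscar_precio('Frambuesa')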
| 31.590909
| 158
| 0.628777
|
397715c0b33ae22eb82b21ebf289779b304c7562
| 2,253
|
py
|
Python
|
Optimization_MCLP_FSalas_Num2.py
|
ivandeluna/daad_knapsack
|
afe80b553102a3c9c5e471e74ea86be56f236d11
|
[
"MIT"
] | null | null | null |
Optimization_MCLP_FSalas_Num2.py
|
ivandeluna/daad_knapsack
|
afe80b553102a3c9c5e471e74ea86be56f236d11
|
[
"MIT"
] | null | null | null |
Optimization_MCLP_FSalas_Num2.py
|
ivandeluna/daad_knapsack
|
afe80b553102a3c9c5e471e74ea86be56f236d11
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 5 15:27:03 2017
Francisco Salas
Solves Eq. (3) in (Azhmyakov et al. 2016, A Novel Numerical Approach to the MCLP Based Resilient Supply Chain Optimization)
@author: admin/Francisco Salas
Note: run after Optimization_MCLP_FSalas_Num2.py
The variable D2 (the product of matrix A and y) is taken from the result of that program
Instructions: enter the values of vector W on line 23
"""
#from numpy import *
import numpy as np
#% D2 is the product A'*y, given an arbitrary (or optimal) 5x1 y
#% (obtained in Datos_vadim2016_v1)
# These are the constraint coefficients for each element of the
# vector z
#D = np.array([[1.4425],[1.1457],[0.9192],[1.0576],[1],[1.3754],[0],[1.2862]])
#% W holds the weights, i.e. the coefficients of the objective function
W = np.array([32,19,41,26,37,49,50,11])
# D2 is a variable produced in Fuerza_bruta_03_Mlb
#[l,] = D2.shape  # would give l = 8 once D2 is available
l = 8  # length of the vector (the original note states l = 8)
cl = 2**l  # number of binary combinations, using l = 8
elementos = cl-1  # number of combinations
salida = np.zeros((cl-1,l))
for numero in range(0,cl):
    # numero: the decimal number being converted to binary
#d = np.array(numero)
power = 2**np.arange(l)
d = numero * np.ones((1,l))
b = np.floor((d%(2*power))/power)
#salida[0,]=b
    salida[numero-1,]=b  # matrix with the elements in binary
    # Note: numero = 0 writes the all-zeros row into index -1 (the last row); it is
    # later overwritten when numero = cl-1 stores the all-ones row there.
Z2=salida
#% The multiplication with each combination is carried out here; the constraint
#% itself is checked in the loop below
X = np.inner(W,Z2)
# Indice records which combinations (vectors z) satisfy the constraint
Indice = np.zeros((cl-1))
for i in range(0,cl-1):
if ((Z2[i,0] <= D2[0]) and
(Z2[i,1] <= D2[1]) and
(Z2[i,2] <= D2[2]) and
(Z2[i,3] <= D2[3]) and
(Z2[i,4] <= D2[4]) and
(Z2[i,5] <= D2[5]) and
(Z2[i,6] <= D2[6]) and
(Z2[i,7] <= D2[7])):
Indice[i] = 1
# From the objective-function vector X, keep only the combinations that satisfy
# the constraint
XM =X*Indice
fOptimE3 = max(XM)
for j in range(0, cl-1):
if (XM[j]==fOptimE3):
solE3 = Z2[j]
print(solE3)
print(fOptimE3)
"""
The solution combination is solE3
The optimal value of the objective function is fOptimE3
(Compare with the solution in Eq. (16))
"""
| 32.185714
| 124
| 0.642255
|
84149b19608cff2e7e3a7f97e633de8cb644493f
| 26,569
|
py
|
Python
|
fanficfare/adapters/adapter_storiesonlinenet.py
|
trishume/FanFicFare
|
2ddce1acd5258f8dae5b3860aec1c9643a7a0807
|
[
"Apache-2.0"
] | 3
|
2020-11-10T16:43:43.000Z
|
2021-04-09T07:12:31.000Z
|
fanficfare/adapters/adapter_storiesonlinenet.py
|
trishume/FanFicFare
|
2ddce1acd5258f8dae5b3860aec1c9643a7a0807
|
[
"Apache-2.0"
] | null | null | null |
fanficfare/adapters/adapter_storiesonlinenet.py
|
trishume/FanFicFare
|
2ddce1acd5258f8dae5b3860aec1c9643a7a0807
|
[
"Apache-2.0"
] | 1
|
2021-04-08T12:25:09.000Z
|
2021-04-08T12:25:09.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2013 Fanficdownloader team, 2020 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
import re
from datetime import datetime
#
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions
# py2 vs py3 transition
from ..six.moves.urllib.parse import urlparse, urlunparse
from ..six import text_type as unicode
from .base_adapter import BaseSiteAdapter, makeDate
def getClass():
return StoriesOnlineNetAdapter
# Class name has to be unique. Our convention is camel case the
# sitename with Adapter at the end. www is skipped.
class StoriesOnlineNetAdapter(BaseSiteAdapter):
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
# logger.debug("StoriesOnlineNetAdapter.__init__ - url='%s'" % url)
self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
self.password = ""
self.is_adult=False
# get storyId from url
self.story.setMetadata('storyId',self.parsedUrl.path.split('/',)[2].split(':')[0])
if 'storyInfo' in self.story.getMetadata('storyId'):
self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
## for -2020-12-25 date added by append_datepublished_to_storyurl
## adds to URL, but NOT id.
if '-' in self.story.getMetadata('storyId'):
self.story.setMetadata('storyId',self.story.getMetadata('storyId').split('-')[0])
logger.debug("storyId date removed:%s\n"%self.story.getMetadata('storyId'))
# normalized story URL.
self._setURL('https://' + self.getSiteDomain() + '/s/'+self.story.getMetadata('storyId'))
# Each adapter needs to have a unique site abbreviation.
self.story.setMetadata('siteabbrev',self.getSiteAbbrev())
# The date format will vary from site to site.
# http://docs.python.org/library/datetime.html#strftime-strptime-behavior
self.dateformat = "%Y-%m-%d %I:%M:%S %p"
@classmethod
def getSiteAbbrev(cls):
return 'strol'
@staticmethod # must be @staticmethod, don't remove it.
def getSiteDomain():
# The site domain. Does have www here, if it uses it.
return 'storiesonline.net'
@classmethod
def getSiteExampleURLs(cls):
return "http://"+cls.getSiteDomain()+"/s/1234 http://"+cls.getSiteDomain()+"/s/1234:4010 https://"+cls.getSiteDomain()+"/s/1234 https://"+cls.getSiteDomain()+"/s/1234:4010"
def getSiteURLPattern(self):
return r"https?://"+re.escape(self.getSiteDomain())+r"/(s|library)/(storyInfo.php\?id=)?(?P<id>\d+)((:\d+)?(;\d+)?$|(:i)?$)?"
@classmethod
def getTheme(cls):
## only one theme is supported.
return "Classic"
def needToLoginCheck(self, data):
return 'Free Registration' in data \
or "Invalid Password!" in data \
or "Invalid User Name!" in data \
or "Log In" in data \
or "Access to unlinked chapters requires" in data \
or "Log in to Storiesonline" in data \
or "WLPC log in System" in data
def performLogin(self, url):
if self.password:
username = self.username
password = self.password
else:
username = self.getConfig("username")
password = self.getConfig("password")
loginUrl = 'https://' + self.getSiteDomain() + '/sol-secure/login.php'
logger.debug("Will now login to URL (%s) as (%s)" % (loginUrl,
username))
if not username or not password:
logger.info("Login Required for URL %s" % loginUrl)
raise exceptions.FailedToLogin(url,username)
## Double POST requirement has been removed as of Oct 2021
(data,useurl) = self.get_request_redirected(loginUrl,usecache=False)
# logger.debug(data)
if not self.needToLoginCheck(data):
## hitting login URL reminds system we're logged in?
logger.debug("don't need to login")
return
soup = self.make_soup(data)
params = {}
params['email'] = username
postAction = soup.find('form')['action']
parsedUrl = urlparse(useurl)
postUrl = urlunparse((parsedUrl.scheme,
parsedUrl.netloc,
postAction,
'','',''))
params['password'] = password
params['cmd'] = 'cred_set'
data = self.post_request(postUrl,params,usecache=False)
if self.needToLoginCheck(data):
logger.info("Failed to login to URL %s as %s" % (loginUrl,
username))
raise exceptions.FailedToLogin(url,username)
## Getting the chapter list and the meta data, plus 'is adult' checking.
def doExtractChapterUrlsAndMetadata(self, get_cover=True):
# index=1 makes sure we see the story chapter index. Some
# sites skip that for one-chapter stories.
url = self.url
logger.debug("URL: "+url)
try:
data = self.get_request(url+":i")
# logger.debug(data)
except exceptions.HTTPErrorFFF as e:
if e.status_code in (401, 403, 410):
data = 'Log In' # to trip needToLoginCheck
else:
raise e
if self.needToLoginCheck(data):
# need to log in for this one.
self.performLogin(url)
data = self.get_request(url+":i",usecache=False)
if "Access denied. This story has not been validated by the adminstrators of this site." in data:
raise exceptions.AccessDenied(self.getSiteDomain() +" says: Access denied. This story has not been validated by the adminstrators of this site.")
elif "Error! The story you're trying to access is being filtered by your choice of contents filtering." in data:
raise exceptions.FailedToDownload(self.getSiteDomain() +" says: Error! The story you're trying to access is being filtered by your choice of contents filtering.")
elif "Error! Daily Limit Reached" in data or "Sorry! You have reached your daily limit of" in data:
raise exceptions.FailedToDownload(self.getSiteDomain() +" says: Error! Daily Limit Reached")
soup = self.make_soup(data)
# logger.debug(data)
## Title
a = soup.find('h1')
self.story.setMetadata('title',stripHTML(a))
# Find authorid and URL from... author url. Sometimes in top,
# other times in footer.
authfrom = soup.find('div', {'id':'top-header'})
if authfrom is None or 'author' not in str(authfrom):
authfrom = soup.find('footer')
alist = authfrom.findAll('a', {'rel' : 'author'})
for a in alist:
self.story.addToList('authorId',a['href'].split('/')[2])
self.story.addToList('authorUrl','https://'+self.host+a['href'])
self.story.addToList('author',stripHTML(a).replace("'s Page",""))
# The rest of the metadata is within the article tag.
soup = soup.find('article')
# Find the chapters:
chapters = soup.findAll('a', href=re.compile(r'^/s/'+self.story.getMetadata('storyId')+r":\d+(/.*)?$"))
if len(chapters) != 0:
logger.debug("Number of chapters: {0}".format(len(chapters)))
for chapter in chapters:
# just in case there's tags, like <i> in chapter titles.
self.add_chapter(chapter,'https://'+self.host+chapter['href'])
else:
self.add_chapter(self.story.getMetadata('title'),'https://'+self.host+'/s/'+self.story.getMetadata('storyId'))
self.getStoryMetadataFromAuthorPage()
## append_datepublished_to_storyurl adds to URL, but NOT id.
## This is an ugly kludge to (hopefully) help address the
        ## site's unfortunate habit of *reusing* storyId numbers.
if self.getConfig("append_datepublished_to_storyurl",False):
logger.info("Applying append_datepublished_to_storyurl")
self._setURL('https://' + self.getSiteDomain() +
'/s/'+self.story.getMetadata('storyId')+
self.story.getMetadataRaw('datePublished').strftime("-%Y-%m-%d"))
logger.info("updated storyUrl:%s"%self.url)
# Some books have a cover in the index page.
# Samples are:
# https://storiesonline.net/s/11999
# https://storiesonline.net/s/10823
if get_cover:
# logger.debug("Looking for the cover image...")
cover_url = ""
img = soup.find('img')
if img:
cover_url=img['src']
# logger.debug("cover_url: %s"%cover_url)
if cover_url:
self.setCoverImage(url,cover_url)
        # Remove all the metadata elements, leaving only any preamble text. This is usually
        # a notice or a foreword.
if self.num_chapters() > 1:
header = soup.find('header')
header.extract()
else:
soup = soup.find('header')
# Remove some tags based on their class or id
elements_to_remove = ['#det-link', '#s-details', '#index-list', '#s-title', '#s-auth', '.copy']
if not self.getConfig('include_images'):
elements_to_remove.append('img')
for element_name in elements_to_remove:
elements = soup.select(element_name)
for element in elements:
element.extract()
if len(soup.contents ) > 0 and (len(soup.text.strip()) > 0 or len(soup.find_all('img')) > 0):
self.story.setMetadata('notice', self.utf8FromSoup(url, soup))
def getStoryMetadataFromAuthorPage(self):
# surprisingly, the detailed page does not give enough details, so go to author's page
story_row = self.findStoryRow('tr')
self.has_universes = False
title_cell = story_row.find('td', {'class' : 'lc2'})
for cat in title_cell.findAll('div', {'class' : 'typediv'}):
self.story.addToList('genre',cat.text)
# in lieu of word count.
self.story.setMetadata('size', story_row.find('td', {'class' : 'num'}).text)
score = story_row.findNext('th', {'class' : 'ynum'}).text
if score != '-':
self.story.setMetadata('score', score)
description_element = story_row.findNext('td', {'class' : 'lc4'})
# logger.debug(description_element)
self.parseDescriptionField(description_element)
self.parseOtherAttributes(description_element)
def findStoryRow(self, row_class='tr'):
page=0
story_found = False
while not story_found:
page = page + 1
try:
data = self.get_request(self.story.getList('authorUrl')[0] + "/" + unicode(page))
except exceptions.HTTPErrorFFF as e:
if e.status_code == 404:
raise exceptions.FailedToDownload("Story not found in Author's list--Set Access Level to Full Access and change Listings Theme back to "+self.getTheme())
asoup = self.make_soup(data)
story_row = asoup.find(row_class, {'id' : 'sr' + self.story.getMetadata('storyId')})
if story_row:
logger.debug("Found story row on page %d" % page)
story_found = True
self.has_universes = "/universes" in data
break
return story_row
def parseDescriptionField(self, description_element):
# Parse the description field for the series or universe and the
# actual description.
try:
a = description_element.find('a', href=re.compile(r"/series/\d+/.*"))
# logger.debug("Looking for series - a='{0}'".format(a))
if a:
# if there's a number after the series name, series_contents is a two element list:
# [<a href="...">Title</a>, u' (2)']
series_contents = a.parent.contents
i = 0 if len(series_contents) == 1 else series_contents[1].strip(' ()')
seriesUrl = 'https://'+self.host+a['href']
self.story.setMetadata('seriesUrl',seriesUrl)
series_name = stripHTML(a)
# logger.debug("Series name= %s" % series_name)
series_soup = self.make_soup(self.get_request(seriesUrl))
if series_soup:
# logger.debug("Retrieving Series - looking for name")
series_name = stripHTML(series_soup.find('h1', {'id' : 'ptitle'}))
series_name = re.sub(r' . a (series by|collection from).*$','',series_name)
# logger.debug("Series name: '%s'" % series_name)
self.setSeries(series_name, i)
# Check if series is in a universe
if self.has_universes:
universe_url = self.story.getList('authorUrl')[0] + "&type=uni"
universes_soup = self.make_soup(self.get_request(universe_url) )
# logger.debug("Universe url='{0}'".format(universe_url))
if universes_soup:
universes = universes_soup.findAll('div', {'class' : 'ser-box'})
# logger.debug("Number of Universes: %d" % len(universes))
for universe in universes:
# logger.debug("universe.find('a')={0}".format(universe.find('a')))
# The universe id is in an "a" tag that has an id but nothing else. It is the first tag.
# The id is prefixed with the letter "u".
universe_id = universe.find('a')['id'][1:]
# logger.debug("universe_id='%s'" % universe_id)
universe_name = stripHTML(universe.find('div', {'class' : 'ser-name'})).partition(' ')[2]
# logger.debug("universe_name='%s'" % universe_name)
# If there is link to the story, we have the right universe
story_a = universe.find('a', href=re.compile('/s/'+self.story.getMetadata('storyId')))
if story_a:
# logger.debug("Story is in a series that is in a universe! The universe is '%s'" % universe_name)
self.story.setMetadata("universe", universe_name)
self.story.setMetadata('universeUrl','https://'+self.host+ '/library/universe.php?id=' + universe_id)
break
# else:
# logger.debug("No universe page")
except:
raise
try:
a = description_element.find('a', href=re.compile(r"/universe/\d+/.*"))
# logger.debug("Looking for universe - a='{0}'".format(a))
if a:
self.story.setMetadata("universe",stripHTML(a))
# Assumed only one universe, but it does have a URL--use universeHTML
universe_name = stripHTML(a)
universeUrl = 'https://'+self.host+a['href']
# logger.debug("Retrieving Universe - about to get page - universeUrl='{0}".format(universeUrl))
universe_soup = self.make_soup(self.get_request(universeUrl))
# logger.debug("Retrieving Universe - have page")
if universe_soup:
# logger.debug("Retrieving Universe - looking for name")
universe_name = stripHTML(universe_soup.find('h1', {'id' : 'ptitle'}))
universe_name = re.sub(r' .\s+A Universe from the Mind.*$','',universe_name)
# logger.debug("Universes name: '{0}'".format(universe_name))
self.story.setMetadata('universeUrl',universeUrl)
# logger.debug("Setting universe name: '{0}'".format(universe_name))
self.story.setMetadata('universe',universe_name)
if self.getConfig("universe_as_series") and not self.story.getMetadata('seriesUrl'):
# logger.debug("universe_as_series")
# take position in universe page as number in series.
for i, storya in enumerate(universe_soup.find_all('a',href=re.compile(r'^/s/\d+/'))):
if storya['href'].split('/')[2] == self.story.getMetadata('storyId'):
self.setSeries(universe_name, i+1)
self.story.setMetadata('seriesUrl',universeUrl)
break
# else:
# logger.debug("Do not have a universe")
except:
raise
# There's nothing around the desc to grab it by, and there's a
# variable number of links before it.
for line in description_element.contents:
line = unicode(line)
if line.strip() == '' or line.startswith("<span") or line.startswith("<br"):
# skip empty, <span (universe, series or context) and <br>.
# logger.debug("Discard: %s"%line)
pass
else:
# logger.debug("Use: %s"%line)
self.setDescription('https://'+self.host+'/s/'+self.story.getMetadata('storyId'),line)
break
def parseDate(self,label):
# date is passed as a timestamp and converted in JS. used to
# use noscript value instead, but found one story that didn't
# include it.
# <script> tag processing not working?
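        # Hypothetical example of the markup being parsed here (the actual JS call
        # varies by site; only the milliseconds inside "Date(...)" are relied upon):
        #   <script>printDate(new Date(1514764800000))</script>
        #   <noscript>2018-01-01</noscript>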
# logger.debug('parseDate label: "%s"' % label)
script = label.findNext('script')
# logger.debug("script:(%s)"%script)
# logger.debug("script.text:(%s)"%script.text)
# logger.debug("script:(stripHTML(%s))"%stripHTML(script))
noscript = label.findNext('noscript').text
# I honestly have no idea why both script.text and
# stripHTML(script) return empty string, but they do. BS or
# html5lib maybe?
script = "%s"%label.findNext('script')
try:
timestamp = script[script.index("Date(")+5:]
# remove milliseconds that JS likes.
timestamp = timestamp[:timestamp.index(")")-3]
# logger.debug("timestamp:(%s)"%timestamp)
value = datetime.fromtimestamp(float(timestamp))
except:
value = makeDate(stripHTML(noscript), self.dateformat)
# logger.debug('Have a date field label: "%s", noscript: "%s", timestamp: "%s", value: "%s"' % (label, noscript, timestamp, value))
return value
def parseOtherAttributes(self, other_attribute_element):
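        # The metadata block is expected as a series of '<b>Label:</b> value' pairs,
        # e.g. (hypothetical) '<b>Tags:</b> drama, romance' or '<b>Posted:</b> <script>...</script>'.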
for b in other_attribute_element.findAll('b'):
#logger.debug('Getting metadata: "%s"' % b)
label = b.text
if label in ['Posted:', 'Concluded:', 'Updated:']:
value = self.parseDate(b)
#logger.debug('Have a date field label: "%s", value: "%s"' % (label, value))
else:
value = b.nextSibling
#logger.debug('label: "%s", value: "%s"' % (label, value))
if 'Sex' in label: # storiesonline.net uses '<b>Sex Contents:</b> No Sex'
self.story.setMetadata('rating', value)
if 'Age' in label: # finestories.com,scifistories.com use '<b>Age Rating:</b> Older than XX | '
self.story.setMetadata('rating', value.split('|')[0])
if 'Score' in label and value != '-':
self.story.setMetadata('score', value)
if 'Tags' in label or 'Codes' in label:
for code in re.split(r'\s*,\s*', value.strip()):
self.story.addToList('sitetags', code)
if 'Genre' in label:
for code in re.split(r'\s*,\s*', value.strip()):
self.story.addToList('genre', code)
if 'Posted' in label:
self.story.setMetadata('datePublished', value)
self.story.setMetadata('dateUpdated', value)
if 'Concluded' in label:
self.story.setMetadata('dateUpdated', value)
if 'Updated' in label:
self.story.setMetadata('dateUpdated', value)
status = other_attribute_element.find('span', {'class':'ab'})
if status != None:
if 'Incomplete and Inactive' in status.text:
self.story.setMetadata('status', 'Incomplete')
else:
self.story.setMetadata('status', 'In-Progress')
if "Last Activity" in status.text:
self.story.setMetadata('dateUpdated', value)
else:
self.story.setMetadata('status', 'Completed')
# grab the text for an individual chapter.
def getChapterText(self, url):
logger.debug('Getting chapter text from: %s' % url)
soup = self.make_soup(self.get_request(url))
# The story text is wrapped in article tags. Most of the page header and
# footer are outside of this.
        chaptertag = soup.find('article')
        if chaptertag is None:
            raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
# some big chapters are split over several pages
pager = chaptertag.find('div', {'class' : 'pager'})
self.cleanPage(chaptertag)
if pager != None:
urls=pager.findAll('a')
urls=urls[:len(urls)-1]
# logger.debug("pager urls:%s"%urls)
pager.extract()
for ur in urls:
soup = self.make_soup(self.get_request("https://"+self.getSiteDomain()+ur['href']))
pagetag = soup.find('article')
self.cleanPage(pagetag)
for tag in pagetag.contents[1:]:
chaptertag.append(tag)
return self.utf8FromSoup(url,chaptertag)
def cleanPage(self,pagetag):
"Consolidate 'page' clean up code so it can be called."
# logger.debug("cleanPage start: {0}".format(pagetag))
chapter_title = None
if self.getConfig('inject_chapter_title'):
h2tag = pagetag.find('h2')
if h2tag:
# I'm seeing an h1 now, but it's not logged in?
# Something's broken...
chapter_title = h2tag.extract()
        # Strip the header section
tag = pagetag.find('header')
if tag:
#logger.debug("remove before header: {0}".format(tag))
tag.extract()
# some big chapters are split over several pages
# remove FIRST pager and everything before it.
tag = pagetag.find('div', {'class' : 'pager'})
while tag != None:
# logger.debug("remove before pager: {0}".format(tag))
prev = tag.previousSibling
tag.extract()
tag = prev
# Find the "Continues" marker on the current page and
# remove everything after that. This is actually
        # affecting the *previous* 'page'. EXCEPT!--they are
# putting a 'conTag' at the *top* now, too. So this
# was nuking every page but the first and last. Now
# only if 'Continues'
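        # Hypothetical page markup this handles: a page may open with
        # '<span class="conTag">Continuation of ...</span>' (everything before it is trimmed)
        # and close with '<span class="conTag">Continues in ...</span>' (everything after it is trimmed).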
for contag in pagetag.findAll('span', {'class' : 'conTag'}):
# remove everything after continues...
if 'Continuation' in contag.text:
tag = contag
while tag != None:
# logger.debug("remove before Continuation: {0}".format(tag))
prev = tag.previousSibling
tag.extract()
tag = prev
elif 'Continues' in contag.text:
tag = contag
while tag != None:
# logger.debug("remove after Continues: {0}".format(tag))
nxt = tag.nextSibling
tag.extract()
tag = nxt
# some big chapters are split over several pages
# remove LAST pager and everything before it.
# Only needed on last page.
tag = pagetag.find('div', {'class' : 'pager'})
while tag != None:
# logger.debug("remove after pager: {0}".format(tag))
nxt = tag.nextSibling
tag.extract()
tag = nxt
# If it is a chapter, there are dates at the start for when it was posted or modified. These plus
# everything before them can be discarded.
postedDates = pagetag.findAll('div', {'class' : 'date'})
# logger.debug(postedDates)
if postedDates:
a = postedDates[0].previousSibling
while a != None:
# logger.debug("before dates: {0}".format(a))
b = a.previousSibling
a.extract()
a = b
for a in pagetag.findAll('div', {'class' : 'date'}):
a.extract()
# Kill the vote form and everything after it.
a = pagetag.find('div', {'class' : 'vform'})
# logger.debug("Chapter end= '{0}'".format(a))
while a != None:
b = a.nextSibling
a.extract()
a=b
# For chapters, remove next chapter link and everything after it
a = pagetag.find('h3', {'class' : 'end'})
# logger.debug("Chapter end= '{0}'".format(a))
while a != None:
b = a.nextSibling
a.extract()
a=b
# inject_chapter_title
if chapter_title:
chapter_title.name='h3'
pagetag.insert(0,chapter_title)
| 44.653782
| 180
| 0.568181
|
9261dc363bdf48343e667f8704b12427d925cac5
| 22,522
|
py
|
Python
|
BOARD/Texttable.py
|
albcristi/ObstructionGame
|
22ea8e7dcab80fbcb283c9c0b50d6247028deb31
|
[
"MIT"
] | null | null | null |
BOARD/Texttable.py
|
albcristi/ObstructionGame
|
22ea8e7dcab80fbcb283c9c0b50d6247028deb31
|
[
"MIT"
] | null | null | null |
BOARD/Texttable.py
|
albcristi/ObstructionGame
|
22ea8e7dcab80fbcb283c9c0b50d6247028deb31
|
[
"MIT"
] | null | null | null |
# texttable - module for creating simple ASCII tables
# Copyright (C) 2003-2018 Gerome Fournier <jef(at)foutaise.org>
"""module for creating simple ASCII tables
Example:
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"],
["Mme\\nLouise\\nBourgeau", 28, "Lou\\n\\nLoue"]])
print table.draw() + "\\n"
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print table.draw()
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
| Mme | | Lou |
| Louise | 28 | |
| Bourgeau | | Loue |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
from __future__ import division
__all__ = ["Texttable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'MIT'
__version__ = '1.5.0'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
Frank Sachsenheim:
- add Python 2/3-compatibility
Maximilian Hils:
- fix minor bug for Python 3 compatibility
frinkelpi:
- preserve empty lines
"""
import sys
import unicodedata
# define a text wrapping function to wrap some text
# to a specific width:
# - use cjkwrap if available (better CJK support)
# - fallback to textwrap otherwise
try:
import cjkwrap
def textwrapper(txt, width):
return cjkwrap.wrap(txt, width)
except ImportError:
try:
import textwrap
def textwrapper(txt, width):
return textwrap.wrap(txt, width)
except ImportError:
sys.stderr.write("Can't import textwrap module!\n")
raise
# define a function to calculate the rendering width of a unicode character
# - use wcwidth if available
# - fallback to unicodedata information otherwise
try:
import wcwidth
def uchar_width(c):
"""Return the rendering width of a unicode character
"""
return max(0, wcwidth.wcwidth(c))
except ImportError:
def uchar_width(c):
"""Return the rendering width of a unicode character
"""
if unicodedata.east_asian_width(c) in 'WF':
return 2
elif unicodedata.combining(c):
return 0
else:
return 1
from functools import reduce
if sys.version_info >= (3, 0):
unicode_type = str
bytes_type = bytes
else:
unicode_type = unicode
bytes_type = str
def obj2unicode(obj):
"""Return a unicode representation of a python object
"""
if isinstance(obj, unicode_type):
return obj
elif isinstance(obj, bytes_type):
try:
return unicode_type(obj, 'utf-8')
except UnicodeDecodeError as strerror:
sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (obj, strerror))
return unicode_type(obj, 'utf-8', 'replace')
else:
return unicode_type(obj)
def len(iterable):
"""Redefining len here so it will be able to work with non-ASCII characters
"""
if isinstance(iterable, bytes_type) or isinstance(iterable, unicode_type):
return sum([uchar_width(c) for c in obj2unicode(iterable)])
else:
return iterable.__len__()
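# Illustrative behaviour of the redefined len(): plain ASCII text keeps its usual
# length, while full-width CJK characters count as two columns each, so a string
# of two such characters reports a width of 4.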
class ArraySizeError(Exception):
"""Exception raised when specified rows don't fit the required size
"""
def __init__(self, msg):
self.msg = msg
Exception.__init__(self, msg, '')
def __str__(self):
return self.msg
class FallbackToText(Exception):
"""Used for failed conversion to float"""
pass
class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
        - 'deco' can be a combination of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
            raise ValueError('width must be an integer greater than or equal to 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
          or a two-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
        # nb: don't use 'iter' on two-dimensional arrays, to get a
# usable code for python 2.1
if header:
            if hasattr(rows, '__iter__') and \
               (hasattr(rows, 'next') or hasattr(rows, '__next__')):
                self.header(next(rows))
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
        - x is float-converted and rendered in decimal notation.
        - precision is taken from the `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
        - x is float-converted and rendered in exponential notation.
        - precision is taken from the `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
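        # Illustrative outputs with the default precision of 3: 89 -> "89" (int),
        # 128.001 -> "128.001" (decimal), 1.28e22 -> "1.280e+22" (|x| > 1e8 switches
        # to exponent form).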
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
        cell, such as newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
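        # Example: "ab\tc" expands the tab to the next multiple of 8, so the
        # rendered width is 9 (8 columns up to the tab stop, plus "c").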
def _compute_cols_width(self):
"""Return an array with the width of each column
        If a specific width has already been specified, return immediately. If the
        total of the column widths exceeds the desired table width, new widths are
        computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
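        # Illustrative redistribution: with max_width=20, a border and three columns
        # of natural widths [10, 10, 10], deco_width is 10, so the 10 remaining
        # character cells are dealt out round-robin, giving column widths [4, 3, 3].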
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
        Loop over each wrapped text line, drawing it across all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
        Each element is turned into a list of lines, the result of wrapping the
        string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
if __name__ == '__main__':
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\nXavier\nHuon", 32, "Xav'"],
["Mr\nBaptiste\nClement", 1, "Baby"],
["Mme\nLouise\nBourgeau", 28, "Lou\n \nLoue"]])
print(table.draw() + "\n")
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print(table.draw())
| 30.394062
| 100
| 0.532413
|
5d81ec936cc0af817de7ec424c31e5e89eb0bbe0
| 454
|
py
|
Python
|
tests/notebooks/mirror/ipynb_to_script_vim_folding_markers/jupyter.py
|
st--/jupytext
|
f8e8352859cc22e17b11154d0770fd946c4a430a
|
[
"MIT"
] | 5,378
|
2018-09-01T22:03:43.000Z
|
2022-03-31T06:51:42.000Z
|
tests/notebooks/mirror/ipynb_to_script_vim_folding_markers/jupyter.py
|
st--/jupytext
|
f8e8352859cc22e17b11154d0770fd946c4a430a
|
[
"MIT"
] | 812
|
2018-08-31T08:26:13.000Z
|
2022-03-30T18:12:11.000Z
|
tests/notebooks/mirror/ipynb_to_script_vim_folding_markers/jupyter.py
|
st--/jupytext
|
f8e8352859cc22e17b11154d0770fd946c4a430a
|
[
"MIT"
] | 380
|
2018-09-02T01:40:07.000Z
|
2022-03-25T13:57:23.000Z
|
# ---
# jupyter:
# jupytext:
# cell_markers: '{{{,}}}'
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Jupyter notebook
#
# This notebook is a simple jupyter notebook. It only has markdown and code cells. And it does not contain consecutive markdown cells. We start with an addition:
a = 1
b = 2
a + b
# Now we return a few tuples
a, b
a, b, a+b
# And this is already the end of the notebook
| 17.461538
| 161
| 0.645374
|
c63fd5fb4a95e87150f7c8351dc3743da24db8a0
| 7,685
|
py
|
Python
|
src/main/python/libraries/edge/opensearch/atomresponsebysolr.py
|
mayadebellis/incubator-sdap-edge
|
c0899e508a06d614aa58ec9e991443640ba39e65
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/libraries/edge/opensearch/atomresponsebysolr.py
|
mayadebellis/incubator-sdap-edge
|
c0899e508a06d614aa58ec9e991443640ba39e65
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/libraries/edge/opensearch/atomresponsebysolr.py
|
mayadebellis/incubator-sdap-edge
|
c0899e508a06d614aa58ec9e991443640ba39e65
|
[
"Apache-2.0"
] | 1
|
2021-01-10T21:19:05.000Z
|
2021-01-10T21:19:05.000Z
|
import json
import urllib
from edge.opensearch.atomresponse import AtomResponse
from collections import defaultdict
class AtomResponseBySolr(AtomResponse):
def __init__(self):
super(AtomResponseBySolr, self).__init__()
def generate(self, solrResponse, pretty=False):
self._populate(solrResponse)
return super(AtomResponseBySolr, self).generate(pretty)
def _populate(self, solrResponse):
#response.title = 'OCSI Dataset Search: '+searchText
#response.description = 'Search result for "'+searchText+'"'
#response.link = searchUrl
self._populateChannel(solrResponse)
if solrResponse is None:
self.variables.append(
{'namespace': 'opensearch', 'name': 'totalResults', 'value': 1}
)
self.variables.append(
{'namespace': 'opensearch', 'name': 'startIndex', 'value': 1}
)
self.variables.append(
{'namespace': 'opensearch', 'name': 'itemsPerPage', 'value': 1}
)
self.parameters['startIndex'] = 0
url = self.link + '?' + urllib.urlencode(self.parameters)
self.variables.append({'name': 'link', 'attribute': {'href': url, 'rel': 'self', 'type': 'application/atom+xml'}})
self.variables.append({'name': 'link', 'attribute': {'href': url, 'rel': 'first', 'type': 'application/atom+xml'}})
item = [
{'name': 'title', 'value': 'Error'},
{'name': 'content', 'value': 'error'}
]
self.items.append(item)
else:
#logging.debug(solrResponse)
solrJson = json.loads(solrResponse)
numFound = int(solrJson['response']['numFound'])
start = int(solrJson['response']['start'])
rows = int(solrJson['responseHeader']['params']['rows'])
self.parameters['startIndex'] = start
self.variables.append({'name': 'link', 'attribute': {'href': self.link + '?' + urllib.urlencode(self.parameters), 'rel': 'self', 'type': 'application/atom+xml'}})
self.parameters['startIndex'] = 0
self.variables.append({'name': 'link', 'attribute': {'href': self.link + '?' + urllib.urlencode(self.parameters), 'rel': 'first', 'type': 'application/atom+xml'}})
if start > 0:
if (start - rows > 0):
self.parameters['startIndex'] = start - rows
self.variables.append({'name': 'link', 'attribute': {'href': self.link + '?' + urllib.urlencode(self.parameters), 'rel': 'previous', 'type': 'application/atom+xml'}})
if start + rows < numFound:
self.parameters['startIndex'] = start + rows
self.variables.append({'name': 'link', 'attribute': {'href': self.link + '?' + urllib.urlencode(self.parameters), 'rel': 'next', 'type': 'application/atom+xml'}})
self.variables.append(
{'namespace': 'opensearch', 'name': 'totalResults', 'value': solrJson['response']['numFound']}
)
self.variables.append(
{'namespace': 'opensearch', 'name': 'startIndex', 'value': solrJson['response']['start']}
)
self.variables.append(
{'namespace': 'opensearch', 'name': 'itemsPerPage', 'value': solrJson['responseHeader']['params']['rows']}
)
for doc in solrJson['response']['docs']:
"""
item = [
{'name': 'title', 'value': doc['Dataset-LongName'][0]},
{'name': 'description', 'value': doc['Dataset-Description'][0]},
{'name': 'link', 'value': self._configuration.get('portal', 'datasetUrl')+'/'+doc['Dataset-ShortName'][0]}
]
"""
item = []
'''
#Handle dataset_location_policy values differently
if 'DatasetLocationPolicy-Type' in doc and 'DatasetLocationPolicy-BasePath' in doc:
for i, x in enumerate(doc['DatasetLocationPolicy-Type']):
item.append({'namespace': 'podaac', 'name': self._camelCaseStripHyphen(x.title()), 'value': doc['DatasetLocationPolicy-BasePath'][i]})
del doc['DatasetLocationPolicy-Type']
del doc['DatasetLocationPolicy-BasePath']
multiValuedElementsKeys = ('DatasetRegion-', 'DatasetCharacter-', 'DatasetCitation-', 'DatasetContact-Contact-', 'DatasetDatetime-',
'DatasetInteger-', 'DatasetParameter-', 'DatasetProject-', 'DatasetReal-', 'DatasetResource-',
'DatasetSoftware-', 'DatasetSource-', 'DatasetVersion-', 'Collection-',
'GranuleArchive-', 'GranuleReference-', 'GranuleReal-')
multiValuedElements = defaultdict(list)
for docKey in doc.keys():
if docKey.startswith(multiValuedElementsKeys):
multiValuedElements[docKey.split('-', 1)[0]].append(docKey)
else:
item.append({'namespace': 'podaac', 'name': self._camelCaseStripHyphen(docKey), 'value': doc[docKey]})
for multiValuedKey in multiValuedElements:
for i, x in enumerate(doc[multiValuedElements[multiValuedKey][0]]):
values = {}
for key in multiValuedElements[multiValuedKey]:
values[self._camelCaseStripHyphen(key.split('-', 1)[1])] = doc[key][i]
item.append({'namespace': 'podaac', 'name': self._camelCaseStripHyphen(multiValuedKey), 'value': values})
'''
self._populateItem(solrResponse, doc, item)
self.items.append(item)
def _populateChannel(self, solrResponse):
pass
def _populateItem(self, solrResponse, doc, item):
pass
def _populateItemWithPodaacMetadata(self, doc, item, multiValuedElementsKeys):
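        # Hypothetical example: index-aligned Solr fields 'DatasetRegion-Region' and
        # 'DatasetRegion-Coverage' are regrouped into one 'datasetRegion' entry per
        # index, each carrying 'region' and 'coverage' child values.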
ignoreElementsEndingWith = ('-Full', '-Long')
multiValuedElements = defaultdict(list)
for docKey in doc.keys():
if docKey.startswith(multiValuedElementsKeys):
multiValuedElements[docKey.split('-', 1)[0]].append(docKey)
elif not docKey.endswith(ignoreElementsEndingWith):
if len(doc[docKey]) > 1:
item.append({'namespace': 'podaac', 'name': self._camelCaseStripHyphen(docKey), 'value': [{'namespace': 'podaac', 'name': 'value', 'value': x} for x in doc[docKey]]})
else:
item.append({'namespace': 'podaac', 'name': self._camelCaseStripHyphen(docKey), 'value': doc[docKey][0]})
for multiValuedKey in multiValuedElements:
for i, x in enumerate(doc[multiValuedElements[multiValuedKey][0]]):
values = []
for key in multiValuedElements[multiValuedKey]:
if not key.endswith(ignoreElementsEndingWith):
values.append({'namespace': 'podaac', 'name': self._camelCaseStripHyphen(key.split('-', 1)[1]), 'value': doc[key][i]})
item.append({'namespace': 'podaac', 'name': self._camelCaseStripHyphen(multiValuedKey), 'value': values})
def _camelCaseStripHyphen(self, key):
#special case to remove duplicate element, contact from element tag
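        # e.g. (hypothetical keys) 'Dataset-ShortName' -> 'datasetShortName',
        # 'DatasetContact-Contact-Email' -> 'datasetContactEmail'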
key = key.replace('-Element-', '', 1).replace('Contact-', '', 1)
return key[0].lower() + key[1:].replace('-', '')
| 56.925926
| 186
| 0.55823
|
258eca961c73efbf4547a667f0552972865d6671
| 325
|
py
|
Python
|
transactions/migrations/0002_remove_transaction_user.py
|
flyboy1565/fakebank
|
fbe5acfbfe4dbe38e6965f43f750882e6325cc65
|
[
"MIT"
] | 2
|
2021-01-07T23:48:03.000Z
|
2022-01-03T03:08:17.000Z
|
transactions/migrations/0002_remove_transaction_user.py
|
flyboy1565/fakebank
|
fbe5acfbfe4dbe38e6965f43f750882e6325cc65
|
[
"MIT"
] | 4
|
2020-06-05T23:44:37.000Z
|
2021-06-09T18:28:46.000Z
|
transactions/migrations/0002_remove_transaction_user.py
|
flyboy1565/fakebank
|
fbe5acfbfe4dbe38e6965f43f750882e6325cc65
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.5 on 2019-09-08 04:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('transactions', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='transaction',
name='user',
),
]
| 18.055556
| 47
| 0.587692
|
b6062fdec7a22665780e98ed6b9a73a647cc8342
| 2,054
|
py
|
Python
|
base/curve.py
|
JNMaree/pyFin
|
27fc03c912a722f0cf801eede969257f447638ab
|
[
"MIT"
] | null | null | null |
base/curve.py
|
JNMaree/pyFin
|
27fc03c912a722f0cf801eede969257f447638ab
|
[
"MIT"
] | null | null | null |
base/curve.py
|
JNMaree/pyFin
|
27fc03c912a722f0cf801eede969257f447638ab
|
[
"MIT"
] | null | null | null |
# Native Python modules
import datetime as dt
# External Modules
import matplotlib.pyplot as plt
# Local modules
from base.ir import IR
from base.interpolate import lerp
# Define the spot rates at certain specified dates of a curve
class Spot:
def __init__(self, d_tenor: dt.date, y_frac: float, inst: str, interest_rate: IR) -> None:
self.d_tenor = d_tenor
self.y_frac = y_frac
self.instrument = inst
self.rate = interest_rate
def __str__(self) -> str:
sret = f'{self.instrument :>20}:\t{self.d_tenor :%Y-%m-%d}\t{self.y_frac :>10.6f}\t{self.rate}'
return sret
# Define a curve by specifying a list of Spots
class Curve:
def __init__(self, spot_rates: list[Spot]) -> None:
self.spots = spot_rates
self.dates = [] # Define arrays of individual values for interpolation
self.rates = []
self.fracs = []
for s in self.spots:
self.dates.append(s.d_tenor)
self.rates.append(s.rate.NACC)
self.fracs.append(s.y_frac)
def __str__(self) -> str:
sret = f'Curve: {self.spots[0].d_tenor} >>> {self.spots[len(self.spots)-1].d_tenor}:\n'
for s in self.spots:
sret += f'{s}\n'
return sret
# Calculate the interest rate at the specified date on the curve by using linear interpolation
def rate_at_date(self, date) -> IR:
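        # e.g. (hypothetical) curve.rate_at_date(dt.date(2024, 6, 30)) interpolates by
        # tenor date, while curve.rate_at_date(0.5) interpolates by year fraction.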
if isinstance(date, (dt.date)):
return IR(lerp(self.dates, self.rates, date))
elif isinstance(date, (float)):
return IR(lerp(self.fracs, self.rates, date))
else:
raise TypeError(f'Incompatible Type<{type(date)}> specified for date!')
# Plot a rate-vs-time representation of the curve
def plot(self):
xv, yv = [], []
for s in self.spots:
xv.append(s.y_frac)
yv.append(s.rate.NACC * 100)
plt.plot(xv,yv)
plt.title('Curve')
plt.xlabel('Time (years)')
plt.ylabel('NACC rate (%)')
plt.show()
| 34.813559
| 103
| 0.599318
|
a49470d5b3fb927585ba4e7532c6aa74625d5dba
| 2,959
|
py
|
Python
|
Data Structures/Binary Tree.py
|
itsrohanvj/Data-Structures-Algorithms-in-Python
|
500bdb75e4f5fb4cbb409bb0fb5487ef36931efa
|
[
"MIT"
] | 1
|
2021-08-28T15:50:56.000Z
|
2021-08-28T15:50:56.000Z
|
Data Structures/Binary Tree.py
|
itsrohanvj/Data-Structures-Algorithms-in-Python
|
500bdb75e4f5fb4cbb409bb0fb5487ef36931efa
|
[
"MIT"
] | null | null | null |
Data Structures/Binary Tree.py
|
itsrohanvj/Data-Structures-Algorithms-in-Python
|
500bdb75e4f5fb4cbb409bb0fb5487ef36931efa
|
[
"MIT"
] | null | null | null |
#NODE DECLARATION
# THE COMPLETE CODE IS WRITTEN IN DETAILED WAY
class BinaryTree:
def __init__ (self, data):
self.data = data
self.left = None
self.right = None
#set data
def setData(self, data):
self.data = data
#get data
def getData(self):
return self.data
#get left child of a node
def getLeft(self):
return self.left
#get right child of a node
def getRight(self):
return self.right
    # number of nodes in the subtree rooted at this node
    def size(self):
        left_size = self.left.size() if self.left else 0
        right_size = self.right.size() if self.right else 0
        return 1 + left_size + right_size
    def isEmpty(self):
        return self.size() == 0
# USING QUEUE
class Queue:
def __init__(self, limit=1000):
self.que = []
self.limit = limit
self.front = None
self.rear = None
self.size = 0
def isEmpty(self):
        return self.size == 0
def enQueue(self, item):
if self.size >= self.limit:
print ('Queue Overflow!')
return
else:
self.que.append(item)
if self.front is None:
self.front = self.rear= 0
else:
self.rear = self.size
self.size += 1
return self.que
def deQueue(self):
if self.size>0:
p=self.que.pop(0)
self.size-=1
if self.size == 0:
self.front= self.rear =None
else:
self.rear = self.size-1
# print(p.data)
return p
# Insert using level order traversal
def insertInBinaryTreeUsingLevelOrder(root, data):
newNode = BinaryTree(data)
if root is None:
root = newNode
return root
q = Queue()
q.enQueue(root)
node = None
while not q.isEmpty():
node = q.deQueue()
if data== node.getData():
return root
if node.left is not None:
q.enQueue(node.left)
else:
node.left = newNode
return root
if node.right is not None:
q.enQueue(node.right)
else:
node.right = newNode
return root
#PRINTING VALUES IN THE TREE-LEVEL ORDER TRAVERSAL.
def levelOrder (root):
Q = Queue()
if(root == None):
return None
Q.enQueue(root)
while(not Q.isEmpty()):
temp = Q.deQueue()
if temp is None:
break
print (temp.data)
if(temp.left):
Q.enQueue(temp.left)
if(temp.right):
Q.enQueue(temp.right)
#CALLING FUNCTIONS AND INSERTING VALUES.
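# Expected output (one value per line): 10, 11, 9, 15, 8, 12, 225; each new value
# fills the first free child slot found breadth-first.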
root=BinaryTree(10)
root = insertInBinaryTreeUsingLevelOrder(root,11)
root = insertInBinaryTreeUsingLevelOrder(root,9)
root = insertInBinaryTreeUsingLevelOrder(root, 15)
root = insertInBinaryTreeUsingLevelOrder(root, 8)
root = insertInBinaryTreeUsingLevelOrder(root, 12)
root = insertInBinaryTreeUsingLevelOrder(root, 225)
levelOrder(root) #PRINTING VALUE
| 24.658333
| 52
| 0.551538
|
f4517716dc8751ee563de9fb2152212eed85c34f
| 186
|
py
|
Python
|
examples/new_word.py
|
smilelight/lightTEXT
|
b015d0e3524722fb5a8ee5ea83b7fbbd7408f797
|
[
"Apache-2.0"
] | 12
|
2020-01-26T09:16:21.000Z
|
2021-12-06T06:44:37.000Z
|
examples/new_word.py
|
smilelight/lightTEXT
|
b015d0e3524722fb5a8ee5ea83b7fbbd7408f797
|
[
"Apache-2.0"
] | null | null | null |
examples/new_word.py
|
smilelight/lightTEXT
|
b015d0e3524722fb5a8ee5ea83b7fbbd7408f797
|
[
"Apache-2.0"
] | 7
|
2020-04-30T00:37:32.000Z
|
2021-07-07T06:32:40.000Z
|
from lighttext import NewWordDetector
if __name__ == '__main__':
detector = NewWordDetector()
detector.load_file('new_word/test_new_word3.txt')
print(detector.get_top_k(5))
| 26.571429
| 53
| 0.752688
|
4a6eeba934475af3a4f467ce53248cecc01f1eee
| 4,222
|
py
|
Python
|
mc15.py
|
kmosiejczuk/minecraft-launchers
|
a0522a16569e9c4b3cf2c4dcc9ba2c5e31ce1182
|
[
"BSD-2-Clause"
] | 2
|
2021-02-08T13:54:02.000Z
|
2021-03-21T01:13:05.000Z
|
mc15.py
|
kmosiejczuk/minecraft-launchers
|
a0522a16569e9c4b3cf2c4dcc9ba2c5e31ce1182
|
[
"BSD-2-Clause"
] | null | null | null |
mc15.py
|
kmosiejczuk/minecraft-launchers
|
a0522a16569e9c4b3cf2c4dcc9ba2c5e31ce1182
|
[
"BSD-2-Clause"
] | 1
|
2021-05-26T23:25:00.000Z
|
2021-05-26T23:25:00.000Z
|
#!/usr/local/bin/python3 -u
import minecraft_launcher_lib as mll
import subprocess
# Minecraft version
mc_version = "1.15.2"
# The asset index is the same version string, just without the final revision
asset_index = "1.15"
# Your email, username and password below
login = "yourEmailUsername"
password = "seekritPasswordHere"
# Get Minecraft directory
mc_directory = mll.utils.get_minecraft_directory()
libdir = mc_directory + '/libraries/'
lwjgl3_libs = '/usr/local/share/lwjgl3/lwjgl.jar:' \
+ '/usr/local/share/lwjgl3/lwjgl-openal.jar:' \
+ '/usr/local/share/lwjgl3/lwjgl-opengl.jar:' \
+ '/usr/local/share/lwjgl3/lwjgl-glfw.jar:' \
+ '/usr/local/share/lwjgl3/lwjgl-stb.jar:' \
+ '/usr/local/share/lwjgl3/lwjgl-tinyfd.jar:' \
+ '/usr/local/share/lwjgl3/lwjgl-natives-openbsd.jar:' \
+ '/usr/local/share/lwjgl3/lwjgl-opengl-natives-openbsd.jar:' \
+ '/usr/local/share/lwjgl3/lwjgl-tinyfd-natives-openbsd.jar:' \
+ '/usr/local/share/lwjgl3/lwjgl-stb-natives-openbsd.jar:'
# Make sure the desired version of Minecraft is installed
print("Installing version " + mc_version + " if needed... ", end="")
mll.install.install_minecraft_version(mc_version,mc_directory)
print("Done")
# Login
print("Logging in... ", end="")
login_data = mll.account.login_user( login, password )
print("Done")
# Useful figuring out new minecraft versions
#Get Minecraft command
#options = {
# "username": login_data["selectedProfile"]["name"],
# "uuid": login_data["selectedProfile"]["id"],
# "token": login_data["accessToken"]
#}
#minecraft_command = mll.command.get_minecraft_command(mc_version,mc_directory,options)
#print(minecraft_command)
username = login_data["selectedProfile"]["name"]
uuid = login_data["selectedProfile"]["id"]
token = login_data["accessToken"]
real_command = [
'/usr/local/jdk-11/bin/java',
'-Xms1G',
'-Xmx2G',
'-Djava.library.path=/usr/local/share/lwjgl3/',
'-Dminecraft.launcher.brand=minecraft-launcher-lib',
'-Dminecraft.launcher.version=2.1',
'-cp',
libdir + 'com/mojang/patchy/1.1/patchy-1.1.jar:'
+ libdir + 'oshi-project/oshi-core/1.1/oshi-core-1.1.jar:'
+ libdir + 'net/java/dev/jna/jna/4.4.0/jna-4.4.0.jar:'
+ libdir + 'net/java/dev/jna/platform/3.4.0/platform-3.4.0.jar:'
+ libdir + 'com/ibm/icu/icu4j-core-mojang/51.2/icu4j-core-mojang-51.2.jar:'
+ libdir + 'com/mojang/javabridge/1.0.22/javabridge-1.0.22.jar:'
+ libdir + 'net/sf/jopt-simple/jopt-simple/5.0.3/jopt-simple-5.0.3.jar:'
+ libdir + 'io/netty/netty-all/4.1.25.Final/netty-all-4.1.25.Final.jar:'
+ libdir + 'com/google/guava/guava/21.0/guava-21.0.jar:'
+ libdir + 'org/apache/commons/commons-lang3/3.5/commons-lang3-3.5.jar:'
+ libdir + 'commons-io/commons-io/2.5/commons-io-2.5.jar:'
+ libdir + 'commons-codec/commons-codec/1.10/commons-codec-1.10.jar:'
+ libdir + 'net/java/jinput/jinput/2.0.5/jinput-2.0.5.jar:'
+ libdir + 'net/java/jutils/jutils/1.0.0/jutils-1.0.0.jar:'
+ libdir + 'com/mojang/brigadier/1.0.17/brigadier-1.0.17.jar:'
+ libdir + 'com/mojang/datafixerupper/2.0.24/datafixerupper-2.0.24.jar:'
+ libdir + 'com/google/code/gson/gson/2.8.0/gson-2.8.0.jar:'
+ libdir + 'com/mojang/authlib/1.5.25/authlib-1.5.25.jar:'
+ libdir + 'org/apache/commons/commons-compress/1.8.1/commons-compress-1.8.1.jar:'
+ libdir + 'org/apache/httpcomponents/httpclient/4.3.3/httpclient-4.3.3.jar:'
+ libdir + 'commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:'
+ libdir + 'org/apache/httpcomponents/httpcore/4.3.2/httpcore-4.3.2.jar:'
+ libdir + 'it/unimi/dsi/fastutil/8.2.1/fastutil-8.2.1.jar:'
+ libdir + 'org/apache/logging/log4j/log4j-api/2.8.1/log4j-api-2.8.1.jar:'
+ libdir + 'org/apache/logging/log4j/log4j-core/2.8.1/log4j-core-2.8.1.jar:'
+ lwjgl3_libs
+ libdir + 'com/mojang/text2speech/1.11.3/text2speech-1.11.3.jar:'
+ mc_directory + '/versions/' + mc_version + '/' + mc_version + '.jar',
'net.minecraft.client.main.Main',
'--username', username,
'--version', mc_version,
'--gameDir', mc_directory,
'--assetsDir', mc_directory + '/assets',
'--assetIndex', asset_index,
'--uuid', uuid,
'--accessToken', token,
'--userType', 'mojang',
'--versionType', 'release'
]
# Start Minecraft
subprocess.call(real_command)
| 39.092593
| 87
| 0.692563
|
61d6e78300f0a9d31291a85f250ca629b49d6b1b
| 1,702
|
py
|
Python
|
logic.py
|
JeanRibes/ppc-freakout
|
f3dd5c3c20f226e8398915411e51a41850585758
|
[
"MIT"
] | null | null | null |
logic.py
|
JeanRibes/ppc-freakout
|
f3dd5c3c20f226e8398915411e51a41850585758
|
[
"MIT"
] | null | null | null |
logic.py
|
JeanRibes/ppc-freakout
|
f3dd5c3c20f226e8398915411e51a41850585758
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from queue import Queue
from data import *
from random import random
from copy import deepcopy
def generate_pile_random(N=20, max_value=9):
pile = Pile()
for _ in range(N):
value = int(random() * max_value) + 1
color = random() > 0.5
pile.append(Card(color, value))
return pile
def generate_pile_fixed(max_value):
pile = [Card(True, i) for i in range(1, max_value + 1)]
pile.extend(
[Card(False, i) for i in range(1, max_value + 1)]
)
return pile
def generate_pile(N, max_value):
#return generate_pile_fixed(max_value)
pile = generate_pile_random(N, max_value)
pile.extend(
        generate_pile_fixed(max_value))  # all possible cards plus a number of random cards
return pile
def shuffle(pile):
indexes = []
inp = deepcopy(pile)
for i, _ in enumerate(inp):
indexes.append(i)
out = []
while len(inp) > 0:
out.append(inp.pop(indexes[int(random() * len(inp))]))
return out
def move_valid(board:Board, card:Card):
ret = False
for cards in board.values():
for card_on_board in cards:
ret = True if card_on_board // card else ret
break
return ret
def broadcast(queues, item):
for q in queues:
q.put(item, block=False)
def flush(queue: Queue):
"""
    thanks, StackOverflow
:param queue:
:return:
"""
while not queue.empty():
queue.get()
if __name__ == '__main__':
cartes = generate_pile(5, 8)
print(List(cartes))
shuffled = List(shuffle(cartes))
print(shuffled)
for i in cartes:
if i not in shuffled:
            print('error')
| 23
| 105
| 0.619859
|
a7f08a1d39146f627144ad0eaa4265a6db15cd82
| 146
|
py
|
Python
|
clase 6/clases/animales/__init__.py
|
jbian/computacion_para_ingenieria
|
492c90c0a53cf02f22f6b9f66e65a700bd09e916
|
[
"Apache-2.0"
] | 2
|
2022-02-11T21:43:28.000Z
|
2022-02-13T00:51:51.000Z
|
clase 6/clases/animales/__init__.py
|
jbian/computacion_para_ingenieria
|
492c90c0a53cf02f22f6b9f66e65a700bd09e916
|
[
"Apache-2.0"
] | null | null | null |
clase 6/clases/animales/__init__.py
|
jbian/computacion_para_ingenieria
|
492c90c0a53cf02f22f6b9f66e65a700bd09e916
|
[
"Apache-2.0"
] | 1
|
2022-01-31T15:04:31.000Z
|
2022-01-31T15:04:31.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 12 07:04:05 2022
@author: Diesel soft server
"""
from Perro import Perro
from Abeja import Abeja
| 14.6
| 35
| 0.678082
|
86d200cb96f6d7f7a1ac6f69a79f6f3f1f4eb87a
| 5,304
|
py
|
Python
|
lab1/main.py
|
RedCuckoo/computer-graphics-labs
|
991ce6e10ca556e17d8450d54ec9080eb9045032
|
[
"MIT"
] | null | null | null |
lab1/main.py
|
RedCuckoo/computer-graphics-labs
|
991ce6e10ca556e17d8450d54ec9080eb9045032
|
[
"MIT"
] | null | null | null |
lab1/main.py
|
RedCuckoo/computer-graphics-labs
|
991ce6e10ca556e17d8450d54ec9080eb9045032
|
[
"MIT"
] | null | null | null |
from graphics import *
from sympy.geometry import Point2D as syPoint
from sympy.geometry import Segment as sySegment
from sympy.geometry import Line as syLine
width = 400
height = 400
def generate_line_turned(point, intersection_point):
global width
return syLine(syPoint(point.x, point.y), syPoint(width, intersection_point.y + 1))
def does_intersect(line, line_to_intersect):
intersection_list = line.intersect(line_to_intersect)
return len(intersection_list) != 0
def equals_with_precision(point1, point2):
return abs(point1.x - point2.x) < 1 and abs(point1.y - point2.y) < 1
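# Point-in-polygon test below uses ray casting: shoot a horizontal segment from the
# point to the right edge of the window and count polygon-edge crossings; an odd
# count means the point is inside. Vertex hits are disambiguated with a tilted line.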
def contains(polygonPoints, point):
global width
prev_point = syPoint(0, 0)
check_line = sySegment(syPoint(point.x, point.y), syPoint(width, point.y))
counter = 0
for i in range(len(polygonPoints)):
p = syPoint(polygonPoints[i].x, polygonPoints[i].y)
if prev_point == syPoint(0, 0):
prev_point = p
continue
side = sySegment(syPoint(prev_point.x, prev_point.y), p)
intersects = does_intersect(side, check_line)
if intersects:
intersection_point = side.intersection(check_line)[0]
if intersection_point != prev_point and intersection_point != p:
counter += 1
# elif intersection_point == prev_point and intersection_point == p:
# continue
elif intersection_point == p:
new_line = generate_line_turned(point, p)
new_intersects1 = does_intersect(new_line, side)
new_intersects2 = does_intersect(new_line,
sySegment(p,
syPoint(polygonPoints[(i + 1) % len(polygonPoints)].x,
polygonPoints[(i + 1) % len(polygonPoints)].y)))
if new_intersects1 and new_intersects2 or not new_intersects1 and not new_intersects2:
continue
else:
counter += 1
if i == len(polygonPoints) - 1:
first_side = sySegment(p, syPoint(polygonPoints[0].x, polygonPoints[0].y))
first_intersects = does_intersect(first_side, check_line)
if first_intersects:
intersection_point = first_side.intersection(check_line)[0]
if intersection_point != prev_point and not equals_with_precision(intersection_point, polygonPoints[0]):
counter += 1
# elif intersection_point == prev_point and equals_with_precision(intersection_point, polygonPoints[0]):
# continue
elif equals_with_precision(intersection_point, polygonPoints[0]):
new_line = generate_line_turned(point, intersection_point)
new_intersects1 = does_intersect(new_line, first_side)
new_intersects2 = \
does_intersect(new_line,
sySegment(syPoint(polygonPoints[0].x, polygonPoints[0].y),
syPoint(polygonPoints[1].x, polygonPoints[1].y)))
if new_intersects1 and new_intersects2 or not new_intersects1 and not new_intersects2:
continue
else:
counter += 1
prev_point = p
if counter % 2 == 1:
return True
else:
return False
def draw_point_after_double_click(point):
if draw_point_after_double_click.prevPoint is not None:
draw_point_after_double_click.prevPoint.undraw()
draw_point_after_double_click.prevPoint = draw_point(point)
draw_point_after_double_click.prevPoint = None
def draw_point(point):
global win
p = Point(point.x, point.y)
p.draw(win)
return p
def on_click(point):
global win
global eps
global width
global result_text
if on_click.previousPoint == 0:
on_click.previousPoint = point
elif on_click.finished:
draw_point_after_double_click(point)
result_text.undraw()
if contains(on_click.points, draw_point_after_double_click.prevPoint):
result_text.setText("Inside polygon")
result_text.draw(win)
else:
result_text.setText("Outside polygon")
result_text.draw(win)
return
elif abs(point.x - on_click.previousPoint.x) < eps or abs(point.y - on_click.previousPoint.y) < eps:
polygon = Polygon(on_click.points)
polygon.draw(win)
polygon.setFill('red')
on_click.finished = True
return
p = draw_point(point)
on_click.points.append(p)
on_click.previousPoint = p
on_click.previousPoint = 0
on_click.points = []
on_click.finished = False
def main():
global win
global width
global height
global result_text
win = GraphWin("Lab 1 : detecting if point belongs to polygon", width, height)
text = Text(Point(width/2, 10), "Click for dot, double click to finish polygon")
text.draw(win)
win.setMouseHandler(on_click)
win.getKey()
win.close()
result_text = Text(Point(width / 2, 30), "")
win = None
eps = 2
main()
| 32.740741
| 120
| 0.613311
|
a003e8312b7a2f1570e566146368c4a07f4c6a23
| 2,534
|
py
|
Python
|
integration_tests/test_complex.py
|
akshanshbhatt/lpython
|
70fef49dbbb6cbb0447f7013231171e5c8b8e5df
|
[
"BSD-3-Clause"
] | 31
|
2022-01-07T23:56:33.000Z
|
2022-03-29T16:09:02.000Z
|
integration_tests/test_complex.py
|
akshanshbhatt/lpython
|
70fef49dbbb6cbb0447f7013231171e5c8b8e5df
|
[
"BSD-3-Clause"
] | 197
|
2021-12-29T19:01:41.000Z
|
2022-03-31T15:58:25.000Z
|
integration_tests/test_complex.py
|
akshanshbhatt/lpython
|
70fef49dbbb6cbb0447f7013231171e5c8b8e5df
|
[
"BSD-3-Clause"
] | 17
|
2022-01-06T15:34:36.000Z
|
2022-03-31T13:55:33.000Z
|
from ltypes import i32, i64, f32, f64, c32, c64
def test_real_imag():
x: c64
x = 2 + 3j
a: f64
b: f64
eps: f64
eps = 1e-12
a = x.real
b = x.imag
assert abs(a - 2) < eps
assert abs(b - 3) < eps
def test_complex():
x: c64
x = complex(4.5, 6.7)
eps: f64
eps = 1e-12
assert abs(x.real - 4.5) < eps
assert abs(x.imag - 6.7) < eps
x = complex(-4, 2)
assert abs(x.real - (-4.0)) < eps
assert abs(x.imag - 2.0) < eps
x = complex(4, 7.89)
assert abs(x.real - 4.0) < eps
assert abs(x.imag - 7.89) < eps
x = complex(5.6, 0)
assert abs(x.real - 5.6) < eps
assert abs(x.imag - 0.0) < eps
a: f64
a = 534.6
x = complex(a, -a) # (f64, f64)
assert abs(x.real - 534.60000000000002274) < eps
assert abs(x.imag - (-534.60000000000002274)) < eps
a2: f32
a2 = -423.5430806348152437
a3: f32
a3 = 34.5
x2: c32
x2 = complex(a2, a3) # (f32, f32)
assert abs(x2.imag - 34.5) < eps
i1: i32
i1 = -5
i2: i64
i2 = -6
x = complex(a3, a) # (f32, f64)
x = complex(a, a3) # (f64, f32)
x = complex(i1, i2) # (i32, i64)
x = complex(i1, -i1) # (i32, i32)
x = complex(-i2, -i2) # (i64, i64)
x = complex(i2, -i1) # (i64, i32)
def test_complex_abs():
x: c32
x = complex(3, 4)
eps: f64
eps = 1e-12
assert abs(abs(x) - 5.0) < eps
y: c64
y = complex(6, 8)
assert abs(abs(y) - 10.0) < eps
def test_complex_binop_32():
x: c32
y: c32
z: c32
x = 2 + 3j
y = 4 + 5j
z = x + y
z = x - y
z = x * y
# TODO:
#z = x / y
z = x ** y
def test_complex_binop_64():
x: c64
y: c64
z: c64
x = 2 + 3j
y = 4 + 5j
z = x + y
z = x - y
z = x * y
# TODO:
#z = x / y
z = x ** y
def test_complex_unary_minus():
c: c32
c = complex(3, 4.5)
_c: c32
_c = -c
assert abs(_c.real - (-3.0)) < 1e-12
assert abs(_c.imag - (-4.5)) < 1e-12
_c = complex(5, -78)
_c = -_c
assert abs(_c.real - (-5.0)) < 1e-12
assert abs(_c.imag - 78.0) < 1e-12
c2: c64
c2 = complex(-4.5, -7.8)
c2 = -c2
assert abs(c2.real - 4.5) < 1e-12
assert abs(c2.imag - 7.8) < 1e-12
c2 = 3+4j
c2 = -c2
assert abs(c2.real - (-3.0)) < 1e-12
assert abs(c2.imag - (-4.0)) < 1e-12
def check():
test_real_imag()
test_complex()
test_complex_abs()
test_complex_binop_32()
test_complex_binop_64()
test_complex_unary_minus()
check()
| 19.492308
| 55
| 0.498027
|
2a03b3789ff38b06ada5c0ba0599c9f8592b4819
| 7,335
|
py
|
Python
|
rllib/tests/test_model_imports.py
|
ChaceAshcraft/ray
|
a72237f1712e2805f6799de3489e326e2965d624
|
[
"Apache-2.0"
] | 3
|
2021-06-22T19:57:41.000Z
|
2021-06-23T07:16:44.000Z
|
rllib/tests/test_model_imports.py
|
ChaceAshcraft/ray
|
a72237f1712e2805f6799de3489e326e2965d624
|
[
"Apache-2.0"
] | 72
|
2021-02-06T08:07:16.000Z
|
2022-03-26T07:17:49.000Z
|
rllib/tests/test_model_imports.py
|
ChaceAshcraft/ray
|
a72237f1712e2805f6799de3489e326e2965d624
|
[
"Apache-2.0"
] | 2
|
2021-05-05T21:05:16.000Z
|
2021-06-22T21:16:03.000Z
|
#!/usr/bin/env python
import h5py
import numpy as np
from pathlib import Path
import unittest
import ray
from ray.rllib.agents.registry import get_agent_class
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.tf.misc import normc_initializer
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.test_utils import check, framework_iterator
tf1, tf, tfv = try_import_tf()
torch, nn = try_import_torch()
class MyKerasModel(TFModelV2):
"""Custom model for policy gradient algorithms."""
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
super(MyKerasModel, self).__init__(obs_space, action_space,
num_outputs, model_config, name)
self.inputs = tf.keras.layers.Input(
shape=obs_space.shape, name="observations")
layer_1 = tf.keras.layers.Dense(
16,
name="layer1",
activation=tf.nn.relu,
kernel_initializer=normc_initializer(1.0))(self.inputs)
layer_out = tf.keras.layers.Dense(
num_outputs,
name="out",
activation=None,
kernel_initializer=normc_initializer(0.01))(layer_1)
if self.model_config["vf_share_layers"]:
value_out = tf.keras.layers.Dense(
1,
name="value",
activation=None,
kernel_initializer=normc_initializer(0.01))(layer_1)
self.base_model = tf.keras.Model(self.inputs,
[layer_out, value_out])
else:
self.base_model = tf.keras.Model(self.inputs, layer_out)
def forward(self, input_dict, state, seq_lens):
if self.model_config["vf_share_layers"]:
model_out, self._value_out = self.base_model(input_dict["obs"])
else:
model_out = self.base_model(input_dict["obs"])
self._value_out = tf.zeros(
shape=(tf.shape(input_dict["obs"])[0], ))
return model_out, state
def value_function(self):
return tf.reshape(self._value_out, [-1])
def import_from_h5(self, import_file):
# Override this to define custom weight loading behavior from h5 files.
self.base_model.load_weights(import_file)
class MyTorchModel(TorchModelV2, nn.Module):
"""Generic vision network."""
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
model_config, name)
nn.Module.__init__(self)
self.device = torch.device("cuda"
if torch.cuda.is_available() else "cpu")
self.layer_1 = nn.Linear(obs_space.shape[0], 16).to(self.device)
self.layer_out = nn.Linear(16, num_outputs).to(self.device)
self.value_branch = nn.Linear(16, 1).to(self.device)
self.cur_value = None
def forward(self, input_dict, state, seq_lens):
layer_1_out = self.layer_1(input_dict["obs"])
logits = self.layer_out(layer_1_out)
self.cur_value = self.value_branch(layer_1_out).squeeze(1)
return logits, state
def value_function(self):
assert self.cur_value is not None, "Must call `forward()` first!"
return self.cur_value
def import_from_h5(self, import_file):
# Override this to define custom weight loading behavior from h5 files.
        f = h5py.File(import_file, "r")
self.layer_1.load_state_dict({
"weight": torch.Tensor(
np.transpose(f["layer1"][DEFAULT_POLICY_ID]["layer1"][
"kernel:0"].value)),
"bias": torch.Tensor(
np.transpose(
f["layer1"][DEFAULT_POLICY_ID]["layer1"]["bias:0"].value)),
})
self.layer_out.load_state_dict({
"weight": torch.Tensor(
np.transpose(
f["out"][DEFAULT_POLICY_ID]["out"]["kernel:0"].value)),
"bias": torch.Tensor(
np.transpose(
f["out"][DEFAULT_POLICY_ID]["out"]["bias:0"].value)),
})
self.value_branch.load_state_dict({
"weight": torch.Tensor(
np.transpose(
f["value"][DEFAULT_POLICY_ID]["value"]["kernel:0"].value)),
"bias": torch.Tensor(
np.transpose(
f["value"][DEFAULT_POLICY_ID]["value"]["bias:0"].value)),
})
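# Note (added, hedged): import_from_h5 above expects the HDF5 file to contain
# groups of the form f["<layer>"][DEFAULT_POLICY_ID]["<layer>"]["kernel:0" / "bias:0"]
# for the layers named "layer1", "out" and "value"; if weights.h5 were exported
# with a different layout, those group paths would need to change accordingly.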
def model_import_test(algo, config, env):
# Get the abs-path to use (bazel-friendly).
rllib_dir = Path(__file__).parent.parent
import_file = str(rllib_dir) + "/tests/data/model_weights/weights.h5"
agent_cls = get_agent_class(algo)
for fw in framework_iterator(config, ["tf", "torch"]):
config["model"]["custom_model"] = "keras_model" if fw != "torch" else \
"torch_model"
agent = agent_cls(config, env)
def current_weight(agent):
if fw == "tf":
return agent.get_weights()[DEFAULT_POLICY_ID][
"default_policy/value/kernel"][0]
elif fw == "torch":
return float(agent.get_weights()[DEFAULT_POLICY_ID][
"value_branch.weight"][0][0])
else:
return agent.get_weights()[DEFAULT_POLICY_ID][4][0]
# Import weights for our custom model from an h5 file.
weight_before_import = current_weight(agent)
agent.import_model(import_file=import_file)
weight_after_import = current_weight(agent)
check(weight_before_import, weight_after_import, false=True)
# Train for a while.
for _ in range(1):
agent.train()
weight_after_train = current_weight(agent)
# Weights should have changed.
check(weight_before_import, weight_after_train, false=True)
check(weight_after_import, weight_after_train, false=True)
# We can save the entire Agent and restore, weights should remain the
# same.
file = agent.save("after_train")
check(weight_after_train, current_weight(agent))
agent.restore(file)
check(weight_after_train, current_weight(agent))
# Import (untrained) weights again.
agent.import_model(import_file=import_file)
check(current_weight(agent), weight_after_import)
class TestModelImport(unittest.TestCase):
def setUp(self):
ray.init()
ModelCatalog.register_custom_model("keras_model", MyKerasModel)
ModelCatalog.register_custom_model("torch_model", MyTorchModel)
def tearDown(self):
ray.shutdown()
def test_ppo(self):
model_import_test(
"PPO",
config={
"num_workers": 0,
"model": {
"vf_share_layers": True,
},
},
env="CartPole-v0")
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| 36.859296
| 79
| 0.608725
|
ba9cb4bd90ed03146a38cf3328cbc324b6e3a6f3
| 3,213
|
py
|
Python
|
src/main/python/tests/test_matrix_aggregations.py
|
escher-m/systemds
|
6dea896dc0db29c07bfcd24b73a7d37f91b59620
|
[
"Apache-2.0"
] | 1
|
2020-06-04T11:53:14.000Z
|
2020-06-04T11:53:14.000Z
|
src/main/python/tests/test_matrix_aggregations.py
|
escher-m/systemds
|
6dea896dc0db29c07bfcd24b73a7d37f91b59620
|
[
"Apache-2.0"
] | 1
|
2020-06-07T15:47:24.000Z
|
2020-06-07T15:47:24.000Z
|
src/main/python/tests/test_matrix_aggregations.py
|
t201/systemds
|
b66a3c006ce6a3a888653e2d1accec479cc756fd
|
[
"Apache-2.0"
] | null | null | null |
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Make the `systemds` package importable
import os
import sys
import warnings
import unittest
import numpy as np
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
sys.path.insert(0, path)
from systemds.context import SystemDSContext
dim = 5
m1 = np.array(np.random.randint(100, size=dim * dim) + 1.01, dtype=np.double)
m1.shape = (dim, dim)
m2 = np.array(np.random.randint(5, size=dim * dim) + 1, dtype=np.double)
m2.shape = (dim, dim)
sds = SystemDSContext()
class TestMatrixAggFn(unittest.TestCase):
def setUp(self):
warnings.filterwarnings(action="ignore",
message="unclosed",
category=ResourceWarning)
def tearDown(self):
warnings.filterwarnings(action="ignore",
message="unclosed",
category=ResourceWarning)
def test_sum1(self):
self.assertTrue(np.allclose(sds.matrix(m1).sum().compute(), m1.sum()))
def test_sum2(self):
self.assertTrue(np.allclose(sds.matrix(m1).sum(axis=0).compute(), m1.sum(axis=0)))
def test_sum3(self):
self.assertTrue(np.allclose(sds.matrix(m1).sum(axis=1).compute(), m1.sum(axis=1).reshape(dim, 1)))
def test_mean1(self):
self.assertTrue(np.allclose(sds.matrix(m1).mean().compute(), m1.mean()))
def test_mean2(self):
self.assertTrue(np.allclose(sds.matrix(m1).mean(axis=0).compute(), m1.mean(axis=0)))
def test_mean3(self):
self.assertTrue(np.allclose(sds.matrix(m1).mean(axis=1).compute(), m1.mean(axis=1).reshape(dim, 1)))
def test_full(self):
self.assertTrue(np.allclose(sds.full((2, 3), 10.1).compute(), np.full((2, 3), 10.1)))
def test_seq(self):
self.assertTrue(np.allclose(sds.seq(3).compute(), np.arange(4).reshape(4, 1)))
def test_var1(self):
self.assertTrue(np.allclose(sds.matrix(m1).var().compute(), m1.var(ddof=1)))
def test_var2(self):
self.assertTrue(np.allclose(sds.matrix(m1).var(axis=0).compute(), m1.var(axis=0, ddof=1)))
def test_var3(self):
self.assertTrue(np.allclose(sds.matrix(m1).var(axis=1).compute(), m1.var(axis=1, ddof=1).reshape(dim, 1)))
if __name__ == "__main__":
unittest.main(exit=False)
sds.close()
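# Minimal usage sketch (hedged; mirrors only calls already exercised above).
# SystemDS expressions are lazy: nothing runs until .compute() is called,
# which is why every assertion above ends in .compute().
#
#   ctx = SystemDSContext()
#   col_sums = ctx.matrix(m1).sum(axis=0).compute()   # equivalent to m1.sum(axis=0)
#   ctx.close()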
| 35.307692
| 114
| 0.641768
|
28c7fbcf7042ecfd0ee49efcef8c0a2c67f7b867
| 750
|
py
|
Python
|
tests/test_very_active_minutes.py
|
mrphil007/fitbit-to-sqlite
|
4d3251b21d06535a42b1b6dad47ded8d91085a14
|
[
"Apache-2.0"
] | 10
|
2020-09-10T16:54:42.000Z
|
2021-12-16T10:16:50.000Z
|
tests/test_very_active_minutes.py
|
mrphil007/fitbit-to-sqlite
|
4d3251b21d06535a42b1b6dad47ded8d91085a14
|
[
"Apache-2.0"
] | null | null | null |
tests/test_very_active_minutes.py
|
mrphil007/fitbit-to-sqlite
|
4d3251b21d06535a42b1b6dad47ded8d91085a14
|
[
"Apache-2.0"
] | null | null | null |
from fitbit_to_sqlite.utils import save_very_active_minutes
import pathlib
import sqlite_utils
from .utils import create_zip
def test_very_active_minutes():
zf = create_zip()
db = sqlite_utils.Database(memory=True)
very_active_minutes = [
f.filename for f in zf.filelist if "very_active" in f.filename
]
save_very_active_minutes(db, zf, very_active_minutes)
very_active_minutes = list(sorted(db["very_active_minutes"].rows, key=lambda r: r["date"]))
assert [
{
"date": "2018-01-01",
"value": 33
},
{
"date": "2018-01-02",
"value": 59
},
{
"date": "2018-01-03",
"value": 42
}
] == very_active_minutes
| 25.862069
| 95
| 0.597333
|
45f5bd0ff1a1905a3e83607a61398e8182571556
| 4,983
|
py
|
Python
|
app/user/tests/test_user_api.py
|
rodnaskorn/recipe-app-api
|
a3d16d77ef51ea39d9ac433772de99f29e2fc1cd
|
[
"MIT"
] | null | null | null |
app/user/tests/test_user_api.py
|
rodnaskorn/recipe-app-api
|
a3d16d77ef51ea39d9ac433772de99f29e2fc1cd
|
[
"MIT"
] | null | null | null |
app/user/tests/test_user_api.py
|
rodnaskorn/recipe-app-api
|
a3d16d77ef51ea39d9ac433772de99f29e2fc1cd
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
"""Helper function to create new user"""
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (Public)"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
"""Test creating user with a valid payload is successful"""
payload = {
'email': 'test@test.com',
'password': 'test123',
'name': 'john doe',
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""Test creating a user that already exists fails"""
payload = {
'email': 'test@test.com',
'password': 'test123',
'name': 'john doe'
}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test that password must be more than 5 characters"""
payload = {
'email': 'test@test.com',
'password': 'pw',
'name': 'john doe'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that a token is created for the user"""
payload = {'email': 'test@test.com', 'password': 'test123'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
"""Test that token is not created if invalid credentials are given"""
create_user(email='test@test.com', password='test123')
payload = {'email': 'test@test.com', 'password': 'wrong'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test that token is not created if user doesn't exist"""
payload = {'email': 'test@test.com', 'password': 'test123'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_user_missing_field(self):
"""Test that email and password are required"""
res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
"""Test that authentication is required for users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
"""Test API requests that require authentication"""
def setUp(self):
self.user = create_user(
email='test@test.com',
password='test123',
name='john doe'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email,
})
def test_post_me_not_allowed(self):
"""Test that POST is not allowed on the me URL"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""Test updating the user profile for authenticated user"""
payload = {'name': 'new name', 'password': 'newpassword'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
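# Hedged sketch of the URL configuration these tests assume; the view class
# names are illustrative placeholders, only the namespace and route names
# ('user:create', 'user:token', 'user:me') come from the tests above.
#
#   app_name = 'user'
#   urlpatterns = [
#       path('create/', CreateUserView.as_view(), name='create'),
#       path('token/', CreateTokenView.as_view(), name='token'),
#       path('me/', ManageUserView.as_view(), name='me'),
#   ]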
| 34.846154
| 77
| 0.644993
|
3d1afaeb2273e2a70b478f18a676e59d0696cf4c
| 8,220
|
py
|
Python
|
model/__init__.py
|
daleroberts/datacube-2nd-order-stats
|
a37ca0e97079e9f5f922c5beb5eec2f10895c6fe
|
[
"Apache-2.0"
] | 9
|
2019-05-23T16:48:21.000Z
|
2021-02-06T23:55:46.000Z
|
model/__init__.py
|
daleroberts/datacube-2nd-order-stats
|
a37ca0e97079e9f5f922c5beb5eec2f10895c6fe
|
[
"Apache-2.0"
] | null | null | null |
model/__init__.py
|
daleroberts/datacube-2nd-order-stats
|
a37ca0e97079e9f5f922c5beb5eec2f10895c6fe
|
[
"Apache-2.0"
] | 1
|
2021-02-06T23:56:20.000Z
|
2021-02-06T23:56:20.000Z
|
import logging
import numpy as np
import xarray as xr
from datacube.model import Measurement
from datacube_stats.statistics import Statistic
from copy import copy
from .fast import smad, emad, bcmad, geomedian
LOG = logging.getLogger(__name__)
def sizefmt(num, suffix="B"):
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, "Yi", suffix)
class CosineDistanceMAD(Statistic):
def __init__(self, num_threads=3):
super().__init__()
self.num_threads = num_threads
LOG.info("num_threads: %i", num_threads)
def compute(self, data: xr.Dataset) -> xr.Dataset:
squashed = data.to_array().transpose("y", "x", "time", "variable")
fdata = squashed.data.astype(np.float32) / 10000.
fdata[(squashed.data == -999)] = np.nan
fdata[(squashed.data == 0)] = np.nan
del squashed
LOG.info("Data array size: %s", sizefmt(fdata.nbytes))
mask = np.isnan(fdata).any(axis=2)
ndepth = np.count_nonzero(mask, axis=-1)
mindepth, mediandepth, maxdepth = np.min(ndepth), np.median(ndepth), np.max(ndepth)
LOG.info("data mindepth: %s maxdepth: %s mediandepth: %s", mindepth, maxdepth, mediandepth)
LOG.info("Computing geometric median mosaic")
gm = geomedian(fdata, num_threads=self.num_threads)
LOG.info("Computing spectral MAD mosaic")
dev = smad(fdata, gm, num_threads=self.num_threads)
da = xr.DataArray(dev, dims=("y", "x"), name="dev")
return xr.Dataset(data_vars={"dev": da})
def measurements(self, m):
mm = [Measurement(name="dev", dtype="float32", nodata=0, units="1")]
LOG.debug("Returning measurements: %s", mm)
return mm
class EuclideanDistanceMAD(Statistic):
def __init__(self, num_threads=3):
super().__init__()
self.num_threads = num_threads
LOG.info("num_threads: %i", num_threads)
def compute(self, data: xr.Dataset) -> xr.Dataset:
squashed = data.to_array().transpose("y", "x", "time", "variable")
fdata = squashed.data.astype(np.float32) / 10000.
fdata[(squashed.data == -999)] = np.nan
fdata[(squashed.data == 0)] = np.nan
del squashed
LOG.info("Data array size: %s", sizefmt(fdata.nbytes))
mask = np.isnan(fdata).any(axis=2)
ndepth = np.count_nonzero(mask, axis=-1)
mindepth, mediandepth, maxdepth = np.min(ndepth), np.median(ndepth), np.max(ndepth)
LOG.info("data mindepth: %s maxdepth: %s mediandepth: %s", mindepth, maxdepth, mediandepth)
LOG.info("Computing geometric median mosaic")
gm = geomedian(fdata, num_threads=self.num_threads)
LOG.info("Computing spectral MAD mosaic")
dev = emad(fdata, gm, num_threads=self.num_threads)
da = xr.DataArray(dev, dims=("y", "x"), name="dev")
return xr.Dataset(data_vars={"dev": da})
def measurements(self, m):
mm = [Measurement(name="dev", dtype="float32", nodata=0, units="1")]
LOG.debug("Returning measurements: %s", mm)
return mm
class BrayCurtisDistanceMAD(Statistic):
def __init__(self, num_threads=3):
super().__init__()
self.num_threads = num_threads
LOG.info("num_threads: %i", num_threads)
def compute(self, data: xr.Dataset) -> xr.Dataset:
squashed = data.to_array().transpose("y", "x", "time", "variable")
fdata = squashed.data.astype(np.float32) / 10000.
fdata[(squashed.data == -999)] = np.nan
fdata[(squashed.data == 0)] = np.nan
del squashed
LOG.info("Data array size: %s", sizefmt(fdata.nbytes))
mask = np.isnan(fdata).any(axis=2)
ndepth = np.count_nonzero(mask, axis=-1)
mindepth, mediandepth, maxdepth = np.min(ndepth), np.median(ndepth), np.max(ndepth)
LOG.info("data mindepth: %s maxdepth: %s mediandepth: %s", mindepth, maxdepth, mediandepth)
LOG.info("Computing geometric median mosaic")
gm = geomedian(fdata, num_threads=self.num_threads)
LOG.info("Computing Bray Curtis distance MAD mosaic")
dev = bcmad(fdata, gm, num_threads=self.num_threads)
da = xr.DataArray(dev, dims=("y", "x"), name="dev")
return xr.Dataset(data_vars={"dev": da})
def measurements(self, m):
mm = [Measurement(name="dev", dtype="float32", nodata=0, units="1")]
LOG.debug("Returning measurements: %s", mm)
return mm
class TernaryMAD(Statistic):
def __init__(self, num_threads=3):
super().__init__()
self.num_threads = num_threads
LOG.info("num_threads: %i", num_threads)
def compute_on_array(self, data: np.array) -> np.array:
np.seterr(all="ignore")
LOG.info("Data array size: %s dimensions: %s", sizefmt(data.nbytes), data.shape)
mask = np.isnan(data).any(axis=2)
ndepth = np.count_nonzero(mask, axis=-1)
mindepth, mediandepth, maxdepth = np.min(ndepth), np.median(ndepth), np.max(ndepth)
LOG.info("data mindepth: %s maxdepth: %s mediandepth: %s", mindepth, maxdepth, mediandepth)
LOG.info("Computing geometric median mosaic")
gm = geomedian(data, num_threads=self.num_threads)
LOG.info("Computing cosine distance MAD mosaic")
sdev = -np.log(smad(data, gm, num_threads=self.num_threads))
LOG.info("Computing Euclidean distance MAD mosaic")
edev = -np.log(emad(data, gm, num_threads=self.num_threads))
LOG.info("Computing Bray-Curtis distance MAD mosaic")
bcdev = -np.log(bcmad(data, gm, num_threads=self.num_threads))
LOG.info("Stacking results")
result = np.dstack([sdev, edev, bcdev])
LOG.info("Mosaic size: %s dimensions: %s", sizefmt(result.nbytes), result.shape)
return result
def compute(self, data: xr.Dataset) -> xr.Dataset:
np.seterr(all="ignore")
squashed_together_dimensions, normal_datacube_dimensions = self._vars_to_transpose(data)
squashed = data.to_array(dim="variable").transpose(*squashed_together_dimensions)
assert squashed.dims == squashed_together_dimensions
output_coords = copy(squashed.coords)
if "time" in output_coords:
del output_coords["time"]
if "source" in output_coords:
del output_coords["source"]
fdata = squashed.data.astype(np.float32) / 10000.
fdata[(squashed.data == -999)] = np.nan
fdata[(squashed.data == 0)] = np.nan
tmp = self.compute_on_array(fdata)
da = xr.DataArray(tmp[:, :, 0], dims=("y", "x"), name="sdev")
db = xr.DataArray(tmp[:, :, 1], dims=("y", "x"), name="edev")
dc = xr.DataArray(tmp[:, :, 2], dims=("y", "x"), name="bcdev")
ds = xr.Dataset(data_vars={"sdev": da, "edev": db, "bcdev": dc})
LOG.info("Finished computing")
return ds
def measurements(self, m):
mm = [
Measurement(name="sdev", dtype="float32", nodata=np.nan, units="1"),
Measurement(name="edev", dtype="float32", nodata=np.nan, units="1"),
Measurement(name="bcdev", dtype="float32", nodata=np.nan, units="1"),
]
LOG.debug("Returning measurements: %s", mm)
return mm
@staticmethod
def _vars_to_transpose(data):
is_projected = "x" in data.dims and "y" in data.dims
is_geographic = "longitude" in data.dims and "latitude" in data.dims
if is_projected and is_geographic:
raise StatsProcessingError("Data to process contains BOTH geographic and projected dimensions")
elif not is_projected and not is_geographic:
raise StatsProcessingError("Data to process contains NEITHER geographic nor projected dimensions")
elif is_projected:
return ("y", "x", "variable", "time"), ("variable", "y", "x")
else:
return ("latitude", "longitude", "variable", "time"), ("variable", "latitude", "longitude")
SMAD = CosineDistanceMAD
EMAD = EuclideanDistanceMAD
BCMAD = BrayCurtisDistanceMAD
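# Hedged, conceptual sketch of the per-pixel statistic behind CosineDistanceMAD
# (the real work happens in the compiled .fast.smad and may differ in detail):
# a common formulation is the median, over time, of the cosine distance between
# each observation and the geomedian.
#
#   def cosine_mad_pixel(X, gm):                     # X: (time, band), gm: (band,)
#       cos = (X @ gm) / (np.linalg.norm(X, axis=1) * np.linalg.norm(gm))
#       return np.nanmedian(1.0 - cos)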
| 34.107884
| 110
| 0.622384
|
de9e72ecb892cc8701822b44cdbaf4338c186761
| 1,866
|
py
|
Python
|
database.py
|
BodhaanshRavipati325/SkinCancerDetetctor
|
5b46619eb6055c32f84ccc043970e50189f5e43f
|
[
"MIT"
] | 3
|
2020-11-09T15:13:19.000Z
|
2021-05-25T10:33:09.000Z
|
database.py
|
aahmad4/Dermatol
|
6a360f5bc223bfd61871d62c88ed57f7470b8cd1
|
[
"MIT"
] | null | null | null |
database.py
|
aahmad4/Dermatol
|
6a360f5bc223bfd61871d62c88ed57f7470b8cd1
|
[
"MIT"
] | null | null | null |
# Module being imported for current date and time
import datetime
# Creating a database that allows user credentials to be added to a text file
class DataBase:
# Initializing the class
def __init__(self, filename):
self.filename = filename
self.users = None
self.file = None
self.load()
# Allowing the database to read the users.txt file
def load(self):
self.file = open(self.filename, "r")
self.users = {}
# Defining the format of the txt file and how user credentials will be displayed
for line in self.file:
email, password, name, created = line.strip().split(";")
self.users[email] = (password, name, created)
self.file.close()
def get_user(self, email):
if email in self.users:
return self.users[email]
else:
return -1
def add_user(self, email, password, name):
if email.strip() not in self.users:
self.users[email.strip()] = (password.strip(), name.strip(), DataBase.get_date())
self.save()
return 1
else:
print("Email exists already")
return -1
def validate(self, email, password):
if self.get_user(email) != -1:
return self.users[email][0] == password
else:
return False
# Saving the txt file with the user credentials
def save(self):
with open(self.filename, "w") as f:
for user in self.users:
f.write(user + ";" + self.users[user][0] + ";" + self.users[user][1] + ";" + self.users[user][2] + "\n")
    # Allowing today's date to be shown when adding user credentials to the txt file
@staticmethod
def get_date():
return str(datetime.datetime.now()).split(" ")[0]
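# Minimal usage sketch (hedged; assumes a users.txt file in the
# email;password;name;created format expected by load() above already exists):
#
#   db = DataBase("users.txt")
#   if db.add_user("a@b.com", "secret", "Alice") == 1:
#       assert db.validate("a@b.com", "secret")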
| 33.321429
| 121
| 0.56806
|
8bf83b52ec7dc357568eca25eec0912c19101541
| 8,734
|
py
|
Python
|
epp_env/lib/python3.6/site-packages/djxml/xmlmodels/base.py
|
jprsurendra/EmployeeProjectPanel
|
e994f8493a14def28133d509a9211f368eb5c88c
|
[
"Apache-2.0"
] | null | null | null |
epp_env/lib/python3.6/site-packages/djxml/xmlmodels/base.py
|
jprsurendra/EmployeeProjectPanel
|
e994f8493a14def28133d509a9211f368eb5c88c
|
[
"Apache-2.0"
] | 8
|
2020-02-12T03:21:51.000Z
|
2022-03-12T00:07:01.000Z
|
virtual/lib/python3.6/site-packages/djxml/xmlmodels/base.py
|
Steve-design/Jirani
|
e386b1ede05f6c2067af2621c21ce802ec72ae73
|
[
"MIT"
] | null | null | null |
import re
import sys
import codecs
import functools
import copy
import six
from lxml import etree
from django.core.exceptions import (ObjectDoesNotExist, FieldError,
MultipleObjectsReturned,)
from django.db.models.base import subclass_exception
try:
from django.utils.encoding import (
smart_bytes as smart_str, force_text as force_unicode)
except ImportError:
from django.utils.encoding import smart_str, force_unicode
from .signals import xmlclass_prepared
from .options import Options, DEFAULT_NAMES
from .loading import register_xml_models, get_xml_model
class XmlModelBase(type):
"""
Metaclass for xml models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(XmlModelBase, cls).__new__
parents = [b for b in bases if isinstance(b, XmlModelBase)]
if not parents:
# If this isn't a subclass of Model, don't do anything special.
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
if getattr(meta, 'app_label', None) is None:
# Figure out the app_label by looking one level up.
# For 'django.contrib.sites.models', this would be 'sites'.
model_module = sys.modules[new_class.__module__]
kwargs = {"app_label": model_module.__name__.split('.')[-2]}
else:
kwargs = {}
for attr_name in DEFAULT_NAMES:
if attr_name == 'app_label':
continue
if getattr(meta, attr_name, None) is None:
for base in parents:
if not hasattr(base, '_meta'):
continue
attr_val = getattr(base._meta, attr_name)
if attr_val is not None:
kwargs[attr_name] = attr_val
break
new_class.add_to_class('_meta', Options(meta, **kwargs))
new_class.add_to_class('DoesNotExist', subclass_exception('DoesNotExist',
tuple(x.DoesNotExist
for x in parents if hasattr(x, '_meta'))
or (ObjectDoesNotExist,), module))
new_class.add_to_class('MultipleObjectsReturned', subclass_exception('MultipleObjectsReturned',
tuple(x.MultipleObjectsReturned
for x in parents if hasattr(x, '_meta'))
or (MultipleObjectsReturned,), module))
# Bail out early if we have already created this class.
m = get_xml_model(new_class._meta.app_label, name, False)
if m is not None:
return m
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
field_names = set([f.name for f in new_class._meta.local_fields])
for base in parents:
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents
continue
for field in base._meta.local_fields:
if field.name in field_names:
raise FieldError('Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' %
(field.name, name, base.__name__))
new_class.add_to_class(field.name, copy.deepcopy(field))
new_class._meta.parents.update(base._meta.parents)
new_class._prepare()
register_xml_models(new_class._meta.app_label, new_class)
# Because of the way imports happen (recursively), we may or may not be
# the first time this model tries to register with the framework. There
# should only be one class for each model, so we always return the
# registered version.
return get_xml_model(new_class._meta.app_label, name, False)
def add_to_class(cls, name, value):
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
if getattr(value, 'is_lxml_extension', False):
cls._meta.add_extension(value, extension_name=name)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
xmlclass_prepared.send(sender=cls)
@six.add_metaclass(XmlModelBase)
class XmlModel(object):
def __init__(self, root_element_tree):
fields_iter = iter(self._meta.fields)
for field in fields_iter:
if getattr(field, 'is_root_field', False):
val = root_element_tree
else:
val = None
setattr(self, field.attname, val)
super(XmlModel, self).__init__()
def _get_etree_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.etree.attname)
_default_xpath_eval = None
@property
def default_xpath_eval(self):
if self._default_xpath_eval is None:
self._default_xpath_eval = self._get_xpath_eval()
return self._default_xpath_eval
def _merge_xpath_kwargs(self, ns=None, ext=None):
"""
Merge user-provided namespace and extension keywords with the model
defaults.
"""
opts = self._meta
xpath_kwargs = {
'namespaces': getattr(opts, 'namespaces', {}),
'extensions': dict([(k, functools.partial(method, self))
for k, method in six.iteritems(opts.extensions)]),}
if ns is not None:
xpath_kwargs['namespaces'].update(ns)
if ext is not None:
xpath_kwargs['extensions'].update(ext)
return xpath_kwargs
def _get_xpath_eval(self, namespaces=None, extensions=None):
xpath_kwargs = self._merge_xpath_kwargs(ns=namespaces, ext=extensions)
return etree.XPathEvaluator(self._get_etree_val(), **xpath_kwargs)
def xpath(self, query, namespaces=None, extensions=None):
"""
Evaluate and return the results of an XPath query expression on the
xml model.
query: The XPath query string
namespaces: (optional) dict of extra prefix/uri namespaces pairs to
pass to lxml.etree.XPathEvaluator()
extensions: (optional) Extra extensions to pass on to
lxml.etree.XPathEvaluator()
"""
if namespaces is None and extensions is None:
xpath_eval = self.default_xpath_eval
else:
            xpath_eval = self._get_xpath_eval(namespaces=namespaces, extensions=extensions)
return xpath_eval(query)
@classmethod
def create_from_string(cls, xml_source, parser=None):
opts = cls._meta
if parser is None:
parser = opts.get_parser()
# lxml doesn't like it when the <?xml ?> header has an encoding,
# so we strip out encoding="utf-8" with a regex
xml_source = re.sub(r'(<\?xml[^\?]*?) encoding="(?:utf-8|UTF-8)"([^\?]*?\?>)',
r'\1\2', xml_source)
tree = etree.XML(xml_source, parser)
return cls(tree)
@classmethod
def create_from_file(cls, xml_file):
with codecs.open(xml_file, encoding='utf-8', mode='r') as f:
xml_source = f.read()
return cls.create_from_string(xml_source)
def __repr__(self):
try:
u = unicode(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return smart_str(u'<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if hasattr(self, '__unicode__'):
return force_unicode(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __eq__(self, other):
return isinstance(other, self.__class__) \
and self._get_etree_val() == other._get_etree_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._get_etree_val())
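# Hedged note on the flow above: XmlModelBase.add_to_class() lets any attribute
# with contribute_to_class() hook itself into _meta, and registers methods
# flagged with is_lxml_extension via _meta.add_extension(); XmlModel.xpath()
# then exposes those extensions, together with the Meta namespaces, through a
# single lxml XPathEvaluator built in _get_xpath_eval().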
| 36.391667
| 103
| 0.596634
|
7dc6584c6fe3787899b47bd330d24eb95fe4fdb0
| 1,867
|
py
|
Python
|
custom_components/home_connect_alt/const.py
|
code-echobase/home-connect-hass
|
7bb8b608f144a034ade4a07fe067e5c1db4297c4
|
[
"MIT"
] | 1
|
2022-02-14T07:38:41.000Z
|
2022-02-14T07:38:41.000Z
|
custom_components/home_connect_alt/const.py
|
Krocko/home-connect-hass
|
1519b48b9aecc79a74514192c86a19aca70aef3f
|
[
"MIT"
] | null | null | null |
custom_components/home_connect_alt/const.py
|
Krocko/home-connect-hass
|
1519b48b9aecc79a74514192c86a19aca70aef3f
|
[
"MIT"
] | null | null | null |
"""Constants for the Home Connect New integration."""
DOMAIN = "home_connect_alt"
SIM_HOST = "https://simulator.home-connect.com"
API_HOST = "https://api.home-connect.com"
ENDPOINT_AUTHORIZE = "/security/oauth/authorize"
ENDPOINT_TOKEN = "/security/oauth/token"
SCOPES = "IdentifyAppliance Monitor Control Settings"
CONF_SIMULATE = "simulate"
CONF_LANG = "language"
CONF_CACHE = "cache"
HOME_CONNECT_DEVICE = {
"identifiers": {(DOMAIN, "homeconnect")},
"name": "Home Connect Service",
"manufacturer": "BSH"
}
SPECIAL_ENTITIES = {
"ignore": [
"BSH.Common.Option.FinishInRelative"
],
"status": {
"BSH.Common.Status.DoorState": { "type": "binary_sensor", "class": "door", "on_state": "BSH.Common.EnumType.DoorState.Open" },
},
"options": {
"BSH.Common.Option.FinishInRelative": { "unit": None, "class": f"{DOMAIN}__timespan"},
"BSH.Common.Option.ElapsedProgramTime": { "unit": None, "class": f"{DOMAIN}__timespan"},
"BSH.Common.Option.RemainingProgramTime": {"class": "timestamp" }
}
}
DEVICE_ICON_MAP = {
"Dryer": "mdi:tumble-dryer",
"Washer": "mdi:washing-machine",
"Dishwasher": "mdi:dishwasher",
"CoffeeMaker": "mdi:coffee-maker",
"Oven": "mdi:stove",
"FridgeFreezer": "mdi:fridge",
"Fridge": "mdi:fridge",
"Refrigerator": "mdi:fridge",
"Freezer": "mdi:fridge",
"CleaningRobot": "mdi:robot-vacuum",
"Hood": "mdi:hvac"
}
PUBLISHED_EVENTS = [
"BSH.Common.Status.OperationState",
"*.event.*"
]
TRIGGERS_CONFIG = {
#"program_started": { "key": "BSH.Common.Event.ProgramFinished" },
"program_started": { "key": "BSH.Common.Status.OperationState", "value": "BSH.Common.EnumType.OperationState.Run" },
"program_finished": { "key": "BSH.Common.Status.OperationState", "value": "BSH.Common.EnumType.OperationState.Finished" }
}
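# Hedged illustration (not taken from this integration's code): a lookup against
# DEVICE_ICON_MAP with a fallback for appliance types that are not listed above;
# the fallback icon name is an arbitrary placeholder.
#
#   def icon_for(appliance_type: str) -> str:
#       return DEVICE_ICON_MAP.get(appliance_type, "mdi:home")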
| 31.644068
| 134
| 0.65774
|
05acf70c7a2efd47c8af7af796ee3cf3d265aa10
| 11,269
|
py
|
Python
|
neorl/hybrid/pesacore/woa.py
|
Jimmy-INL/neorl
|
4d11d7527c4e3548e40f928d52b15e7e1e373337
|
[
"MIT"
] | 1
|
2021-07-06T20:31:38.000Z
|
2021-07-06T20:31:38.000Z
|
neorl/hybrid/pesacore/woa.py
|
Jimmy-INL/neorl
|
4d11d7527c4e3548e40f928d52b15e7e1e373337
|
[
"MIT"
] | null | null | null |
neorl/hybrid/pesacore/woa.py
|
Jimmy-INL/neorl
|
4d11d7527c4e3548e40f928d52b15e7e1e373337
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#"""
#Created on Thu Dec 3 14:42:29 2020
#
#@author: Majdi
#"""
import random
import numpy as np
import math
import time
from collections import defaultdict
import sys
import uuid
import multiprocessing
import multiprocessing.pool
class NoDaemonProcess(multiprocessing.Process):
# make 'daemon' attribute always return False
def _get_daemon(self):
return False
def _set_daemon(self, value):
pass
daemon = property(_get_daemon, _set_daemon)
# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool
# because the latter is only a wrapper function, not a proper class.
class MyPool(multiprocessing.pool.Pool):
Process = NoDaemonProcess
# multiprocessing trick to parallelize nested functions in python (un-picklable objects!)
def globalize(func):
def result(*args, **kwargs):
return -func(*args, **kwargs)
result.__name__ = result.__qualname__ = uuid.uuid4().hex
setattr(sys.modules[result.__module__], result.__name__, result)
return result
class WOAmod(object):
"""
Whale Optimization Algorithm
:param mode: (str) problem type, either ``min`` for minimization problem or ``max`` for maximization
:param bounds: (dict) input parameter type and lower/upper bounds in dictionary form. Example: ``bounds={'x1': ['int', 1, 4], 'x2': ['float', 0.1, 0.8], 'x3': ['float', 2.2, 6.2]}``
:param fit: (function) the fitness function
:param nwhales: (int): number of whales in the population
:param a0: (float): initial value for coefficient ``a``, which is annealed from ``a0`` to 0 (see **Notes** below for more info).
:param b: (float): constant for defining the shape of the logarithmic spiral
:param ncores: (int) number of parallel processors (must be ``<= nwhales``)
:param seed: (int) random seed for sampling
"""
def __init__(self, mode, bounds, fit, nwhales=5, a0=2, b=1, ncores=1, seed=None):
if seed:
random.seed(seed)
np.random.seed(seed)
#--mir
self.mode=mode
if mode == 'min':
self.fit=fit
elif mode == 'max':
self.fit = globalize(lambda x: fit(x)) #use the function globalize to serialize the nested fit
else:
raise ValueError('--error: The mode entered by user is invalid, use either `min` or `max`')
self.bounds=bounds
self.ncores = ncores
self.nwhales=nwhales
assert a0 > 0, '--error: a0 must be positive'
self.a0=a0
self.b=b
self.dim = len(bounds)
self.lb=np.array([self.bounds[item][1] for item in self.bounds])
self.ub=np.array([self.bounds[item][2] for item in self.bounds])
def init_sample(self, bounds):
indv=[]
for key in bounds:
if bounds[key][0] == 'int':
indv.append(random.randint(bounds[key][1], bounds[key][2]))
elif bounds[key][0] == 'float':
indv.append(random.uniform(bounds[key][1], bounds[key][2]))
elif bounds[key][0] == 'grid':
indv.append(random.sample(bounds[key][1],1)[0])
else:
                raise Exception('unknown data type is given; only int, float, or grid are allowed for parameter bounds')
return indv
def eval_whales(self):
#---------------------
# Fitness calcs
#---------------------
core_lst=[]
for case in range (0, self.Positions.shape[0]):
core_lst.append(self.Positions[case, :])
if self.ncores > 1:
p=MyPool(self.ncores)
fitness_lst = p.map(self.fit_worker, core_lst)
p.close(); p.join()
else:
fitness_lst=[]
for item in core_lst:
fitness_lst.append(self.fit_worker(item))
return fitness_lst
def select(self, pos, fit):
best_fit=np.min(fit)
min_idx=np.argmin(fit)
best_pos=pos[min_idx,:]
return best_pos, best_fit
def ensure_bounds(self, vec):
vec_new = []
# cycle through each variable in vector
for i, (key, val) in enumerate(self.bounds.items()):
            # variable exceeds the minimum boundary
if vec[i] < self.bounds[key][1]:
vec_new.append(self.bounds[key][1])
            # variable exceeds the maximum boundary
if vec[i] > self.bounds[key][2]:
vec_new.append(self.bounds[key][2])
# the variable is fine
if self.bounds[key][1] <= vec[i] <= self.bounds[key][2]:
vec_new.append(vec[i])
return vec_new
def fit_worker(self, x):
#This worker is for parallel calculations
# Clip the whale with position outside the lower/upper bounds and return same position
x=self.ensure_bounds(x)
# Calculate objective function for each search agent
fitness = self.fit(x)
return fitness
def UpdateWhales(self):
# Update the Position of the whales agents
for i in range(0, self.nwhales):
r1 = random.random()
r2 = random.random()
self.A = 2 * self.a * r1 - self.a
C = 2 * r2
l = (self.fac - 1) * random.random() + 1
p = random.random()
for j in range(0, self.dim):
if p < 0.5:
if abs(self.A) >= 1:
r_index = math.floor(self.nwhales * random.random())
X_rand = self.Positions[r_index, :]
self.Positions[i, j] = X_rand[j] - self.A * abs(C * X_rand[j] - self.Positions[i, j])
elif abs(self.A) < 1:
self.Positions[i, j] = self.best_position[j] - self.A * abs(C * self.best_position[j] - self.Positions[i, j])
elif p >= 0.5:
distance2Leader = abs(self.best_position[j] - self.Positions[i, j])
self.Positions[i, j] = (distance2Leader * math.exp(self.b * l)
* math.cos(l * 2 * math.pi) + self.best_position[j])
self.Positions[i,:]=self.ensure_bounds(self.Positions[i,:])
def evolute(self, ngen, x0=None, verbose=True):
"""
        This function evolutes the WOA algorithm for a given number of generations.
        :param ngen: (int) number of generations to evolute
        :param x0: (list of lists) initial positions of the whales (must be of the same size as ``nwhales``)
        :param verbose: (bool) print statistics to screen
        :return: (tuple) best whale position found, its fitness (sign-corrected for ``max`` mode), and a population dictionary ranked for PESA2
"""
self.history = {'local_fitness':[], 'global_fitness':[], 'a': [], 'A': []}
self.best_fitness=float("inf")
self.verbose=verbose
self.Positions = np.zeros((self.nwhales, self.dim))
if x0:
assert len(x0) == self.nwhales, '--error: the length of x0 ({}) MUST equal the number of whales in the group ({})'.format(len(x0), self.nwhales)
for i in range(self.nwhales):
self.Positions[i,:] = x0[i]
else:
#self.Positions=self.init_sample(self.bounds) #TODO, update later for mixed-integer optimisation
# Initialize the positions of whales
for i in range(self.dim):
self.Positions[:, i] = (np.random.uniform(0, 1, self.nwhales) * (self.ub[i] - self.lb[i]) + self.lb[i])
fitness0=self.eval_whales()
self.best_position, self.best_fitness = self.select(self.Positions, fitness0)
for k in range(0, ngen):
            # a is annealed from a0 to 0
self.a = self.a0 - k * ((self.a0) / (ngen))
# fac is annealed from -1 to -2 to estimate l
self.fac = -1 + k * ((-1) / ngen)
#-----------------------------
# Update Whale Positions
#-----------------------------
self.UpdateWhales()
#----------------------
# Evaluate New Whales
#----------------------
self.fitness=self.eval_whales()
for i, fits in enumerate(self.fitness):
#save the best of the best!!!
if fits < self.best_fitness:
self.best_fitness=fits
self.best_position=self.Positions[i, :].copy()
#--mir
if self.mode=='max':
self.fitness_best_correct=-self.best_fitness
self.local_fitness=-np.min(self.fitness)
else:
self.fitness_best_correct=self.best_fitness
self.local_fitness=np.min(self.fitness)
self.history['local_fitness'].append(self.local_fitness)
self.history['global_fitness'].append(self.fitness_best_correct)
self.history['a'].append(self.a)
self.history['A'].append(self.A)
# Print statistics
if self.verbose and i % self.nwhales:
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
print('WOA step {}/{}, nwhales={}, Ncores={}'.format((k+1)*self.nwhales, ngen*self.nwhales, self.nwhales, self.ncores))
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
print('Best Whale Fitness:', np.round(self.fitness_best_correct,6))
print('Best Whale Position:', np.round(self.best_position,6))
print('a:', np.round(self.a,3))
print('A:', np.round(self.A,3))
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
if self.verbose:
print('------------------------ WOA Summary --------------------------')
print('Best fitness (y) found:', self.fitness_best_correct)
print('Best individual (x) found:', self.best_position)
print('--------------------------------------------------------------')
#-------------------------------------
#return population ranked for PESA2
#-------------------------------------
pesa_pop=defaultdict(list)
for i in range(0, self.Positions.shape[0]):
pesa_pop[i].append(list(self.Positions[i, :]))
if self.mode=='max':
pesa_pop[i].append(-self.fitness[i])
else:
pesa_pop[i].append(self.fitness[i])
return self.best_position, self.fitness_best_correct, pesa_pop
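# Minimal usage sketch (hedged; the bounds and fitness function are illustrative
# only, and follow the formats documented in the class docstring above):
#
#   def sphere(x):
#       return sum(xi ** 2 for xi in x)
#
#   woa = WOAmod(mode='min',
#                bounds={'x1': ['float', -5.0, 5.0], 'x2': ['float', -5.0, 5.0]},
#                fit=sphere, nwhales=10, ncores=1, seed=1)
#   xbest, ybest, pesa_pop = woa.evolute(ngen=30, verbose=False)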
| 40.82971
| 186
| 0.505457
|
87c0f425695d2f109f0451401853ce055e73e1c5
| 3,511
|
py
|
Python
|
models/resnet_zoo.py
|
amjltc295/hand_track_classification
|
71fdc980d3150646cd531e28878ff1eb63c7efea
|
[
"MIT"
] | 6
|
2019-07-08T12:01:17.000Z
|
2021-11-01T06:01:28.000Z
|
models/resnet_zoo.py
|
georkap/hand_track_classification
|
962faa1697864e892475989a97fa6ed9c2f1d7b3
|
[
"MIT"
] | null | null | null |
models/resnet_zoo.py
|
georkap/hand_track_classification
|
962faa1697864e892475989a97fa6ed9c2f1d7b3
|
[
"MIT"
] | 3
|
2019-07-08T12:25:45.000Z
|
2020-06-05T20:27:57.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 25 15:24:51 2018
resnet loader from pytorch model zoo
ref: https://github.com/pytorch/examples/pull/58
@author: Γιώργος
"""
import torch.nn as nn
import torchvision.models as models
import torch.utils.model_zoo as model_zoo
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def set_parameter_requires_grad(model, feature_extracting):
if feature_extracting:
for param in model.parameters():
param.requires_grad = False
def resnet_base_loader(pretrained, version):
assert version in ['18','34','50','101','152']
if version=='18':
model_ft = resnet18loader(pretrained)
elif version=='34':
model_ft = resnet34loader(pretrained)
elif version=='50':
model_ft = resnet50loader(pretrained)
elif version=='101':
model_ft = resnet101loader(pretrained)
elif version=='152':
model_ft = resnet152loader(pretrained)
else:
print('Should never be here')
return model_ft
def resnet_loader(num_classes, dropout, pretrained, feature_extraction, version, channels, pad_input):
model_ft = resnet_base_loader(pretrained, version)
set_parameter_requires_grad(model_ft, feature_extraction)
if pad_input:
modules = []
modules.append(nn.Conv2d(channels, channels, kernel_size=3, stride=2, padding=(96,0), bias=False))
if channels != 3:
prev_conv = nn.Conv2d(channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
else: # if still on RGB keep the pretrained weights for the layer
prev_conv = model_ft.conv1
modules.append(prev_conv)
model_ft.conv1 = nn.Sequential(*modules)
else:
if channels != 3:
model_ft.conv1 = nn.Conv2d(channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Sequential(nn.Dropout(p=dropout),
nn.Linear(num_ftrs, num_classes))
return model_ft
def location_resnet_loader(num_classes, dropout, pretrained, feature_extraction, version):
model_ft = resnet_base_loader(pretrained, version)
set_parameter_requires_grad(model_ft, feature_extraction)
model_ft.conv1 = nn.Conv2d(1, 64, kernel_size=(40,240), stride=2, padding=3,
bias=False)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Sequential(nn.Dropout(p=dropout),
nn.Linear(num_ftrs, num_classes))
return model_ft
def resnet18loader(pretrained):
model_ft = models.resnet18(pretrained=pretrained)
return model_ft
def resnet34loader(pretrained):
model_ft = models.resnet34(pretrained=pretrained)
return model_ft
def resnet50loader(pretrained):
model_ft = models.resnet50(pretrained=pretrained)
return model_ft
def resnet101loader(pretrained):
model_ft = models.resnet101(pretrained=pretrained)
return model_ft
def resnet152loader(pretrained):
model_ft = models.resnet152(pretrained=pretrained)
return model_ft
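# Minimal usage sketch (hedged; argument values are illustrative): build a
# ResNet-50 backbone with a dropout + linear classification head as defined
# by resnet_loader() above.
#
#   model = resnet_loader(num_classes=10, dropout=0.5, pretrained=True,
#                         feature_extraction=False, version='50',
#                         channels=3, pad_input=False)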
| 35.464646
| 106
| 0.691256
|
8f5a82aeec119c7ccef370671fdc312b8410a640
| 33,137
|
py
|
Python
|
Providers/Scripts/2.6x-2.7x/Scripts/nxOMSAutomationWorker.py
|
simathih/PowerShell-DSC-for-Linux
|
ff78ea6aec3ea76aa04fdcda6d1d08d269449394
|
[
"MIT"
] | null | null | null |
Providers/Scripts/2.6x-2.7x/Scripts/nxOMSAutomationWorker.py
|
simathih/PowerShell-DSC-for-Linux
|
ff78ea6aec3ea76aa04fdcda6d1d08d269449394
|
[
"MIT"
] | null | null | null |
Providers/Scripts/2.6x-2.7x/Scripts/nxOMSAutomationWorker.py
|
simathih/PowerShell-DSC-for-Linux
|
ff78ea6aec3ea76aa04fdcda6d1d08d269449394
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# ====================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# See license.txt for license information.
# ====================================
import ConfigParser
import imp
import logging
import logging.handlers
import os
import re
import signal
import subprocess
import sys
import time
import pwd
import traceback
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
try:
serializerfactory = imp.load_source('serializerfactory',
'../../modules/nxOMSAutomationWorker/DSCResources/MSFT_nxOMSAutomationWorkerResource/automationworker/worker/serializerfactory.py')
except:
# this is the path when running tests
serializerfactory = imp.load_source('serializerfactory',
'../../nxOMSAutomationWorker/automationworker/worker/serializerfactory.py')
try:
linuxutil = imp.load_source('linuxutil',
'../../modules/nxOMSAutomationWorker/DSCResources/MSFT_nxOMSAutomationWorkerResource/automationworker/worker/linuxutil.py')
except:
linuxutil = imp.load_source('linuxutil',
'../../nxOMSAutomationWorker/automationworker/worker/linuxutil.py')
LG = nxDSCLog.DSCLog
def Set_Marshall(ResourceSettings):
try:
settings = read_settings_from_mof_json(ResourceSettings)
if not is_oms_primary_workspace(settings.workspace_id):
# not primary workspace
# return unconditional [0] for a NOOP on non-primary workspace
log(DEBUG, "Set_Marshall skipped: non primary workspace. Set marshall returned [0]")
return [0]
if not nxautomation_user_exists():
log(ERROR, "Set_Marshall skipped: please update omsagent to the latest version")
return [0]
        # compatibility with 1.4: remove the state.conf file
if os.path.isfile(STATE_CONF_FILE_PATH):
os.remove(STATE_CONF_FILE_PATH)
# if an update is required from 1.4
# major changes were made in 1.5.0.0 that are incompatible with the 1.4 way of doing things
if is_any_1_4_process_running(get_nxautomation_ps_output(), settings.workspace_id):
log(DEBUG, "Hybrid worker 1.4 detected, attempting to kill")
kill_process_by_pattern_string(settings.workspace_id)
try:
kill_any_worker_running_as_omsagent(
worker_pgrep_pattern="/opt/microsoft/omsconfig/modules/nxOMSAutomationWorker/DSCResources/MSFT_nxOMSAutomationWorkerResource/automationworker/worker/main.py")
except:
log(INFO, "Unable to kill old omsagent worker")
pass
# Kill worker managers that might already be running
log(DEBUG, "Killing the instance of worker manager already running")
kill_worker_manager(settings.workspace_id)
# Kill all stray processes
for ws_id in get_stray_worker_and_manager_wsids(get_nxautomation_ps_output(), settings.workspace_id):
log(DEBUG, "Workspace id %s has worker and manager processes running in improper context. Terminating."
% ws_id)
kill_process_by_pattern_string(WORKSPACE_ID_PREFIX + ws_id)
        # Set up conf and working directories if they don't exist
if not os.path.isdir(WORKER_STATE_DIR):
os.makedirs(WORKER_STATE_DIR, PERMISSION_LEVEL_0770)
if not os.path.isdir(WORKING_DIRECTORY_PATH):
os.makedirs(WORKING_DIRECTORY_PATH, PERMISSION_LEVEL_0770)
        # if the directory does not have permission level 770, reset the permission level
if os.stat(WORKER_STATE_DIR).st_mode & PERMISSION_LEVEL_0777 != PERMISSION_LEVEL_0770:
# bitwise AND with PERMISSION_LEVEL_0777 will give true permission level
os.chmod(WORKER_STATE_DIR, PERMISSION_LEVEL_0770)
# set cert permissions
proc = subprocess.Popen(["sudo", "-u", AUTOMATION_USER, "python", OMS_UTIL_FILE_PATH, "--initialize"])
if proc.wait() != 0:
raise Exception("call to omsutil.py --initialize failed")
except Exception:
log(ERROR, "Set_Marshall returned [-1] with following error: %s" % traceback.format_exc())
return [-1]
try:
# Create the configuration object
write_omsconf_file(settings.workspace_id, settings.auto_register_enabled, settings.diy_enabled)
os.chmod(OMS_CONF_FILE_PATH, PERMISSION_LEVEL_0770)
log(DEBUG, "oms.conf file was written")
except Exception:
log(ERROR, "Set_Marshall returned [-1] with following error: %s" % traceback.format_exc())
return [-1]
try:
# register the auto worker if required
if settings.auto_register_enabled:
# Write worker.conf file
oms_workspace_id, agent_id = get_workspaceid_agentid_from_oms_config()
            # If both proxy files exist, use the new one
            # If neither exists, use the new path; it will contain no file, and no proxy file means no proxy is set up
# If one of them exists, use that
proxy_conf_path = PROXY_CONF_PATH_NEW
if not os.path.isfile(PROXY_CONF_PATH_NEW) and os.path.isfile(PROXY_CONF_PATH_LEGACY):
proxy_conf_path = PROXY_CONF_PATH_LEGACY
args = ["python", REGISTRATION_FILE_PATH, "--register", "-w", settings.workspace_id, "-a", agent_id,
"-c", OMS_CERTIFICATE_PATH, "-k", OMS_CERT_KEY_PATH, "-f", WORKING_DIRECTORY_PATH, "-s",
WORKER_STATE_DIR, "-e", settings.azure_dns_agent_svc_zone, "-p", proxy_conf_path, "-g",
KEYRING_PATH]
diy_account_id = get_diy_account_id()
if diy_account_id:
args.append("-y")
args.append(diy_account_id)
asset_tag, is_azure_vm, vm_id = get_optional_metadata()
args.append("-i")
args.append(vm_id)
if is_azure_vm:
args.append("-z")
azure_resource_id = get_azure_resource_id_from_oms_config()
if azure_resource_id:
args.append("-v")
args.append(azure_resource_id)
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
# log(DEBUG, "Trying to register Linux hybrid worker with args: %s" % str(args))
if proc.returncode == -5:
log(ERROR, "Linux Hybrid Worker registration failed: DIY and auto-register account ids do not match")
log(INFO, "Worker manager with be started without auto registered worker")
elif proc.returncode != 0:
raise Exception("Linux Hybrid Worker registration failed: Return code %s :" % str(proc.returncode)
+ stderr + "\n" + stdout)
elif not os.path.isfile(AUTO_REGISTERED_WORKER_CONF_PATH):
raise Exception("Linux Hybrid Worker registration file could not be created")
else:
os.chmod(AUTO_REGISTERED_WORKER_CONF_PATH, PERMISSION_LEVEL_0770)
except Exception:
log(ERROR, "Set_Marshall returned [-1] with following error: %s" % traceback.format_exc())
return [-1]
try:
# start the worker manager proc
if (settings.auto_register_enabled or settings.diy_enabled) and start_worker_manager_process(
settings.workspace_id) < 0:
log(ERROR, "Worker manager process could not be started. Set_Marshall returned [-1]")
return [-1]
elif not settings.auto_register_enabled and not settings.diy_enabled:
log(DEBUG,
"No solutions requiring linux hybrid worker are enabled. Terminating the hybrid worker processes")
# Kill all workers and managers
kill_process_by_pattern_string(WORKSPACE_ID_PREFIX + settings.workspace_id)
if is_hybrid_worker_or_manager_running(settings.workspace_id):
raise Exception("Could not kill worker and manager processes")
log(INFO, "Set_Marshall returned [0]. Exited successfully")
return [0]
except Exception:
log(ERROR, "Set_Marshall returned [-1] with following error: %s" % traceback.format_exc())
return [-1]
def Test_Marshall(ResourceSettings):
"""
    Test method for the DSC resource
    If it returns [0] no further action is taken
    If it returns [-1] Set_Marshall is called
    It tests whether oms.conf exists and whether the worker manager is running and at the latest version
    :param ResourceSettings:
    :return: [0] if all tests pass, [-1] otherwise
"""
try:
settings = read_settings_from_mof_json(ResourceSettings)
if not is_oms_primary_workspace(settings.workspace_id):
# not primary workspace
# return unconditional [0] for a NOOP on non-primary workspace
log(DEBUG, "Test_Marshall skipped: non primary workspace. Test_Marshall returned [0]")
return [0]
if not nxautomation_user_exists():
log(ERROR, "Test_Marshall skipped: please update omsagent to the latest version")
return [0]
if get_stray_worker_and_manager_wsids(get_nxautomation_ps_output(), settings.workspace_id):
log(INFO, "Test_Marshall returned [-1]: process started by other workspaces detected")
return [-1]
if not os.path.isfile(OMS_CONF_FILE_PATH):
log(INFO, "Test_Marshall returned [-1]: oms.conf file not found")
return [-1]
if is_any_1_4_process_running(get_nxautomation_ps_output(), settings.workspace_id):
log(INFO, "Test_Marshall returned [-1]: an older version of Hybrid Worker was found")
return [-1]
if (settings.auto_register_enabled or settings.diy_enabled) and not is_worker_manager_running_latest_version(
settings.workspace_id):
# Either the worker manager is not running, or its not latest
log(INFO, "Test_Marshall returned [-1]: worker manager isn't running or is not latest")
return [-1]
if not settings.auto_register_enabled and not settings.diy_enabled and is_hybrid_worker_or_manager_running(
settings.workspace_id):
log(INFO, "Test_Marshall returned [-1]: worker or manager is running when no solution is enabled")
return [-1]
if not is_oms_config_consistent_with_mof(settings.auto_register_enabled, settings.diy_enabled):
# Current oms.conf is inconsistent with the mof
log(INFO, "Test_Marshall returned [-1]: oms.conf differs from configuration mof")
return [-1]
if settings.auto_register_enabled:
if not os.path.isfile(AUTO_REGISTERED_WORKER_CONF_PATH):
log(INFO, "Test_Marshall returned [-1]: auto register is enabled but registration file is absent")
return [-1]
elif not is_certificate_valid(AUTO_REGISTERED_WORKER_CONF_PATH, OMS_CERTIFICATE_PATH):
# worker.conf file is present, check if the certificates are most recent
log(INFO, "Test_Marshall returned [-1]: certificate mismatch for auto registered worker")
return [-1]
except Exception:
log(INFO, "Test_Marshall returned [-1]: %s" % traceback.format_exc())
return [-1]
# All went well
log(DEBUG, "Test_Marshall returned [0]")
return [0]
def Get_Marshall(ResourceSettings):
settings = read_settings_from_mof_json(ResourceSettings)
retval = 0
retd = dict()
retd['WorkspaceId'] = protocol.MI_String(settings.workspace_id)
retd['AzureDnsAgentSvcZone'] = protocol.MI_String(settings.azure_dns_agent_svc_zone)
retd['UpdatesEnabled'] = protocol.MI_Boolean(settings.auto_register_enabled)
retd['DiyEnabled'] = protocol.MI_Boolean(settings.diy_enabled)
return retval, retd
# ###########################################################
# Begin user defined DSC functions
# ###########################################################
WORKSPACE_ID_PREFIX = "rworkspace:"
ERROR = logging.ERROR
DEBUG = logging.DEBUG
INFO = logging.INFO
OPTION_OMS_WORKSPACE_ID = "WORKSPACE_ID"
OPTION_AGENT_ID = "AGENT_GUID"
OPTION_AZURE_RESOURCE_ID = "AZURE_RESOURCE_ID"
SECTION_OMS_GLOBAL = "oms-global"
OPTION_AUTO_REGISTERED_WORKER_CONF_PATH = "auto_registered_worker_conf_path"
OPTION_MANUALLY_REGISTERED_WORKER_CONF_PATH = "manually_registered_worker_conf_path"
OPTION_WORKSPACE_ID = "workspace_id"
SECTION_OMS_WORKER_CONF = "oms-worker-conf"
OPTION_RESOURCE_VERSION = "resource_version"
OPTION_HYBRID_WORKER_PATH = "hybrid_worker_path"
OPTION_DISABLE_WORKER_CREATION = "disable_worker_creation"
SECTION_OMS_METADATA = "oms-metadata"
OPTION_JRDS_CERT_THUMBPRINT = "jrds_cert_thumbprint"
SECTION_WORKER_REQUIRED = "worker-required"
OPTION_ACCOUNT_ID = "account_id"
WORKER_STATE_DIR = "/var/opt/microsoft/omsagent/state/automationworker"
DIY_WORKER_STATE_DIR = os.path.join(WORKER_STATE_DIR, "diy")
OMS_CONF_FILE_PATH = os.path.join(WORKER_STATE_DIR, "oms.conf")
AUTO_REGISTERED_WORKER_CONF_PATH = os.path.join(WORKER_STATE_DIR, "worker.conf")
DIY_WORKER_CONF_PATH = os.path.join(DIY_WORKER_STATE_DIR, "worker.conf")
STATE_CONF_FILE_PATH = os.path.join(WORKER_STATE_DIR, "state.conf")
OMS_PRIMARY_WORKSPACE_CONF_DIR = "/etc/opt/microsoft/omsagent/conf"
DSC_RESOURCE_VERSION_FILE = "/opt/microsoft/omsconfig/modules/nxOMSAutomationWorker/VERSION"
OMS_ADMIN_CONFIG_FILE = os.path.join(OMS_PRIMARY_WORKSPACE_CONF_DIR, "omsadmin.conf")
OMS_AGENTID_FILE = "/etc/opt/microsoft/omsagent/agentid"
WORKING_DIRECTORY_PATH = "/var/opt/microsoft/omsagent/run/automationworker"
WORKER_MANAGER_START_PATH = "/opt/microsoft/omsconfig/modules/nxOMSAutomationWorker/DSCResources/MSFT_nxOMSAutomationWorkerResource/automationworker/worker/main.py"
HYBRID_WORKER_START_PATH = "/opt/microsoft/omsconfig/modules/nxOMSAutomationWorker/DSCResources/MSFT_nxOMSAutomationWorkerResource/automationworker/worker/hybridworker.py"
PROXY_CONF_PATH_LEGACY = os.path.join(OMS_PRIMARY_WORKSPACE_CONF_DIR, "proxy.conf")
PROXY_CONF_PATH_NEW = "/etc/opt/microsoft/omsagent/proxy.conf"
REGISTRATION_FILE_PATH = "/opt/microsoft/omsconfig/modules/nxOMSAutomationWorker/DSCResources/MSFT_nxOMSAutomationWorkerResource/automationworker/scripts/register_oms.py"
OMS_CERTIFICATE_PATH = "/etc/opt/microsoft/omsagent/certs/oms.crt"
OMS_CERT_KEY_PATH = "/etc/opt/microsoft/omsagent/certs/oms.key"
KEYRING_PATH = "/etc/opt/omi/conf/omsconfig/keyring.gpg"
OMS_UTIL_FILE_PATH = "/opt/microsoft/omsconfig/modules/nxOMSAutomationWorker/DSCResources/MSFT_nxOMSAutomationWorkerResource/automationworker/worker/omsutil.py"
# permission level rwx rwx ---
# leading zero is necessary because this is an octal number
# Note: for python 3.x use 0o770 instead of 0770
PERMISSION_LEVEL_0770 = 0770
PERMISSION_LEVEL_0777 = 0777
AUTOMATION_USER = "nxautomation"
OMSAGENT_USER = "omsagent"
LOCAL_LOG_LOCATION = "/var/opt/microsoft/omsagent/log/nxOMSAutomationWorker.log"
LOG_LOCALLY = False
def get_diy_account_id():
"""
Gets the account id from diy conf file
:return: The account id if the configuration file exists, otherwise None
"""
try:
diy_config = ConfigParser.ConfigParser()
diy_config.read(DIY_WORKER_CONF_PATH)
return diy_config.get(SECTION_WORKER_REQUIRED, OPTION_ACCOUNT_ID)
except:
return None
def get_optional_metadata():
unknown = "Unknown"
asset_tag = unknown
vm_id = unknown
is_azure_vm = False
try:
proc = subprocess.Popen(["sudo", "-u", AUTOMATION_USER, "python", OMS_UTIL_FILE_PATH, "--dmidecode"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
dmidecode, error = proc.communicate()
if proc.returncode != 0 or not dmidecode:
raise Exception("Unable to invoke omsutil.py --dmidecode: %s" % error)
is_azure_vm = linuxutil.is_azure_vm(dmidecode)
if is_azure_vm:
asset_tag = linuxutil.get_azure_vm_asset_tag()
vm_id = linuxutil.get_vm_unique_id_from_dmidecode(sys.byteorder, dmidecode)
except Exception, e:
log(INFO, "unable to get_optional_metadata: %s" % str(e))
return asset_tag, is_azure_vm, vm_id
def get_manually_registered_worker_conf_path(workspace_id):
return "/var/opt/microsoft/omsagent/%s/state/automationworker/diy/worker.conf" % workspace_id
def is_certificate_valid(worker_conf_path, certificate_path):
"""
    certificate is valid when thumbprints match
worker.conf stores the certificate thumbprint that was used to register linux hybrid worker for updates and take
action solution (auto registered)
if the thumbprint doesn't match with the certificate in the certificate_path, then certificates have been rotated
and re-registration is required
:param worker_conf_path:
:param certificate_path:
:return: True if thumbprints match, false otherwise.
"""
try:
worker_conf = ConfigParser.ConfigParser()
worker_conf.read(worker_conf_path)
worker_certificate_thumbprint = worker_conf.get(SECTION_OMS_METADATA, OPTION_JRDS_CERT_THUMBPRINT)
issuer, subject, omsagent_certificate_thumbprint = linuxutil.get_cert_info(certificate_path)
if worker_certificate_thumbprint == omsagent_certificate_thumbprint:
return True
except:
pass
return False
class Settings:
workspace_id = ""
azure_dns_agent_svc_zone = ""
auto_register_enabled = ""
diy_enabled = ""
    def __init__(self, workspace_id, azure_dns_agent_svc_zone, updates_enabled, diy_enabled):
        self.workspace_id = workspace_id
self.azure_dns_agent_svc_zone = azure_dns_agent_svc_zone
self.auto_register_enabled = updates_enabled
self.diy_enabled = diy_enabled
def read_settings_from_mof_json(json_serialized_string):
"""
Deserializes a JSON serialized string
    :param json_serialized_string: the serialized JSON string
:return: Settings object
"""
try:
json_serializer = serializerfactory.get_serializer(sys.version_info)
settings = json_serializer.loads(json_serialized_string)
workspace_id = settings[0]["WorkspaceId"].encode("ascii", "ignore")
azure_dns_agent_svc_zone = settings[0]["AzureDnsAgentSvcZone"].encode("ascii", "ignore")
updates_enabled = settings[0]["Solutions"]["Updates"]["Enabled"]
diy_enabled = settings[0]["Solutions"]["AzureAutomation"]["Enabled"]
return Settings(workspace_id, azure_dns_agent_svc_zone, updates_enabled, diy_enabled)
except Exception, e:
log(ERROR, "Json parameters deserialization Error: %s" % str(e))
raise e
def is_hybrid_worker_or_manager_running(workspace_id):
search_expression = WORKSPACE_ID_PREFIX + workspace_id
result, retcode = run_pgrep_command(search_expression)
if result and retcode == 0:
log(DEBUG, "Hybrid worker and manager processes detected: %s" % result)
return True
else:
log(DEBUG, "No hybrid worker or manager processes found")
return False
def is_oms_config_consistent_with_mof(updates_enabled, diy_enabled, oms_conf_file_path=OMS_CONF_FILE_PATH):
if not os.path.isfile(oms_conf_file_path):
return False
oms_config = ConfigParser.ConfigParser()
oms_config.read(oms_conf_file_path)
if not oms_config.has_section(SECTION_OMS_WORKER_CONF):
return False
updates_present = oms_config.has_option(SECTION_OMS_WORKER_CONF, OPTION_AUTO_REGISTERED_WORKER_CONF_PATH)
diy_present = oms_config.has_option(SECTION_OMS_WORKER_CONF, OPTION_MANUALLY_REGISTERED_WORKER_CONF_PATH)
return updates_present == updates_enabled and diy_present == diy_enabled
def write_omsconf_file(workspace_id, updates_enabled, diy_enabled):
oms_config = ConfigParser.ConfigParser()
if os.path.isfile(OMS_CONF_FILE_PATH):
oms_config.read(OMS_CONF_FILE_PATH)
# oms.conf region [oms-worker-conf]
if not oms_config.has_section(SECTION_OMS_WORKER_CONF):
oms_config.add_section(SECTION_OMS_WORKER_CONF)
if updates_enabled:
oms_config.set(SECTION_OMS_WORKER_CONF, OPTION_AUTO_REGISTERED_WORKER_CONF_PATH,
AUTO_REGISTERED_WORKER_CONF_PATH)
else:
oms_config.remove_option(SECTION_OMS_WORKER_CONF, OPTION_AUTO_REGISTERED_WORKER_CONF_PATH)
if diy_enabled:
oms_config.set(SECTION_OMS_WORKER_CONF, OPTION_MANUALLY_REGISTERED_WORKER_CONF_PATH,
get_manually_registered_worker_conf_path(workspace_id))
else:
oms_config.remove_option(SECTION_OMS_WORKER_CONF, OPTION_MANUALLY_REGISTERED_WORKER_CONF_PATH)
# oms.conf region [oms-global]
if not oms_config.has_section(SECTION_OMS_GLOBAL):
oms_config.add_section(SECTION_OMS_GLOBAL)
oms_config.set(SECTION_OMS_GLOBAL, OPTION_RESOURCE_VERSION, get_module_version())
oms_config.set(SECTION_OMS_GLOBAL, OPTION_HYBRID_WORKER_PATH, HYBRID_WORKER_START_PATH)
oms_config.set(SECTION_OMS_GLOBAL, OPTION_WORKSPACE_ID, workspace_id)
if not oms_config.has_option(SECTION_OMS_GLOBAL, OPTION_DISABLE_WORKER_CREATION):
oms_config.set(SECTION_OMS_GLOBAL, OPTION_DISABLE_WORKER_CREATION, "False")
oms_config_fp = open(OMS_CONF_FILE_PATH, 'wb')
oms_config.write(oms_config_fp)
oms_config_fp.close()
def is_oms_primary_workspace(workspace_id):
"""
    Detect if the passed workspace id is the primary workspace on a multi-homing enabled OMS agent
    Multi-homing for OMS is 2-tiered: one primary and multiple secondary workspaces are allowed
Currently, the Automation Worker should only run on primary workspace
A primary OMS workspace is pointed to by the symbolic links of old style (single homing) paths like
/etc/opt/microsoft/omsagent/ the actual location of which is /etc/opt/microsoft/omsagent/<workspace id>.
A sufficient test for checking whether a given workspace id belongs to a primary is to compare it against the
workspace id found in the oms config file in the old style path
:return: True, if the given workspace id belongs to the primary OMS workspace, False otherwise
"""
if not os.path.exists(OMS_PRIMARY_WORKSPACE_CONF_DIR):
log(INFO, "Primary workspace conf directory not found")
return False
oms_workspace_id, agent_id = get_workspaceid_agentid_from_oms_config()
if oms_workspace_id == workspace_id:
return True
else:
return False
def read_omsconfig_file():
if os.path.isfile(OMS_ADMIN_CONFIG_FILE):
# the above path always points to the oms configuration file of the primary workspace
keyvals = config_file_to_kv_pair(OMS_ADMIN_CONFIG_FILE)
if os.path.isfile(OMS_AGENTID_FILE):
# OMS_AGENTID_FILE is a new addition to the omsagent. If the file is not present, the agentid is supposed to be present in the OMS_ADMIN_CONFIG_FILE
agentid_file = open(OMS_AGENTID_FILE, "r")
agent_id = agentid_file.read().strip()
agentid_file.close()
keyvals[OPTION_AGENT_ID] = agent_id
return keyvals
else:
error_string = "could not find file " + OMS_ADMIN_CONFIG_FILE
log(DEBUG, error_string)
raise ConfigParser.Error(error_string)
def get_azure_resource_id_from_oms_config():
keyvals = read_omsconfig_file()
try:
return keyvals[OPTION_AZURE_RESOURCE_ID].strip()
except KeyError:
log(DEBUG, "Azure resource id was not specified in omsadmin config file")
return ""
def get_workspaceid_agentid_from_oms_config():
# Reads the oms config file
    # Returns: the workspace id and agent id config values
keyvals = read_omsconfig_file()
try:
return keyvals[OPTION_OMS_WORKSPACE_ID].strip(), keyvals[OPTION_AGENT_ID].strip()
except KeyError, exception:
log(DEBUG, str(exception))
raise ConfigParser.Error(str(exception))
def config_file_to_kv_pair(filename):
# gets key value pairs from files with similar format to omsadmin.conf
retval = dict()
f = open(filename, "r")
contents = f.read()
f.close()
lines = contents.splitlines()
for line in lines:
# Find first '='; everything before is key, everything after is value
midpoint = line.find("=")
if (midpoint == 0 or midpoint == -1):
# Skip over lines without = or lines that begin with =
continue
key = line[:midpoint]
value = line[midpoint + 1:]
retval[key] = value
return retval
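# Illustrative sketch (assumption, not part of the original resource): for a file containing
# omsadmin.conf style lines such as
#   WORKSPACE_ID=11111111-2222-3333-4444-555555555555
#   AGENT_GUID=aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee
# config_file_to_kv_pair returns a dict mapping each key to everything after the first '=',
# e.g. {"WORKSPACE_ID": "11111111-2222-3333-4444-555555555555", ...}; lines without '=' or
# starting with '=' are skipped, and any later '=' characters stay inside the value.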
def start_worker_manager_process(workspace_id):
"""
Start the worker_manager_process
:param workspace_id:
:return: the pid of the worker manager process
"""
proc = subprocess.Popen(["sudo", "-u", AUTOMATION_USER, "python", WORKER_MANAGER_START_PATH, OMS_CONF_FILE_PATH,
WORKSPACE_ID_PREFIX + workspace_id, get_module_version()])
for i in range(0, 5):
time.sleep(3)
pid = get_worker_manager_pid_and_version(workspace_id, throw_error_on_multiple_found=False)[0]
if pid > 0:
# if the pid is greater than 0
return pid
# Failure path
os.kill(proc.pid, signal.SIGTERM)
return -1
def is_any_1_4_process_running(processes, workspace_id):
for ps in processes:
if ps:
version = ps.split(" ")[-1]
if WORKER_MANAGER_START_PATH in ps and workspace_id in ps and version == "1.4":
return True
return False
def get_worker_manager_pid_and_version(workspace_id, throw_error_on_multiple_found=True):
"""
Returns the PID of the worker manager
:return: pid of the worker manager, -1 if it isn't running
"""
processes = get_nxautomation_ps_output()
manager_processes_found = 0
pid = -1
version = "0.0"
for process_line in processes:
if process_line:
process_line = str(process_line)
# make sure process_line is not null or empty
split_line = process_line.split(" ")
args = " ".join(split_line[1:])
if WORKER_MANAGER_START_PATH in args and workspace_id in args:
pid = int(split_line[0])
version = split_line[-1]
manager_processes_found += 1
if throw_error_on_multiple_found and manager_processes_found > 1:
raise AssertionError("More than one manager processes found")
if pid == -1:
log(INFO, "Failed to detect instance of worker manager")
return pid, version
class Filter:
    workspace_id = ""
    def __init__(self, workspace_id):
        self.workspace_id = workspace_id
    def detect_stray_workspace(self, ps_string):
        uuid_pattern = re.compile("[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}")
        return uuid_pattern.findall(ps_string.lower()) and self.workspace_id not in ps_string
def get_nxautomation_ps_output():
if 'COLUMNS' in os.environ:
os.environ['COLUMNS'] = "3000"
proc = subprocess.Popen(["ps", "-u", AUTOMATION_USER, "-o", "pid=", "-o", "args="], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
command, error = proc.communicate()
if proc.returncode != 0 or error:
log(INFO, "Failed to read nxautomation user processes")
return []
command = command.strip()
if command:
processes = [x.strip() for x in command.split('\n')]
else:
processes = []
return processes
def is_worker_manager_running_latest_version(workspace_id):
try:
pid, running_version = get_worker_manager_pid_and_version(workspace_id)
except AssertionError:
# more than one manager processes were found
return False
available_version = get_module_version()
log(DEBUG, "running version is: " + running_version)
log(DEBUG, "latest available version is: " + available_version)
return pid > 0 and running_version == available_version
def kill_stray_processes(workspace_id):
processes = get_nxautomation_ps_output()
for wrkspc_id in get_stray_worker_and_manager_wsids(processes, workspace_id):
kill_process_by_pattern_string(wrkspc_id)
def get_stray_worker_and_manager_wsids(processes, workspace_id):
"""
    Gets the workspace ids of the workers and managers that are running in the context of another workspace
    :param processes: ps output lines for processes owned by the nxautomation user
    :param workspace_id: workspace id of the current primary workspace
    :return: set of workspace ids that differ from workspace_id
"""
uuid_pattern = re.compile("rworkspace:([a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12})")
all_workspaces = [uuid_pattern.search(x).group(1) for x in processes if uuid_pattern.findall(x.lower())]
return set(all_workspaces).difference([workspace_id])
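# Illustrative note (assumption, not part of the original resource): each ps line for the
# nxautomation user embeds the owning workspace as "rworkspace:<workspace id>", e.g.
#   "123 python .../main.py /var/.../oms.conf rworkspace:11111111-2222-3333-4444-555555555555 1.6"
# get_stray_worker_and_manager_wsids extracts every such workspace id and returns the set of
# ids that differ from the current workspace, i.e. the workspaces whose processes are stray.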
def kill_worker_manager(workspace_id):
""" Worker manger process if it exists
Exceptions:
throws exception if process was running and could not be killed
"""
pattern_match_string = "python\s.*main\.py.*%s%s\s" % (WORKSPACE_ID_PREFIX, workspace_id)
retval = kill_process_by_pattern_string(pattern_match_string)
# can't depend on the return value to ensure that the process was killed since it pattern matches
pid, version = get_worker_manager_pid_and_version(workspace_id)
if pid > 0:
# worker was not killed
raise OSError("Could not kill worker manager process")
if retval == 0:
log(DEBUG, "Processes for worker manager were terminated successfully")
else:
log(DEBUG, "No worker manager processes to be killed")
def kill_process_by_pattern_string(pattern_match_string):
result, retcode = run_pgrep_command(pattern_match_string)
if retcode == 0:
log(DEBUG, "The following worker processes will be terminated: %s" % result)
else:
log(DEBUG, "No process to terminate")
# the above code is for logging only, we don't use its output to determine which process to kill
return subprocess.call(["sudo", "pkill", "-u", AUTOMATION_USER, "-f", pattern_match_string])
def kill_any_worker_running_as_omsagent(worker_pgrep_pattern):
proc = subprocess.Popen(["pgrep", "-u", OMSAGENT_USER, "-f", worker_pgrep_pattern], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result, error = proc.communicate()
result = str(result)
result = result.replace('\n', ' ')
if proc.returncode == 0:
log(DEBUG, "The following old worker processes will be terminated: %s" % result)
else:
log(DEBUG, "No old worker process to terminate")
# the above code is for logging only, we don't use its output to determine which process to kill
subprocess.call(["pkill", "-u", OMSAGENT_USER,"-f", worker_pgrep_pattern])
def run_pgrep_command(pattern_match_string):
proc = subprocess.Popen(["pgrep", "-u", AUTOMATION_USER, "-f", pattern_match_string], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result, error = proc.communicate()
result = str(result)
result = result.replace('\n', ' ')
return result, proc.returncode
def get_module_version():
"""
Gets the version of the installed nxOMSAutomationWorker module
:return: str: module version number
"""
version_file_handle = open(DSC_RESOURCE_VERSION_FILE, 'r')
version = version_file_handle.read().strip()
version_file_handle.close()
return version
def nxautomation_user_exists():
"""
Tests if the user nxautomation exists on the machine
Newer OMS agent installs will have that user
:return: True if user "nxautomation" exists on the system, False otherwise
"""
try:
pwd.getpwnam(AUTOMATION_USER)
except KeyError:
# if the user was not found
log(INFO, "%s was NOT found on the system" % (AUTOMATION_USER))
return False
log(INFO, "%s was found on the system" % (AUTOMATION_USER))
return True
def config_file_to_kv_pair(filename):
# gets key value pairs from files with similar format to omsadmin.conf
retval = dict()
f = open(filename, "r")
contents = f.read()
f.close()
lines = contents.splitlines()
for line in lines:
# Find first '='; everything before is key, everything after is value
midpoint = line.find("=")
if (midpoint == 0 or midpoint == -1):
# Skip over lines without = or lines that begin with =
continue
key = line[:midpoint]
value = line[midpoint + 1:]
retval[key] = value
return retval
def log(level, message):
try:
LG().Log(logging._levelNames[level], message)
except:
pass
if LOG_LOCALLY:
try:
log_local(level, message)
except:
pass
def log_local(level, message):
log_fh = open(LOCAL_LOG_LOCATION, 'ab')
log_fh.write("%s: %s" % (logging._levelNames[level], message))
log_fh.close()
| 42.757419
| 174
| 0.694782
|
1fd983344b926d97b22f37713540dd123d203ff4
| 476
|
py
|
Python
|
ex033.py
|
gabrielwai/exercicios_de_Python
|
3767775748db7c501a6e0364edf7ba4f079e62f9
|
[
"MIT"
] | null | null | null |
ex033.py
|
gabrielwai/exercicios_de_Python
|
3767775748db7c501a6e0364edf7ba4f079e62f9
|
[
"MIT"
] | null | null | null |
ex033.py
|
gabrielwai/exercicios_de_Python
|
3767775748db7c501a6e0364edf7ba4f079e62f9
|
[
"MIT"
] | null | null | null |
n1 = int(input('Digite um número inteiro qualquer: '))
n2 = int(input('Digite um outro número inteiro qualquer: '))
n3 = int(input('Digite outro número inteiro qualquer: '))
if n1 > n2:
if n1 > n3:
maior = n1
if n2 > n3:
menor = n3
else:
menor = n2
else:
maior = n3
menor = n2
else:
if n2 > n3:
maior = n2
if n1 > n3:
menor = n3
else:
| 19.04
| 61
| 0.462185
|
cf40d31617e617908a1ca9b1912e79c31916e506
| 29,221
|
py
|
Python
|
src/tools/walker.py
|
Hiestaa/my-tornado-media-library
|
6decb97ad02d0ee1613c53dbb1729474e2ea9b42
|
[
"MIT"
] | 1
|
2019-09-14T20:46:23.000Z
|
2019-09-14T20:46:23.000Z
|
src/tools/walker.py
|
Hiestaa/my-tornado-media-library
|
6decb97ad02d0ee1613c53dbb1729474e2ea9b42
|
[
"MIT"
] | null | null | null |
src/tools/walker.py
|
Hiestaa/my-tornado-media-library
|
6decb97ad02d0ee1613c53dbb1729474e2ea9b42
|
[
"MIT"
] | 1
|
2021-08-24T03:20:46.000Z
|
2021-08-24T03:20:46.000Z
|
# -*- coding: utf8 -*-
from __future__ import unicode_literals
import os
import subprocess
import time
import logging
from threading import Thread, Event
from tqdm import tqdm
import re
import cv2
from PIL import Image
from tools.utils import extends, timeFormat
from tools.analyzer.analyzers import AlbumAnalyzer
from server import model
from conf import Conf
FFMPEG_CMDS = {
'generateSnapshots': '{ffmpegpath} -i "{videoPath}" -f image2 -vf fps=fps={frameRate} -s {ssw}x{ssh} "{snapFolder}\\thumb%03d.png"',
'extractDuration': '{ffprobe} -i "{videoPath}" -show_entries format=duration -v quiet -of csv="p=0"',
'extractFps': '{ffprobe} -i "{videoPath}" -v 0 -of csv=p=0 -select_streams 0 -show_entries stream=r_frame_rate',
'extractDimensions': '{ffprobe} -i "{videoPath}" -v 0 -of csv=p=0 -select_streams 0 -show_entries stream=width,height'
}
class Walker(Thread):
"""
    This object is dedicated to walking through all the files
    and performing some action on them
"""
def __init__(self, progress=None, progressCb=None, async=True):
"""
        Initialize a new walker that will recursively run through
the files of the data folders and perform actions on it.
If `async` is set to True (default), the walker tasks
will be performed on a separate thread
The progress dict will be populated with 4 fields:
`file`: the name of the current file being processed
`step`: the processing step currently applied to this file
`dones`: number of files processed
`fileList`: list of files that have been processed. Each file is represented by an object with the fields:
`fileName`, `success` and `error` (error message if success is false)
        `duration`: time spent on the process
`finished`: False unless the whole walking process is finished.
`interrupted`: False unless the walking process has been interrupted.
`errorred`: False unless an error happened somewhere along the walking process
The progress dict will be passed in to `progressCb` after each update.
"""
super(Walker, self).__init__()
logging.info("Initializing %s walker"
                     % ('new asynchronous' if async else 'new'))
self._progress = progress or {}
self._progressCb = progressCb
self._async = async
self._start_t = time.time()
self._tags = []
self._stop_event = Event()
def start(self):
if self._async:
logging.info("Starting walker process asynchroneously")
super(Walker, self).start()
else:
logging.info("Starting walker process")
self.run()
def stop(self):
self._stop_event.set()
def resubscribe(self, progressCb):
self._progressCb = progressCb
def _stopped(self):
return self._stop_event.is_set()
def _send_progress(self):
if self._progressCb:
self._progressCb(self._progress)
def run(self):
try:
self._run()
except Exception as e:
logging.error("An error occurred during the walking process")
logging.exception(e)
self._progress = self._progress or {}
self._progress['errorred'] = True
self._send_progress()
def _run(self):
# reinit progress informations
self._start_t = time.time()
self._progress = extends(
self._progress or {}, file='', step='Initializing', dones=0,
duration=0.0, finished=False, interrupted=False, errorred=False, fileList=[])
self._progress['fileList'] = []
self._progress['file'] = ''
self._progress['step'] = 'Initializing'
self._progress['dones'] = 0
self._progress['duration'] = 0
self._progress['finished'] = False
self._progress['interrupted'] = False
self._send_progress()
self._tags = model.getService('tag').getAutoTags()
self.walk(
Conf['data']['videos']['rootFolder'],
[(self.__vid_exists, 'Checking existence'),
(self.__generate_snapshots, 'Generating snapshots'),
(self.__extract_vid_infos, 'Extracting informations'),
(self.__save_vid, 'Saving video in database'),
(self.__autotag_vid, 'Auto-tagging video'),
# self.__generate_minivid,
(self.__update_video_progress, 'Updating progress')],
Conf['data']['videos']['allowedTypes']
)
self.walk(
Conf['data']['albums']['rootFolder'],
[(self.__find_album, 'Looking for related album'),
(self.__picture_exists, 'Checking existence'),
(self.__update_album_infos, 'Retrieving image informations'),
(self.__save_album, 'Saving or updating album in database'),
(self.__autotag_album, 'Auto-tagging album'),
(self.__update_album_progress, 'Updating progress')],
Conf['data']['albums']['allowedTypes']
)
# self.__fix_albums_dimensions()
self.__albums_analysis()
self._progress['duration'] = time.time() - self._start_t
self._progress['finished'] = True
self._send_progress()
def __find_album(self, imgPath, data):
"""
Find the album related to this picture.
        Create an 'album' entry in the data dict containing the
name of this album.
"""
album = os.path.basename(os.path.abspath(os.path.join(imgPath, os.pardir)))
logging.debug("Album of img: %s is %s" % (os.path.basename(imgPath), album))
return extends(data, album=album)
def __picture_exists(self, imgPath, data):
"""
Check if the album already holds the current image.
Create a 'picture_exist' and an 'album_exist' entry
in the data dict.
Will also create the album_id entry containing the id of the
album document if it does exist.
"""
logging.debug("Checking existence of the image.")
logging.debug(">> data: %s" % str(data))
self._progress['file'] = data['album']
albumPath = os.path.dirname(imgPath).replace(
Conf['data']['albums']['rootFolder'], '')
found = model.getService('album').getByRealName(data['album'])
if found is None:
found = model.getService('album').getByPath(albumPath + os.path.sep)
if found is None:
data = extends(data, album_exist=False, picture_exist=False, album_id=None)
elif any(os.path.basename(imgPath) == pic['filename'] for pic in found['picturesDetails']):
data = extends(data, album_exist=True, picture_exist=True, album_id=found['_id'])
else:
data = extends(data, album_exist=True, picture_exist=False, album_id=found['_id'])
return data
def __update_album_infos(self, imgPath, data):
"""
        Open the image to check the resolution, set or update the
average resolution of the album as well as the picsNumber.
If the picture does not exist yet, create the fields
'picsNumber', 'averageWidth' and 'averageHeight' in the data dict.
"""
logging.debug("Setting or Updating album infos")
logging.debug(">> data: %s" % str(data))
if data['album_exist'] and data['picture_exist']:
return data
try:
f = Image.open(imgPath)
w, h = f.size
except:
return extends(data, error="Unable to open image %s" % os.path.basename(imgPath))
if data['album_exist']:
found = model.getService('album').getByRealName(data['album'])
avgW = float(found['averageWidth'])
            avgH = float(found['averageHeight'])
nb = found['picsNumber']
data = extends(
data,
width=w,
height=h,
averageWidth=((avgW * nb + w) / (nb + 1)),
averageHeight=((avgH * nb + h) / (nb + 1)))
else:
data = extends(
data,
width=w,
height=h,
averageWidth=w,
averageHeight=h)
return data
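    # Illustrative note (not part of the original walker): the album averages above are a running
    # mean, new_average = (old_average * nb + new_value) / (nb + 1). For example an averageWidth
    # of 1000 over 3 pictures plus a new 1200 px wide picture gives (1000 * 3 + 1200) / 4 = 1050.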
def __save_album(self, imgPath, data):
"""
Insert or update the document matching the album of the current picture
in the album collection.
FIXME: do we manage subfolders ?
"""
logging.debug("Updating albums collection.")
logging.debug(">> data: %s" % str(data))
if data['album_exist'] and data['picture_exist']:
return data
if 'error' in data and data['error']:
return data
if data['album_exist']:
model.getService('album').set(
_id=data['album_id'], field='averageWidth', value=data['averageWidth'])
model.getService('album').set(
_id=data['album_id'], field='averageHeight', value=data['averageHeight'])
model.getService('album').addPicture(data['album_id'], os.path.basename(imgPath), data['width'], data['height'])
else:
_id = model.getService('album').insert(
album=data['album'], fullPath=os.path.dirname(imgPath), picturesDetails=[{
'filename': os.path.basename(imgPath),
'width': data['width'],
'height': data['height'],
'analyzerVersion': None,
'starred': False,
'display': 0
}],
averageWidth=data['averageWidth'], averageHeight=data['averageHeight'])
data = extends(data, inserted_id=_id)
return data
def __autotag_album(self, imgPath, data):
logging.debug("Auto-tagging album")
logging.debug(">> data: %s" % str(data))
# do only tag if the album did not exist yet
if data['album_exist'] or not data['inserted_id']:
return data
tagged = [];
for tag in self._tags:
if re.search(tag['autotag'], imgPath, flags=re.I):
logging.debug(
"ImgPath: %s matches autotag: %s for tag: %s - %s"
% (imgPath, tag['autotag'], tag['name'], tag['value']))
tagged.append(tag)
model.getService('album').addTag(data['inserted_id'], tag['_id'])
else:
logging.debug(
"ImgPath: %s does NOT match autotag: %s"
% (imgPath, tag['autotag']))
if len(tagged) > 0:
data['msg'] = 'Tagged as: ' + ', '.join(
map(lambda t: t['name'].title() + ' - ' + t['value'].title(), tagged))
return extends(data, tagged=tagged)
def __update_album_progress(self, imgPath, data):
logging.debug("Updating progress.")
# if the album already existed, ignore it
if not data['album_exist']:
self._progress['dones'] += 1
if 'error' in data and data['error']:
fileObj = {'fileName': data['album'], 'success': False, 'error': data['error']}
elif 'msg' in data and data['msg']:
fileObj = {'fileName': data['album'], 'success': True, 'error': data['msg']}
else:
fileObj = {'fileName': data['album'], 'success': True, 'error': None}
if 'inserted_id' in data:
fileObj['link'] = '/slideshow/albumId=' + data['inserted_id']
fileObj['id'] = data['inserted_id']
fileObj['snapshot'] = '/download/album/' + data['inserted_id'] + '/0'
self._progress['fileList'].append(fileObj)
return data
def __fix_albums_dimensions(self):
albums = model.getService('album').getAll()
start_t = time.time()
logging.info("Verifying %d existing albums", len(albums))
toDelete = []
c = 0
for album in tqdm(albums, desc="[Albums"):
if album['fullPath'] in ['starred', 'random']:
continue
for picIdx, pic in enumerate(tqdm(album['picturesDetails'], desc="[Pictures")):
c += 1
imgPath = Conf['data']['albums']['rootFolder'] + album['fullPath'] + pic['filename']
try:
f = Image.open(imgPath)
w, h = f.size
model.getService('album').setPictureDim(album['_id'], picIdx, w, h)
except:
logging.error("Unable to open file: %s", imgPath)
toDelete.append((album['_id'], picIdx))
logging.info("Verified %d pictures in %s. %d Staged for delete.",
c, timeFormat(time.time() - start_t), len(toDelete))
def __albums_analysis(self):
albums = model.getService('album').getUnanalyzedAlbums(AlbumAnalyzer.__version__)
logging.info("Preparing dataset for face detection (%d albums to process)", len(albums))
start_t = time.time()
self._progress['step'] = 'Missing Album Face Detection'
self._progress['file'] = Conf['data']['albums']['rootFolder']
self._send_progress()
totalFaces = 0
progressBar = tqdm(total=len(albums), desc='[Albums Analysis')
for album in albums:
progressBar.set_description('[Analysing album %s' % album['name'])
progressBar.update()
pictures = album['picturesDetails']
# filter the list of pictures down to only the ones we manage to open with opencv
# (these would fail during analysis anyway so better filter them out now)
imgPaths = []
retainedPictures = []
for pic in tqdm(pictures, desc="[Pre-Filtering "):
filename = Conf['data']['albums']['rootFolder'] + pic['path']
try:
# image = cv2.imread(filename)
# if image is None:
# raise IOError('Unable to read image: %s' % filename)
retainedPictures.append(pic)
imgPaths.append(filename)
except Exception as e:
logging.error("Unable to open image `%s' with opencv2." % (pic['path']))
logging.exception(e)
# model.getService('album').removePicture(pic['albumId'], pic['pictureIdx'])
# called whenever an image is processed
def progress(idx, annotation, render=False):
filepath = imgPaths[idx]
self._progress['file'] = filepath
if not render:
return
COLORS = [
(0, 255, 0),
(0, 0, 255),
(255, 0, 0),
(255, 255, 0),
(255, 0, 255),
(0, 255, 255),
(0, 0, 0),
(255, 255, 255)
]
title = retainedPictures[idx]['path']
image = cv2.imread(filepath)
for fidx, face in enumerate(annotation['faces']):
x = face['boundaries'][0]['x']
y = face['boundaries'][0]['y']
x2 = face['boundaries'][1]['x']
y2 = face['boundaries'][1]['y']
cv2.rectangle(image, (x, y), (x2, y2), COLORS[fidx % len(COLORS)], 5)
for landmark in face.get('landmarks', []):
x = landmark['x']
y = landmark['y']
cv2.circle(image, (x, y), 5, COLORS[fidx % len(COLORS)], 5)
small = cv2.resize(image, (0,0), fx=0.2, fy=0.2)
cv2.imshow(title, small)
cv2.waitKey(0)
# perform the analysis
logging.debug("Performing missing face detection for %d pictures", len(retainedPictures))
analyzer = AlbumAnalyzer(
imgPaths=imgPaths, annotator=Conf['data']['albums']['annotator'], progress=progress)
res = analyzer()
albumsResults = []
for idx, annotation in enumerate(res):
albumsResults.append(res)
totalFaces += len(annotation['faces'])
progress(idx, annotation)
model.getService('album').setPictureAnalysis(
retainedPictures[idx]['albumId'], retainedPictures[idx]['pictureIdx'],
AlbumAnalyzer.__version__)
progressBar.close()
logging.info("Saved %d detected faces in %s!", totalFaces, timeFormat(time.time() - start_t))
def __vid_exists(self, videoPath, data):
"""
check that the video exist, create the field
'exist' in the data dict and set it to True or False
"""
logging.debug("Checking existence of the video")
logging.debug(">> data: %s" % str(data))
videoPath = videoPath.replace('/', os.path.sep)
videoPath = videoPath.replace('\\', os.path.sep)
found = model.getService('video').getByPath(videoPath)
if found is not None:
logging.debug("Video does alread exist!")
data = extends(data, exists=True)
else:
logging.debug("Video does not exist!")
data = extends(data, exists=False)
return data
def __generate_snapshots(self, videoPath, data):
"""
This will use ffmpeg to create a snapshot of the video.
"""
# do not rerun the snapshot creation process if data already exists
if data['exists']:
return data
logging.debug("Generating snapshots of video")
logging.debug(">> Data: %s" % str(data))
spec = {
'ffmpegpath': Conf['data']['ffmpeg']['exePath'],
'videoPath': videoPath,
'ssw': Conf['data']['ffmpeg']['snapshotDimensions'][0], # width
'ssh': Conf['data']['ffmpeg']['snapshotDimensions'][1], # height
'snapFolder': '.'.join(videoPath.split('.')[:-1]), # same except trailing extension
'frameRate': Conf['data']['ffmpeg']['frameRate']
}
return_code = 0
# actual generation
try:
if not os.path.exists(spec['snapFolder']):
os.makedirs(spec['snapFolder'])
nbCreatedSnapshots = len(os.listdir(spec['snapFolder']))
if nbCreatedSnapshots == 0:
command = FFMPEG_CMDS['generateSnapshots'].format(**spec)
logging.info("> %s", command)
return_code = subprocess.call(command, shell=True)
nbCreatedSnapshots = len(os.listdir(spec['snapFolder']))
else:
data = extends(data, msg="Snapshots found, generation not needed.")
except Exception as e:
logging.warning("Unable to generate snapshots: %s." % repr(e).encode())
return_code = 1
# verifications
if not os.path.exists(spec['snapFolder']) or nbCreatedSnapshots == 0:
return extends(data, snapshotsError=True)
if return_code == 0:
snapFolder = spec['snapFolder'][len(Conf['data']['videos']['rootFolder']):]
return extends(data, snapshotsFolder=spec['snapFolder'], snapshotsError=False, nbCreatedSnapshots=nbCreatedSnapshots)
else:
return extends(data, snapshotsError=True)
def __ffmpeg_get_duration(self, videoPath):
command = FFMPEG_CMDS['extractDuration'].format(**{
'ffprobe': Conf['data']['ffmpeg']['probePath'],
'videoPath': videoPath
})
logging.debug("> %s" % command)
res = subprocess.Popen(command, stdout=subprocess.PIPE).communicate()[0].strip()
logging.debug("[OUT]: %s" % res)
return float(res)
def __ffmpeg_get_fps(self, videoPath):
command = FFMPEG_CMDS['extractFps'].format(**{
'ffprobe': Conf['data']['ffmpeg']['probePath'],
'videoPath': videoPath
})
logging.debug("> %s" % command)
res = subprocess.Popen(command, stdout=subprocess.PIPE).communicate()[0].strip()
logging.debug("[OUT]: %s" % res)
res = res.split(b'/')
        if res == [b'0', b'0']:
return 24 # assumes 24
return (float(res[0]) or 24) / (float(res[1]) or 1)
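    # Illustrative note (assumption): ffprobe reports r_frame_rate as a fraction such as
    # b"30000/1001"; the division above turns it into 30000 / 1001 ~ 29.97 fps, and a degenerate
    # b"0/0" report falls back to the assumed 24 fps.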
def __ffmpeg_get_dimensions(self, videoPath):
command = FFMPEG_CMDS['extractDimensions'].format(**{
'ffprobe': Conf['data']['ffmpeg']['probePath'],
'videoPath': videoPath
})
logging.debug("> %s" % command)
res = subprocess.Popen(command, stdout=subprocess.PIPE).communicate()[0].strip()
logging.debug("[OUT]: %s" % res)
res = res.split(b',')
if len(res) == 0 or len(res) == 1:
return 1920, 1080
return int(res[0]), int(res[1])
def __extract_vid_infos(self, videoPath, data):
def error(data, msg):
logging.warning(msg)
return extends(data, cvError=True, cvErrorMessage=msg)
if data['exists'] or data['snapshotsError']:
return data
logging.debug("Extracting informations from video")
logging.debug(">> Data: %s" % str(data))
try:
fps = self.__ffmpeg_get_fps(videoPath)
duration = self.__ffmpeg_get_duration(videoPath)
length = duration * fps
w, h = self.__ffmpeg_get_dimensions(videoPath)
except Exception as e:
logging.exception(e)
return error(data, "Unable to extract video details")
if length == 0:
return error(data, "Unable to find video duration")
if w == 0:
return error(data, "Unable to find video width")
if h == 0:
return error(data, "Unable to find video height")
if fps == 0:
return error(data, "Unable to find video fps")
return extends(
data, videoDuration=duration, videoResolution=(w, h),
videoFPS=fps, cvError=False, fileSize=os.path.getsize(videoPath))
def __save_vid(self, videoPath, data):
# ignore videos that resulted in a snapshot error or that were already existing
        # also ignore if an error occurred while opening the video using openCV
# unless the insertOnCVError configuration value is set to True
if data['exists'] or data['snapshotsError'] or (
data['cvError'] and not Conf['data']['videos']['insertOnCVError']):
return extends(data, inserted=False)
logging.debug("Saving video")
logging.debug(">> Data: %s" % str(data))
_id = model.getService('video').insert(
filename=videoPath.split(os.path.sep)[-1],
path=videoPath, fileSize=data['fileSize'],
description='', snapshotsFolder=data['snapshotsFolder'],
display=0, seen=0, favorite=0,
duration=data['videoDuration'], resolution=data['videoResolution'],
fps=data['videoFPS'], tags=[],
nbSnapshots=len([
name for name in os.listdir(data['snapshotsFolder'])
if os.path.isfile(os.path.join(
data['snapshotsFolder'], name))])
)
return extends(data, inserted=True, inserted_id=_id)
def __autotag_vid(self, videoPath, data):
logging.debug("Auto-tagging video")
logging.debug(">> data: %s" % str(data))
# do only tag if the album did not exist yet
if data['exists'] or not data['inserted']:
return data
tagged = [];
for tag in self._tags:
if re.search(tag['autotag'], videoPath, flags=re.I):
logging.debug(
"VideoPath: %s matches autotag: %s for tag: %s - %s"
% (videoPath, tag['autotag'], tag['name'], tag['value']))
tagged.append(tag)
model.getService('video').addTag(data['inserted_id'], tag['_id'])
else:
logging.debug(
"videoPath: %s does NOT match autotag: %s"
% (videoPath, tag['autotag']))
if len(tagged) > 0:
data['msg'] = 'Tagged as: ' + ', '.join(
map(lambda t: t['name'].title() + ' - ' + t['value'].title(), tagged))
return extends(data, tagged=tagged)
def __update_video_progress(self, videoPath, data):
logging.debug("Updating progress.")
# if the video already existed, ignore it
if not data['exists']:
self._progress['dones'] += 1
if data['snapshotsError']:
fileObj = {'fileName': os.path.basename(videoPath), 'success': False, 'error': 'Snapshot creation failure.'}
elif data['cvError']:
fileObj = {'fileName': os.path.basename(videoPath), 'success': False, 'error': 'OpenCV failure: %s' % data['cvErrorMessage']}
elif not data['inserted']:
fileObj = {'fileName': os.path.basename(videoPath), 'success': False, 'error': 'Unable to insert video in database.'}
elif 'msg' in data and data['msg']:
fileObj = {'fileName': os.path.basename(videoPath), 'success': True, 'error': data['msg']}
else:
fileObj = {'fileName': os.path.basename(videoPath), 'success': True, 'error': None}
if 'inserted_id' in data:
if 'nbCreatedSnapshots' in data and data['nbCreatedSnapshots'] > 1:
snapshot = int(data['nbCreatedSnapshots'] / 2)
else:
snapshot = 0
fileObj['link'] = '/videoplayer/videoId=' + data['inserted_id']
fileObj['id'] = data['inserted_id']
fileObj['snapshot'] = '/download/snapshot/' + data['inserted_id'] + '/' + str(snapshot)
self._progress['fileList'].append(fileObj)
return data
def _interrupt(self):
"""
Called on the walker thread when the process is actually interrupting
(as soon as possible after the stop flag is set)
Set the interrupted progress status and log the info.
"""
self._progress['interrupted'] = True
self._progress['finished'] = True
logging.info("Walker thread interrupted.")
def walk(self, root, steps, types=None):
"""
This will call the given steps on any file contained in the given
folder or its subfolders, each step should be a (callback, description) tuple.
        types can be specified to call the callback only for the files with
one of the given extensions. This is expected to be a list of strings.
The prototype of the steps is expected to be:
`function (videoPath, data)` where `videoPath` is the path of the
current video, and data is the data returned by the previous callback
for this video (or an empty dict for the first one.)
"""
logging.info("Starting walking process from folder: %s" % root)
if self._stopped():
return self._interrupt()
folders = [os.path.join(root, file) for file in os.listdir(root)
if os.path.isdir(os.path.join(root, file))]
progressBar = tqdm(total=len(folders),
desc='[Walking')
for dirpath, dirnames, filenames in os.walk(root):
if dirpath in folders:
progressBar.set_description('[Walking: %s' % dirpath)
progressBar.update()
dirpath = dirpath.replace('\\', os.path.sep)
dirpath = dirpath.replace('/', os.path.sep)
for f in filenames:
if types is None or f.split('.')[-1] in types:
filepath = os.path.join(dirpath, f)
logging.info("Processing: %s" % filepath)
self._progress['file'] = f
res = {}
for cb, description in steps:
self._progress['duration'] = time.time() - self._start_t
self._progress['step'] = description
if self._stopped():
return self._interrupt()
try:
self._send_progress()
res = cb(filepath, res)
except Exception as e:
logging.error("Error occurred during step %s", str(cb))
logging.error(repr(e))
logging.exception(e)
self._progress['fileList'].append({
'fileName': f,
'success': False,
'error': "Error while executing step %s: %s" % (str(cb), repr(e))
})
self._send_progress()
break
progressBar.close()
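# Illustrative sketch (assumption, not part of the original file): a minimal custom step matching
# the (callback, description) contract expected by walk(); each callback receives the current file
# path and the data dict produced by the previous step, and returns an (optionally extended) dict.
# `some_root` below is a placeholder.
#
#     def print_size(path, data):
#         return extends(data, size=os.path.getsize(path))
#
#     Walker().walk(some_root, [(print_size, 'Reading file size')], types=['mp4'])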
| 43.483631
| 141
| 0.555012
|
03efcb8e1fbaed59dfab62cc5f97c1ef4ed44644
| 4,916
|
py
|
Python
|
app/user/tests/test_user_api.py
|
juancarestre/recipe-app-api2
|
7f93a2a01ebe811cba84526f0c1202dca7800b7a
|
[
"MIT"
] | null | null | null |
app/user/tests/test_user_api.py
|
juancarestre/recipe-app-api2
|
7f93a2a01ebe811cba84526f0c1202dca7800b7a
|
[
"MIT"
] | null | null | null |
app/user/tests/test_user_api.py
|
juancarestre/recipe-app-api2
|
7f93a2a01ebe811cba84526f0c1202dca7800b7a
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
"""Helper function to create new user"""
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
"""Test creating using with a valid payload is successful"""
payload = {
'email': 'test@londonappdev.com',
'password': 'testpass',
'name': 'name',
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""Test creating a user that already exists fails"""
payload = {'email': 'test@londonappdev.com', 'password': 'testpass'}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test that password must be more than 5 characters"""
payload = {'email': 'test@londonappdev.com', 'password': 'pw'}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that a token is created for the user"""
payload = {'email': 'test@londonappdev.com', 'password': 'testpass'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
"""Test that token is not created if invalid credentials are given"""
create_user(email='test@londonappdev.com', password='testpass')
payload = {'email': 'test@londonappdev.com', 'password': 'wrong'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test that token is not created if user doens't exist"""
payload = {'email': 'test@londonappdev.com', 'password': 'testpass'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test that email and password are required"""
res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
"""Test that authentication required for users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
"""Test API requests that require authentication"""
def setUp(self):
self.user = create_user(
email='test@londonappdev.com',
password='testpass',
name='fname',
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email,
})
def test_post_me_not_allowed(self):
"""Test that POST is not allowed on the me URL"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""Test updating the user profile for authenticated user"""
payload = {'name': 'new name', 'password': 'newpassword123'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
| 36.414815
| 77
| 0.663141
|
f5396855bc3629550f48ea07a3a45c91f0aa7cc6
| 4,385
|
py
|
Python
|
train_wavernn.py
|
Dacrol/WaveRNN-server
|
5189829cec71938ff7ec2e3eb59e73af1382430a
|
[
"MIT"
] | null | null | null |
train_wavernn.py
|
Dacrol/WaveRNN-server
|
5189829cec71938ff7ec2e3eb59e73af1382430a
|
[
"MIT"
] | null | null | null |
train_wavernn.py
|
Dacrol/WaveRNN-server
|
5189829cec71938ff7ec2e3eb59e73af1382430a
|
[
"MIT"
] | null | null | null |
import time
import numpy as np
from torch import optim
import torch.nn.functional as F
from utils.display import stream, simple_table
from utils.dataset import get_vocoder_datasets
from utils.distribution import discretized_mix_logistic_loss
import hparams as hp
from models.fatchord_wavernn import Model
from gen_wavernn import gen_testset
from utils.paths import Paths
import argparse
def voc_train_loop(model, loss_func, optimiser, train_set, test_set, lr, total_steps):
for p in optimiser.param_groups: p['lr'] = lr
total_iters = len(train_set)
epochs = (total_steps - model.get_step()) // total_iters + 1
for e in range(1, epochs + 1):
start = time.time()
running_loss = 0.
for i, (x, y, m) in enumerate(train_set, 1):
x, m, y = x.cuda(), m.cuda(), y.cuda()
y_hat = model(x, m)
if model.mode == 'RAW' :
y_hat = y_hat.transpose(1, 2).unsqueeze(-1)
elif model.mode == 'MOL' :
y = y.float()
y = y.unsqueeze(-1)
loss = loss_func(y_hat, y)
optimiser.zero_grad()
loss.backward()
optimiser.step()
running_loss += loss.item()
speed = i / (time.time() - start)
avg_loss = running_loss / i
step = model.get_step()
k = step // 1000
if step % hp.voc_checkpoint_every == 0 :
gen_testset(model, test_set, hp.voc_gen_at_checkpoint, hp.voc_gen_batched,
hp.voc_target, hp.voc_overlap, paths.voc_output)
model.checkpoint(paths.voc_checkpoints)
msg = f'| Epoch: {e}/{epochs} ({i}/{total_iters}) | Loss: {avg_loss:.4f} | {speed:.1f} steps/s | Step: {k}k | '
stream(msg)
model.save(paths.voc_latest_weights)
model.log(paths.voc_log, msg)
print(' ')
if __name__ == "__main__" :
# Parse Arguments
parser = argparse.ArgumentParser(description='Train WaveRNN Vocoder')
parser.add_argument('--lr', '-l', type=float, help='[float] override hparams.py learning rate')
parser.add_argument('--batch_size', '-b', type=int, help='[int] override hparams.py batch size')
parser.add_argument('--force_train', '-f', action='store_true', help='Forces the model to train past total steps')
parser.add_argument('--gta', '-g', action='store_true', help='train wavernn on GTA features')
parser.set_defaults(lr=hp.voc_lr)
parser.set_defaults(batch_size=hp.voc_batch_size)
args = parser.parse_args()
batch_size = args.batch_size
force_train = args.force_train
train_gta = args.gta
lr = args.lr
print('\nInitialising Model...\n')
# Instantiate WaveRNN Model
voc_model = Model(rnn_dims=hp.voc_rnn_dims,
fc_dims=hp.voc_fc_dims,
bits=hp.bits,
pad=hp.voc_pad,
upsample_factors=hp.voc_upsample_factors,
feat_dims=hp.num_mels,
compute_dims=hp.voc_compute_dims,
res_out_dims=hp.voc_res_out_dims,
res_blocks=hp.voc_res_blocks,
hop_length=hp.hop_length,
sample_rate=hp.sample_rate,
mode=hp.voc_mode).cuda()
# Check to make sure the hop length is correctly factorised
assert np.cumprod(hp.voc_upsample_factors)[-1] == hp.hop_length
paths = Paths(hp.data_path, hp.voc_model_id, hp.tts_model_id)
voc_model.restore(paths.voc_latest_weights)
optimiser = optim.Adam(voc_model.parameters())
train_set, test_set = get_vocoder_datasets(paths.data, batch_size, train_gta)
total_steps = 10_000_000 if force_train else hp.voc_total_steps
simple_table([('Remaining', str((total_steps - voc_model.get_step())//1000) + 'k Steps'),
('Batch Size', batch_size),
('LR', lr),
('Sequence Len', hp.voc_seq_len),
('GTA Train', train_gta)])
loss_func = F.cross_entropy if voc_model.mode == 'RAW' else discretized_mix_logistic_loss
voc_train_loop(voc_model, loss_func, optimiser, train_set, test_set, lr, total_steps)
print('Training Complete.')
print('To continue training increase voc_total_steps in hparams.py or use --force_train')
| 35.08
| 123
| 0.619156
|
6e55940e537ad857a3c59e8bbe583d4976c9ba4f
| 92
|
py
|
Python
|
python/p006.py
|
RUiNtheExtinct/project-euler
|
5c3e64c7dfcbf52d5213df88d2310550f4ee9ce1
|
[
"MIT"
] | null | null | null |
python/p006.py
|
RUiNtheExtinct/project-euler
|
5c3e64c7dfcbf52d5213df88d2310550f4ee9ce1
|
[
"MIT"
] | null | null | null |
python/p006.py
|
RUiNtheExtinct/project-euler
|
5c3e64c7dfcbf52d5213df88d2310550f4ee9ce1
|
[
"MIT"
] | null | null | null |
t, ans = list(range(1, 101)), 0
for i in t:
ans += i * i
print((sum(t) * sum(t)) - ans)
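# Closed-form check (illustrative addition, not part of the original exercise):
# sum(1..100) = 100 * 101 / 2 = 5050, so the square of the sum is 5050 ** 2 = 25502500;
# the sum of squares is 100 * 101 * 201 / 6 = 338350; the difference is 25502500 - 338350 = 25164150.
assert (sum(t) * sum(t)) - ans == 25164150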
| 18.4
| 31
| 0.5
|
e50412399ebdb3fa3c69418665848519a49b6ee1
| 847
|
py
|
Python
|
Detailed_generator.py
|
anod6351/CARtool
|
9bb6b23438990e3593b4e9d4a68f1e20ab700961
|
[
"MIT"
] | 1
|
2021-02-24T13:16:33.000Z
|
2021-02-24T13:16:33.000Z
|
Detailed_generator.py
|
anod6351/CARtool
|
9bb6b23438990e3593b4e9d4a68f1e20ab700961
|
[
"MIT"
] | 1
|
2021-05-17T08:42:31.000Z
|
2021-05-17T08:42:31.000Z
|
Detailed_generator.py
|
anod6351/CARtool
|
9bb6b23438990e3593b4e9d4a68f1e20ab700961
|
[
"MIT"
] | 3
|
2019-03-19T13:07:58.000Z
|
2021-05-03T06:32:40.000Z
|
def detail_samtools(Regions, Read_depth):
    # create a detailed list with all depth values from the same region grouped in a sub list,
    # based on samtools depth calculations
    # samtools generates a depth file with: chr, position and coverage depth value
    # Regions comes from the bed file with chr, start, stop, region name
detailed =[]
list_temp=[]
previous_chr = Read_depth[0][0]
Region_row = 0
count=0
index = 0
size_list = len(Read_depth)
for line in Read_depth:
Region_row = Regions[index]
if str(line[0]) == str(previous_chr) and (int(line[1])) <= int(Region_row[2]):
list_temp.append(line[2])
else:
previous_chr=line[0]
detailed.append(list_temp)
list_temp=[]
list_temp.append(line[2])
index+=1
count+=1
if count == size_list:
detailed.append(list_temp)
return detailed
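# Illustrative usage sketch (assumption, not part of the original tool); the region and depth
# values below are made up purely to show the grouping behaviour.
if __name__ == "__main__":
    sample_regions = [("chr1", 1, 3, "regA"), ("chr2", 1, 1, "regB")]
    sample_depths = [("chr1", 1, 10), ("chr1", 2, 12), ("chr1", 3, 11), ("chr2", 1, 7)]
    # chr1 positions 1-3 end up grouped in one sub list, chr2 starts a new one
    print(detail_samtools(sample_regions, sample_depths))  # expected: [[10, 12, 11], [7]]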
| 24.2
| 117
| 0.683589
|
985e4ebc83481a4694a7a63125e88864f783d347
| 2,105
|
py
|
Python
|
neutron/tests/unit/plugins/ml2/extensions/fake_extension.py
|
glove747/liberty-neutron
|
35a4c85e781d10da4521565c3a367e4ecb50739d
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/plugins/ml2/extensions/fake_extension.py
|
glove747/liberty-neutron
|
35a4c85e781d10da4521565c3a367e4ecb50739d
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/plugins/ml2/extensions/fake_extension.py
|
glove747/liberty-neutron
|
35a4c85e781d10da4521565c3a367e4ecb50739d
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
EXTENDED_ATTRIBUTES_2_0 = {
'networks': {
'network_extension': {'allow_post': True,
'allow_put': True,
'default': attr.ATTR_NOT_SPECIFIED,
'is_visible': True,
'enforce_policy': True},
},
'subnets': {
'subnet_extension': {'allow_post': True,
'allow_put': True,
'default': attr.ATTR_NOT_SPECIFIED,
'is_visible': True,
'enforce_policy': True},
},
'ports': {
'port_extension': {'allow_post': True,
'allow_put': True,
'default': attr.ATTR_NOT_SPECIFIED,
'is_visible': True,
'enforce_policy': True},
},
}
class Fake_extension(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "ML2 fake extension"
@classmethod
def get_alias(cls):
return "fake_extension"
@classmethod
def get_description(cls):
return _("Adds test attributes to core resources.")
@classmethod
def get_updated(cls):
return "2014-07-16T10:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| 32.384615
| 78
| 0.565321
|
c96817167494bf057cc5a8c52a01a7229ed85ed2
| 1,324
|
py
|
Python
|
examples/basics/scene/sphere.py
|
ghisvail/vispy
|
39d4a81db6d84f813bd23e76ff3d61bd4e6bf46f
|
[
"BSD-3-Clause"
] | 2
|
2020-11-27T10:51:56.000Z
|
2020-12-28T20:39:14.000Z
|
examples/basics/scene/sphere.py
|
ghisvail/vispy
|
39d4a81db6d84f813bd23e76ff3d61bd4e6bf46f
|
[
"BSD-3-Clause"
] | 2
|
2015-11-04T19:43:29.000Z
|
2015-11-19T04:26:29.000Z
|
examples/basics/scene/sphere.py
|
ghisvail/vispy
|
39d4a81db6d84f813bd23e76ff3d61bd4e6bf46f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
This example demonstrates how to create a sphere.
"""
import sys
from vispy import scene
from vispy.visuals.transforms import STTransform
canvas = scene.SceneCanvas(keys='interactive', bgcolor='white',
size=(800, 600), show=True)
view = canvas.central_widget.add_view()
view.camera = 'arcball'
sphere1 = scene.visuals.Sphere(radius=1, method='latitude', parent=view.scene,
edge_color='black')
sphere2 = scene.visuals.Sphere(radius=1, method='ico', parent=view.scene,
edge_color='black')
sphere3 = scene.visuals.Sphere(radius=1, rows=10, cols=10, depth=10,
method='cube', parent=view.scene,
edge_color='black')
sphere1.transform = STTransform(translate=[-2.5, 0, 0])
sphere3.transform = STTransform(translate=[2.5, 0, 0])
view.camera.set_range(x=[-3, 3])
if __name__ == '__main__' and sys.flags.interactive == 0:
canvas.app.run()
| 33.948718
| 79
| 0.558157
|
07abee793dbb6b4c90ec28d3196c3478f40d5159
| 479
|
py
|
Python
|
python/UVA/846_steps.py
|
gineer01/programming-challenges
|
9f0bbaab5b85423b5671ee3cfc2d0fd62cea4cc7
|
[
"MIT"
] | null | null | null |
python/UVA/846_steps.py
|
gineer01/programming-challenges
|
9f0bbaab5b85423b5671ee3cfc2d0fd62cea4cc7
|
[
"MIT"
] | null | null | null |
python/UVA/846_steps.py
|
gineer01/programming-challenges
|
9f0bbaab5b85423b5671ee3cfc2d0fd62cea4cc7
|
[
"MIT"
] | null | null | null |
import sys
import math
def solve(x, y):
t = y - x
k = math.floor(math.sqrt(t + .25) - 0.5)
g = t - (k * (k + 1))
if g == 0:
return 2 * k
elif g <= k + 1:
return 2 * k + 1
else:
return 2 * k + 2
assert solve(0, 1) == 1
assert solve(0, 2) == 2
assert solve(0, 3) == 3
assert solve(0, 4) == 3
assert solve(0, 5) == 4
n = int(next(sys.stdin))
for i in range(n):
x, y = map(int, next(sys.stdin).split())
print(solve(x, y))
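# Illustrative note on the formula (not part of the original solution): with t = y - x,
# k = floor(sqrt(t + 0.25) - 0.5) is the largest integer with k * (k + 1) <= t; the step
# sequence 1, 2, ..., k, k, ..., 2, 1 covers exactly k * (k + 1) in 2k steps, a remainder
# g of at most k + 1 can be absorbed with one extra step, and anything larger needs two.
# Worked example: solve(0, 5) gives t = 5, k = 1, g = 3 > k + 1, hence 2 * 1 + 2 = 4 steps,
# matching the assert above.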
| 17.740741
| 44
| 0.492693
|
f1027427596f011dd913ad49edb571a92b278a2a
| 6,949
|
py
|
Python
|
pydash/services.py
|
hqpr/django-pydash
|
75ee81a45a279da3726fbe224d51c7691fac8397
|
[
"MIT"
] | null | null | null |
pydash/services.py
|
hqpr/django-pydash
|
75ee81a45a279da3726fbe224d51c7691fac8397
|
[
"MIT"
] | null | null | null |
pydash/services.py
|
hqpr/django-pydash
|
75ee81a45a279da3726fbe224d51c7691fac8397
|
[
"MIT"
] | null | null | null |
import os
import platform
import multiprocessing
from datetime import timedelta
def chunks(get, n):
return [get[i:i + n] for i in range(0, len(get), n)]
def get_uptime():
"""
Get uptime
"""
try:
with open('/proc/uptime', 'r') as f:
uptime_seconds = float(f.readline().split()[0])
uptime_time = str(timedelta(seconds=uptime_seconds))
data = uptime_time.split('.', 1)[0]
except Exception as err:
data = str(err)
return data
def get_ipaddress():
"""
Get the IP Address
"""
data = []
try:
eth = os.popen("ip addr | grep LOWER_UP | awk '{print $2}'")
iface = eth.read().strip().replace(':', '').split('\n')
eth.close()
del iface[0]
for i in iface:
pipe = os.popen(
"ip addr show " + i + "| awk '{if ($2 == \"forever\"){!$2} else {print $2}}'")
data1 = pipe.read().strip().split('\n')
pipe.close()
if len(data1) == 2:
data1.append('unavailable')
if len(data1) == 3:
data1.append('unavailable')
data1[0] = i
data.append(data1)
ips = {'interface': iface, 'itfip': data}
data = ips
except Exception as err:
data = str(err)
return data
def get_cpus():
"""
Get the number of CPUs and model/type
"""
try:
pipe = os.popen("cat /proc/cpuinfo |" + "grep 'model name'")
data = pipe.read().strip().split(':')[-1]
pipe.close()
if not data:
pipe = os.popen("cat /proc/cpuinfo |" + "grep 'Processor'")
data = pipe.read().strip().split(':')[-1]
pipe.close()
cpus = multiprocessing.cpu_count()
data = {'cpus': cpus, 'type': data}
except Exception as err:
data = str(err)
return data
def get_users():
"""
Get the current logged in users
"""
try:
pipe = os.popen("who |" + "awk '{print $1, $2, $6}'")
data = pipe.read().strip().split('\n')
pipe.close()
if data == [""]:
data = None
else:
data = [i.split(None, 3) for i in data]
except Exception as err:
data = str(err)
return data
def get_traffic(request):
"""
Get the traffic for the specified interface
"""
try:
pipe = os.popen(
"cat /proc/net/dev |" + "grep " + request + "| awk '{print $1, $9}'")
data = pipe.read().strip().split(':', 1)[-1]
pipe.close()
if not data[0].isdigit():
pipe = os.popen(
"cat /proc/net/dev |" + "grep " + request + "| awk '{print $2, $10}'")
data = pipe.read().strip().split(':', 1)[-1]
pipe.close()
data = data.split()
traffic_in = int(data[0])
traffic_out = int(data[1])
all_traffic = {'traffic_in': traffic_in, 'traffic_out': traffic_out}
data = all_traffic
except Exception as err:
data = str(err)
return data
def get_platform():
"""
Get the OS name, hostname and kernel
"""
try:
osname = " ".join(platform.linux_distribution())
uname = platform.uname()
        if not osname.strip():
osname = uname[0]
data = {'osname': osname, 'hostname': uname[1], 'kernel': uname[2]}
except Exception as err:
data = str(err)
return data
def get_disk():
"""
Get disk usage
"""
try:
pipe = os.popen(
"df -Ph | " + "grep -v Filesystem | " + "awk '{print $1, $2, $3, $4, $5, $6}'")
data = pipe.read().strip().split('\n')
pipe.close()
data = [i.split(None, 6) for i in data]
except Exception as err:
data = str(err)
return data
def get_disk_rw():
"""
Get the disk reads and writes
"""
try:
pipe = os.popen(
"cat /proc/partitions | grep -v 'major' | awk '{print $4}'")
data = pipe.read().strip().split('\n')
pipe.close()
rws = []
for i in data:
if i.isalpha():
pipe = os.popen(
"cat /proc/diskstats | grep -w '" + i + "'|awk '{print $4, $8}'")
rw = pipe.read().strip().split()
pipe.close()
rws.append([i, rw[0], rw[1]])
if not rws:
pipe = os.popen(
"cat /proc/diskstats | grep -w '" + data[0] + "'|awk '{print $4, $8}'")
rw = pipe.read().strip().split()
pipe.close()
rws.append([data[0], rw[0], rw[1]])
data = rws
except Exception as err:
data = str(err)
return data
def get_mem():
"""
Get memory usage
"""
try:
pipe = os.popen(
"free -tmo | " + "grep 'Mem' | " + "awk '{print $2,$4,$6,$7}'")
data = pipe.read().strip().split()
pipe.close()
allmem = int(data[0])
freemem = int(data[1])
buffers = int(data[2])
cachedmem = int(data[3])
# Memory in buffers + cached is actually available, so we count it
# as free. See http://www.linuxatemyram.com/ for details
freemem += buffers + cachedmem
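        # e.g. with 8000 MB total, 500 MB free, 300 MB buffers and 3200 MB
        # cached: freemem becomes 4000 MB, usage 4000 MB and percent 50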
percent = (100 - ((freemem * 100) / allmem))
usage = (allmem - freemem)
mem_usage = {'usage': usage, 'buffers': buffers, 'cached': cachedmem, 'free': freemem, 'percent': percent}
data = mem_usage
except Exception as err:
data = str(err)
return data
def get_cpu_usage():
"""
Get the CPU usage and running processes
"""
try:
pipe = os.popen("ps aux --sort -%cpu,-rss")
data = pipe.read().strip().split('\n')
pipe.close()
usage = [i.split(None, 10) for i in data]
del usage[0]
total_usage = []
for element in usage:
usage_cpu = element[2]
total_usage.append(usage_cpu)
total_usage = sum(float(i) for i in total_usage)
total_free = ((100 * int(get_cpus()['cpus'])) - float(total_usage))
cpu_used = {'free': total_free, 'used':
float(total_usage), 'all': usage}
data = cpu_used
except Exception as err:
data = str(err)
return data
def get_load():
"""
Get load average
"""
try:
data = os.getloadavg()[0]
except Exception as err:
data = str(err)
return data
def get_netstat():
"""
Get ports and applications
"""
try:
pipe = os.popen(
"ss -tnp | grep ESTAB | awk '{print $4, $5}'| sed 's/::ffff://g' | awk -F: '{print $1, $2}' "
"| awk 'NF > 0' | sort -n | uniq -c")
data = pipe.read().strip().split('\n')
pipe.close()
data = [i.split(None, 4) for i in data]
except Exception as err:
data = str(err)
return data
| 23.009934
| 114
| 0.491582
|
b87d4834982fd4bba7c458adb2dfa967a3e45663
| 9,657
|
py
|
Python
|
contrib/bitrpc/bitrpc.py
|
BlockchainFor/ESPB2
|
559a2cdc6aca001266c6721b2e48d542769a3a6d
|
[
"MIT"
] | 24
|
2019-01-26T01:52:06.000Z
|
2020-07-22T14:43:05.000Z
|
contrib/bitrpc/bitrpc.py
|
BlockchainFor/ESPB2
|
559a2cdc6aca001266c6721b2e48d542769a3a6d
|
[
"MIT"
] | 6
|
2019-04-26T17:24:37.000Z
|
2021-04-24T07:26:46.000Z
|
contrib/bitrpc/bitrpc.py
|
BlockchainFor/ESPB2
|
559a2cdc6aca001266c6721b2e48d542769a3a6d
|
[
"MIT"
] | 27
|
2018-11-16T11:07:08.000Z
|
2021-06-22T01:25:53.000Z
|
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
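# Usage: python bitrpc.py <command>  (e.g. "python bitrpc.py getinfo");
# the script then prompts interactively for any arguments the command needs.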
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:32322")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:32322")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| 28.571006
| 101
| 0.573988
|
24b62ba315b7894aafe42dd918342229efae548e
| 7,241
|
py
|
Python
|
indy_node/test/catchup/test_requests_post_new_node_catchup.py
|
johadahl/indy-node
|
7edd4621a0212a5befa0eecf6a56c96cb2a64eae
|
[
"Apache-2.0"
] | null | null | null |
indy_node/test/catchup/test_requests_post_new_node_catchup.py
|
johadahl/indy-node
|
7edd4621a0212a5befa0eecf6a56c96cb2a64eae
|
[
"Apache-2.0"
] | null | null | null |
indy_node/test/catchup/test_requests_post_new_node_catchup.py
|
johadahl/indy-node
|
7edd4621a0212a5befa0eecf6a56c96cb2a64eae
|
[
"Apache-2.0"
] | null | null | null |
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.types import f
from plenum.common.util import randomString
from plenum.test.helper import assertLength
from plenum.test.node_catchup.helper import checkNodeDataForEquality, \
waitNodeDataEquality
from plenum.test.test_node import ensure_node_disconnected, checkNodesConnected
from indy_client.test.client.TestClient import TestClient
from indy_client.test.helper import getClientAddedWithRole
from indy_common.constants import TRUST_ANCHOR
from indy_node.test.conftest import nodeThetaAdded
from indy_node.test.helper import TestNode, addRawAttribute, getAttribute
from indy_common.config_helper import NodeConfigHelper
def test_new_node_catchup_update_projection(looper, tdirWithClientPoolTxns,
tdirWithDomainTxnsUpdated,
nodeSet, tconf, tdir,
trustee, trusteeWallet,
allPluginsPath,
some_transactions_done
):
"""
    A node which receives txns through catchup updates both its ledger and its
    projection (state). Four nodes start up and some txns happen; after those
    txns are done, a new node joins and starts catching up. The node should not
    process requests while catchup is in progress. Make sure the new requests
    come from the new NYMs added while the node was offline or catching up.
"""
# Create a new node and stop it.
new_steward, new_steward_wallet, new_node = nodeThetaAdded(looper,
nodeSet,
tdirWithClientPoolTxns,
tconf, trustee,
trusteeWallet,
allPluginsPath,
TestNode,
TestClient,
NodeConfigHelper,
tdir)
waitNodeDataEquality(looper, new_node, *nodeSet[:-1])
ta_count = 2
np_count = 2
new_txn_count = 2 * ta_count + np_count # Since ATTRIB txn is done for TA
old_ledger_sizes = {}
new_ledger_sizes = {}
old_projection_sizes = {}
new_projection_sizes = {}
old_seq_no_map_sizes = {}
new_seq_no_map_sizes = {}
def get_ledger_size(node):
return len(node.domainLedger)
def get_projection_size(node):
domain_state = node.getState(DOMAIN_LEDGER_ID)
return len(domain_state.as_dict)
def get_seq_no_map_size(node):
return node.seqNoDB.size
def fill_counters(ls, ps, ss, nodes):
for n in nodes:
ls[n.name] = get_ledger_size(n)
ps[n.name] = get_projection_size(n)
ss[n.name] = get_seq_no_map_size(n)
def check_sizes(nodes):
for node in nodes:
assert new_ledger_sizes[node.name] - \
old_ledger_sizes[node.name] == new_txn_count
assert new_projection_sizes[node.name] - \
old_projection_sizes[node.name] == new_txn_count
assert new_seq_no_map_sizes[node.name] - \
old_seq_no_map_sizes[node.name] == new_txn_count
# Stop a node and note down the sizes of ledger and projection (state)
other_nodes = nodeSet[:-1]
fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
other_nodes)
new_node.cleanupOnStopping = False
new_node.stop()
looper.removeProdable(new_node)
ensure_node_disconnected(looper, new_node, other_nodes)
trust_anchors = []
attributes = []
for i in range(ta_count):
trust_anchors.append(
getClientAddedWithRole(
other_nodes,
tdirWithClientPoolTxns,
looper,
trustee,
trusteeWallet,
'TA' + str(i),
role=TRUST_ANCHOR,
client_connects_to=len(other_nodes)))
attributes.append((randomString(6), randomString(10)))
addRawAttribute(looper, *trust_anchors[-1], *attributes[-1],
dest=trust_anchors[-1][1].defaultId)
non_privileged = []
for i in range(np_count):
non_privileged.append(
getClientAddedWithRole(
other_nodes,
tdirWithClientPoolTxns,
looper,
trustee,
trusteeWallet,
'NP' + str(i),
client_connects_to=len(other_nodes)))
checkNodeDataForEquality(nodeSet[0], *other_nodes)
fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
other_nodes)
# The size difference should be same as number of new NYM txns
check_sizes(other_nodes)
config_helper = NodeConfigHelper(new_node.name, tconf, chroot=tdir)
new_node = TestNode(
new_node.name,
config_helper=config_helper,
config=tconf,
pluginPaths=allPluginsPath,
ha=new_node.nodestack.ha,
cliha=new_node.clientstack.ha)
looper.add(new_node)
nodeSet[-1] = new_node
fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
[new_node])
looper.run(checkNodesConnected(nodeSet))
waitNodeDataEquality(looper, new_node, *other_nodes)
fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
[new_node])
check_sizes([new_node])
for i, (tc, tw) in enumerate(trust_anchors):
# To prevent sending of 'get_attr' to just one node
tc._read_only_requests = set()
reply = getAttribute(looper, tc, tw, tw.defaultId, *attributes[i])
all_replies = tc.getRepliesFromAllNodes(reply[f.IDENTIFIER.nm],
reply[f.REQ_ID.nm])
assertLength(all_replies, len(nodeSet))
assert new_node.clientstack.name in all_replies
# Set the old counters to be current ledger and projection size
fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
nodeSet)
more_nyms_count = 2
for tc, tw in trust_anchors:
for i in range(more_nyms_count):
non_privileged.append(getClientAddedWithRole(other_nodes,
tdirWithClientPoolTxns,
looper,
tc, tw,
'NP1' + str(i)))
    # The new node should process transactions done by NYMs added to its
    # ledger while it was catching up
fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
nodeSet)
new_txn_count = more_nyms_count * len(trust_anchors)
check_sizes(nodeSet)
| 42.846154
| 86
| 0.58445
|
656148b230f43f18c2b46c83a7571846402f689a
| 2,425
|
py
|
Python
|
ui/service/job_service.py
|
ctwardy/sitehound
|
0f928a82f761e3d0335d1d4d01f6105b726fd889
|
[
"Apache-2.0"
] | null | null | null |
ui/service/job_service.py
|
ctwardy/sitehound
|
0f928a82f761e3d0335d1d4d01f6105b726fd889
|
[
"Apache-2.0"
] | null | null | null |
ui/service/job_service.py
|
ctwardy/sitehound
|
0f928a82f761e3d0335d1d4d01f6105b726fd889
|
[
"Apache-2.0"
] | 1
|
2018-10-02T22:03:23.000Z
|
2018-10-02T22:03:23.000Z
|
import time
import pymongo
from bson import ObjectId
from ui.singleton import Singleton
__author__ = 'tomas'
#################### services #########################
def get_jobs_by_workspace(workspace_id):
# result = {}
# result['job'] = get_jobs_by_workspace_dao(workspace_id)
# result['tasks'] = get_tasks_by_job(jobId);
# return result
return get_jobs_by_workspace_dao(workspace_id)
def get_job(jobId):
result = {}
    result['job'] = get_job_dao(jobId)
    result['tasks'] = get_tasks_by_job(jobId)
return result
# def save_job(num_to_fetch, broad_crawler_provider, broad_crawler_sources, crawl_type, job_id, workspace_name, workspace_id):
def save_job(workspace_id, num_to_fetch, broad_crawler_provider, broad_crawler_sources, crawl_type):
job = {}
job["crawlType"] = crawl_type
job["nResultsRequested"] = num_to_fetch
job["provider"] = broad_crawler_provider
job["sources"] = broad_crawler_sources
job['timestamp'] = time.time()
job['strTimestamp'] = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
job["workspaceId"] = workspace_id
job["status"] = "QUEUED"
# _id = Singleton.getInstance().mongo_instance.get_crawl_job_collection().insert(job)
collection = Singleton.getInstance().mongo_instance.get_crawl_job_collection()
_id = collection.insert(job)
return str(_id)
def cancel_job(job_id):
collection = Singleton.getInstance().mongo_instance.get_crawl_job_collection()
operation = {'$set': {"status": "CANCELLED"}}
collection.update({"_id": ObjectId(job_id)}, operation)
################# DAO #################################
def get_jobs_by_workspace_dao(workspace_id):
docs = Singleton.getInstance().mongo_instance.get_crawl_job_collection()\
.find({'workspaceId': workspace_id})\
.sort('_id', pymongo.DESCENDING)
return list(docs)
def get_job_dao(job_id):
return Singleton.getInstance().mongo_instance.get_crawl_job_collection().find_one({'jobId': job_id})
def get_tasks_by_job(job_id):
return Singleton.getInstance().mongo_instance.get_crawl_task_collection().find_one({'jobId': job_id})
#
# def get_jobs_by_workspace(workspaceId):
# return Singleton.getInstance().mongo_instance.get_crawl_job_collection().find({'jobId': jobId})
| 32.77027
| 126
| 0.710103
|
17f2a87b42d4b9f9ea313269b225f980b8b068ed
| 6,099
|
py
|
Python
|
k8sclient/models/v1beta1_network_policy_ingress_rule.py
|
beli-sk/k8sclient
|
3a6102405ee1a65933e328298964f282329fb1f0
|
[
"Apache-2.0"
] | null | null | null |
k8sclient/models/v1beta1_network_policy_ingress_rule.py
|
beli-sk/k8sclient
|
3a6102405ee1a65933e328298964f282329fb1f0
|
[
"Apache-2.0"
] | null | null | null |
k8sclient/models/v1beta1_network_policy_ingress_rule.py
|
beli-sk/k8sclient
|
3a6102405ee1a65933e328298964f282329fb1f0
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
No descripton provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: extensions/v1beta1
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1NetworkPolicyIngressRule(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, ports=None, _from=None):
"""
V1beta1NetworkPolicyIngressRule - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'ports': 'list[V1beta1NetworkPolicyPort]',
'_from': 'list[V1beta1NetworkPolicyPeer]'
}
self.attribute_map = {
'ports': 'ports',
'_from': 'from'
}
self._ports = ports
self.__from = _from
@property
def ports(self):
"""
Gets the ports of this V1beta1NetworkPolicyIngressRule.
List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is not provided, this rule matches all ports (traffic not restricted by port). If this field is empty, this rule matches no ports (no traffic matches). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.
:return: The ports of this V1beta1NetworkPolicyIngressRule.
:rtype: list[V1beta1NetworkPolicyPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""
Sets the ports of this V1beta1NetworkPolicyIngressRule.
List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is not provided, this rule matches all ports (traffic not restricted by port). If this field is empty, this rule matches no ports (no traffic matches). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.
:param ports: The ports of this V1beta1NetworkPolicyIngressRule.
:type: list[V1beta1NetworkPolicyPort]
"""
self._ports = ports
@property
def _from(self):
"""
Gets the _from of this V1beta1NetworkPolicyIngressRule.
List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is not provided, this rule matches all sources (traffic not restricted by source). If this field is empty, this rule matches no sources (no traffic matches). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.
:return: The _from of this V1beta1NetworkPolicyIngressRule.
:rtype: list[V1beta1NetworkPolicyPeer]
"""
return self.__from
@_from.setter
def _from(self, _from):
"""
Sets the _from of this V1beta1NetworkPolicyIngressRule.
List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is not provided, this rule matches all sources (traffic not restricted by source). If this field is empty, this rule matches no sources (no traffic matches). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.
:param _from: The _from of this V1beta1NetworkPolicyIngressRule.
:type: list[V1beta1NetworkPolicyPeer]
"""
self.__from = _from
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
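# Illustrative usage (assuming the companion generated models
# V1beta1NetworkPolicyPort and V1beta1NetworkPolicyPeer from this package):
#
#     rule = V1beta1NetworkPolicyIngressRule(
#         ports=[V1beta1NetworkPolicyPort(protocol='TCP', port=80)],
#         _from=[V1beta1NetworkPolicyPeer()])
#     rule.to_dict()   # -> {'ports': [...], '_from': [...]}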
| 40.125
| 466
| 0.644696
|
ba5c8938ee3f488402634de20f09ad381bb15ad9
| 5,648
|
py
|
Python
|
keras2onnx/ke2onnx/gru.py
|
ashaazami/keras-onnx
|
0e66937886c8256bdde366b9ac5dc67b68c9f56e
|
[
"MIT"
] | 1
|
2020-03-02T10:35:45.000Z
|
2020-03-02T10:35:45.000Z
|
keras2onnx/ke2onnx/gru.py
|
HOZHENWAI/keras-onnx
|
4a220193a3e2d6eb3bcb76dcf3be39a4b1f84f09
|
[
"MIT"
] | null | null | null |
keras2onnx/ke2onnx/gru.py
|
HOZHENWAI/keras-onnx
|
4a220193a3e2d6eb3bcb76dcf3be39a4b1f84f09
|
[
"MIT"
] | null | null | null |
###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import numpy as np
from ..proto import onnx_proto
from ..common.onnx_ops import apply_reshape, apply_transpose, OnnxOperatorBuilder
from .common import extract_recurrent_activation
def convert_keras_gru(scope, operator, container):
op = operator.raw_operator
hidden_size = op.units
input_shape = op.get_input_shape_at(0)
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_size = input_shape[-1]
output_seq = op.return_sequences
output_state = op.return_state
reverse_input = op.go_backwards
attrs = {}
gru_input_names = []
gru_x_name = scope.get_unique_variable_name('gru_x')
apply_transpose(scope, operator.inputs[0].full_name, gru_x_name, container, perm=[1, 0, 2])
gru_input_names.append(gru_x_name)
tensor_w_name = scope.get_unique_variable_name('tensor_w')
W = op.get_weights()[0].T
container.add_initializer(tensor_w_name, onnx_proto.TensorProto.FLOAT,
[1, 3 * hidden_size, input_size], W.flatten())
gru_input_names.append(tensor_w_name)
tensor_r_name = scope.get_unique_variable_name('tensor_r')
R = op.get_weights()[1].T
container.add_initializer(tensor_r_name, onnx_proto.TensorProto.FLOAT,
[1, 3 * hidden_size, hidden_size], R.flatten())
gru_input_names.append(tensor_r_name)
B = op.get_weights()[2]
if op.use_bias and len(B) > 0:
tensor_b_name = scope.get_unique_variable_name('tensor_b')
if B.size == 3 * hidden_size:
B = np.concatenate([B, np.zeros(3 * hidden_size)])
container.add_initializer(tensor_b_name, onnx_proto.TensorProto.FLOAT, [1, 6 * hidden_size], B.flatten())
gru_input_names.append(tensor_b_name)
else:
gru_input_names.append('')
# sequence lens
uses_masking_layer = len(operator.input_masks) == 1
if uses_masking_layer:
# Mask using sequence_lens input
sequence_lengths = scope.get_unique_variable_name(operator.full_name + '_seq_lens')
gru_input_names.append(sequence_lengths)
else:
gru_input_names.append('')
    # initial_h
if len(operator.inputs) == 1:
gru_input_names.append('')
else:
# Add a reshape after initial_h, 2d -> 3d
input_reshape_name = scope.get_unique_variable_name('input_reshape')
apply_reshape(scope, operator.inputs[1].full_name, input_reshape_name, container,
desired_shape=[1, -1, hidden_size])
gru_input_names.append(input_reshape_name)
activation_types = []
alphas = []
betas = []
for (activation_type, alpha, beta) in \
[extract_recurrent_activation(op.recurrent_activation), extract_recurrent_activation(op.activation)]:
activation_types.append(activation_type.encode('utf-8'))
if alpha is not None:
alphas.append(alpha)
if beta is not None:
betas.append(beta)
attrs['activations'] = activation_types
if alphas:
attrs['activation_alpha'] = alphas
if betas:
attrs['activation_beta'] = betas
# Set up other attributes
attrs['direction'] = 'reverse' if reverse_input else 'forward'
attrs['hidden_size'] = hidden_size
# We use the collected information to build ONNX's GRU. ONNX GRU's outputs will be saved onto two intermediate
# tensors and we will adjust them subsequently to mimic Keras output format.
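    # Note on shapes: ONNX GRU emits Y with shape
    # [seq_length, num_directions, batch_size, hidden_size] and Y_h with shape
    # [num_directions, batch_size, hidden_size], whereas Keras expects
    # [batch_size, seq_length, hidden_size] (or [batch_size, hidden_size] when
    # return_sequences is False), hence the transposes/reshapes below.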
gru_y_name = scope.get_unique_variable_name('gru_y')
gru_h_name = scope.get_unique_variable_name('gru_h')
gru_output_names = [gru_y_name, gru_h_name]
oopb = OnnxOperatorBuilder(container, scope)
if uses_masking_layer:
mask_cast = oopb.apply_cast(operator.input_masks[0].full_name, to=oopb.int32, name=operator.full_name + '_mask_cast')
oopb.add_node_with_output('ReduceSum', mask_cast, sequence_lengths, keepdims=False, axes=[-1], name=operator.full_name + '_mask_sum')
oopb.apply_op_with_output('apply_gru',
gru_input_names,
gru_output_names,
name=operator.raw_operator.name,
output_seq=output_seq,
reset_after=op.reset_after,
**attrs)
# Create output-adjusting operators
if output_seq:
intermediate_result_name = scope.get_unique_variable_name('intermediate_result')
perm = [1, 0, 2] if container.target_opset <= 5 else [2, 0, 1, 3]
apply_transpose(scope, gru_y_name, intermediate_result_name, container, perm=perm)
apply_reshape(scope, intermediate_result_name, operator.outputs[0].full_name, container,
desired_shape=[-1, 0, hidden_size])
else:
# Here we ignore ONNX GRU's first output because it's useless.
intermediate_result_name = scope.get_unique_variable_name('intermediate_result')
apply_transpose(scope, gru_h_name, intermediate_result_name, container, perm=[1, 0, 2])
apply_reshape(scope, intermediate_result_name, operator.outputs[0].full_name, container,
desired_shape=[-1, hidden_size])
if output_state:
apply_reshape(scope, gru_h_name, operator.outputs[1].full_name, container, desired_shape=[-1, hidden_size])
| 44.472441
| 141
| 0.660765
|
65baa1c9af8fe3bb32f56367df601178ce9df090
| 3,178
|
py
|
Python
|
m04_machine_learning/m04_c04_metrics_and_model_selection/model.py
|
mailo-barrientos/mat281_portfolio
|
5dbad4ae450d7ff558fdf6fc3a09b11215726b7d
|
[
"MIT"
] | 4
|
2019-04-10T02:35:39.000Z
|
2020-03-10T04:49:07.000Z
|
m04_machine_learning/m04_c04_metrics_and_model_selection/model.py
|
mailo-barrientos/mat281_portfolio
|
5dbad4ae450d7ff558fdf6fc3a09b11215726b7d
|
[
"MIT"
] | null | null | null |
m04_machine_learning/m04_c04_metrics_and_model_selection/model.py
|
mailo-barrientos/mat281_portfolio
|
5dbad4ae450d7ff558fdf6fc3a09b11215726b7d
|
[
"MIT"
] | 7
|
2019-10-01T19:57:14.000Z
|
2020-04-09T06:15:43.000Z
|
import numpy as np
from matplotlib import pyplot as plt
from scipy import optimize
secret_true_params = (5., np.pi/4.0, 0., 0.)
def my_model(x, a, b, c, d):
return a*np.cos(b*x+c) + d
def generate_data(N=100, true_params=secret_true_params,
                  seed=42):
    np.random.seed(seed)
    x = np.linspace(-2.5, 2.5, N)
    y1 = my_model(x, *true_params)
    y2 = 1.0 * np.random.normal(size=N)
    # Create the data: one (x, noisy y) pair per row
    data = np.array([x, y1 + y2]).T
    # Shuffle the rows so later train/test splits are random
    permuted_data = np.random.permutation(data)
    # Save the (shuffled) data
    np.savetxt("dataN%d.txt" % N, permuted_data)
    return permuted_data
def load_data(myfile):
data = np.loadtxt(myfile)
return data
def get_params(data):
    # Fit the model parameters (a, b, c, d) to the data
xdata = data[:,0]
ydata = data[:,1]
popt, pcov = optimize.curve_fit(my_model, xdata, ydata, maxfev=5000)
return popt
def get_error(model_params, data):
x_data = data[:,0]
y_data = data[:,1]
y_prediction = my_model(x_data, *model_params)
    # Mean absolute error (alternative, unused):
    # error_1 = np.abs(y_data - y_prediction).sum() / len(y_data)
    # Root-mean-square error:
    error_2 = np.sqrt(np.mean((y_data - y_prediction) ** 2))
return error_2
def plot(training_data, testing_data, training_params, all_data_params, true_params=secret_true_params):
fig = plt.figure(figsize=(16,8))
plt.plot(training_data[:,0], training_data[:,1], 'bs', label="training data", alpha=0.75, ms=10)
plt.plot(testing_data[:,0], testing_data[:,1], 'ro', label="testing data", alpha=0.75, ms=10)
data = np.vstack([training_data, testing_data])
x = np.array(sorted(data[:,0].copy()))
plt.plot(x, my_model(x, *true_params),
'k', label="true params", lw=2.0)
plt.plot(x, my_model(x, *training_params),
'b', label="training params", lw=2.0)
plt.plot(x, my_model(x, *all_data_params),
'g', label="all data params", lw=2.0)
xmin, xmax = x.min(), x.max()
plt.xlim([xmin-.2, xmax+0.2])
plt.legend(numpoints=1, loc="lower center")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
return
def full_report(training_data, testing_data, training_params, all_data_params):
data = np.vstack([training_data, testing_data])
print("The obtained model parameters for training dataset are:")
print("\t(a,b,c,d) = (%.3f, %.3f, %.3f, %.3f)" %tuple(training_params))
print("The obtained model parameters for the whole dataset are:")
print("\t(a,b,c,d) = (%.3f, %.3f, %.3f, %.3f)" %tuple(all_data_params))
print("The true model parameters are:")
print("\t(a,b,c,d) = (%.3f, %.3f, %.3f, %.3f)" %tuple(secret_true_params))
print("")
prediction_error = get_error(training_params, testing_data)
print("Conservative error estimation on testing dataset: %.2f" %prediction_error)
true_error = get_error(secret_true_params, testing_data)
print("Pure random error on testing dataset: %.2f" %true_error)
all_error = get_error(secret_true_params, data)
print("Pure random error on all data: %.2f" %all_error)
if __name__=="__main__":
generate_data(N=20)
generate_data(N=50)
generate_data(N=100)
generate_data(N=500)
generate_data(N=5000)
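# Illustrative workflow (assumed file name and split; adjust as needed):
#     data = load_data("dataN100.txt")
#     training_data, testing_data = data[:70], data[70:]
#     training_params = get_params(training_data)
#     all_data_params = get_params(data)
#     full_report(training_data, testing_data, training_params, all_data_params)
#     plot(training_data, testing_data, training_params, all_data_params)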
| 36.528736
| 104
| 0.652926
|
68c63b637d0482ed229b060a2d1252039139b1f4
| 2,108
|
py
|
Python
|
tests/test_component_recorder.py
|
mikiec84/home-assistant
|
d9e3c02df3a2690e74d1b606e8db0a4dd686e872
|
[
"MIT"
] | null | null | null |
tests/test_component_recorder.py
|
mikiec84/home-assistant
|
d9e3c02df3a2690e74d1b606e8db0a4dd686e872
|
[
"MIT"
] | null | null | null |
tests/test_component_recorder.py
|
mikiec84/home-assistant
|
d9e3c02df3a2690e74d1b606e8db0a4dd686e872
|
[
"MIT"
] | null | null | null |
"""
tests.test_component_recorder
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests Recorder component.
"""
# pylint: disable=too-many-public-methods,protected-access
import unittest
import os
from homeassistant.const import MATCH_ALL
from homeassistant.components import recorder
from helpers import get_test_home_assistant
class TestRecorder(unittest.TestCase):
""" Test the chromecast module. """
def setUp(self): # pylint: disable=invalid-name
self.hass = get_test_home_assistant()
recorder.setup(self.hass, {})
self.hass.start()
recorder._INSTANCE.block_till_done()
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
recorder._INSTANCE.block_till_done()
os.remove(self.hass.config.path(recorder.DB_FILE))
def test_saving_state(self):
""" Tests saving and restoring a state. """
entity_id = 'test.recorder'
state = 'restoring_from_db'
attributes = {'test_attr': 5, 'test_attr_10': 'nice'}
self.hass.states.set(entity_id, state, attributes)
self.hass.pool.block_till_done()
recorder._INSTANCE.block_till_done()
states = recorder.query_states('SELECT * FROM states')
self.assertEqual(1, len(states))
self.assertEqual(self.hass.states.get(entity_id), states[0])
def test_saving_event(self):
""" Tests saving and restoring an event. """
event_type = 'EVENT_TEST'
event_data = {'test_attr': 5, 'test_attr_10': 'nice'}
events = []
def event_listener(event):
""" Records events from eventbus. """
if event.event_type == event_type:
events.append(event)
self.hass.bus.listen(MATCH_ALL, event_listener)
self.hass.bus.fire(event_type, event_data)
self.hass.pool.block_till_done()
recorder._INSTANCE.block_till_done()
db_events = recorder.query_events(
'SELECT * FROM events WHERE event_type = ?', (event_type, ))
self.assertEqual(events, db_events)
| 29.690141
| 72
| 0.645636
|
1513358aac1ec40cf1fb3f0ef43e3ced6a520d82
| 295
|
py
|
Python
|
scraping/scrapy_project/pipelines.py
|
ManasUniyal/Friday
|
6e1ff6541cca98f073e3fd07218b22da165a613b
|
[
"MIT"
] | 1
|
2021-03-01T11:25:32.000Z
|
2021-03-01T11:25:32.000Z
|
scraping/scrapy_project/pipelines.py
|
ManasUniyal/Friday
|
6e1ff6541cca98f073e3fd07218b22da165a613b
|
[
"MIT"
] | null | null | null |
scraping/scrapy_project/pipelines.py
|
ManasUniyal/Friday
|
6e1ff6541cca98f073e3fd07218b22da165a613b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class ScrapyProjectPipeline(object):
def process_item(self, item, spider):
return item
| 24.583333
| 66
| 0.718644
|
41c4448eee800aa83233d319c64f98f7602c68ba
| 18,967
|
py
|
Python
|
namespace.py
|
maxnilz/ontology-visualization
|
528deb736f0c8d9e96b693f3acd5aedff41b74dd
|
[
"MIT"
] | null | null | null |
namespace.py
|
maxnilz/ontology-visualization
|
528deb736f0c8d9e96b693f3acd5aedff41b74dd
|
[
"MIT"
] | null | null | null |
namespace.py
|
maxnilz/ontology-visualization
|
528deb736f0c8d9e96b693f3acd5aedff41b74dd
|
[
"MIT"
] | null | null | null |
import os
from unicodedata import category
from urllib.parse import urljoin, urldefrag
from urllib.request import pathname2url
from rdflib.term import URIRef, Variable, _XSD_PFX, _is_valid_uri
__all__ = [
'is_ncname', 'split_uri', 'Namespace',
'ClosedNamespace', 'NamespaceManager',
'XMLNS', 'RDF', 'RDFS', 'XSD', 'OWL',
'SKOS', 'DOAP', 'FOAF', 'DC', 'DCTERMS', 'VOID']
class Namespace(str):
def __new__(cls, value):
try:
rt = str.__new__(cls, value)
except UnicodeDecodeError:
rt = str.__new__(cls, value, 'utf-8')
return rt
@property
def title(self):
return URIRef(self + 'title')
def term(self, name):
# need to handle slices explicitly because of __getitem__ override
return URIRef(self + (name if isinstance(name, str) else ''))
def __getitem__(self, key, default=None):
return self.term(key)
def __getattr__(self, name):
if name.startswith("__"): # ignore any special Python names!
raise AttributeError
else:
return self.term(name)
def __repr__(self):
return "Namespace(%r)" % str.__repr__(self)
class URIPattern(str):
__doc__ = """
Utility class for creating URIs according to some pattern
This supports either new style formatting with .format
or old-style with % operator
>>> u=URIPattern("http://example.org/%s/%d/resource")
>>> u%('books', 12345)
rdflib.term.URIRef(u'http://example.org/books/12345/resource')
"""
def __new__(cls, value):
try:
rt = str.__new__(cls, value)
except UnicodeDecodeError:
rt = str.__new__(cls, value, 'utf-8')
return rt
def __mod__(self, *args, **kwargs):
return URIRef(str(self).__mod__(*args, **kwargs))
def format(self, *args, **kwargs):
return URIRef(str.format(self, *args, **kwargs))
def __repr__(self):
return "URIPattern(%r)" % str.__repr__(self)
class ClosedNamespace(object):
"""
A namespace with a closed list of members
    Trying to create terms not listed is an error
"""
def __init__(self, uri, terms):
self.uri = uri
self.__uris = {}
for t in terms:
self.__uris[t] = URIRef(self.uri + t)
def term(self, name):
uri = self.__uris.get(name)
if uri is None:
raise AttributeError(
"term '{}' not in namespace '{}'".format(name, self.uri)
)
else:
return uri
def __getitem__(self, key, default=None):
return self.term(key)
def __getattr__(self, name):
if name.startswith("__"): # ignore any special Python names!
raise AttributeError
else:
return self.term(name)
def __str__(self):
return str(self.uri)
def __repr__(self):
return "rdf.namespace.ClosedNamespace(%r)" % str(self.uri)
class _RDFNamespace(ClosedNamespace):
"""
Closed namespace for RDF terms
"""
def __init__(self):
super(_RDFNamespace, self).__init__(
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
terms=[
# Syntax Names
"RDF", "Description", "ID", "about", "parseType",
"resource", "li", "nodeID", "datatype",
# RDF Classes
"Seq", "Bag", "Alt", "Statement", "Property",
"List", "PlainLiteral",
# RDF Properties
"subject", "predicate", "object", "type",
"value", "first", "rest",
# and _n where n is a non-negative integer
# RDF Resources
"nil",
# Added in RDF 1.1
"XMLLiteral", "HTML", "langString"]
)
def term(self, name):
try:
i = int(name)
return URIRef("%s_%s" % (self.uri, i))
except ValueError:
return super(_RDFNamespace, self).term(name)
RDF = _RDFNamespace()
RDFS = ClosedNamespace(
uri=URIRef("http://www.w3.org/2000/01/rdf-schema#"),
terms=[
"Resource", "Class", "subClassOf", "subPropertyOf", "comment", "label",
"domain", "range", "seeAlso", "isDefinedBy", "Literal", "Container",
"ContainerMembershipProperty", "member", "Datatype"]
)
OWL = Namespace('http://www.w3.org/2002/07/owl#')
XSD = Namespace(_XSD_PFX)
SKOS = Namespace('http://www.w3.org/2004/02/skos/core#')
DOAP = Namespace('http://usefulinc.com/ns/doap#')
FOAF = Namespace('http://xmlns.com/foaf/0.1/')
DC = Namespace('http://purl.org/dc/elements/1.1/')
DCTERMS = Namespace('http://purl.org/dc/terms/')
VOID = Namespace('http://rdfs.org/ns/void#')
class NamespaceManager(object):
"""
Class for managing prefix => namespace mappings
Sample usage from FuXi ...
.. code-block:: python
ruleStore = N3RuleStore(additionalBuiltins=additionalBuiltins)
nsMgr = NamespaceManager(Graph(ruleStore))
ruleGraph = Graph(ruleStore,namespace_manager=nsMgr)
and ...
.. code-block:: pycon
>>> import rdflib
>>> from rdflib import Graph
>>> from rdflib.namespace import Namespace, NamespaceManager
>>> exNs = Namespace('http://example.com/')
>>> namespace_manager = NamespaceManager(Graph())
>>> namespace_manager.bind('ex', exNs, override=False)
>>> g = Graph()
>>> g.namespace_manager = namespace_manager
>>> all_ns = [n for n in g.namespace_manager.namespaces()]
>>> assert ('ex', rdflib.term.URIRef('http://example.com/')) in all_ns
>>>
"""
def __init__(self, graph, ex_ns):
self.graph = graph
self.__cache = {}
self.__cache_strict = {}
self.__log = None
self.__strie = {}
self.__trie = {}
for p, n in self.namespaces(): # self.bind is not always called
insert_trie(self.__trie, str(n))
self.bind("xml", "http://www.w3.org/XML/1998/namespace")
self.bind("rdf", RDF)
self.bind("rdfs", RDFS)
self.bind("xsd", XSD)
for (label, ns) in ex_ns.items():
self.bind(label, Namespace(ns))
def reset(self):
self.__cache = {}
self.__strie = {}
self.__trie = {}
for p, n in self.namespaces(): # repopulate the trie
insert_trie(self.__trie, str(n))
def __get_store(self):
return self.graph.store
store = property(__get_store)
def qname(self, uri):
prefix, namespace, name = self.compute_qname(uri)
if prefix == "":
return name
else:
return ":".join((prefix, name))
def qname_strict(self, uri):
prefix, namespace, name = self.compute_qname_strict(uri)
if prefix == '':
return name
else:
return ':'.join((prefix, name))
def normalizeUri(self, rdfTerm):
"""
Takes an RDF Term and 'normalizes' it into a QName (using the
registered prefix) or (unlike compute_qname) the Notation 3
form for URIs: <...URI...>
"""
try:
namespace, name = split_uri(rdfTerm)
if namespace not in self.__strie:
insert_strie(self.__strie, self.__trie, str(namespace))
namespace = URIRef(str(namespace))
except:
if isinstance(rdfTerm, Variable):
return "?%s" % rdfTerm
else:
return "<%s>" % rdfTerm
prefix = self.store.prefix(namespace)
if prefix is None and isinstance(rdfTerm, Variable):
return "?%s" % rdfTerm
elif prefix is None:
return "<%s>" % rdfTerm
else:
qNameParts = self.compute_qname(rdfTerm)
return ':'.join([qNameParts[0], qNameParts[-1]])
def compute_qname(self, uri, generate=True):
if not _is_valid_uri(uri):
raise ValueError(
'"{}" does not look like a valid URI, cannot serialize this. Did you want to urlencode it?'.format(uri)
)
if uri not in self.__cache:
try:
namespace, name = split_uri(uri)
except ValueError as e:
namespace = URIRef(uri)
prefix = self.store.prefix(namespace)
if not prefix:
raise e
if namespace not in self.__strie:
insert_strie(self.__strie, self.__trie, namespace)
if self.__strie[namespace]:
pl_namespace = get_longest_namespace(self.__strie[namespace], uri)
if pl_namespace is not None:
namespace = pl_namespace
name = uri[len(namespace):]
namespace = URIRef(namespace)
prefix = self.store.prefix(namespace) # warning multiple prefixes problem
if prefix is None:
if not generate:
raise KeyError(
"No known prefix for {} and generate=False".format(namespace)
)
num = 1
while 1:
prefix = "ns%s" % num
if not self.store.namespace(prefix):
break
num += 1
self.bind(prefix, namespace)
self.__cache[uri] = (prefix, namespace, name)
return self.__cache[uri]
def compute_qname_strict(self, uri, generate=True):
# code repeated to avoid branching on strict every time
# if output needs to be strict (e.g. for xml) then
# only the strict output should bear the overhead
prefix, namespace, name = self.compute_qname(uri)
if is_ncname(str(name)):
return prefix, namespace, name
else:
if uri not in self.__cache_strict:
try:
namespace, name = split_uri(uri, NAME_START_CATEGORIES)
except ValueError as e:
                    message = ('This graph cannot be serialized to a strict format '
                               'because there is no valid way to shorten {uri}'.format(uri=uri))
raise ValueError(message)
# omitted for strict since NCNames cannot be empty
# namespace = URIRef(uri)
# prefix = self.store.prefix(namespace)
# if not prefix:
# raise e
if namespace not in self.__strie:
insert_strie(self.__strie, self.__trie, namespace)
# omitted for strict
# if self.__strie[namespace]:
# pl_namespace = get_longest_namespace(self.__strie[namespace], uri)
# if pl_namespace is not None:
# namespace = pl_namespace
# name = uri[len(namespace):]
namespace = URIRef(namespace)
prefix = self.store.prefix(namespace) # warning multiple prefixes problem
if prefix is None:
if not generate:
raise KeyError(
"No known prefix for {} and generate=False".format(namespace)
)
num = 1
while 1:
prefix = "ns%s" % num
if not self.store.namespace(prefix):
break
num += 1
self.bind(prefix, namespace)
self.__cache_strict[uri] = (prefix, namespace, name)
return self.__cache_strict[uri]
def bind(self, prefix, namespace, override=True, replace=False):
"""bind a given namespace to the prefix
if override, rebind, even if the given namespace is already
bound to another prefix.
if replace, replace any existing prefix with the new namespace
"""
namespace = URIRef(str(namespace))
# When documenting explain that override only applies in what cases
if prefix is None:
prefix = ''
bound_namespace = self.store.namespace(prefix)
# Check if the bound_namespace contains a URI
# and if so convert it into a URIRef for comparison
# This is to prevent duplicate namespaces with the
# same URI
if bound_namespace:
bound_namespace = URIRef(bound_namespace)
if bound_namespace and bound_namespace != namespace:
if replace:
self.store.bind(prefix, namespace)
insert_trie(self.__trie, str(namespace))
return
# prefix already in use for different namespace
#
# append number to end of prefix until we find one
# that's not in use.
if not prefix:
prefix = "default"
num = 1
while 1:
new_prefix = "%s%s" % (prefix, num)
tnamespace = self.store.namespace(new_prefix)
if tnamespace and namespace == URIRef(tnamespace):
# the prefix is already bound to the correct
# namespace
return
if not self.store.namespace(new_prefix):
break
num += 1
self.store.bind(new_prefix, namespace)
else:
bound_prefix = self.store.prefix(namespace)
if bound_prefix is None:
self.store.bind(prefix, namespace)
elif bound_prefix == prefix:
pass # already bound
else:
if override or bound_prefix.startswith("_"): # or a generated prefix
self.store.bind(prefix, namespace)
insert_trie(self.__trie, str(namespace))
def namespaces(self):
for prefix, namespace in self.store.namespaces():
namespace = URIRef(namespace)
yield prefix, namespace
def absolutize(self, uri, defrag=1):
base = urljoin("file:", pathname2url(os.getcwd()))
result = urljoin("%s/" % base, uri, allow_fragments=not defrag)
if defrag:
result = urldefrag(result)[0]
if not defrag:
if uri and uri[-1] == "#" and result[-1] != "#":
result = "%s#" % result
return URIRef(result)
def namespaceof(self, uri):
result = self.absolutize(uri)
if result[-1] != "#":
result = "%s#" % result
return Namespace(result)
# From: http://www.w3.org/TR/REC-xml#NT-CombiningChar
#
# * Name start characters must have one of the categories Ll, Lu, Lo,
# Lt, Nl.
#
# * Name characters other than Name-start characters must have one of
# the categories Mc, Me, Mn, Lm, or Nd.
#
# * Characters in the compatibility area (i.e. with character code
# greater than #xF900 and less than #xFFFE) are not allowed in XML
# names.
#
# * Characters which have a font or compatibility decomposition
# (i.e. those with a "compatibility formatting tag" in field 5 of the
# database -- marked by field 5 beginning with a "<") are not allowed.
#
# * The following characters are treated as name-start characters rather
# than name characters, because the property file classifies them as
# Alphabetic: [#x02BB-#x02C1], #x0559, #x06E5, #x06E6.
#
# * Characters #x20DD-#x20E0 are excluded (in accordance with Unicode
# 2.0, section 5.14).
#
# * Character #x00B7 is classified as an extender, because the property
# list so identifies it.
#
# * Character #x0387 is added as a name character, because #x00B7 is its
# canonical equivalent.
#
# * Characters ':' and '_' are allowed as name-start characters.
#
# * Characters '-' and '.' are allowed as name characters.
NAME_START_CATEGORIES = ["Ll", "Lu", "Lo", "Lt", "Nl"]
SPLIT_START_CATEGORIES = NAME_START_CATEGORIES + ['Nd']
NAME_CATEGORIES = NAME_START_CATEGORIES + ["Mc", "Me", "Mn", "Lm", "Nd"]
ALLOWED_NAME_CHARS = ["\u00B7", "\u0387", "-", ".", "_", ":"]
# http://www.w3.org/TR/REC-xml-names/#NT-NCName
# [4] NCName ::= (Letter | '_') (NCNameChar)* /* An XML Name, minus
# the ":" */
# [5] NCNameChar ::= Letter | Digit | '.' | '-' | '_' | CombiningChar
# | Extender
def is_ncname(name):
if name:
first = name[0]
if first == "_" or category(first) in NAME_START_CATEGORIES:
for i in range(1, len(name)):
c = name[i]
if not category(c) in NAME_CATEGORIES:
if c != ':' and c in ALLOWED_NAME_CHARS:
continue
return 0
# if in compatibility area
# if decomposition(c)!='':
# return 0
return 1
return 0
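# For illustration: is_ncname("label") and is_ncname("my-name.v2") return 1,
# while is_ncname("0label") (digit start) and is_ncname("has:colon") return 0.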
XMLNS = "http://www.w3.org/XML/1998/namespace"
def split_uri(uri, split_start=SPLIT_START_CATEGORIES):
if uri.startswith(XMLNS):
return (XMLNS, uri.split(XMLNS)[1])
length = len(uri)
for i in range(0, length):
c = uri[-i - 1]
if not category(c) in NAME_CATEGORIES:
if c in ALLOWED_NAME_CHARS:
continue
for j in range(-1 - i, length):
if category(uri[j]) in split_start or uri[j] == "_":
# _ prevents early split, roundtrip not generate
ns = uri[:j]
if not ns:
break
ln = uri[j:]
return (ns, ln)
break
raise ValueError("Can't split '{}'".format(uri))
def insert_trie(trie, value): # aka get_subtrie_or_insert
""" Insert a value into the trie if it is not already contained in the trie.
Return the subtree for the value regardless of whether it is a new value
or not. """
if value in trie:
return trie[value]
multi_check = False
for key in tuple(trie.keys()):
if len(value) > len(key) and value.startswith(key):
return insert_trie(trie[key], value)
elif key.startswith(value): # we know the value is not in the trie
if not multi_check:
trie[value] = {}
multi_check = True # there can be multiple longer existing prefixes
dict_ = trie.pop(key) # does not break strie since key<->dict_ remains unchanged
trie[value][key] = dict_
if value not in trie:
trie[value] = {}
return trie[value]
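# For illustration, inserting two nested namespaces:
#     t = {}
#     insert_trie(t, "http://example.org/")
#     insert_trie(t, "http://example.org/vocab#")
#     # t == {"http://example.org/": {"http://example.org/vocab#": {}}}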
def insert_strie(strie, trie, value):
if value not in strie:
strie[value] = insert_trie(trie, value)
def get_longest_namespace(trie, value):
for key in trie:
if value.startswith(key):
out = get_longest_namespace(trie[key], value)
if out is None:
return key
else:
return out
return None
| 33.569912
| 119
| 0.557495
|
1d053a56edc62ddef5875ed2c4b76b0011c2841d
| 3,900
|
py
|
Python
|
test/selenium_tests/test_workflow_editor.py
|
openminted/galaxy
|
bd2147cccf814f8cec93372973b5e6ada4668a80
|
[
"CC-BY-3.0"
] | 1
|
2021-04-26T08:46:21.000Z
|
2021-04-26T08:46:21.000Z
|
test/selenium_tests/test_workflow_editor.py
|
openminted/galaxy
|
bd2147cccf814f8cec93372973b5e6ada4668a80
|
[
"CC-BY-3.0"
] | null | null | null |
test/selenium_tests/test_workflow_editor.py
|
openminted/galaxy
|
bd2147cccf814f8cec93372973b5e6ada4668a80
|
[
"CC-BY-3.0"
] | 1
|
2018-12-09T13:50:28.000Z
|
2018-12-09T13:50:28.000Z
|
from ._workflow_fixtures import (
WORKFLOW_SIMPLE_CAT_TWICE,
WORKFLOW_WITH_INVALID_STATE,
WORKFLOW_WITH_OLD_TOOL_VERSION,
)
from .framework import (
retry_assertion_during_transitions,
selenium_test,
SeleniumTestCase
)
class WorkflowEditorTestCase(SeleniumTestCase):
ensure_registered = True
@selenium_test
def test_build_workflow(self):
name = "test_edit_nam"
self.workflow_create_new(name=name)
element = self.wait_for_selector("#edit-attributes #workflow-name")
assert name in element.text, element.text
@selenium_test
def test_data_input(self):
self.workflow_create_new()
menu = self.wait_for_selector(".toolMenu")
self.sleep_for(self.wait_types.UX_RENDER)
inputs_section = menu.find_element_by_css_selector("#title___workflow__inputs__")
inputs_link = inputs_section.find_element_by_css_selector("a span")
inputs_link.click()
self.wait_for_selector_visible("#__workflow__inputs__ .toolTitle")
input_links = self.driver.find_elements_by_css_selector("#__workflow__inputs__ .toolTitle a")
input_links[0].click()
# TODO: verify box is highlighted and side panel is a form describing input.
# More work needs to be done to develop testing abstractions for doing these things.
@selenium_test
def test_save_as(self):
workflow_populator = self.workflow_populator
workflow_populator.upload_yaml_workflow(WORKFLOW_SIMPLE_CAT_TWICE)
self.workflow_index_open()
self.workflow_index_click_option("Edit")
self.sleep_for(self.wait_types.UX_RENDER)
self.workflow_editor_click_option("Save As")
@selenium_test
def test_editor_tool_upgrade_message(self):
workflow_populator = self.workflow_populator
workflow_populator.upload_yaml_workflow(WORKFLOW_WITH_OLD_TOOL_VERSION, exact_tools=True)
self.workflow_index_open()
self.workflow_index_click_option("Edit")
self.assert_modal_has_text("Using version '0.2' instead of version '0.0.1'")
@selenium_test
def test_editor_invalid_tool_state(self):
workflow_populator = self.workflow_populator
workflow_populator.upload_yaml_workflow(WORKFLOW_WITH_INVALID_STATE, exact_tools=True)
self.workflow_index_open()
self.workflow_index_click_option("Edit")
self.assert_modal_has_text("Using version '0.2' instead of version '0.0.1'")
self.assert_modal_has_text("Using default: '1'")
@selenium_test
def test_missing_tools(self):
workflow_populator = self.workflow_populator
workflow_populator.upload_yaml_workflow("""
class: GalaxyWorkflow
inputs:
- id: input1
steps:
- tool_id: missing
label: first_cat
state:
foo: bar
""")
self.workflow_index_open()
self.workflow_index_click_option("Edit")
self.assert_modal_has_text("Tool is not installed")
def workflow_create_new(self, name=None, annotation=None):
self.workflow_index_open()
self.click_button_new_workflow()
form_element = self.driver.find_element_by_css_selector("#center form")
action = form_element.get_attribute("action")
assert action.endswith("/workflow/create"), action
name = name or self._get_random_name()
annotation = annotation or self._get_random_name()
self.fill(form_element, {
'workflow_name': name,
'workflow_annotation': annotation,
})
self.click_submit(form_element)
@retry_assertion_during_transitions
def assert_modal_has_text(self, expected_text):
modal_element = self.wait_for_selector_visible(self.modal_body_selector())
text = modal_element.text
assert expected_text in text, "Failed to find expected text [%s] in modal text [%s]" % (expected_text, text)
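The retry_assertion_during_transitions decorator is imported from the test framework but not shown in this file; a minimal sketch of what such a retry-on-AssertionError helper could look like (an assumption for illustration, not Galaxy's actual implementation) is:

import time
from functools import wraps

def retry_assertion_during_transitions_sketch(func, attempts=5, delay=0.5):
    """Re-run an assertion helper a few times while the UI settles (illustrative only)."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        for attempt in range(attempts):
            try:
                return func(*args, **kwargs)
            except AssertionError:
                if attempt == attempts - 1:
                    raise
                time.sleep(delay)
    return wrapper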
| 38.235294
| 116
| 0.719487
|
1e3d0d8aaeebd8d3d2d70c2c8f70986f7e273358
| 7,572
|
py
|
Python
|
cone_finder/scripts/hsv_filt.py
|
mattp256/wheele
|
724e2df031017051085000ac49849e4bb03b69cb
|
[
"BSD-3-Clause"
] | 2
|
2018-01-28T00:48:42.000Z
|
2019-04-08T00:24:11.000Z
|
cone_finder/scripts/hsv_filt.py
|
mattp256/wheele
|
724e2df031017051085000ac49849e4bb03b69cb
|
[
"BSD-3-Clause"
] | 27
|
2018-08-03T01:49:19.000Z
|
2022-01-05T19:22:46.000Z
|
cone_finder/scripts/hsv_filt.py
|
mattp256/wheele
|
724e2df031017051085000ac49849e4bb03b69cb
|
[
"BSD-3-Clause"
] | 3
|
2018-04-20T02:55:10.000Z
|
2020-11-24T05:57:13.000Z
|
#!/usr/bin/env python
#DO NOT OR FILTS TOGETHER
#INSTEAD, JUST FIND CONTOURS ON EACH SEPARATE FILT
#HOW TO AUTOMATICALLY FIND THE BACKGROUND COLORS OF AN IMAGE?
# 1. Given a range of colors defined like below (MIN and MAX for inRange)
# Step through each color, filt img, close/open, find contours
# If a large contour exists, assume it is a background color
# Watch out for being close up to walls, etc.
import cv2
import imutils
import numpy as np
def nothing(x):
pass
TREE_MIN = np.array([0, 0, 0],np.uint8)
TREE_MAX = np.array([20, 255, 255],np.uint8)
rect_w = 5
rect_h = 50
noise_se_w = 5
noise_se_h = 5
fill_se_w = 5
fill_se_h = 5
#Probably faster to just use channel 0 of hsv image
#and apply simple threshold, rather than inRange
#To omit ground/gravel/grass area, look for a uniform speckled distribution
#created by a hue filter with a range of 1-3 (hopefully just 1)
#Check binary results of hue = 8, hue = 9,... hue = 20
#If such a hue filter gives a uniform speckle in lower half of image,
#assume the speckled area is not an object
#if a black object sticks out against the speckled, it is a candidate.
# read the frames
#_,frame = cap.read()
cv2.namedWindow('control', cv2.WINDOW_NORMAL)
cv2.namedWindow('control2', cv2.WINDOW_NORMAL)
cv2.createTrackbar('Hue Min','control', 0,255,nothing)
cv2.createTrackbar('Hue Max','control',12,255,nothing)
cv2.createTrackbar('Sat Min','control',75,255,nothing)
cv2.createTrackbar('Sat Max','control',230,255,nothing)
cv2.createTrackbar('Val Min','control',105,255,nothing)
cv2.createTrackbar('Val Max','control',255,255,nothing)
cv2.createTrackbar('Noise SE Width','control2',3,99,nothing)
cv2.createTrackbar('Noise SE Height','control2',7,99,nothing)
cv2.createTrackbar('Fill SE Width','control2',3,99,nothing)
cv2.createTrackbar('Fill SE Height','control2',10,99,nothing)
cv2.createTrackbar('Rect Width','control2',0,99,nothing)
cv2.createTrackbar('Rect Height','control2',0,99,nothing)
def find_marker(image):
# convert the image to grayscale, blur it, and detect edges
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 35, 125)
cv2.imshow('edge',edged)
# find the contours in the edged image and keep the largest one;
# we'll assume that this is our piece of paper in the image
cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
c = max(cnts, key = cv2.contourArea)
# compute the bounding box of the paper region and return it
return cv2.minAreaRect(c)
#img_num = [60,81,94,100,144,158,194,999]
img_num = range(0,187)
k = 0
while k < len(img_num):
if(k<0):k=0
best_cnt = np.array([0])
#img_name = 'sync_photos91/image'+str(img_num[k])+'.jpg'
#img_name = 'testB_'+str(img_num[k])+'.jpg'
img_name = "/home/karl/wheele_misc/cone_run4_pics/frame{:04d}.jpg".format(k)
orig = cv2.imread(img_name)
rows,cols,nc = orig.shape
#roi = orig[60:rows,0:cols]
#orig = roi
hsv = cv2.cvtColor(orig,cv2.COLOR_BGR2HSV)
while(1):
orig = cv2.imread(img_name)
tree_filt = cv2.inRange(hsv, TREE_MIN, TREE_MAX)
cv2.imshow('tree_filt',tree_filt)
gray = cv2.cvtColor(orig.copy(), cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (25, 25), 0)
adapt = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,15,0)
#adapt = cv2.bitwise_not(adapt)
cv2.imshow('adapt',adapt)
rect_w = rect_w +(rect_w==0)
rect_h = rect_h +(rect_h==0)
noise_se_w = noise_se_w +(noise_se_w==0)
noise_se_h = noise_se_h +(noise_se_h==0)
fill_se_w = fill_se_w +(fill_se_w==0)
fill_se_h = fill_se_h +(fill_se_h==0)
#Open binary image
rect_se = cv2.getStructuringElement(cv2.MORPH_RECT,(rect_w,rect_h))
noise_se = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(noise_se_w,noise_se_h))
fill_se = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(fill_se_w,fill_se_h))
#erosion then dilation, removes noise in background
opening = cv2.morphologyEx(tree_filt,cv2.MORPH_OPEN,noise_se)
cv2.imshow('opn_e',opening)
#4.Closes the Thresholded Image
#dilation then erosion, fills holes in foreground
closing = cv2.morphologyEx(opening,cv2.MORPH_CLOSE, fill_se)
cv2.imshow('cls_e',closing)
open2 = cv2.morphologyEx(closing,cv2.MORPH_OPEN, rect_se)
cv2.imshow('opn_r',open2)
#thresh = cv2.Canny(frame,100,200)
#thresh2 = thresh.copy()
_, contours, hierarchy = cv2.findContours(open2,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) #python 2 vs 3
# finding contour with maximum area and store it as best_cnt
max_area = 0
for cnt in contours:
area = cv2.contourArea(cnt)
pts = cnt[:,0]
x = pts[:,0]
y = pts[:,1]
cnt_height = max(y)-min(y)
cnt_width = max(x)-min(x)
#Longest Distance between 2 points/area
if area > max_area and cnt_height/cnt_width > 0.5:# and cnt_height < 40 and cnt_width < 30:
max_area = area
best_cnt = cnt
#EDGE DETECTION FOR FINDING CONE
marker = find_marker(orig)
# draw a bounding box around the image and display it
box = cv2.cv.BoxPoints(marker) if imutils.is_cv2() else cv2.boxPoints(marker)
box = np.int0(box)
cv2.drawContours(orig, [box], -1, (0, 255, 0), 2)
#END EDGE DETECTION APPROACH
# finding centroids of best_cnt and draw a circle there
if(best_cnt.ndim == 3):
M = cv2.moments(best_cnt)
cx,cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])
cv2.circle(orig,(cx,cy),5,255,-1)
vis1 = np.concatenate((orig, hsv), axis=0)
vis2 = np.concatenate((tree_filt,open2), axis=0)
vis3 = cv2.cvtColor(vis2,cv2.COLOR_GRAY2RGB)
vis = np.concatenate((vis1,vis3),axis=1)
cv2.imshow('vis',vis)
cv2.imshow('orig',orig)
hue_min = cv2.getTrackbarPos('Hue Min','control')
hue_max = cv2.getTrackbarPos('Hue Max','control')
sat_min = cv2.getTrackbarPos('Sat Min','control')
sat_max = cv2.getTrackbarPos('Sat Max','control')
val_min = cv2.getTrackbarPos('Val Min','control')
val_max = cv2.getTrackbarPos('Val Max','control')
noise_se_w = cv2.getTrackbarPos('Noise SE Width','control2')
noise_se_h = cv2.getTrackbarPos('Noise SE Height','control2')
fill_se_w = cv2.getTrackbarPos('Fill SE Width','control2')
fill_se_h = cv2.getTrackbarPos('Fill SE Height','control2')
rect_w = cv2.getTrackbarPos('Rect Width','control2')
rect_h = cv2.getTrackbarPos('Rect Height','control2')
TREE_MIN = np.array([hue_min, sat_min, val_min],np.uint8)
TREE_MAX = np.array([hue_max, sat_max, val_max],np.uint8)
key = cv2.waitKey(33)
if key == ord('n'):
k = k+1
break
elif key == ord('p'):
k = k-1
break
elif key == 27:
k = len(img_num)
break
# Clean up everything before leaving
cv2.destroyAllWindows()
#cap.release()
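As a rough sketch of the "uniform speckle = background" idea described in the comments at the top of this script (an assumption about one possible implementation, not code from the script): count how evenly a narrow hue filter fires across the lower half of the image and treat a low, evenly spread response as ground rather than an object.

# Hypothetical helper sketching the speckle-based background heuristic; thresholds are illustrative guesses.
import numpy as np

def looks_like_background_speckle(mask, max_fill=0.15, max_col_std=0.05):
    """mask: binary (0/255) image produced by a narrow hue inRange filter."""
    lower = mask[mask.shape[0] // 2:, :] > 0   # lower half of the image
    fill = lower.mean()                        # fraction of pixels that fired
    col_density = lower.mean(axis=0)           # per-column fill fraction
    return fill < max_fill and col_density.std() < max_col_std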
| 41.604396
| 111
| 0.634575
|
faab26d1e63990899ba5f5445b8e0acf25f7051b
| 2,418
|
py
|
Python
|
migrations/versions/28c099a8784c_tabela_categoria.py
|
leticiarosemberg/EcoOng
|
15c2ec1eabeeb12f3c7491eb22c2663fd10229ed
|
[
"MIT"
] | 1
|
2022-01-11T20:20:48.000Z
|
2022-01-11T20:20:48.000Z
|
migrations/versions/28c099a8784c_tabela_categoria.py
|
leticiarosemberg/EcoOng
|
15c2ec1eabeeb12f3c7491eb22c2663fd10229ed
|
[
"MIT"
] | 1
|
2021-12-16T03:51:01.000Z
|
2021-12-16T03:51:01.000Z
|
migrations/versions/28c099a8784c_tabela_categoria.py
|
leticiarosemberg/EcoOng
|
15c2ec1eabeeb12f3c7491eb22c2663fd10229ed
|
[
"MIT"
] | 1
|
2022-02-19T19:11:11.000Z
|
2022-02-19T19:11:11.000Z
|
"""Tabela Categoria
Revision ID: 28c099a8784c
Revises: ceb81cd11f40
Create Date: 2021-12-07 11:26:45.354260
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '28c099a8784c'
down_revision = 'ceb81cd11f40'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('categoria',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('categoria', sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('pk_categoria')),
sa.UniqueConstraint('categoria', name=op.f('uq_categoria_categoria'))
)
op.create_table('noticiatag',
sa.Column('noticia_id', sa.Integer(), nullable=False),
sa.Column('tag_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['noticia_id'], ['noticia.id'], name=op.f('fk_noticiatag_noticia_id_noticia')),
sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], name=op.f('fk_noticiatag_tag_id_tag')),
sa.PrimaryKeyConstraint('noticia_id', 'tag_id', name=op.f('pk_noticiatag'))
)
op.drop_table('noticatag')
with op.batch_alter_table('noticia', schema=None) as batch_op:
batch_op.add_column(sa.Column('categoria_id', sa.Integer(), nullable=True))
batch_op.create_foreign_key(batch_op.f('fk_noticia_categoria_id_categoria'), 'categoria', ['categoria_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('noticia', schema=None) as batch_op:
batch_op.drop_constraint(batch_op.f('fk_noticia_categoria_id_categoria'), type_='foreignkey')
batch_op.drop_column('categoria_id')
op.create_table('noticatag',
sa.Column('noticia_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False),
sa.Column('tag_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['noticia_id'], ['noticia.id'], name='fk_noticatag_noticia_id_noticia'),
sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], name='fk_noticatag_tag_id_tag'),
sa.PrimaryKeyConstraint('noticia_id', 'tag_id'),
mysql_default_charset='utf8',
mysql_engine='InnoDB'
)
op.drop_table('noticiatag')
op.drop_table('categoria')
# ### end Alembic commands ###
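A brief sketch of applying this revision programmatically (assuming a standard alembic.ini at the project root; in a Flask-Migrate project the equivalent is running `flask db upgrade`):

# Illustrative only: apply migrations up to this revision, assuming alembic.ini exists.
from alembic import command
from alembic.config import Config

command.upgrade(Config("alembic.ini"), "28c099a8784c")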
| 40.3
| 123
| 0.712572
|
82fccec883e17979401535fae88b7dd01e0759f1
| 7,953
|
py
|
Python
|
main_DQN.py
|
robintyh1/Variational-DQN
|
75974131a9b3427ab6798c47bcd276a2ac15bd33
|
[
"MIT"
] | 9
|
2017-12-01T17:20:30.000Z
|
2020-05-18T08:13:30.000Z
|
main_DQN.py
|
robintyh1/Variational-DQN
|
75974131a9b3427ab6798c47bcd276a2ac15bd33
|
[
"MIT"
] | 1
|
2020-01-02T12:22:40.000Z
|
2020-01-02T12:22:40.000Z
|
main_DQN.py
|
robintyh1/Variational-DQN
|
75974131a9b3427ab6798c47bcd276a2ac15bd33
|
[
"MIT"
] | 3
|
2018-12-26T20:57:36.000Z
|
2020-01-06T21:45:54.000Z
|
"""
Implement vanilla DQN in Chainer
code adapted from Chainer tutorial
"""
from __future__ import print_function
from __future__ import division
import argparse
import collections
import copy
import random
import gym
import numpy as np
import os
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import optimizers
# import HardMDP
class QFunction(chainer.Chain):
"""Q-function represented by a MLP."""
def __init__(self, obs_size, n_actions, n_units=100):
super(QFunction, self).__init__()
with self.init_scope():
self.l0 = L.Linear(obs_size, n_units)
self.l1 = L.Linear(n_units, n_units)
self.l2 = L.Linear(n_units, n_actions)
def __call__(self, x):
"""Compute Q-values of actions for given observations."""
h = F.relu(self.l0(x))
h = F.relu(self.l1(h))
return self.l2(h)
def get_greedy_action(Q, obs):
"""Get a greedy action wrt a given Q-function."""
obs = Q.xp.asarray(obs[None], dtype=np.float32)
with chainer.no_backprop_mode():
q = Q(obs).data[0]
return int(q.argmax())
def mean_clipped_loss(y, t):
return F.mean(F.huber_loss(y, t, delta=1.0, reduce='no'))
def update(Q, target_Q, opt, samples, gamma=0.99, target_type='double_dqn'):
"""Update a Q-function with given samples and a target Q-function."""
xp = Q.xp
obs = xp.asarray([sample[0] for sample in samples], dtype=np.float32)
action = xp.asarray([sample[1] for sample in samples], dtype=np.int32)
reward = xp.asarray([sample[2] for sample in samples], dtype=np.float32)
done = xp.asarray([sample[3] for sample in samples], dtype=np.float32)
obs_next = xp.asarray([sample[4] for sample in samples], dtype=np.float32)
# Predicted values: Q(s,a)
y = F.select_item(Q(obs), action)
# Target values: r + gamma * max_b Q(s',b)
with chainer.no_backprop_mode():
if target_type == 'dqn':
next_q = F.max(target_Q(obs_next), axis=1)
elif target_type == 'double_dqn':
next_q = F.select_item(target_Q(obs_next),
F.argmax(Q(obs_next), axis=1))
else:
raise ValueError('Unsupported target_type: {}'.format(target_type))
target = reward + gamma * (1 - done) * next_q
loss = mean_clipped_loss(y, target)
Q.cleargrads()
loss.backward()
opt.update()
def main():
parser = argparse.ArgumentParser(description='Chainer example: DQN')
parser.add_argument('--seed', type=int, default=100)
parser.add_argument('--env', type=str, default='CartPole-v0',
help='Name of the OpenAI Gym environment')
parser.add_argument('--batch-size', '-b', type=int, default=64,
help='Number of transitions in each mini-batch')
parser.add_argument('--episodes', '-e', type=int, default=1000,
help='Number of episodes to run')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--out', '-o', default='dqn_result',
help='Directory to output the result')
parser.add_argument('--unit', '-u', type=int, default=100,
help='Number of units')
parser.add_argument('--target-type', type=str, default='dqn',
help='Target type', choices=['dqn', 'double_dqn'])
parser.add_argument('--reward-scale', type=float, default=1e-2,
help='Reward scale factor')
parser.add_argument('--replay-start-size', type=int, default=500,
help=('Number of iterations after which replay is '
'started'))
parser.add_argument('--iterations-to-decay-epsilon', type=int,
default=5000,
help='Number of steps used to linearly decay epsilon')
parser.add_argument('--min-epsilon', type=float, default=0.01,
help='Minimum value of epsilon')
parser.add_argument('--target-update-freq', type=int, default=100,
help='Frequency of target network update')
parser.add_argument('--record', action='store_true', default=True,
help='Record performance')
parser.add_argument('--no-record', action='store_false', dest='record')
parser.add_argument('--lr', type=float, default=1e-2)
parser.add_argument('--gamma', type=float, default=.99)
args = parser.parse_args()
# Initialize with seed
seed = args.seed
os.environ['CHAINER_SEED'] = str(seed)
np.random.seed(seed)
logdir = 'DQN/' + args.env + '/lr_' + str(args.lr) + 'episodes_' + str(args.episodes)
if not os.path.exists(logdir):
os.makedirs(logdir)
# Initialize an environment
env = gym.make(args.env)
assert isinstance(env.observation_space, gym.spaces.Box)
assert isinstance(env.action_space, gym.spaces.Discrete)
obs_size = env.observation_space.low.size
n_actions = env.action_space.n
reward_threshold = env.spec.reward_threshold
if reward_threshold is not None:
print('{} defines "solving" as getting average reward of {} over 100 '
'consecutive trials.'.format(args.env, reward_threshold))
else:
print('{} is an unsolved environment, which means it does not have a '
'specified reward threshold at which it\'s considered '
'solved.'.format(args.env))
# Initialize variables
D = collections.deque(maxlen=10 ** 6) # Replay buffer
Rs = collections.deque(maxlen=100) # History of returns
iteration = 0
# Initialize a model and its optimizer
Q = QFunction(obs_size, n_actions, n_units=args.unit)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
Q.to_gpu(args.gpu)
target_Q = copy.deepcopy(Q)
opt = optimizers.Adam(eps=args.lr)
opt.setup(Q)
rrecord = []
for episode in range(args.episodes):
obs = env.reset()
done = False
R = 0.0 # Return (sum of rewards obtained in an episode)
timestep = 0
while not done and timestep < env.spec.timestep_limit:
# Epsilon is linearly decayed
epsilon = 1.0 if len(D) < args.replay_start_size else \
max(args.min_epsilon,
np.interp(
iteration,
[0, args.iterations_to_decay_epsilon],
[1.0, args.min_epsilon]))
# Select an action epsilon-greedily
if np.random.rand() < epsilon:
action = env.action_space.sample()
else:
action = get_greedy_action(Q, obs)
# Execute an action
new_obs, reward, done, _ = env.step(action)
R += reward
# Store a transition
D.append((obs, action, reward * args.reward_scale, done, new_obs))
obs = new_obs
# Sample a random minibatch of transitions and replay
if len(D) >= args.replay_start_size:
sample_indices = random.sample(range(len(D)), args.batch_size)
samples = [D[i] for i in sample_indices]
update(Q, target_Q, opt, samples,
gamma=args.gamma, target_type=args.target_type)
# Update the target network
if iteration % args.target_update_freq == 0:
target_Q = copy.deepcopy(Q)
iteration += 1
timestep += 1
Rs.append(R)
average_R = np.mean(Rs)
print('episode: {} iteration: {} R: {} average_R: {}'.format(
episode, iteration, R, average_R))
rrecord.append(R)
np.save(logdir + '/rrecord_' + str(seed), rrecord)
if __name__ == '__main__':
main()
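To make the 'dqn' vs 'double_dqn' branch in update() above concrete, here is a small NumPy-only sketch (toy values, not part of the training script) of how the two bootstrap targets differ:

import numpy as np

q_next = np.array([[1.0, 3.0], [2.0, 0.5]])          # online Q(s', .) for 2 transitions
q_next_target = np.array([[5.0, 2.0], [1.0, 4.0]])   # target-network Q(s', .)

dqn_next = q_next_target.max(axis=1)                                  # max_b targetQ(s', b)
double_dqn_next = q_next_target[np.arange(2), q_next.argmax(axis=1)]  # targetQ(s', argmax_b Q(s', b))
print(dqn_next, double_dqn_next)  # [5. 4.] [2. 1.]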
| 37.338028
| 79
| 0.602037
|
68083336da0351b12d1ac7475ca2f54e3a857d1f
| 775
|
py
|
Python
|
Section 4/Student Resources/Assignment Solution Scripts/assignment_01b_solution.py
|
t12-hub/Python-Hands-On
|
951a42efaff56a5f5213bf1873b268c0b3c189c5
|
[
"MIT"
] | 6
|
2021-01-23T16:40:57.000Z
|
2022-03-14T16:41:07.000Z
|
Section 4/Student Resources/Assignment Solution Scripts/assignment_01b_solution.py
|
t12-hub/Python-Hands-On
|
951a42efaff56a5f5213bf1873b268c0b3c189c5
|
[
"MIT"
] | null | null | null |
Section 4/Student Resources/Assignment Solution Scripts/assignment_01b_solution.py
|
t12-hub/Python-Hands-On
|
951a42efaff56a5f5213bf1873b268c0b3c189c5
|
[
"MIT"
] | 3
|
2021-01-11T14:27:12.000Z
|
2021-10-02T16:48:12.000Z
|
# Assignment 1 Solution (Part 2)
# Subjects with scores
# ** NOTE: Nothing is stopping you from entering - 101 or 120 or -20 or ten or london
english_score = int(input("Enter a score for English : \t"))
math_score = int(input("Enter a score for Math : \t\t"))
science_score = int(input("Enter a score for Science : \t"))
social_score = int(input("Enter a score for Social : \t\t"))
total = english_score + math_score + science_score + social_score
total_subjects = 4
average_score = total/total_subjects
# This is not the best way to format output.
# We will learn more when learning about strings.
print()
print("**************************")
print(" Result")
print("**************************")
print(" Total : \t\t", total)
print(" Average : \t\t", average_score)
| 33.695652
| 85
| 0.658065
|
cc39cbd30af0680b83e38d9efc1b9ccaf5e53443
| 1,765
|
py
|
Python
|
events/api.py
|
MohamedAbdultawab/events
|
3d4acaf73628135addb7f1e98b4351cbba50c27e
|
[
"MIT"
] | null | null | null |
events/api.py
|
MohamedAbdultawab/events
|
3d4acaf73628135addb7f1e98b4351cbba50c27e
|
[
"MIT"
] | null | null | null |
events/api.py
|
MohamedAbdultawab/events
|
3d4acaf73628135addb7f1e98b4351cbba50c27e
|
[
"MIT"
] | null | null | null |
import frappe
from frappe import _
from frappe.utils import add_days, nowdate
@frappe.whitelist()
def send_invitation_emails(event):
"""
Send Email Invitations to event invitees.
"""
event = frappe.get_doc("Custom Event", event)
# event.check_permission("email")
if event.status == "Planned":
frappe.sendmail(
recipients=[d.invitee for d in event.invitees],
sender='mohamedtoba96@gmail.com',
subject=event.invitation_subject or event.title,
message=event.invitation_message,
reference_doctype=event.doctype,
reference_name=event.name,
)
event.status = "Invitations Sent"
event.save()
frappe.msgprint(_("Invitation Sent"))
else:
frappe.msgprint(_("Event Status must be 'Planned'"))
@frappe.whitelist()
def get_events(start, end):
"""
Return Event list.
"""
if not frappe.has_permission("Custom Event", "read"):
raise frappe.PermissionError
data = frappe.db.sql("""select
timestamp(timestamp(`date`), start_time) as start,
timestamp(timestamp(`date`), end_time) as end,
name,
title,
status,
0 as all_day
from `tabCustom Event`
where `date` between %(start)s and %(end)s""", {
"start": start,
"end": end
}, as_dict=True)
return data
def create_welcome_party_event(doc, method):
"""
Create a welcome party event when a new User is added.
"""
event = frappe.get_doc({
"doctype": "Custom Event",
"title": "Welcome Party for {0}".format(doc.first_name),
"date": add_days(nowdate(), 7),
"from_time": "09:00",
"to_time": "09:30",
"status": "Planned",
"invitees": [{
"invitee": doc.name
}]
})
# the System Manager might not have permission to create a Custom Event
event.flags.ignore_permissions = True
event.insert()
frappe.msgprint(_("Welcome party event created"))
| 22.628205
| 67
| 0.695184
|
b72300fd63b7085e66767ff06df772e29cbb8159
| 10,817
|
py
|
Python
|
website/project/views/register.py
|
h-ci-user01/osf.h-test
|
a61db2c639a26031aa5b7f58c4dd719919aa5ece
|
[
"Apache-2.0"
] | null | null | null |
website/project/views/register.py
|
h-ci-user01/osf.h-test
|
a61db2c639a26031aa5b7f58c4dd719919aa5ece
|
[
"Apache-2.0"
] | 18
|
2020-03-24T15:26:02.000Z
|
2022-03-08T21:30:39.000Z
|
website/project/views/register.py
|
h-ci-user01/osf.h-test
|
a61db2c639a26031aa5b7f58c4dd719919aa5ece
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import httplib as http
import itertools
from flask import request
from framework import status
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.auth.decorators import must_be_signed
from website.archiver import ARCHIVER_SUCCESS, ARCHIVER_FAILURE
from website import settings
from website.exceptions import NodeStateError
from website.project.decorators import (
must_be_valid_project, must_be_contributor_or_public,
must_have_permission,
must_not_be_registration, must_be_registration,
must_not_be_retracted_registration
)
from website.identifiers.utils import get_or_create_identifiers, build_ezid_metadata
from osf.models import Identifier, MetaSchema, NodeLog
from website.project.utils import serialize_node
from website.util.permissions import ADMIN
from website import language
from website.project import signals as project_signals
from website.project.metadata.schemas import _id_to_name
from website import util
from website.project.metadata.utils import serialize_meta_schema
from website.project.model import has_anonymous_link
from website.archiver.decorators import fail_archive_on_error
from website.identifiers.client import EzidClient
from .node import _view_project
@must_be_valid_project
@must_not_be_retracted_registration
@must_be_contributor_or_public
def node_register_page(auth, node, **kwargs):
"""Display the registration metadata for a registration.
:return: serialized Node
"""
if node.is_registration:
return serialize_node(node, auth)
else:
status.push_status_message(
'You have been redirected to the project\'s registrations page. From here you can initiate a new Draft Registration to complete the registration process',
trust=False)
return redirect(node.web_url_for('node_registrations', view='draft'))
@must_be_valid_project
@must_have_permission(ADMIN)
def node_registration_retraction_redirect(auth, node, **kwargs):
return redirect(node.web_url_for('node_registration_retraction_get', _guid=True))
@must_be_valid_project
@must_not_be_retracted_registration
@must_have_permission(ADMIN)
def node_registration_retraction_get(auth, node, **kwargs):
"""Prepares node object for registration retraction page.
:return: serialized Node to be retracted
:raises: 400: BAD_REQUEST if registration already pending retraction
"""
if not node.is_registration:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'Withdrawal of non-registrations is not permitted.'
})
if node.is_pending_retraction:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'This registration is already pending withdrawal.'
})
return serialize_node(node, auth, primary=True)
@must_be_valid_project
@must_have_permission(ADMIN)
def node_registration_retraction_post(auth, node, **kwargs):
"""Handles retraction of public registrations
:param auth: Authentication object for User
:return: Redirect URL for successful POST
"""
if node.is_pending_retraction:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'This registration is already pending withdrawal'
})
if not node.is_registration:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'Withdrawal of non-registrations is not permitted.'
})
if node.root_id != node.id:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'Withdrawal of non-parent registrations is not permitted.'
})
data = request.get_json()
try:
node.retract_registration(auth.user, data.get('justification', None))
node.save()
node.retraction.ask(node.get_active_contributors_recursive(unique_users=True))
except NodeStateError as err:
raise HTTPError(http.FORBIDDEN, data=dict(message_long=err.message))
return {'redirectUrl': node.web_url_for('view_project')}
@must_be_valid_project
@must_not_be_retracted_registration
@must_be_contributor_or_public
def node_register_template_page(auth, node, metaschema_id, **kwargs):
if node.is_registration and bool(node.registered_schema):
try:
meta_schema = MetaSchema.objects.get(_id=metaschema_id)
except MetaSchema.DoesNotExist:
# backwards compatibility for old urls, lookup by name
meta_schema = MetaSchema.objects.filter(name=_id_to_name(metaschema_id)).order_by('-schema_version').first()
if not meta_schema:
raise HTTPError(http.NOT_FOUND, data={
'message_short': 'Invalid schema name',
'message_long': 'No registration schema with that name could be found.'
})
if not node.registered_schema.filter(id=meta_schema.id).exists():
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid schema',
'message_long': 'This registration has no registration supplement with that name.'
})
ret = _view_project(node, auth, primary=True)
my_meta = serialize_meta_schema(meta_schema)
if has_anonymous_link(node, auth):
for indx, schema_page in enumerate(my_meta['schema']['pages']):
for idx, schema_question in enumerate(schema_page['questions']):
if schema_question['title'] in settings.ANONYMIZED_TITLES:
del my_meta['schema']['pages'][indx]['questions'][idx]
ret['node']['registered_schema'] = serialize_meta_schema(meta_schema)
return ret
else:
status.push_status_message(
'You have been redirected to the project\'s registrations page. From here you can initiate a new Draft Registration to complete the registration process',
trust=False
)
return redirect(node.web_url_for('node_registrations', view=kwargs.get('template')))
@must_be_valid_project # returns project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_before_register(auth, node, **kwargs):
"""Returns prompt informing user that addons, if any, won't be registered."""
# TODO: Avoid generating HTML code in Python; all HTML should be in display layer
messages = {
'full': {
'addons': set(),
'message': 'The content and version history of <strong>{0}</strong> will be copied to the registration.',
},
'partial': {
'addons': set(),
'message': 'The current version of the content in <strong>{0}</strong> will be copied to the registration, but version history will be lost.'
},
'none': {
'addons': set(),
'message': 'The contents of <strong>{0}</strong> cannot be registered at this time, and will not be included as part of this registration.',
},
}
errors = {}
addon_set = [n.get_addons() for n in itertools.chain([node], node.get_descendants_recursive(primary_only=True))]
for addon in itertools.chain(*addon_set):
if not addon.complete:
continue
archive_errors = getattr(addon, 'archive_errors', None)
error = None
if archive_errors:
error = archive_errors()
if error:
errors[addon.config.short_name] = error
continue
name = addon.config.short_name
if name in settings.ADDONS_ARCHIVABLE:
messages[settings.ADDONS_ARCHIVABLE[name]]['addons'].add(addon.config.full_name)
else:
messages['none']['addons'].add(addon.config.full_name)
error_messages = errors.values()
prompts = [
m['message'].format(util.conjunct(m['addons']))
for m in messages.values() if m['addons']
]
if node.has_pointers_recursive:
prompts.append(
language.BEFORE_REGISTER_HAS_POINTERS.format(
category=node.project_or_component
)
)
return {
'prompts': prompts,
'errors': error_messages
}
def osf_admin_change_status_identifier(node, status):
if node.get_identifier_value('doi') and node.get_identifier_value('ark'):
doi, metadata = build_ezid_metadata(node)
client = EzidClient(settings.EZID_USERNAME, settings.EZID_PASSWORD)
client.change_status_identifier(status, doi, metadata)
@must_be_valid_project
@must_have_permission(ADMIN)
def node_identifiers_post(auth, node, **kwargs):
"""Create identifier pair for a node. Node must be a public registration.
"""
if not node.is_public or node.is_retracted:
raise HTTPError(http.BAD_REQUEST)
if node.get_identifier('doi') or node.get_identifier('ark'):
raise HTTPError(http.BAD_REQUEST)
try:
identifiers = get_or_create_identifiers(node)
except HTTPError:
raise HTTPError(http.BAD_REQUEST)
for category, value in identifiers.iteritems():
node.set_identifier_value(category, value)
node.add_log(
NodeLog.EXTERNAL_IDS_ADDED,
params={
'parent_node': node.parent_id,
'node': node._id,
'identifiers': identifiers,
},
auth=auth,
)
return identifiers, http.CREATED
def get_referent_by_identifier(category, value):
"""Look up identifier by `category` and `value` and redirect to its referent
if found.
"""
try:
identifier = Identifier.objects.get(category=category, value=value)
except Identifier.DoesNotExist:
raise HTTPError(http.NOT_FOUND)
if identifier.referent.url:
return redirect(identifier.referent.url)
raise HTTPError(http.NOT_FOUND)
@fail_archive_on_error
@must_be_signed
@must_be_registration
def registration_callbacks(node, payload, *args, **kwargs):
errors = payload.get('errors')
src_provider = payload['source']['provider']
if errors:
node.archive_job.update_target(
src_provider,
ARCHIVER_FAILURE,
errors=errors,
)
else:
# Dataverse requires two separate targets, one
# for draft files and one for published files
if src_provider == 'dataverse':
src_provider += '-' + (payload['destination']['name'].split(' ')[-1].lstrip('(').rstrip(')').strip())
node.archive_job.update_target(
src_provider,
ARCHIVER_SUCCESS,
)
project_signals.archive_callback.send(node)
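For the Dataverse branch above, the provider key is derived from the destination name; for example, assuming a payload destination name of 'Some Dataverse (draft)' (a hypothetical value), the resulting target key becomes 'dataverse-draft':

# Illustrative trace of the Dataverse target-name derivation above (hypothetical input).
name = 'Some Dataverse (draft)'
suffix = name.split(' ')[-1].lstrip('(').rstrip(')').strip()
print('dataverse-' + suffix)  # dataverse-draft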
| 38.358156
| 166
| 0.686512
|
3db732e9f0b66224b391783feef4e5e832566c34
| 1,276
|
py
|
Python
|
commands/today.py
|
quentinguidee/todo-cli
|
7e91579a9046ef6ce7720835550410ef068bbfbb
|
[
"MIT"
] | 4
|
2021-06-14T10:19:11.000Z
|
2022-01-25T20:24:54.000Z
|
commands/today.py
|
quentinguidee/todo-cli
|
7e91579a9046ef6ce7720835550410ef068bbfbb
|
[
"MIT"
] | 2
|
2021-06-14T17:30:32.000Z
|
2021-06-19T08:47:47.000Z
|
commands/today.py
|
quentinguidee/todo-cli
|
7e91579a9046ef6ce7720835550410ef068bbfbb
|
[
"MIT"
] | 2
|
2021-06-14T17:12:57.000Z
|
2021-06-18T18:09:37.000Z
|
import save
import datetime
from rich.console import Console
from rich.table import Table
from rich.text import Text
from commands.command import Command
from models.event import Event
from utils.time import DeltaTime, Time
class TodayCommand(Command):
def execute(self, args):
events: list[Event] = save.get_events(Time.now())
console = Console()
table = Table(title="Events")
table.add_column()
table.add_column("Course")
table.add_column("Task")
table.add_column("Duration")
table.add_column("Start")
table.add_column("End")
total = DeltaTime(0)
for event in events:
delta_time = DeltaTime.between(event.start, event.end)
total += delta_time
table.add_row(
event.id,
save.get_course(event.course_id).name,
save.get_task(event.course_id, event.task_id).name,
delta_time.get_hour_minutes_seconds(),
event.start.get_hour_and_minutes(),
event.end.get_hour_and_minutes()
)
tag = Text(" TOTAL ", style="bold black on yellow", end=" ")
console.print(table, Text(end=" "), tag, Text(total.get_hour_minutes_seconds()))
| 28.355556
| 88
| 0.618339
|
f3026a064162380d3d6ab8239282d051ca4c6f25
| 3,533
|
py
|
Python
|
Medium/721.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | 6
|
2017-09-25T18:05:50.000Z
|
2019-03-27T00:23:15.000Z
|
Medium/721.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | 1
|
2017-10-29T12:04:41.000Z
|
2018-08-16T18:00:37.000Z
|
Medium/721.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | null | null | null |
# ------------------------------
# 721. Accounts Merge
#
# Description:
# Given a list accounts, each element accounts[i] is a list of strings, where the first
# element accounts[i][0] is a name, and the rest of the elements are emails representing
# emails of the account.
#
# Now, we would like to merge these accounts. Two accounts definitely belong to the same
# person if there is some email that is common to both accounts. Note that even if two
# accounts have the same name, they may belong to different people as people could have
# the same name. A person can have any number of accounts initially, but all of their
# accounts definitely have the same name.
#
# After merging the accounts, return the accounts in the following format: the first
# element of each account is the name, and the rest of the elements are emails in sorted
# order. The accounts themselves can be returned in any order.
#
# Example 1:
# Input:
# accounts = [["John", "johnsmith@mail.com", "john00@mail.com"], ["John", "johnnybravo@mail.com"],
# ["John", "johnsmith@mail.com", "john_newyork@mail.com"], ["Mary", "mary@mail.com"]]
# Output: [["John", 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com'],
# ["John", "johnnybravo@mail.com"], ["Mary", "mary@mail.com"]]
#
# Explanation:
# The first and third John's are the same person as they have the common email "johnsmith@mail.com".
# The second John and Mary are different people as none of their email addresses are used by other accounts.
# We could return these lists in any order, for example the answer [['Mary', 'mary@mail.com'], ['John', 'johnnybravo@mail.com'],
# ['John', 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com']] would still be accepted.
#
# Note:
#
# The length of accounts will be in the range [1, 1000].
# The length of accounts[i] will be in the range [1, 10].
# The length of accounts[i][j] will be in the range [1, 30].
#
# Version: 1.0
# 11/04/19 by Jianfa
# ------------------------------
import collections
from typing import List


class DSU:
    def __init__(self):
        self.p = [i for i in range(10001)]  # [0, 1, ..., 10000]

    def find(self, x):
        if self.p[x] != x:
            self.p[x] = self.find(self.p[x])
        return self.p[x]

    def union(self, x, y):
        self.p[self.find(x)] = self.find(y)


class Solution:
    def accountsMerge(self, accounts: List[List[str]]) -> List[List[str]]:
        dsu = DSU()
        owners = {}  # dictionary {email: owner}
        ids = {}     # dictionary {email: id}
        i = 0
        for acc in accounts:
            name = acc[0]
            for email in acc[1:]:
                owners[email] = name
                if email not in ids:
                    # grant an id to email if it's not shown before
                    ids[email] = i
                    i += 1
                dsu.union(ids[email], ids[acc[1]])  # union id of email and id of first email of this account
        ans = collections.defaultdict(list)
        for email in owners:
            ans[dsu.find(ids[email])].append(email)
        return [[owners[v[0]]] + sorted(v) for v in ans.values()]


# Used for testing
if __name__ == "__main__":
    test = Solution()
# ------------------------------
# Summary:
# Union Find solution from https://leetcode.com/problems/accounts-merge/solution/
# https://leetcode.com/problems/accounts-merge/discuss/109157/JavaC%2B%2B-Union-Find is
# also a good explanation.
#
# O(A * logA) time where A = sum(a_i), a_i = len(accounts[i])
# O(A) space
| 40.609195
| 129
| 0.617039
|
5254875cbdfedf5ffa1ab7132ab63ab9b2f748fb
| 117,148
|
py
|
Python
|
tests/test_api_cache_clearer.py
|
enterstudio/the-blue-alliance
|
b53f752fe1f059b4b6f91c841e1865a6c6b81268
|
[
"MIT"
] | null | null | null |
tests/test_api_cache_clearer.py
|
enterstudio/the-blue-alliance
|
b53f752fe1f059b4b6f91c841e1865a6c6b81268
|
[
"MIT"
] | null | null | null |
tests/test_api_cache_clearer.py
|
enterstudio/the-blue-alliance
|
b53f752fe1f059b4b6f91c841e1865a6c6b81268
|
[
"MIT"
] | null | null | null |
import json
import unittest2
import webtest
import webapp2
from datetime import datetime
from google.appengine.ext import ndb
from google.appengine.ext import deferred
from google.appengine.api import taskqueue
from google.appengine.ext import testbed
import api_main
import tba_config
from consts.award_type import AwardType
from consts.district_type import DistrictType
from consts.event_type import EventType
from consts.media_type import MediaType
from controllers.api.api_district_controller import ApiDistrictListController
from controllers.api.api_district_controller import ApiDistrictEventsController
from controllers.api.api_district_controller import ApiDistrictRankingsController
from controllers.api.api_event_controller import ApiEventController
from controllers.api.api_event_controller import ApiEventListController
from controllers.api.api_event_controller import ApiEventTeamsController
from controllers.api.api_event_controller import ApiEventMatchesController
from controllers.api.api_event_controller import ApiEventStatsController
from controllers.api.api_event_controller import ApiEventRankingsController
from controllers.api.api_event_controller import ApiEventAwardsController
from controllers.api.api_event_controller import ApiEventDistrictPointsController
from controllers.api.api_match_controller import ApiMatchController
from controllers.api.api_team_controller import ApiTeamController
from controllers.api.api_team_controller import ApiTeamEventsController
from controllers.api.api_team_controller import ApiTeamEventAwardsController
from controllers.api.api_team_controller import ApiTeamEventMatchesController
from controllers.api.api_team_controller import ApiTeamMediaController
from controllers.api.api_team_controller import ApiTeamYearsParticipatedController
from controllers.api.api_team_controller import ApiTeamListController
from controllers.api.api_team_controller import ApiTeamHistoryRobotsController
from helpers.award_manipulator import AwardManipulator
from helpers.event_manipulator import EventManipulator
from helpers.event_team_manipulator import EventTeamManipulator
from helpers.match_manipulator import MatchManipulator
from helpers.media_manipulator import MediaManipulator
from helpers.robot_manipulator import RobotManipulator
from helpers.team_manipulator import TeamManipulator
from models.award import Award
from models.cached_response import CachedResponse
from models.event import Event
from models.event_team import EventTeam
from models.match import Match
from models.media import Media
from models.robot import Robot
from models.team import Team
class TestApiCacheClearer(unittest2.TestCase):
def setUp(self):
self.testapp = webtest.TestApp(api_main.app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.testbed.init_taskqueue_stub(root_path=".")
self.taskqueue_stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
# Enable the cache we're testing
tba_config.CONFIG['response_cache'] = True
# populate mini db
self.event_2010sc_1 = Event(
id='2010sc',
name='Palmetto Regional',
event_type_enum=EventType.REGIONAL,
event_district_enum=DistrictType.MICHIGAN,
short_name='Palmetto',
event_short='sc',
year=2010,
end_date=datetime(2010, 03, 27),
official=True,
city="Clemson",
state_prov="SC",
country="USA",
start_date=datetime(2010, 03, 24),
)
self.event_2010sc_2 = Event(
id='2010sc',
name='New Regional',
event_type_enum=EventType.REGIONAL,
event_district_enum=DistrictType.MICHIGAN,
short_name='Palmetto',
event_short='sc',
year=2010,
end_date=datetime(2010, 03, 27),
official=True,
city="Clemson",
state_prov="SC",
country="USA",
start_date=datetime(2010, 03, 24),
)
self.team_frc1_1 = Team(
id='frc1',
name='This is a name',
team_number=1,
nickname='NICKNAME',
city='San Jose',
state_prov='CA',
country='USA',
website='www.usfirst.org',
)
self.team_frc1_2 = Team(
id='frc1',
name='This is a name',
team_number=1,
nickname='NICKNAME',
city='San Jose',
state_prov='CA',
country='USA',
website='www.thebluealliance.com',
)
self.team_frc2_1 = Team(
id='frc2',
name='This is a name',
team_number=2,
nickname='NICKNAME',
city='San Jose',
state_prov='CA',
country='USA',
website='www.usfirst.org',
)
self.team_frc2_2 = Team(
id='frc2',
name='This is a name',
team_number=2,
nickname='nickname',
city='San Jose',
state_prov='CA',
country='USA',
website='www.usfirst.org',
)
self.eventteam_2010sc_frc1 = EventTeam(
id='2010sc_frc1',
event=self.event_2010sc_1.key,
team=self.team_frc1_1.key,
year=2010,
)
self.eventteam_2010sc_frc2 = EventTeam(
id='2010sc_frc2',
event=self.event_2010sc_1.key,
team=self.team_frc2_1.key,
year=2010,
)
self.match1_1 = Match(
id='2010sc_qm1',
alliances_json=json.dumps({'blue': {'score': -1, 'teams': ['frc1', 'frc2', 'frc3']}, 'red': {'score': -1, 'teams': ['frc4', 'frc5', 'frc6']}}),
comp_level='qm',
event=self.event_2010sc_1.key,
set_number=1,
match_number=1,
year=2010,
team_key_names=[u'frc1', u'frc2', u'frc3', u'frc4', u'frc5', u'frc6'],
)
self.match1_2 = Match(
id='2010sc_qm1',
alliances_json=json.dumps({'blue': {'score': -1, 'teams': ['frc1', 'frc999', 'frc3']}, 'red': {'score': -1, 'teams': ['frc4', 'frc5', 'frc6']}}),
comp_level='qm',
event=self.event_2010sc_1.key,
set_number=1,
match_number=1,
year=2010,
team_key_names=[u'frc1', u'frc999', u'frc3', u'frc4', u'frc5', u'frc6'],
)
self.award1_1 = Award(
id="2010sc_1",
name_str="Regional Champion",
award_type_enum=AwardType.WINNER,
year=2010,
event=self.event_2010sc_1.key,
event_type_enum=EventType.REGIONAL,
team_list=[self.team_frc1_1.key],
recipient_json_list=[json.dumps({'team_number': 1, 'awardee': None})],
)
self.award1_2 = Award(
id="2010sc_1",
name_str="Regional Champion",
award_type_enum=AwardType.WINNER,
year=2010,
event=self.event_2010sc_1.key,
event_type_enum=EventType.REGIONAL,
team_list=[self.team_frc2_1.key],
recipient_json_list=[json.dumps({'team_number': 2, 'awardee': None})],
)
self.media1_1 = Media(
id='cdphotothread_39894',
media_type_enum=MediaType.CD_PHOTO_THREAD,
foreign_key='39894',
details_json='{"image_partial": "fe3/fe38d320428adf4f51ac969efb3db32c_l.jpg"}',
year=2010,
references=[self.team_frc1_1.key],
)
self.media1_2 = Media(
id='cdphotothread_39894',
media_type_enum=MediaType.CD_PHOTO_THREAD,
foreign_key='39894',
details_json='{"image_partial": "fe3/fe38d320428adf4f51ac969efb3db32c_l.jpg"}',
year=2010,
references=[self.team_frc2_1.key],
)
self.districtlist_2010_cache_key = ApiDistrictListController.get_cache_key_from_format('2010')
self.district_events_2010_cache_key = ApiDistrictEventsController.get_cache_key_from_format('fim', '2010')
self.district_rankings_2010_cache_key = ApiDistrictRankingsController.get_cache_key_from_format('fim', '2010')
self.eventlist_2010_cache_key = ApiEventListController.get_cache_key_from_format('2010')
self.event_2010sc_cache_key = ApiEventController.get_cache_key_from_format('2010sc')
self.eventteams_2010sc_cache_key = ApiEventTeamsController.get_cache_key_from_format('2010sc')
self.eventmatches_2010sc_cache_key = ApiEventMatchesController.get_cache_key_from_format('2010sc')
self.eventstats_2010sc_cache_key = ApiEventStatsController.get_cache_key_from_format('2010sc')
self.eventrankings_2010sc_cache_key = ApiEventRankingsController.get_cache_key_from_format('2010sc')
self.eventawards_2010sc_cache_key = ApiEventAwardsController.get_cache_key_from_format('2010sc')
self.eventdistrictpoints_2010sc_cache_key = ApiEventDistrictPointsController.get_cache_key_from_format('2010sc')
self.match_cache_key = ApiMatchController.get_cache_key_from_format('2010sc_qm1')
self.team_frc1_cache_key = ApiTeamController.get_cache_key_from_format('frc1', 2010)
self.team_frc2_cache_key = ApiTeamController.get_cache_key_from_format('frc2', 2010)
self.team_events_frc1_cache_key = ApiTeamEventsController.get_cache_key_from_format('frc1', 2010)
self.team_events_frc2_cache_key = ApiTeamEventsController.get_cache_key_from_format('frc2', 2010)
self.team_event_awards_frc1_2010sc_cache_key = ApiTeamEventAwardsController.get_cache_key_from_format('frc1', '2010sc')
self.team_event_awards_frc2_2010sc_cache_key = ApiTeamEventAwardsController.get_cache_key_from_format('frc2', '2010sc')
self.team_event_matches_frc1_2010sc_cache_key = ApiTeamEventMatchesController.get_cache_key_from_format('frc1', '2010sc')
self.team_event_matches_frc2_2010sc_cache_key = ApiTeamEventMatchesController.get_cache_key_from_format('frc2', '2010sc')
self.team_media_frc1_cache_key = ApiTeamMediaController.get_cache_key_from_format('frc1', 2010)
self.team_media_frc2_cache_key = ApiTeamMediaController.get_cache_key_from_format('frc2', 2010)
self.team_years_participated_frc1_cache_key = ApiTeamYearsParticipatedController.get_cache_key_from_format('frc1')
self.team_years_participated_frc2_cache_key = ApiTeamYearsParticipatedController.get_cache_key_from_format('frc2')
self.team_list_page_0_cache_key = ApiTeamListController.get_cache_key_from_format(0)
self.team_list_page_1_cache_key = ApiTeamListController.get_cache_key_from_format(1)
self.robot1 = Robot(
id='frc1_2015',
year=2015,
team=self.team_frc1_1.key,
robot_name='Baymax'
)
self.robot2 = Robot(
id='frc1_2015',
year=2015,
team=self.team_frc1_1.key,
robot_name='Wall-E'
)
self.robots_cache_key = ApiTeamHistoryRobotsController.get_cache_key_from_format('frc1')
def tearDown(self):
self.testbed.deactivate()
def processDeferred(self):
"""
Cache clearing is done in a deferred task. Force it to run here.
"""
tasks = self.taskqueue_stub.get_filtered_tasks(queue_names='cache-clearing')
queue = taskqueue.Queue('cache-clearing')
for task in tasks:
deferred.run(task.payload)
queue.delete_tasks(task)
def testRobots(self):
self.assertEqual(CachedResponse.get_by_id(self.robots_cache_key), None)
TeamManipulator.createOrUpdate(self.team_frc1_1)
RobotManipulator.createOrUpdate(self.robot1)
self.processDeferred()
response = self.testapp.get('/api/v2/team/frc1/history/robots', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.robots_cache_key), None)
RobotManipulator.createOrUpdate(self.robot2)
self.processDeferred()
self.assertEqual(CachedResponse.get_by_id(self.robots_cache_key), None)
response = self.testapp.get('/api/v2/team/frc1/history/robots', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.robots_cache_key), None)
def resetAll(self, flushed=False):
response = self.testapp.get('/api/v2/events/2010', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
EventManipulator.createOrUpdate(self.event_2010sc_1)
self.processDeferred()
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
TeamManipulator.createOrUpdate(self.team_frc1_1)
TeamManipulator.createOrUpdate(self.team_frc2_1)
EventTeamManipulator.createOrUpdate(self.eventteam_2010sc_frc1)
EventTeamManipulator.createOrUpdate(self.eventteam_2010sc_frc2)
MatchManipulator.createOrUpdate(self.match1_1)
AwardManipulator.createOrUpdate(self.award1_1)
MediaManipulator.createOrUpdate(self.media1_1)
self.processDeferred()
response = self.testapp.get('/api/v2/events/2010', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
response = self.testapp.get('/api/v2/event/2010sc', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
response = self.testapp.get('/api/v2/event/2010sc/teams', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
response = self.testapp.get('/api/v2/event/2010sc/matches', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
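        # Fetching event stats should populate the event stats cache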
response = self.testapp.get('/api/v2/event/2010sc/stats', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
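        # Fetching event rankings should populate the event rankings cache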
response = self.testapp.get('/api/v2/event/2010sc/rankings', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
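        # Fetching event awards should populate the event awards cache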
response = self.testapp.get('/api/v2/event/2010sc/awards', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
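        # Fetching event district points should populate the event district points cache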
response = self.testapp.get('/api/v2/event/2010sc/district_points', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
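        # Fetching a match should populate the match cache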
response = self.testapp.get('/api/v2/match/2010sc_qm1', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
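        # Fetching each team should populate that team's cache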
response = self.testapp.get('/api/v2/team/frc1', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
response = self.testapp.get('/api/v2/team/frc2', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
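        # Fetching each team's 2010 events should populate that team's events cache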
response = self.testapp.get('/api/v2/team/frc1/2010/events', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
response = self.testapp.get('/api/v2/team/frc2/2010/events', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
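        # Fetching each team's event awards should populate that team's event awards cache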
response = self.testapp.get('/api/v2/team/frc1/event/2010sc/awards', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
response = self.testapp.get('/api/v2/team/frc2/event/2010sc/awards', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
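        # Fetching each team's event matches should populate that team's event matches cache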
response = self.testapp.get('/api/v2/team/frc1/event/2010sc/matches', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
response = self.testapp.get('/api/v2/team/frc2/event/2010sc/matches', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
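        # Fetching each team's media should populate that team's media cache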
response = self.testapp.get('/api/v2/team/frc1/2010/media', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
response = self.testapp.get('/api/v2/team/frc2/2010/media', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
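        # Fetching each team's years participated should populate that team's years participated cache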
response = self.testapp.get('/api/v2/team/frc1/years_participated', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
response = self.testapp.get('/api/v2/team/frc2/years_participated', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
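        # Fetching each team list page should populate that page's cache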
response = self.testapp.get('/api/v2/teams/0', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
response = self.testapp.get('/api/v2/teams/1', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
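        # Fetching the 2010 district list should populate the district list cache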
response = self.testapp.get('/api/v2/districts/2010', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
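        # Fetching district events should populate the district events cache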
response = self.testapp.get('/api/v2/district/fim/2010/events', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
if flushed:
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
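        # Fetching district rankings should populate the district rankings cache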
response = self.testapp.get('/api/v2/district/fim/2010/rankings', headers={'X-TBA-App-Id': 'tba-tests:api-cache-clear-test:v01'})
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)

    def testApiCacheClear(self):
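        # Before any API requests have been made, every cache should be empty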
self.assertEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
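        # Hit every API endpoint once to populate all of the caches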
self.resetAll(flushed=True)
        # Re-saving the same, unchanged models shouldn't evict any caches
EventManipulator.createOrUpdate(self.event_2010sc_1)
EventTeamManipulator.createOrUpdate(self.eventteam_2010sc_frc1)
EventTeamManipulator.createOrUpdate(self.eventteam_2010sc_frc2)
AwardManipulator.createOrUpdate(self.award1_1)
MatchManipulator.createOrUpdate(self.match1_1)
TeamManipulator.createOrUpdate(self.team_frc1_1)
TeamManipulator.createOrUpdate(self.team_frc2_1)
MediaManipulator.createOrUpdate(self.media1_1)
self.processDeferred()
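        # All caches should still be populated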
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
# updating an event
EventManipulator.createOrUpdate(self.event_2010sc_2)
self.processDeferred()
self.assertEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
self.resetAll()
# updating a team
TeamManipulator.createOrUpdate(self.team_frc1_2)
self.processDeferred()
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
self.resetAll()
# updating a match
MatchManipulator.createOrUpdate(self.match1_2)
self.processDeferred()
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
self.resetAll()
# updating an award
AwardManipulator.createOrUpdate(self.award1_2)
self.processDeferred()
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
self.resetAll()
# updating a media
MediaManipulator.createOrUpdate(self.media1_2)
self.processDeferred()
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
self.resetAll()
# deleting a media
MediaManipulator.delete(self.media1_2)
self.processDeferred()
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
self.resetAll()
# deleting an award
AwardManipulator.delete(self.award1_2)
self.processDeferred()
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
self.resetAll()
# deleting a match
MatchManipulator.delete(self.match1_2)
self.processDeferred()
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
self.resetAll()
# deleting a team
TeamManipulator.delete(self.team_frc2_2)
self.processDeferred()
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
self.resetAll()
# deleting an event
EventManipulator.delete(self.event_2010sc_2)
self.processDeferred()
self.assertEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
self.resetAll()
# deleting an eventteam
EventTeamManipulator.delete(self.eventteam_2010sc_frc1)
self.processDeferred()
self.assertNotEqual(CachedResponse.get_by_id(self.eventlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.event_2010sc_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.eventteams_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventmatches_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventstats_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventrankings_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventawards_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.eventdistrictpoints_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.match_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_events_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_events_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_awards_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc1_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_event_matches_frc2_2010sc_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_media_frc2_cache_key), None)
self.assertEqual(CachedResponse.get_by_id(self.team_years_participated_frc1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_years_participated_frc2_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_0_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.team_list_page_1_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.districtlist_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_events_2010_cache_key), None)
self.assertNotEqual(CachedResponse.get_by_id(self.district_rankings_2010_cache_key), None)
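# Editorial note (a sketch, not code from the original test module): every step in
# testApiCacheClear repeats one pattern -- write or delete a model through its
# Manipulator, drain the deferred queue, then walk the same 26 CachedResponse keys
# checking that only the ones tied to the touched model were evicted. Assuming the
# same CachedResponse import the test module already uses, a helper method on the
# test class could express each step in two short calls:
def assertCacheState(self, cleared, kept):
    """Assert every key in `cleared` was evicted and every key in `kept` survived."""
    for key in cleared:
        self.assertIsNone(CachedResponse.get_by_id(key))
    for key in kept:
        self.assertIsNotNone(CachedResponse.get_by_id(key))
# e.g. after MediaManipulator.delete(self.media1_2) one would write
# self.assertCacheState(cleared=[self.team_media_frc2_cache_key],
#                       kept=[self.team_media_frc1_cache_key, self.match_cache_key])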
| 78.307487
| 157
| 0.778477
|
74f03351c20d4a682fed0b6ba9f1d2e1ac386650
| 678
|
py
|
Python
|
yatube/yatube/urls.py
|
Dimanitto/yatube
|
1d93d5b90c2a9f7c888ce83eac5acdc4d9ed40e2
|
[
"MIT"
] | null | null | null |
yatube/yatube/urls.py
|
Dimanitto/yatube
|
1d93d5b90c2a9f7c888ce83eac5acdc4d9ed40e2
|
[
"MIT"
] | null | null | null |
yatube/yatube/urls.py
|
Dimanitto/yatube
|
1d93d5b90c2a9f7c888ce83eac5acdc4d9ed40e2
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', include('posts.urls', namespace='posts')),
path('admin/', admin.site.urls),
path('auth/', include('users.urls', namespace='users')),
path('auth/', include('django.contrib.auth.urls')),
path('about/', include('about.urls', namespace='about')),
]
handler404 = 'core.views.page_not_found'
handler500 = 'core.views.server_error'
handler403 = 'core.views.permission_denied'
if settings.DEBUG:
urlpatterns += static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
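# The handler404/handler500/handler403 strings above point Django at custom error
# views in a `core` app that is not included in this snippet. A minimal sketch with
# the signatures Django expects (the template paths and the `core` app layout are
# assumptions, not taken from the original project):
from django.shortcuts import render

def page_not_found(request, exception):
    # Django passes the raised Http404 to the 404 handler as `exception`.
    return render(request, 'core/404.html', {'path': request.path}, status=404)

def permission_denied(request, exception):
    return render(request, 'core/403.html', status=403)

def server_error(request):
    # The 500 handler receives only the request object.
    return render(request, 'core/500.html', status=500)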
| 30.818182
| 61
| 0.712389
|
da0fb022a71a375df57da7605498060263f4c062
| 413
|
py
|
Python
|
polls/urls.py
|
elciosato/django_polls
|
9d44bf88abc8a26dd66f911329448083d42757f3
|
[
"MIT"
] | null | null | null |
polls/urls.py
|
elciosato/django_polls
|
9d44bf88abc8a26dd66f911329448083d42757f3
|
[
"MIT"
] | 6
|
2021-03-19T02:04:25.000Z
|
2021-09-22T18:57:58.000Z
|
polls/urls.py
|
elciosato/django_polls
|
9d44bf88abc8a26dd66f911329448083d42757f3
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
app_name = 'polls'
urlpatterns = [
# ex: /polls/
path('', views.index, name='index'),
# ex: /polls/5/
path('<int:question_id>/', views.detail, name='detail'),
# ex: /polls/5/vote/
path('<int:question_id>/vote/', views.vote, name='vote'),
# ex: /polls/5/results/
path('<int:question_id>/results/', views.results, name='results'),
]
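# Companion sketch (illustrative only; the project's real views.py is not part of
# this snippet): each `<int:question_id>` converter above captures the integer
# segment of the URL and passes it to the view as a keyword argument.
from django.http import HttpResponse

def detail(request, question_id):
    # Visiting /polls/5/ resolves here with question_id == 5 (already an int).
    return HttpResponse(f"You're looking at question {question_id}.")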
| 27.533333
| 70
| 0.615012
|
4de2be85a4fb82173fd90fec64a5262a4635427b
| 2,673
|
py
|
Python
|
ts_decompose.py
|
Pu-nk/Offline_pump_scheduling
|
13b913a5f530d70ae5fc33d65adc46e74731154a
|
[
"MIT"
] | null | null | null |
ts_decompose.py
|
Pu-nk/Offline_pump_scheduling
|
13b913a5f530d70ae5fc33d65adc46e74731154a
|
[
"MIT"
] | null | null | null |
ts_decompose.py
|
Pu-nk/Offline_pump_scheduling
|
13b913a5f530d70ae5fc33d65adc46e74731154a
|
[
"MIT"
] | null | null | null |
import pywt as wt
import pandas as pd
import matplotlib.pyplot as plt
class wavelet_strcuture(object):
def __init__(self, ts_array, wave_func='db4'):
self.ts_array = ts_array.copy()
self.wave_func = wave_func
        self.cA3, self.cD3, self.cD2, self.cD1 = self._ts_decompose()
def _ts_decompose(self):
cA3, cD3, cD2, cD1 = wt.wavedec(self.ts_array, self.wave_func, level=3, mode='periodic')
cA3_array = wt.waverec([cA3, None, None, None], self.wave_func)[:len(self.ts_array)]
cD3_array = wt.waverec([None, cD3, None, None], self.wave_func)[:len(self.ts_array)]
cD2_array = wt.waverec([None, cD2, None], self.wave_func)[:len(self.ts_array)]
cD1_array = wt.waverec([None, cD1], self.wave_func)[:len(self.ts_array)]
return cA3_array, cD3_array, cD2_array, cD1_array
def wavelet_denosing(df,mode='all'):
data = df.copy()
selected_columns = data.columns if mode == 'all' else mode
for cl in selected_columns:
cl_array = data[cl].values
cl_array_cA3 = wavelet_decompose(cl_array)[0]
data.loc[:, cl] = cl_array_cA3
return data
def wavelet_decompose(data, wave_func='db4'):
Wave_func = wt.Wavelet(wave_func)
cA3, cD3, cD2, cD1 = wt.wavedec(data, Wave_func, level=3, mode='periodic')
cA3_array = wt.waverec([cA3, None, None, None], Wave_func)[:len(data)]
cD3_array = wt.waverec([None, cD3, None, None], Wave_func)[:len(data)]
cD2_array = wt.waverec([None, cD2, None], Wave_func)[:len(data)]
cD1_array = wt.waverec([None, cD1], Wave_func)[:len(data)]
return cA3_array,cD3_array, cD2_array, cD1_array
def wavelet_sample_construct(data, wave_func='db4'):
ts = data['value'].values
mkdf = lambda x: pd.DataFrame(x, columns=['value'], index=data.index)
cA3, cD3, cD2, cD1 = list(map(mkdf, wavelet_decompose(ts, wave_func)))
return cA3, cD3, cD2, cD1
if __name__ == '__main__':
train_data = pd.read_csv('./train_dataset.csv', index_col='datetime', parse_dates=True)
train_data.dropna(inplace=True)
cA3_data = wavelet_denosing(train_data[['hx_pressure']])
plt.plot(train_data.index, cA3_data['hx_pressure'])
plt.plot(train_data.index, train_data['hx_pressure'])
plt.show()
# df = STData(data_path, precision=15, sp=0)
    # # Filter outliers
# df.Z_score_Filter(lag=10000, threshold=3, plot=False)
    # # Outlier imputation
# df.TW_KNNR_imputation()
    # dataclip = df.data['2020-9':'2020-11']  # the data from September to November
# cA3, cD3, cD2, cD1 = wavelet_sample_construct(dataclip)
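# Usage sketch (added for illustration; it assumes only numpy/pandas plus the
# functions defined above, with synthetic data in place of train_dataset.csv).
# `wavelet_denosing` replaces each selected column with its level-3 approximation
# cA3, i.e. it low-pass filters the series and discards the three detail bands:
import numpy as np
import pandas as pd

noisy = pd.DataFrame(
    {'hx_pressure': np.sin(np.linspace(0, 8 * np.pi, 512)) + 0.3 * np.random.randn(512)}
)
smooth = wavelet_denosing(noisy, mode=['hx_pressure'])
# The denoised column typically varies less than the raw one:
print(noisy['hx_pressure'].std(), smooth['hx_pressure'].std())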
| 41.123077
| 96
| 0.671904
|
df8648bfe7b1db800d61dde1581b21beb3c9d3cd
| 2,162
|
py
|
Python
|
tests/test_price_account_header.py
|
superfive666/pyth-client-py
|
243298ba3beeae8712a993113ce3f5f792e1e09b
|
[
"Apache-2.0"
] | 1
|
2022-03-18T04:06:54.000Z
|
2022-03-18T04:06:54.000Z
|
tests/test_price_account_header.py
|
superfive666/pyth-client-py
|
243298ba3beeae8712a993113ce3f5f792e1e09b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_price_account_header.py
|
superfive666/pyth-client-py
|
243298ba3beeae8712a993113ce3f5f792e1e09b
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from pythclient.pythaccounts import PythAccountType, _parse_header
@pytest.fixture
def valid_binary():
"""
magic=0xA1B2C3D4 version=2, type=price, size=16
"""
return bytes([212, 195, 178, 161, 2, 0, 0, 0, 3, 0, 0, 0, 16, 0, 0, 0])
@pytest.fixture
def valid_expected():
return PythAccountType.PRICE, 16, 2
@pytest.fixture
def bad_magic():
"""
magic=0xDEADBEEF, version=2, type=price, size=16
"""
return bytes([239, 190, 173, 222, 2, 0, 0, 0, 3, 0, 0, 0, 16, 0, 0, 0])
@pytest.fixture
def bad_magic_message():
return "Invalid Pyth account data header has wrong magic: expected a1b2c3d4, got deadbeef"
@pytest.fixture
def wrong_version():
"""
magic=0xA1B2C3D4 version=42, type=price, size=16
"""
return bytes([212, 195, 178, 161, 42, 0, 0, 0, 3, 0, 0, 0, 16, 0, 0, 0])
@pytest.fixture
def wrong_version_message():
return "Invalid Pyth account data has unsupported version 42"
@pytest.fixture
def wrong_size():
"""
magic=0xA1B2C3D4 version=2, type=price, size=32
"""
return bytes([212, 195, 178, 161, 2, 0, 0, 0, 3, 0, 0, 0, 32, 0, 0, 0])
@pytest.fixture
def wrong_size_message():
return "Invalid Pyth header says data is 32 bytes, but buffer only has 16 bytes"
@pytest.fixture
def too_short():
"""
    Totally bogus message that is too short
"""
return bytes([1, 2, 3, 4])
@pytest.fixture
def too_short_message():
return "Pyth account data too short"
@pytest.mark.parametrize(
"buffer_fixture_name",
["bad_magic", "wrong_version", "wrong_size", "too_short"],
)
def test_header_parsing_errors(buffer_fixture_name, request):
buffer = request.getfixturevalue(buffer_fixture_name)
exc_message = request.getfixturevalue(f"{buffer_fixture_name}_message")
with pytest.raises(ValueError, match=exc_message):
_parse_header(
buffer=buffer,
offset=0,
key="Invalid",
)
def test_header_parsing_valid(valid_binary, valid_expected):
actual = _parse_header(
buffer=valid_binary,
offset=0,
key="Invalid",
)
assert actual == valid_expected
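# Byte-layout note (an illustration added here, not part of the original test
# module): the 16-byte fixtures above are four little-endian uint32 values, which
# is why magic 0xA1B2C3D4 appears as [212, 195, 161...] reversed per field. The
# internals of _parse_header are not shown; struct merely demonstrates the layout
# that the valid_binary/valid_expected pair encodes (type 3 == PythAccountType.PRICE).
import struct

header = bytes([212, 195, 178, 161, 2, 0, 0, 0, 3, 0, 0, 0, 16, 0, 0, 0])
magic, version, type_, size = struct.unpack('<4I', header)
assert (hex(magic), version, type_, size) == ('0xa1b2c3d4', 2, 3, 16)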
| 23
| 94
| 0.659112
|
24562f47140ef9f3adad258f9c44eaedde4ca00b
| 826
|
py
|
Python
|
ocp_build_data/migrations/0001_initial.py
|
adarshtri/art-dashboard-server
|
c6c61147d49aa43b6e2892ce07d8a115c1478b0c
|
[
"Apache-2.0"
] | 1
|
2020-09-21T06:48:47.000Z
|
2020-09-21T06:48:47.000Z
|
ocp_build_data/migrations/0001_initial.py
|
adarshtri/art-dashboard-server
|
c6c61147d49aa43b6e2892ce07d8a115c1478b0c
|
[
"Apache-2.0"
] | 5
|
2021-02-05T19:43:08.000Z
|
2021-06-04T23:23:29.000Z
|
ocp_build_data/migrations/0001_initial.py
|
adarshtri/art-dashboard-server
|
c6c61147d49aa43b6e2892ce07d8a115c1478b0c
|
[
"Apache-2.0"
] | 6
|
2021-02-06T07:21:37.000Z
|
2021-06-07T12:40:37.000Z
|
# Generated by Django 3.0.7 on 2020-07-30 04:44
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='OpenShiftCurrentAdvisory',
fields=[
('log_openshift_release_advisory_id', models.AutoField(primary_key=True, serialize=False)),
('openshift_version', models.CharField(max_length=50)),
('advisory_type', models.CharField(max_length=100)),
('current_advisory_id', models.CharField(max_length=20)),
('previous_advisory_id', models.CharField(max_length=20)),
],
options={
'db_table': 'log_openshift_release_advisory',
},
),
]
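# For orientation, a sketch of the model this initial migration corresponds to;
# the project's actual models.py is not part of this snippet, so everything below
# is inferred from the CreateModel operation above:
from django.db import models

class OpenShiftCurrentAdvisory(models.Model):
    log_openshift_release_advisory_id = models.AutoField(primary_key=True)
    openshift_version = models.CharField(max_length=50)
    advisory_type = models.CharField(max_length=100)
    current_advisory_id = models.CharField(max_length=20)
    previous_advisory_id = models.CharField(max_length=20)

    class Meta:
        db_table = 'log_openshift_release_advisory'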
| 29.5
| 107
| 0.59201
|
3101ae9c802bb53a02bc7aaf1a5c1f7a65b5e913
| 548
|
py
|
Python
|
game.py
|
Mickey758/Python-Game
|
0c2b531856f00be387a643daf88598b41dc376c2
|
[
"MIT"
] | 2
|
2021-12-25T12:05:59.000Z
|
2022-01-08T19:11:18.000Z
|
game.py
|
Mickey758/Python-Game
|
0c2b531856f00be387a643daf88598b41dc376c2
|
[
"MIT"
] | null | null | null |
game.py
|
Mickey758/Python-Game
|
0c2b531856f00be387a643daf88598b41dc376c2
|
[
"MIT"
] | null | null | null |
from variables import player, food, progress
from console.utils import set_title
def title():
set_title(f"Score = {progress.score}")
def update(key):
match key:
case "down":
player.y += 1 if player.y < 29 else 0
case "up":
player.y -= 1 if player.y > 0 else 0
case "left":
player.x -= 1 if player.x > 0 else 0
case "right":
player.x += 1 if player.x < 59 else 0
if player.x == food.x and player.y == food.y:
progress.score += 1
food.spawn()
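# The `variables` module imported at the top of game.py is not included in this
# snippet. A hypothetical minimal version, inferred only from the attributes the
# code above touches (player.x/y, food.x/y/spawn(), progress.score) and from the
# 60x30 clamping bounds in update(); the real module may look quite different:
import random

class Entity:
    def __init__(self):
        self.x, self.y = 0, 0
    def spawn(self):
        # Keep the food inside the same 0..59 x 0..29 board the player is clamped to.
        self.x, self.y = random.randint(0, 59), random.randint(0, 29)

class Progress:
    def __init__(self):
        self.score = 0

player, food, progress = Entity(), Entity(), Progress()
food.spawn()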
| 32.235294
| 49
| 0.54927
|
06611fd83c896c99d425cb552ba31893bc557aec
| 68,251
|
py
|
Python
|
yandex/cloud/ydb/v1/database_service_pb2.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 36
|
2018-12-23T13:51:50.000Z
|
2022-03-25T07:48:24.000Z
|
yandex/cloud/ydb/v1/database_service_pb2.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 15
|
2019-02-28T04:55:09.000Z
|
2022-03-06T23:17:24.000Z
|
yandex/cloud/ydb/v1/database_service_pb2.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 18
|
2019-02-23T07:10:57.000Z
|
2022-03-28T14:41:08.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/ydb/v1/database_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
from yandex.cloud.api import operation_pb2 as yandex_dot_cloud_dot_api_dot_operation__pb2
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
from yandex.cloud import validation_pb2 as yandex_dot_cloud_dot_validation__pb2
from yandex.cloud.ydb.v1 import database_pb2 as yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2
from yandex.cloud.ydb.v1 import backup_pb2 as yandex_dot_cloud_dot_ydb_dot_v1_dot_backup__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/ydb/v1/database_service.proto',
package='yandex.cloud.ydb.v1',
syntax='proto3',
serialized_options=b'\n\027yandex.cloud.api.ydb.v1Z;github.com/yandex-cloud/go-genproto/yandex/cloud/ydb/v1;ydb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n*yandex/cloud/ydb/v1/database_service.proto\x12\x13yandex.cloud.ydb.v1\x1a\x1cgoogle/api/annotations.proto\x1a google/protobuf/field_mask.proto\x1a yandex/cloud/api/operation.proto\x1a&yandex/cloud/operation/operation.proto\x1a\x1dyandex/cloud/validation.proto\x1a\"yandex/cloud/ydb/v1/database.proto\x1a yandex/cloud/ydb/v1/backup.proto\"\x89\x01\n\x14RestoreBackupRequest\x12\x1f\n\tbackup_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12!\n\x0b\x64\x61tabase_id\x18\x02 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x18\n\x10paths_to_restore\x18\x03 \x03(\t\x12\x13\n\x0btarget_path\x18\x04 \x01(\t\"?\n\x15RestoreBackupMetadata\x12\x11\n\tbackup_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x61tabase_id\x18\x02 \x01(\t\"j\n\x15\x42\x61\x63kupDatabaseRequest\x12\x13\n\x0b\x64\x61tabase_id\x18\x01 \x01(\t\x12<\n\x0f\x62\x61\x63kup_settings\x18\x02 \x01(\x0b\x32#.yandex.cloud.ydb.v1.BackupSettings\"@\n\x16\x42\x61\x63kupDatabaseMetadata\x12\x11\n\tbackup_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x61tabase_id\x18\x02 \x01(\t\"9\n\x14StartDatabaseRequest\x12!\n\x0b\x64\x61tabase_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\"C\n\x15StartDatabaseMetadata\x12\x13\n\x0b\x64\x61tabase_id\x18\x01 \x01(\t\x12\x15\n\rdatabase_name\x18\x02 \x01(\t\"8\n\x13StopDatabaseRequest\x12!\n\x0b\x64\x61tabase_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\"B\n\x14StopDatabaseMetadata\x12\x13\n\x0b\x64\x61tabase_id\x18\x01 \x01(\t\x12\x15\n\rdatabase_name\x18\x02 \x01(\t\"7\n\x12GetDatabaseRequest\x12!\n\x0b\x64\x61tabase_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\"g\n\x14ListDatabasesRequest\x12\x11\n\tfolder_id\x18\x01 \x01(\t\x12\x1d\n\tpage_size\x18\x02 \x01(\x03\x42\n\xfa\xc7\x31\x06\x30-1000\x12\x1d\n\npage_token\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=100\"b\n\x15ListDatabasesResponse\x12\x30\n\tdatabases\x18\x01 \x03(\x0b\x32\x1d.yandex.cloud.ydb.v1.Database\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"\xc9\x06\n\x15\x43reateDatabaseRequest\x12\x11\n\tfolder_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\x1a\n\x12resource_preset_id\x18\x04 \x01(\t\x12:\n\x0estorage_config\x18\x05 \x01(\x0b\x32\".yandex.cloud.ydb.v1.StorageConfig\x12\x36\n\x0cscale_policy\x18\x06 \x01(\x0b\x32 .yandex.cloud.ydb.v1.ScalePolicy\x12\x12\n\nnetwork_id\x18\x07 \x01(\t\x12\x12\n\nsubnet_ids\x18\x08 \x03(\t\x12<\n\x0ezonal_database\x18\t \x01(\x0b\x32\".yandex.cloud.ydb.v1.ZonalDatabaseH\x00\x12\x42\n\x11regional_database\x18\n \x01(\x0b\x32%.yandex.cloud.ydb.v1.RegionalDatabaseH\x00\x12\x44\n\x12\x64\x65\x64icated_database\x18\r \x01(\x0b\x32&.yandex.cloud.ydb.v1.DedicatedDatabaseH\x00\x12\x46\n\x13serverless_database\x18\x0e \x01(\x0b\x32\'.yandex.cloud.ydb.v1.ServerlessDatabaseH\x00\x12\x19\n\x11\x61ssign_public_ips\x18\x0b \x01(\x08\x12\x13\n\x0blocation_id\x18\x0c \x01(\t\x12\x46\n\x06labels\x18\x0f \x03(\x0b\x32\x36.yandex.cloud.ydb.v1.CreateDatabaseRequest.LabelsEntry\x12\x38\n\rbackup_config\x18\x10 \x01(\x0b\x32!.yandex.cloud.ydb.v1.BackupConfig\x12@\n\x11monitoring_config\x18\x11 \x01(\x0b\x32%.yandex.cloud.ydb.v1.MonitoringConfig\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x0f\n\rdatabase_type\"D\n\x16\x43reateDatabaseMetadata\x12\x13\n\x0b\x64\x61tabase_id\x18\x01 \x01(\t\x12\x15\n\rdatabase_name\x18\x02 \x01(\t\"\x8f\x07\n\x15UpdateDatabaseRequest\x12\x11\n\tfolder_id\x18\x01 \x01(\t\x12/\n\x0bupdate_mask\x18\x02 
\x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x13\n\x0b\x64\x61tabase_id\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12\x1a\n\x12resource_preset_id\x18\x06 \x01(\t\x12:\n\x0estorage_config\x18\x07 \x01(\x0b\x32\".yandex.cloud.ydb.v1.StorageConfig\x12\x36\n\x0cscale_policy\x18\x08 \x01(\x0b\x32 .yandex.cloud.ydb.v1.ScalePolicy\x12\x12\n\nnetwork_id\x18\t \x01(\t\x12\x12\n\nsubnet_ids\x18\n \x03(\t\x12<\n\x0ezonal_database\x18\x0b \x01(\x0b\x32\".yandex.cloud.ydb.v1.ZonalDatabaseH\x00\x12\x42\n\x11regional_database\x18\x0c \x01(\x0b\x32%.yandex.cloud.ydb.v1.RegionalDatabaseH\x00\x12\x44\n\x12\x64\x65\x64icated_database\x18\x0f \x01(\x0b\x32&.yandex.cloud.ydb.v1.DedicatedDatabaseH\x00\x12\x46\n\x13serverless_database\x18\x10 \x01(\x0b\x32\'.yandex.cloud.ydb.v1.ServerlessDatabaseH\x00\x12\x19\n\x11\x61ssign_public_ips\x18\r \x01(\x08\x12\x13\n\x0blocation_id\x18\x0e \x01(\t\x12\x46\n\x06labels\x18\x11 \x03(\x0b\x32\x36.yandex.cloud.ydb.v1.UpdateDatabaseRequest.LabelsEntry\x12\x38\n\rbackup_config\x18\x12 \x01(\x0b\x32!.yandex.cloud.ydb.v1.BackupConfig\x12@\n\x11monitoring_config\x18\x13 \x01(\x0b\x32%.yandex.cloud.ydb.v1.MonitoringConfig\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x0f\n\rdatabase_type\"D\n\x16UpdateDatabaseMetadata\x12\x13\n\x0b\x64\x61tabase_id\x18\x01 \x01(\t\x12\x15\n\rdatabase_name\x18\x02 \x01(\t\",\n\x15\x44\x65leteDatabaseRequest\x12\x13\n\x0b\x64\x61tabase_id\x18\x01 \x01(\t\"D\n\x16\x44\x65leteDatabaseMetadata\x12\x13\n\x0b\x64\x61tabase_id\x18\x01 \x01(\t\x12\x15\n\rdatabase_name\x18\x02 \x01(\t2\xa1\x0b\n\x0f\x44\x61tabaseService\x12v\n\x03Get\x12\'.yandex.cloud.ydb.v1.GetDatabaseRequest\x1a\x1d.yandex.cloud.ydb.v1.Database\"\'\x82\xd3\xe4\x93\x02!\x12\x1f/ydb/v1/databases/{database_id}\x12x\n\x04List\x12).yandex.cloud.ydb.v1.ListDatabasesRequest\x1a*.yandex.cloud.ydb.v1.ListDatabasesResponse\"\x19\x82\xd3\xe4\x93\x02\x13\x12\x11/ydb/v1/databases\x12\x9b\x01\n\x06\x43reate\x12*.yandex.cloud.ydb.v1.CreateDatabaseRequest\x1a!.yandex.cloud.operation.Operation\"B\x82\xd3\xe4\x93\x02\x16\"\x11/ydb/v1/databases:\x01*\xb2\xd2*\"\n\x16\x43reateDatabaseMetadata\x12\x08\x44\x61tabase\x12\xa9\x01\n\x06Update\x12*.yandex.cloud.ydb.v1.UpdateDatabaseRequest\x1a!.yandex.cloud.operation.Operation\"P\x82\xd3\xe4\x93\x02$2\x1f/ydb/v1/databases/{database_id}:\x01*\xb2\xd2*\"\n\x16UpdateDatabaseMetadata\x12\x08\x44\x61tabase\x12\xa9\x01\n\x05Start\x12).yandex.cloud.ydb.v1.StartDatabaseRequest\x1a!.yandex.cloud.operation.Operation\"R\x82\xd3\xe4\x93\x02\'\"%/ydb/v1/databases/{database_id}:start\xb2\xd2*!\n\x15StartDatabaseMetadata\x12\x08\x44\x61tabase\x12\xa5\x01\n\x04Stop\x12(.yandex.cloud.ydb.v1.StopDatabaseRequest\x1a!.yandex.cloud.operation.Operation\"P\x82\xd3\xe4\x93\x02&\"$/ydb/v1/databases/{database_id}:stop\xb2\xd2* 
\n\x14StopDatabaseMetadata\x12\x08\x44\x61tabase\x12\xb3\x01\n\x06\x44\x65lete\x12*.yandex.cloud.ydb.v1.DeleteDatabaseRequest\x1a!.yandex.cloud.operation.Operation\"Z\x82\xd3\xe4\x93\x02!*\x1f/ydb/v1/databases/{database_id}\xb2\xd2*/\n\x16\x44\x65leteDatabaseMetadata\x12\x15google.protobuf.Empty\x12\xa2\x01\n\x07Restore\x12).yandex.cloud.ydb.v1.RestoreBackupRequest\x1a!.yandex.cloud.operation.Operation\"I\x82\xd3\xe4\x93\x02\x1e\"\x19/ydb/v1/databases:restore:\x01*\xb2\xd2*!\n\x15RestoreBackupMetadata\x12\x08\x44\x61tabase\x12\xa2\x01\n\x06\x42\x61\x63kup\x12*.yandex.cloud.ydb.v1.BackupDatabaseRequest\x1a!.yandex.cloud.operation.Operation\"I\x82\xd3\xe4\x93\x02\x1d\"\x18/ydb/v1/databases:backup:\x01*\xb2\xd2*\"\n\x16\x42\x61\x63kupDatabaseMetadata\x12\x08\x44\x61tabaseBV\n\x17yandex.cloud.api.ydb.v1Z;github.com/yandex-cloud/go-genproto/yandex/cloud/ydb/v1;ydbb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,yandex_dot_cloud_dot_api_dot_operation__pb2.DESCRIPTOR,yandex_dot_cloud_dot_operation_dot_operation__pb2.DESCRIPTOR,yandex_dot_cloud_dot_validation__pb2.DESCRIPTOR,yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2.DESCRIPTOR,yandex_dot_cloud_dot_ydb_dot_v1_dot_backup__pb2.DESCRIPTOR,])
_RESTOREBACKUPREQUEST = _descriptor.Descriptor(
name='RestoreBackupRequest',
full_name='yandex.cloud.ydb.v1.RestoreBackupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='backup_id', full_name='yandex.cloud.ydb.v1.RestoreBackupRequest.backup_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='database_id', full_name='yandex.cloud.ydb.v1.RestoreBackupRequest.database_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='paths_to_restore', full_name='yandex.cloud.ydb.v1.RestoreBackupRequest.paths_to_restore', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='target_path', full_name='yandex.cloud.ydb.v1.RestoreBackupRequest.target_path', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=307,
serialized_end=444,
)
_RESTOREBACKUPMETADATA = _descriptor.Descriptor(
name='RestoreBackupMetadata',
full_name='yandex.cloud.ydb.v1.RestoreBackupMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='backup_id', full_name='yandex.cloud.ydb.v1.RestoreBackupMetadata.backup_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='database_id', full_name='yandex.cloud.ydb.v1.RestoreBackupMetadata.database_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=446,
serialized_end=509,
)
_BACKUPDATABASEREQUEST = _descriptor.Descriptor(
name='BackupDatabaseRequest',
full_name='yandex.cloud.ydb.v1.BackupDatabaseRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='database_id', full_name='yandex.cloud.ydb.v1.BackupDatabaseRequest.database_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='backup_settings', full_name='yandex.cloud.ydb.v1.BackupDatabaseRequest.backup_settings', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=511,
serialized_end=617,
)
_BACKUPDATABASEMETADATA = _descriptor.Descriptor(
name='BackupDatabaseMetadata',
full_name='yandex.cloud.ydb.v1.BackupDatabaseMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='backup_id', full_name='yandex.cloud.ydb.v1.BackupDatabaseMetadata.backup_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='database_id', full_name='yandex.cloud.ydb.v1.BackupDatabaseMetadata.database_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=619,
serialized_end=683,
)
_STARTDATABASEREQUEST = _descriptor.Descriptor(
name='StartDatabaseRequest',
full_name='yandex.cloud.ydb.v1.StartDatabaseRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='database_id', full_name='yandex.cloud.ydb.v1.StartDatabaseRequest.database_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=685,
serialized_end=742,
)
_STARTDATABASEMETADATA = _descriptor.Descriptor(
name='StartDatabaseMetadata',
full_name='yandex.cloud.ydb.v1.StartDatabaseMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='database_id', full_name='yandex.cloud.ydb.v1.StartDatabaseMetadata.database_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='database_name', full_name='yandex.cloud.ydb.v1.StartDatabaseMetadata.database_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=744,
serialized_end=811,
)
_STOPDATABASEREQUEST = _descriptor.Descriptor(
name='StopDatabaseRequest',
full_name='yandex.cloud.ydb.v1.StopDatabaseRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='database_id', full_name='yandex.cloud.ydb.v1.StopDatabaseRequest.database_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=813,
serialized_end=869,
)
_STOPDATABASEMETADATA = _descriptor.Descriptor(
name='StopDatabaseMetadata',
full_name='yandex.cloud.ydb.v1.StopDatabaseMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='database_id', full_name='yandex.cloud.ydb.v1.StopDatabaseMetadata.database_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='database_name', full_name='yandex.cloud.ydb.v1.StopDatabaseMetadata.database_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=871,
serialized_end=937,
)
_GETDATABASEREQUEST = _descriptor.Descriptor(
name='GetDatabaseRequest',
full_name='yandex.cloud.ydb.v1.GetDatabaseRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='database_id', full_name='yandex.cloud.ydb.v1.GetDatabaseRequest.database_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=939,
serialized_end=994,
)
_LISTDATABASESREQUEST = _descriptor.Descriptor(
name='ListDatabasesRequest',
full_name='yandex.cloud.ydb.v1.ListDatabasesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='folder_id', full_name='yandex.cloud.ydb.v1.ListDatabasesRequest.folder_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_size', full_name='yandex.cloud.ydb.v1.ListDatabasesRequest.page_size', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0060-1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_token', full_name='yandex.cloud.ydb.v1.ListDatabasesRequest.page_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\005<=100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=996,
serialized_end=1099,
)
_LISTDATABASESRESPONSE = _descriptor.Descriptor(
name='ListDatabasesResponse',
full_name='yandex.cloud.ydb.v1.ListDatabasesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='databases', full_name='yandex.cloud.ydb.v1.ListDatabasesResponse.databases', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='yandex.cloud.ydb.v1.ListDatabasesResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1101,
serialized_end=1199,
)
_CREATEDATABASEREQUEST_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1981,
serialized_end=2026,
)
_CREATEDATABASEREQUEST = _descriptor.Descriptor(
name='CreateDatabaseRequest',
full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='folder_id', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.folder_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='resource_preset_id', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.resource_preset_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='storage_config', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.storage_config', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='scale_policy', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.scale_policy', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='network_id', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.network_id', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='subnet_ids', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.subnet_ids', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='zonal_database', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.zonal_database', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='regional_database', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.regional_database', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dedicated_database', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.dedicated_database', index=10,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='serverless_database', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.serverless_database', index=11,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='assign_public_ips', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.assign_public_ips', index=12,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='location_id', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.location_id', index=13,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.labels', index=14,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='backup_config', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.backup_config', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='monitoring_config', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.monitoring_config', index=16,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_CREATEDATABASEREQUEST_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='database_type', full_name='yandex.cloud.ydb.v1.CreateDatabaseRequest.database_type',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=1202,
serialized_end=2043,
)
_CREATEDATABASEMETADATA = _descriptor.Descriptor(
name='CreateDatabaseMetadata',
full_name='yandex.cloud.ydb.v1.CreateDatabaseMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='database_id', full_name='yandex.cloud.ydb.v1.CreateDatabaseMetadata.database_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='database_name', full_name='yandex.cloud.ydb.v1.CreateDatabaseMetadata.database_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2045,
serialized_end=2113,
)
_UPDATEDATABASEREQUEST_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1981,
serialized_end=2026,
)
_UPDATEDATABASEREQUEST = _descriptor.Descriptor(
name='UpdateDatabaseRequest',
full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='folder_id', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.folder_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='update_mask', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.update_mask', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='database_id', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.database_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.description', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='resource_preset_id', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.resource_preset_id', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='storage_config', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.storage_config', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='scale_policy', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.scale_policy', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='network_id', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.network_id', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='subnet_ids', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.subnet_ids', index=9,
number=10, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='zonal_database', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.zonal_database', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='regional_database', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.regional_database', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dedicated_database', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.dedicated_database', index=12,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='serverless_database', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.serverless_database', index=13,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='assign_public_ips', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.assign_public_ips', index=14,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='location_id', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.location_id', index=15,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.labels', index=16,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='backup_config', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.backup_config', index=17,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='monitoring_config', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.monitoring_config', index=18,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_UPDATEDATABASEREQUEST_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='database_type', full_name='yandex.cloud.ydb.v1.UpdateDatabaseRequest.database_type',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=2116,
serialized_end=3027,
)
_UPDATEDATABASEMETADATA = _descriptor.Descriptor(
name='UpdateDatabaseMetadata',
full_name='yandex.cloud.ydb.v1.UpdateDatabaseMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='database_id', full_name='yandex.cloud.ydb.v1.UpdateDatabaseMetadata.database_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='database_name', full_name='yandex.cloud.ydb.v1.UpdateDatabaseMetadata.database_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3029,
serialized_end=3097,
)
_DELETEDATABASEREQUEST = _descriptor.Descriptor(
name='DeleteDatabaseRequest',
full_name='yandex.cloud.ydb.v1.DeleteDatabaseRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='database_id', full_name='yandex.cloud.ydb.v1.DeleteDatabaseRequest.database_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3099,
serialized_end=3143,
)
_DELETEDATABASEMETADATA = _descriptor.Descriptor(
name='DeleteDatabaseMetadata',
full_name='yandex.cloud.ydb.v1.DeleteDatabaseMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='database_id', full_name='yandex.cloud.ydb.v1.DeleteDatabaseMetadata.database_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='database_name', full_name='yandex.cloud.ydb.v1.DeleteDatabaseMetadata.database_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3145,
serialized_end=3213,
)
_BACKUPDATABASEREQUEST.fields_by_name['backup_settings'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_backup__pb2._BACKUPSETTINGS
_LISTDATABASESRESPONSE.fields_by_name['databases'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._DATABASE
_CREATEDATABASEREQUEST_LABELSENTRY.containing_type = _CREATEDATABASEREQUEST
_CREATEDATABASEREQUEST.fields_by_name['storage_config'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._STORAGECONFIG
_CREATEDATABASEREQUEST.fields_by_name['scale_policy'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._SCALEPOLICY
_CREATEDATABASEREQUEST.fields_by_name['zonal_database'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._ZONALDATABASE
_CREATEDATABASEREQUEST.fields_by_name['regional_database'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._REGIONALDATABASE
_CREATEDATABASEREQUEST.fields_by_name['dedicated_database'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._DEDICATEDDATABASE
_CREATEDATABASEREQUEST.fields_by_name['serverless_database'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._SERVERLESSDATABASE
_CREATEDATABASEREQUEST.fields_by_name['labels'].message_type = _CREATEDATABASEREQUEST_LABELSENTRY
_CREATEDATABASEREQUEST.fields_by_name['backup_config'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_backup__pb2._BACKUPCONFIG
_CREATEDATABASEREQUEST.fields_by_name['monitoring_config'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._MONITORINGCONFIG
_CREATEDATABASEREQUEST.oneofs_by_name['database_type'].fields.append(
_CREATEDATABASEREQUEST.fields_by_name['zonal_database'])
_CREATEDATABASEREQUEST.fields_by_name['zonal_database'].containing_oneof = _CREATEDATABASEREQUEST.oneofs_by_name['database_type']
_CREATEDATABASEREQUEST.oneofs_by_name['database_type'].fields.append(
_CREATEDATABASEREQUEST.fields_by_name['regional_database'])
_CREATEDATABASEREQUEST.fields_by_name['regional_database'].containing_oneof = _CREATEDATABASEREQUEST.oneofs_by_name['database_type']
_CREATEDATABASEREQUEST.oneofs_by_name['database_type'].fields.append(
_CREATEDATABASEREQUEST.fields_by_name['dedicated_database'])
_CREATEDATABASEREQUEST.fields_by_name['dedicated_database'].containing_oneof = _CREATEDATABASEREQUEST.oneofs_by_name['database_type']
_CREATEDATABASEREQUEST.oneofs_by_name['database_type'].fields.append(
_CREATEDATABASEREQUEST.fields_by_name['serverless_database'])
_CREATEDATABASEREQUEST.fields_by_name['serverless_database'].containing_oneof = _CREATEDATABASEREQUEST.oneofs_by_name['database_type']
_UPDATEDATABASEREQUEST_LABELSENTRY.containing_type = _UPDATEDATABASEREQUEST
_UPDATEDATABASEREQUEST.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATEDATABASEREQUEST.fields_by_name['storage_config'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._STORAGECONFIG
_UPDATEDATABASEREQUEST.fields_by_name['scale_policy'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._SCALEPOLICY
_UPDATEDATABASEREQUEST.fields_by_name['zonal_database'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._ZONALDATABASE
_UPDATEDATABASEREQUEST.fields_by_name['regional_database'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._REGIONALDATABASE
_UPDATEDATABASEREQUEST.fields_by_name['dedicated_database'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._DEDICATEDDATABASE
_UPDATEDATABASEREQUEST.fields_by_name['serverless_database'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._SERVERLESSDATABASE
_UPDATEDATABASEREQUEST.fields_by_name['labels'].message_type = _UPDATEDATABASEREQUEST_LABELSENTRY
_UPDATEDATABASEREQUEST.fields_by_name['backup_config'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_backup__pb2._BACKUPCONFIG
_UPDATEDATABASEREQUEST.fields_by_name['monitoring_config'].message_type = yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._MONITORINGCONFIG
_UPDATEDATABASEREQUEST.oneofs_by_name['database_type'].fields.append(
_UPDATEDATABASEREQUEST.fields_by_name['zonal_database'])
_UPDATEDATABASEREQUEST.fields_by_name['zonal_database'].containing_oneof = _UPDATEDATABASEREQUEST.oneofs_by_name['database_type']
_UPDATEDATABASEREQUEST.oneofs_by_name['database_type'].fields.append(
_UPDATEDATABASEREQUEST.fields_by_name['regional_database'])
_UPDATEDATABASEREQUEST.fields_by_name['regional_database'].containing_oneof = _UPDATEDATABASEREQUEST.oneofs_by_name['database_type']
_UPDATEDATABASEREQUEST.oneofs_by_name['database_type'].fields.append(
_UPDATEDATABASEREQUEST.fields_by_name['dedicated_database'])
_UPDATEDATABASEREQUEST.fields_by_name['dedicated_database'].containing_oneof = _UPDATEDATABASEREQUEST.oneofs_by_name['database_type']
_UPDATEDATABASEREQUEST.oneofs_by_name['database_type'].fields.append(
_UPDATEDATABASEREQUEST.fields_by_name['serverless_database'])
_UPDATEDATABASEREQUEST.fields_by_name['serverless_database'].containing_oneof = _UPDATEDATABASEREQUEST.oneofs_by_name['database_type']
DESCRIPTOR.message_types_by_name['RestoreBackupRequest'] = _RESTOREBACKUPREQUEST
DESCRIPTOR.message_types_by_name['RestoreBackupMetadata'] = _RESTOREBACKUPMETADATA
DESCRIPTOR.message_types_by_name['BackupDatabaseRequest'] = _BACKUPDATABASEREQUEST
DESCRIPTOR.message_types_by_name['BackupDatabaseMetadata'] = _BACKUPDATABASEMETADATA
DESCRIPTOR.message_types_by_name['StartDatabaseRequest'] = _STARTDATABASEREQUEST
DESCRIPTOR.message_types_by_name['StartDatabaseMetadata'] = _STARTDATABASEMETADATA
DESCRIPTOR.message_types_by_name['StopDatabaseRequest'] = _STOPDATABASEREQUEST
DESCRIPTOR.message_types_by_name['StopDatabaseMetadata'] = _STOPDATABASEMETADATA
DESCRIPTOR.message_types_by_name['GetDatabaseRequest'] = _GETDATABASEREQUEST
DESCRIPTOR.message_types_by_name['ListDatabasesRequest'] = _LISTDATABASESREQUEST
DESCRIPTOR.message_types_by_name['ListDatabasesResponse'] = _LISTDATABASESRESPONSE
DESCRIPTOR.message_types_by_name['CreateDatabaseRequest'] = _CREATEDATABASEREQUEST
DESCRIPTOR.message_types_by_name['CreateDatabaseMetadata'] = _CREATEDATABASEMETADATA
DESCRIPTOR.message_types_by_name['UpdateDatabaseRequest'] = _UPDATEDATABASEREQUEST
DESCRIPTOR.message_types_by_name['UpdateDatabaseMetadata'] = _UPDATEDATABASEMETADATA
DESCRIPTOR.message_types_by_name['DeleteDatabaseRequest'] = _DELETEDATABASEREQUEST
DESCRIPTOR.message_types_by_name['DeleteDatabaseMetadata'] = _DELETEDATABASEMETADATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RestoreBackupRequest = _reflection.GeneratedProtocolMessageType('RestoreBackupRequest', (_message.Message,), {
'DESCRIPTOR' : _RESTOREBACKUPREQUEST,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.RestoreBackupRequest)
})
_sym_db.RegisterMessage(RestoreBackupRequest)
RestoreBackupMetadata = _reflection.GeneratedProtocolMessageType('RestoreBackupMetadata', (_message.Message,), {
'DESCRIPTOR' : _RESTOREBACKUPMETADATA,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.RestoreBackupMetadata)
})
_sym_db.RegisterMessage(RestoreBackupMetadata)
BackupDatabaseRequest = _reflection.GeneratedProtocolMessageType('BackupDatabaseRequest', (_message.Message,), {
'DESCRIPTOR' : _BACKUPDATABASEREQUEST,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.BackupDatabaseRequest)
})
_sym_db.RegisterMessage(BackupDatabaseRequest)
BackupDatabaseMetadata = _reflection.GeneratedProtocolMessageType('BackupDatabaseMetadata', (_message.Message,), {
'DESCRIPTOR' : _BACKUPDATABASEMETADATA,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.BackupDatabaseMetadata)
})
_sym_db.RegisterMessage(BackupDatabaseMetadata)
StartDatabaseRequest = _reflection.GeneratedProtocolMessageType('StartDatabaseRequest', (_message.Message,), {
'DESCRIPTOR' : _STARTDATABASEREQUEST,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.StartDatabaseRequest)
})
_sym_db.RegisterMessage(StartDatabaseRequest)
StartDatabaseMetadata = _reflection.GeneratedProtocolMessageType('StartDatabaseMetadata', (_message.Message,), {
'DESCRIPTOR' : _STARTDATABASEMETADATA,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.StartDatabaseMetadata)
})
_sym_db.RegisterMessage(StartDatabaseMetadata)
StopDatabaseRequest = _reflection.GeneratedProtocolMessageType('StopDatabaseRequest', (_message.Message,), {
'DESCRIPTOR' : _STOPDATABASEREQUEST,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.StopDatabaseRequest)
})
_sym_db.RegisterMessage(StopDatabaseRequest)
StopDatabaseMetadata = _reflection.GeneratedProtocolMessageType('StopDatabaseMetadata', (_message.Message,), {
'DESCRIPTOR' : _STOPDATABASEMETADATA,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.StopDatabaseMetadata)
})
_sym_db.RegisterMessage(StopDatabaseMetadata)
GetDatabaseRequest = _reflection.GeneratedProtocolMessageType('GetDatabaseRequest', (_message.Message,), {
'DESCRIPTOR' : _GETDATABASEREQUEST,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.GetDatabaseRequest)
})
_sym_db.RegisterMessage(GetDatabaseRequest)
ListDatabasesRequest = _reflection.GeneratedProtocolMessageType('ListDatabasesRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTDATABASESREQUEST,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.ListDatabasesRequest)
})
_sym_db.RegisterMessage(ListDatabasesRequest)
ListDatabasesResponse = _reflection.GeneratedProtocolMessageType('ListDatabasesResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTDATABASESRESPONSE,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.ListDatabasesResponse)
})
_sym_db.RegisterMessage(ListDatabasesResponse)
CreateDatabaseRequest = _reflection.GeneratedProtocolMessageType('CreateDatabaseRequest', (_message.Message,), {
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
'DESCRIPTOR' : _CREATEDATABASEREQUEST_LABELSENTRY,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.CreateDatabaseRequest.LabelsEntry)
})
,
'DESCRIPTOR' : _CREATEDATABASEREQUEST,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.CreateDatabaseRequest)
})
_sym_db.RegisterMessage(CreateDatabaseRequest)
_sym_db.RegisterMessage(CreateDatabaseRequest.LabelsEntry)
CreateDatabaseMetadata = _reflection.GeneratedProtocolMessageType('CreateDatabaseMetadata', (_message.Message,), {
'DESCRIPTOR' : _CREATEDATABASEMETADATA,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.CreateDatabaseMetadata)
})
_sym_db.RegisterMessage(CreateDatabaseMetadata)
UpdateDatabaseRequest = _reflection.GeneratedProtocolMessageType('UpdateDatabaseRequest', (_message.Message,), {
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
'DESCRIPTOR' : _UPDATEDATABASEREQUEST_LABELSENTRY,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.UpdateDatabaseRequest.LabelsEntry)
})
,
'DESCRIPTOR' : _UPDATEDATABASEREQUEST,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.UpdateDatabaseRequest)
})
_sym_db.RegisterMessage(UpdateDatabaseRequest)
_sym_db.RegisterMessage(UpdateDatabaseRequest.LabelsEntry)
UpdateDatabaseMetadata = _reflection.GeneratedProtocolMessageType('UpdateDatabaseMetadata', (_message.Message,), {
'DESCRIPTOR' : _UPDATEDATABASEMETADATA,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.UpdateDatabaseMetadata)
})
_sym_db.RegisterMessage(UpdateDatabaseMetadata)
DeleteDatabaseRequest = _reflection.GeneratedProtocolMessageType('DeleteDatabaseRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEDATABASEREQUEST,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.DeleteDatabaseRequest)
})
_sym_db.RegisterMessage(DeleteDatabaseRequest)
DeleteDatabaseMetadata = _reflection.GeneratedProtocolMessageType('DeleteDatabaseMetadata', (_message.Message,), {
'DESCRIPTOR' : _DELETEDATABASEMETADATA,
'__module__' : 'yandex.cloud.ydb.v1.database_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ydb.v1.DeleteDatabaseMetadata)
})
_sym_db.RegisterMessage(DeleteDatabaseMetadata)
DESCRIPTOR._options = None
_RESTOREBACKUPREQUEST.fields_by_name['backup_id']._options = None
_RESTOREBACKUPREQUEST.fields_by_name['database_id']._options = None
_STARTDATABASEREQUEST.fields_by_name['database_id']._options = None
_STOPDATABASEREQUEST.fields_by_name['database_id']._options = None
_GETDATABASEREQUEST.fields_by_name['database_id']._options = None
_LISTDATABASESREQUEST.fields_by_name['page_size']._options = None
_LISTDATABASESREQUEST.fields_by_name['page_token']._options = None
_CREATEDATABASEREQUEST_LABELSENTRY._options = None
_UPDATEDATABASEREQUEST_LABELSENTRY._options = None
_DATABASESERVICE = _descriptor.ServiceDescriptor(
name='DatabaseService',
full_name='yandex.cloud.ydb.v1.DatabaseService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=3216,
serialized_end=4657,
methods=[
_descriptor.MethodDescriptor(
name='Get',
full_name='yandex.cloud.ydb.v1.DatabaseService.Get',
index=0,
containing_service=None,
input_type=_GETDATABASEREQUEST,
output_type=yandex_dot_cloud_dot_ydb_dot_v1_dot_database__pb2._DATABASE,
serialized_options=b'\202\323\344\223\002!\022\037/ydb/v1/databases/{database_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='List',
full_name='yandex.cloud.ydb.v1.DatabaseService.List',
index=1,
containing_service=None,
input_type=_LISTDATABASESREQUEST,
output_type=_LISTDATABASESRESPONSE,
serialized_options=b'\202\323\344\223\002\023\022\021/ydb/v1/databases',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Create',
full_name='yandex.cloud.ydb.v1.DatabaseService.Create',
index=2,
containing_service=None,
input_type=_CREATEDATABASEREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002\026\"\021/ydb/v1/databases:\001*\262\322*\"\n\026CreateDatabaseMetadata\022\010Database',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Update',
full_name='yandex.cloud.ydb.v1.DatabaseService.Update',
index=3,
containing_service=None,
input_type=_UPDATEDATABASEREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002$2\037/ydb/v1/databases/{database_id}:\001*\262\322*\"\n\026UpdateDatabaseMetadata\022\010Database',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Start',
full_name='yandex.cloud.ydb.v1.DatabaseService.Start',
index=4,
containing_service=None,
input_type=_STARTDATABASEREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002\'\"%/ydb/v1/databases/{database_id}:start\262\322*!\n\025StartDatabaseMetadata\022\010Database',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Stop',
full_name='yandex.cloud.ydb.v1.DatabaseService.Stop',
index=5,
containing_service=None,
input_type=_STOPDATABASEREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002&\"$/ydb/v1/databases/{database_id}:stop\262\322* \n\024StopDatabaseMetadata\022\010Database',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Delete',
full_name='yandex.cloud.ydb.v1.DatabaseService.Delete',
index=6,
containing_service=None,
input_type=_DELETEDATABASEREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002!*\037/ydb/v1/databases/{database_id}\262\322*/\n\026DeleteDatabaseMetadata\022\025google.protobuf.Empty',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Restore',
full_name='yandex.cloud.ydb.v1.DatabaseService.Restore',
index=7,
containing_service=None,
input_type=_RESTOREBACKUPREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002\036\"\031/ydb/v1/databases:restore:\001*\262\322*!\n\025RestoreBackupMetadata\022\010Database',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Backup',
full_name='yandex.cloud.ydb.v1.DatabaseService.Backup',
index=8,
containing_service=None,
input_type=_BACKUPDATABASEREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002\035\"\030/ydb/v1/databases:backup:\001*\262\322*\"\n\026BackupDatabaseMetadata\022\010Database',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_DATABASESERVICE)
DESCRIPTOR.services_by_name['DatabaseService'] = _DATABASESERVICE
# @@protoc_insertion_point(module_scope)
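# --- Illustrative usage sketch (not part of the protoc-generated output) ---
# The registered message classes above behave like ordinary protobuf messages:
# they can be constructed with keyword arguments and round-tripped through the
# wire format. The folder id below is a placeholder value, and the __main__
# guard keeps this sketch from running on normal imports of the module.
if __name__ == '__main__':
    _example_request = ListDatabasesRequest(folder_id='example-folder-id', page_size=50)
    _wire_bytes = _example_request.SerializeToString()
    _decoded = ListDatabasesRequest.FromString(_wire_bytes)
    assert _decoded.folder_id == _example_request.folder_id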
| 51.705303 | 7,309 | 0.778362 |
e2f8befc7c97b67b8bd75b5ec6fe033ab029664c | 2,265 | py | Python | veneer/navigate.py | flowmatters/veneer-py | af551b49038f5f93358b510fb893015c590bf6d4 | ["0BSD"] | 7 | 2016-11-14T13:06:40.000Z | 2020-10-13T06:13:51.000Z | veneer/navigate.py | flowmatters/veneer-py | af551b49038f5f93358b510fb893015c590bf6d4 | ["0BSD"] | 3 | 2016-11-06T10:22:07.000Z | 2019-05-09T09:55:14.000Z | veneer/navigate.py | flowmatters/veneer-py | af551b49038f5f93358b510fb893015c590bf6d4 | ["0BSD"] | 4 | 2016-11-02T00:46:32.000Z | 2020-07-30T03:24:35.000Z |
'''
Prototype functionality for interacting with the Source model directly, including tab-completion in IPython/Jupyter. E.g.:
v = veneer.Veneer()
scenario = Queryable(v)
scenario.Name = 'New Scenario Name'
'''
class Queryable(object):
def __init__(self,v,path='scenario',namespace=None):
self._v = v
self._path = path
self._init = False
self._ns = namespace
def _eval_(self):
return self._v.model.get(self._path,namespace=self._ns)
def _child_(self,path):
val = Queryable(self._v,'%s.%s'%(self._path,path),namespace=self._ns)
return val
def _double_quote_(self,maybe_string):
v = maybe_string
if not isinstance(v,str):
return v
if not "'" in v:
return "'%s'"%v
if not '"' in v:
return '"%s"'%v
v = v.replace('"','\\"')
return '"%s"'%v
def _child_idx_(self,ix):
return Queryable(self._v,'%s[%s]'%(self._path,str(ix)),namespace=self._ns)
def _initialise_children_(self,entries):
if self._init: return
self._init = True
for r in entries:
if r[:2]=='__': continue
super(Queryable,self).__setattr__(r,self._child_(r))
def _run_script(self,script):
return self._v.model._safe_run('%s\n%s'%(self._v.model._init_script(self._ns),script))
def __call__(self,*args,**kwargs):
return self._v.model.call(self._path+str(tuple(args)))
def __repr__(self):
return str(self._eval_())
def __dir__(self):
res = [e['Value'] for e in self._run_script('dir(%s)'%(self._path))['Response']['Value']]
self._initialise_children_(res)
return res
def __getattr__(self,attrname):
return self._child_(attrname)
def __getitem__(self,ix):
return self._child_idx_(ix)
def __setattr__(self,a,v):
if a.startswith('_'):
return super(Queryable,self).__setattr__(a,v)
v = self._double_quote_(v)
if not self._v.model.set('%s.%s'%(self._path,a),v):
raise Exception("Couldn't set property")
def __int__(self):
return int(self._eval_())
def __float__(self):
return float(self._eval_())
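# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of navigating the Source model through Queryable. It needs a
# running Veneer endpoint, so it is shown as comments only; the property and
# node names below are hypothetical and depend on the loaded Source project.
#
#   import veneer
#   v = veneer.Veneer()
#   scenario = Queryable(v)
#   print(scenario.Name)                     # read a property via _eval_()
#   scenario.Name = 'Updated scenario name'  # write a property via __setattr__
#   first_node = scenario.Network.Nodes[0]   # attribute + index navigation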
| 28.3125 | 119 | 0.595585 |
3413e5c4c6600514b96261dc9902b8527c09f68c | 600 | py | Python | awards/urls.py | NIelsen-Mudaki/Awwards | 6814a6e3c1cf0d0b68dff21b8e17171bf2bd91d4 | ["Unlicense"] | null | null | null | awards/urls.py | NIelsen-Mudaki/Awwards | 6814a6e3c1cf0d0b68dff21b8e17171bf2bd91d4 | ["Unlicense"] | null | null | null | awards/urls.py | NIelsen-Mudaki/Awwards | 6814a6e3c1cf0d0b68dff21b8e17171bf2bd91d4 | ["Unlicense"] | null | null | null |
from django.conf.urls import url, include
from django.conf import settings
from django.conf.urls.static import static
from . import views

urlpatterns = [
    url(r'^$', views.welcome, name='welcome'),
    url(r'accounts/', include('django.contrib.auth.urls')),
    url(r'^accounts/', include('registration.backends.simple.urls')),
    url(r'^logout/$', views.logout, {"next_page": '/'}),
    url(r'^accounts/profile/', views.profile, name='profile'),
    url(r'^post/', views.post_form, name='post'),
]

if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 40 | 81 | 0.703333 |
ec2d709688a8e3e049722b2e401e36c0f7b0b0b4 | 40,733 | py | Python | scout/server/blueprints/cases/controllers.py | bjhall/scout | ea772cf8d233223e0ec5271f61b95d3afcf719ad | ["BSD-3-Clause"] | null | null | null | scout/server/blueprints/cases/controllers.py | bjhall/scout | ea772cf8d233223e0ec5271f61b95d3afcf719ad | ["BSD-3-Clause"] | null | null | null | scout/server/blueprints/cases/controllers.py | bjhall/scout | ea772cf8d233223e0ec5271f61b95d3afcf719ad | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
import itertools
import logging
import os
import query_phenomizer
import requests
from bs4 import BeautifulSoup
from flask import current_app, url_for
from flask_login import current_user
from flask_mail import Message
from xlsxwriter import Workbook
from scout.constants import (CANCER_PHENOTYPE_MAP, CASE_STATUSES,
MT_EXPORT_HEADER, PHENOTYPE_GROUPS, PHENOTYPE_MAP,
SEX_MAP, VERBS_MAP)
from scout.constants.variant_tags import (
CANCER_SPECIFIC_VARIANT_DISMISS_OPTIONS, CANCER_TIER_OPTIONS,
DISMISS_VARIANT_OPTIONS, GENETIC_MODELS, MANUAL_RANK_OPTIONS)
from scout.export.variant import export_mt_variants
from scout.parse.clinvar import (clinvar_submission_header,
clinvar_submission_lines)
from scout.parse.matchmaker import (genomic_features, hpo_terms, omim_terms,
parse_matches)
from scout.server.blueprints.genes.controllers import gene
from scout.server.blueprints.variant.controllers import \
variant as variant_decorator
from scout.server.blueprints.variant.utils import predictions
from scout.server.utils import institute_and_case, user_institutes
from scout.utils.matchmaker import matchmaker_request
LOG = logging.getLogger(__name__)
STATUS_MAP = {"solved": "bg-success", "archived": "bg-warning"}
TRACKS = {"rare": "Rare Disease", "cancer": "Cancer"}
def cases(store, case_query, prioritized_cases_query=None, limit=100):
"""Preprocess case objects.
Add the necessary information to display the 'cases' view
Args:
store(adapter.MongoAdapter)
case_query(pymongo.Cursor)
prioritized_cases_query(pymongo.Cursor)
limit(int): Maximum number of cases to display
Returns:
data(dict): includes the cases, how many there are and the limit.
"""
case_groups = {status: [] for status in CASE_STATUSES}
nr_cases = 0
# local function to add info to case obj
def populate_case_obj(case_obj):
analysis_types = set(ind["analysis_type"] for ind in case_obj["individuals"])
LOG.debug(
"Analysis types found in %s: %s", case_obj["_id"], ",".join(analysis_types)
)
if len(analysis_types) > 1:
LOG.debug("Set analysis types to {'mixed'}")
analysis_types = set(["mixed"])
case_obj["analysis_types"] = list(analysis_types)
case_obj["assignees"] = [
store.user(user_email) for user_email in case_obj.get("assignees", [])
]
case_obj["is_rerun"] = len(case_obj.get("analyses", [])) > 0
case_obj["clinvar_variants"] = store.case_to_clinVars(case_obj["_id"])
case_obj["display_track"] = TRACKS[case_obj.get("track", "rare")]
return case_obj
for nr_cases, case_obj in enumerate(case_query.limit(limit), 1):
case_obj = populate_case_obj(case_obj)
case_groups[case_obj["status"]].append(case_obj)
if prioritized_cases_query:
extra_prioritized = 0
for case_obj in prioritized_cases_query:
if any(
group_obj.get("display_name") == case_obj.get("display_name")
for group_obj in case_groups[case_obj["status"]]
):
continue
else:
extra_prioritized += 1
case_obj = populate_case_obj(case_obj)
case_groups[case_obj["status"]].append(case_obj)
# extra prioritized cases are potentially shown in addition to the case query limit
nr_cases += extra_prioritized
data = {
"cases": [(status, case_groups[status]) for status in CASE_STATUSES],
"found_cases": nr_cases,
"limit": limit,
}
return data
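# Illustrative call sketch (not part of the original module): ``store`` is the
# application's adapter.MongoAdapter and ``case_query`` any pymongo cursor over
# case documents; how that cursor is built is deliberately omitted here.
#
#   data = cases(store, case_query, limit=50)
#   for status, case_group in data["cases"]:
#       ...  # render one panel per status in the cases overview template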
def case(store, institute_obj, case_obj):
"""Preprocess a single case.
Prepare the case to be displayed in the case view.
Args:
store(adapter.MongoAdapter)
institute_obj(models.Institute)
case_obj(models.Case)
Returns:
data(dict): includes the cases, how many there are and the limit.
"""
# Convert individual information to more readable format
case_obj["individual_ids"] = []
for individual in case_obj["individuals"]:
try:
sex = int(individual.get("sex", 0))
except ValueError as err:
sex = 0
individual["sex_human"] = SEX_MAP[sex]
pheno_map = PHENOTYPE_MAP
if case_obj.get("track", "rare") == "cancer":
pheno_map = CANCER_PHENOTYPE_MAP
individual["phenotype_human"] = pheno_map.get(individual["phenotype"])
case_obj["individual_ids"].append(individual["individual_id"])
case_obj["assignees"] = [
store.user(user_email) for user_email in case_obj.get("assignees", [])
]
# Fetch the variant objects for suspects and causatives
suspects = [
store.variant(variant_id) or variant_id
for variant_id in case_obj.get("suspects", [])
]
causatives = [
store.variant(variant_id) or variant_id
for variant_id in case_obj.get("causatives", [])
]
# check for partial causatives and associated phenotypes
partial_causatives = []
if case_obj.get("partial_causatives"):
for var_id, values in case_obj["partial_causatives"].items():
causative_obj = {
"variant": store.variant(var_id) or var_id,
"omim_terms": values.get("diagnosis_phenotypes"),
"hpo_terms": values.get("phenotype_terms"),
}
partial_causatives.append(causative_obj)
# Set of all unique genes in the default gene panels
distinct_genes = set()
case_obj["panel_names"] = []
for panel_info in case_obj.get("panels", []):
if not panel_info.get("is_default"):
continue
panel_name = panel_info["panel_name"]
panel_version = panel_info.get("version")
panel_obj = store.gene_panel(panel_name, version=panel_version)
if not panel_obj:
LOG.warning(
"Could not fetch gene panel %s, version %s", panel_name, panel_version
)
LOG.info("Try to fetch latest existing version")
panel_obj = store.gene_panel(panel_name)
if not panel_obj:
LOG.warning("Could not find any version of gene panel %s", panel_name)
continue
LOG.info("Using panel %s, version %s", panel_name, panel_obj["version"])
distinct_genes.update([gene["hgnc_id"] for gene in panel_obj.get("genes", [])])
full_name = "{} ({})".format(panel_obj["display_name"], panel_obj["version"])
case_obj["panel_names"].append(full_name)
case_obj["default_genes"] = list(distinct_genes)
for hpo_term in itertools.chain(
case_obj.get("phenotype_groups", []), case_obj.get("phenotype_terms", [])
):
hpo_term["hpo_link"] = "http://hpo.jax.org/app/browse/term/{}".format(
hpo_term["phenotype_id"]
)
rank_model_link_prefix = current_app.config.get("RANK_MODEL_LINK_PREFIX", "")
if case_obj.get("rank_model_version"):
rank_model_link_postfix = current_app.config.get("RANK_MODEL_LINK_POSTFIX", "")
rank_model_link = "".join(
[
rank_model_link_prefix,
str(case_obj["rank_model_version"]),
rank_model_link_postfix,
]
)
        LOG.debug("Rank model link: %s", rank_model_link)
case_obj["rank_model_link"] = rank_model_link
sv_rank_model_link_prefix = current_app.config.get("SV_RANK_MODEL_LINK_PREFIX", "")
if case_obj.get("sv_rank_model_version"):
sv_rank_model_link_postfix = current_app.config.get(
"SV_RANK_MODEL_LINK_POSTFIX", ""
)
case_obj["sv_rank_model_link"] = "".join(
[
sv_rank_model_link_prefix,
str(case_obj["sv_rank_model_version"]),
sv_rank_model_link_postfix,
]
)
    # collaborators other than the owner of the case
o_collaborators = []
for collab_id in case_obj.get("collaborators", []):
if collab_id != case_obj["owner"] and store.institute(collab_id):
o_collaborators.append(store.institute(collab_id))
case_obj["o_collaborators"] = [
(collab_obj["_id"], collab_obj["display_name"])
for collab_obj in o_collaborators
]
collab_ids = None
if institute_obj.get("collaborators"):
collab_ids = [
(collab["_id"], collab["display_name"])
for collab in store.institutes()
if institute_obj.get("collaborators")
and collab["_id"] in institute_obj.get("collaborators")
]
events = list(store.events(institute_obj, case=case_obj))
for event in events:
event["verb"] = VERBS_MAP[event["verb"]]
case_obj["clinvar_variants"] = store.case_to_clinVars(case_obj["_id"])
# if updated_at is a list, set it to the last update datetime
if case_obj.get("updated_at") and isinstance(case_obj["updated_at"], list):
case_obj["updated_at"] = max(case_obj["updated_at"])
    # Phenotype groups can be specific to an institute; otherwise the default groups are used
pheno_groups = institute_obj.get("phenotype_groups") or PHENOTYPE_GROUPS
data = {
"status_class": STATUS_MAP.get(case_obj["status"]),
"other_causatives": [var for var in store.check_causatives(case_obj=case_obj)],
"comments": store.events(institute_obj, case=case_obj, comments=True),
"hpo_groups": pheno_groups,
"events": events,
"suspects": suspects,
"causatives": causatives,
"partial_causatives": partial_causatives,
"collaborators": collab_ids,
"cohort_tags": institute_obj.get("cohorts", []),
"manual_rank_options": MANUAL_RANK_OPTIONS,
"cancer_tier_options": CANCER_TIER_OPTIONS,
}
return data
def case_report_content(store, institute_obj, case_obj):
"""Gather contents to be visualized in a case report
Args:
store(adapter.MongoAdapter)
institute_obj(models.Institute)
case_obj(models.Case)
Returns:
data(dict)
"""
variant_types = {
"causatives_detailed": "causatives",
"partial_causatives_detailed": "partial_causatives",
"suspects_detailed": "suspects",
"classified_detailed": "acmg_classification",
"tagged_detailed": "manual_rank",
"tier_detailed": "cancer_tier",
"dismissed_detailed": "dismiss_variant",
"commented_detailed": "is_commented",
}
data = case_obj
for individual in data["individuals"]:
try:
sex = int(individual.get("sex", 0))
        except ValueError:
sex = 0
individual["sex_human"] = SEX_MAP[sex]
individual["phenotype_human"] = PHENOTYPE_MAP.get(individual["phenotype"])
dismiss_options = DISMISS_VARIANT_OPTIONS
if case_obj.get("track") == "cancer":
dismiss_options = {
**DISMISS_VARIANT_OPTIONS,
**CANCER_SPECIFIC_VARIANT_DISMISS_OPTIONS,
}
# Add the case comments
data["comments"] = store.events(institute_obj, case=case_obj, comments=True)
data["manual_rank_options"] = MANUAL_RANK_OPTIONS
data["cancer_tier_options"] = CANCER_TIER_OPTIONS
data["dismissed_options"] = dismiss_options
data["genetic_models"] = dict(GENETIC_MODELS)
data["report_created_at"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
evaluated_variants = {vt: [] for vt in variant_types}
# We collect all causatives (including the partial ones) and suspected variants
    # These are handled separately since they are on the case level
for var_type in ["causatives", "suspects", "partial_causatives"]:
# These include references to variants
vt = "_".join([var_type, "detailed"])
for var_id in case_obj.get(var_type, []):
variant_obj = store.variant(var_id)
if not variant_obj:
continue
if var_type == "partial_causatives": # Collect associated phenotypes
variant_obj["phenotypes"] = [
value
for key, value in case_obj["partial_causatives"].items()
if key == var_id
][0]
evaluated_variants[vt].append(variant_obj)
## get variants for this case that are either classified, commented, tagged or dismissed.
for var_obj in store.evaluated_variants(case_id=case_obj["_id"]):
# Check which category it belongs to
for vt in variant_types:
keyword = variant_types[vt]
            # When found we add it to the category
            # Each variant can belong to multiple categories
if keyword not in var_obj:
continue
evaluated_variants[vt].append(var_obj)
for var_type in evaluated_variants:
decorated_variants = []
for var_obj in evaluated_variants[var_type]:
# We decorate the variant with some extra information
decorated_info = variant_decorator(
store=store,
institute_id=institute_obj["_id"],
case_name=case_obj["display_name"],
variant_id=None,
variant_obj=var_obj,
add_case=False,
add_other=False,
get_overlapping=False,
add_compounds=False,
variant_type=var_obj["category"],
institute_obj=institute_obj,
case_obj=case_obj,
)
decorated_variants.append(decorated_info["variant"])
# Add the decorated variants to the case
data[var_type] = decorated_variants
return data
def coverage_report_contents(store, institute_obj, case_obj, base_url):
"""Posts a request to chanjo-report and capture the body of the returned response to include it in case report
Args:
store(adapter.MongoAdapter)
institute_obj(models.Institute)
case_obj(models.Case)
base_url(str): base url of server
Returns:
coverage_data(str): string rendering of the content between <body </body> tags of a coverage report
"""
request_data = {}
# extract sample ids from case_obj and add them to the post request object:
request_data["sample_id"] = [
ind["individual_id"] for ind in case_obj["individuals"]
]
# extract default panel names and default genes from case_obj and add them to the post request object
distinct_genes = set()
panel_names = []
for panel_info in case_obj.get("panels", []):
if panel_info.get("is_default") is False:
continue
panel_obj = store.gene_panel(
panel_info["panel_name"], version=panel_info.get("version")
)
distinct_genes.update([gene["hgnc_id"] for gene in panel_obj.get("genes", [])])
full_name = "{} ({})".format(panel_obj["display_name"], panel_obj["version"])
panel_names.append(full_name)
panel_names = " ,".join(panel_names)
request_data["gene_ids"] = ",".join(
[str(gene_id) for gene_id in list(distinct_genes)]
)
request_data["panel_name"] = panel_names
request_data["request_sent"] = datetime.datetime.now()
# add institute-specific cutoff level to the post request object
request_data["level"] = institute_obj.get("coverage_cutoff", 15)
    # send a POST request to chanjo-report
# disable default certificate verification
resp = requests.post(base_url + "reports/report", data=request_data, verify=False)
# read response content
    soup = BeautifulSoup(resp.text, "html.parser")
# remove links in the printed version of coverage report
for tag in soup.find_all("a"):
tag.replaceWith("")
# extract body content using BeautifulSoup
coverage_data = "".join(["%s" % x for x in soup.body.contents])
return coverage_data
def clinvar_submissions(store, institute_id):
"""Get all Clinvar submissions for a user and an institute"""
submissions = list(store.clinvar_submissions(institute_id))
return submissions
def clinvar_header(submission_objs, csv_type):
""" Call clinvar parser to extract required fields to include in csv header from clinvar submission objects"""
clinvar_header_obj = clinvar_submission_header(submission_objs, csv_type)
return clinvar_header_obj
def clinvar_lines(clinvar_objects, clinvar_header):
""" Call clinvar parser to extract required lines to include in csv file from clinvar submission objects and header"""
clinvar_lines = clinvar_submission_lines(clinvar_objects, clinvar_header)
return clinvar_lines
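# --- Illustrative sketch, not part of the original module ---
# A hedged example of how the two helpers above could be combined to write a
# ClinVar CSV file. It assumes the parsers return an iterable of column names
# and an iterable of pre-formatted CSV lines, and that the same clinvar objects
# feed both helpers; `out_path` is a hypothetical argument.
def _write_clinvar_csv(clinvar_objects, csv_type, out_path):
    header = clinvar_header(clinvar_objects, csv_type)
    lines = clinvar_lines(clinvar_objects, header)
    with open(out_path, "w") as out_handle:
        out_handle.write(",".join(header) + "\n")
        out_handle.write("\n".join(lines) + "\n")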
def mt_excel_files(store, case_obj, temp_excel_dir):
"""Collect MT variants and format line of a MT variant report
to be exported in excel format
Args:
store(adapter.MongoAdapter)
case_obj(models.Case)
temp_excel_dir(os.Path): folder where the temp excel files are written to
Returns:
written_files(int): the number of files written to temp_excel_dir
"""
today = datetime.datetime.now().strftime("%Y-%m-%d")
samples = case_obj.get("individuals")
query = {"chrom": "MT"}
mt_variants = list(
store.variants(
case_id=case_obj["_id"], query=query, nr_of_variants=-1, sort_key="position"
)
)
written_files = 0
for sample in samples:
sample_id = sample["individual_id"]
display_name = sample["display_name"]
sample_lines = export_mt_variants(variants=mt_variants, sample_id=sample_id)
# set up document name
document_name = (
".".join([case_obj["display_name"], display_name, today]) + ".xlsx"
)
workbook = Workbook(os.path.join(temp_excel_dir, document_name))
Report_Sheet = workbook.add_worksheet()
# Write the column header
row = 0
for col, field in enumerate(MT_EXPORT_HEADER):
Report_Sheet.write(row, col, field)
# Write variant lines, after header (start at line 1)
for row, line in enumerate(
sample_lines, 1
): # each line becomes a row in the document
for col, field in enumerate(line): # each field in line becomes a cell
Report_Sheet.write(row, col, field)
workbook.close()
if os.path.exists(os.path.join(temp_excel_dir, document_name)):
written_files += 1
return written_files
def update_synopsis(store, institute_obj, case_obj, user_obj, new_synopsis):
"""Update synopsis."""
# create event only if synopsis was actually changed
if case_obj["synopsis"] != new_synopsis:
link = url_for(
"cases.case",
institute_id=institute_obj["_id"],
case_name=case_obj["display_name"],
)
store.update_synopsis(
institute_obj, case_obj, user_obj, link, content=new_synopsis
)
def update_individuals(store, institute_obj, case_obj, user_obj, ind, age, tissue):
"""Handle update of individual data (age and/or Tissue type) for a case"""
case_individuals = case_obj.get("individuals")
for subject in case_individuals:
if subject["individual_id"] == ind:
if age:
subject["age"] = round(float(age), 1)
else:
subject["age"] = None
if tissue:
subject["tissue_type"] = tissue
case_obj["individuals"] = case_individuals
# update case with new individual data
store.update_case(case_obj, keep_date=True)
# create an associated event
link = url_for(
"cases.case",
institute_id=institute_obj["_id"],
case_name=case_obj["display_name"],
)
store.create_event(
institute=institute_obj,
case=case_obj,
user=user_obj,
link=link,
category="case",
verb="update_individual",
subject=case_obj["display_name"],
)
def update_cancer_samples(
store, institute_obj, case_obj, user_obj, ind, tissue, tumor_type, tumor_purity
):
"""Handle update of sample data data (tissue, tumor_type, tumor_purity) for a cancer case"""
case_samples = case_obj.get("individuals")
for sample in case_samples:
if sample["individual_id"] == ind:
if tissue:
sample["tissue_type"] = tissue
if tumor_type:
sample["tumor_type"] = tumor_type
else:
sample["tumor_type"] = None
if tumor_purity:
sample["tumor_purity"] = float(tumor_purity)
else:
sample["tumor_purity"] = None
case_obj["individuals"] = case_samples
# update case with new sample data
store.update_case(case_obj, keep_date=True)
# create an associated event
link = url_for(
"cases.case",
institute_id=institute_obj["_id"],
case_name=case_obj["display_name"],
)
store.create_event(
institute=institute_obj,
case=case_obj,
user=user_obj,
link=link,
category="case",
verb="update_sample",
subject=case_obj["display_name"],
)
def hpo_diseases(username, password, hpo_ids, p_value_treshold=1):
"""Return the list of HGNC symbols that match annotated HPO terms.
Args:
username (str): username to use for phenomizer connection
password (str): password to use for phenomizer connection
Returns:
query_result: a generator of dictionaries on the form
{
'p_value': float,
'disease_source': str,
'disease_nr': int,
'gene_symbols': list(str),
'description': str,
'raw_line': str
}
"""
    # skip querying Phenomizer unless at least one HPO term exists
try:
results = query_phenomizer.query(username, password, *hpo_ids)
diseases = [
result for result in results if result["p_value"] <= p_value_treshold
]
return diseases
except SystemExit:
return None
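# --- Illustrative sketch, not part of the original module ---
# Shows how the disease dictionaries documented above can be consumed; the
# credentials and the hpo_ids list are assumptions supplied by the caller.
def _matching_gene_symbols(username, password, hpo_ids):
    """Collect all HGNC symbols from the Phenomizer disease hits."""
    diseases = hpo_diseases(username, password, hpo_ids) or []
    symbols = set()
    for disease in diseases:
        symbols.update(disease.get("gene_symbols", []))
    return symbols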
def rerun(store, mail, current_user, institute_id, case_name, sender, recipient):
"""Request a rerun by email."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
user_obj = store.user(current_user.email)
link = url_for("cases.case", institute_id=institute_id, case_name=case_name)
store.request_rerun(institute_obj, case_obj, user_obj, link)
# this should send a JSON document to the SuSy API in the future
html = """
<p>{institute}: {case} ({case_id})</p>
<p>Re-run requested by: {name}</p>
""".format(
institute=institute_obj["display_name"],
case=case_obj["display_name"],
case_id=case_obj["_id"],
name=user_obj["name"].encode(),
)
# compose and send the email message
msg = Message(
subject=("SCOUT: request RERUN for {}".format(case_obj["display_name"])),
html=html,
sender=sender,
recipients=[recipient],
# cc the sender of the email for confirmation
cc=[user_obj["email"]],
)
if recipient:
mail.send(msg)
else:
LOG.error("Cannot send rerun message: no recipient defined in config.")
def update_default_panels(store, current_user, institute_id, case_name, panel_ids):
"""Update default panels for a case."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
user_obj = store.user(current_user.email)
link = url_for("cases.case", institute_id=institute_id, case_name=case_name)
panel_objs = [store.panel(panel_id) for panel_id in panel_ids]
store.update_default_panels(institute_obj, case_obj, user_obj, link, panel_objs)
def update_clinical_filter_hpo(
store, current_user, institute_id, case_name, hpo_clinical_filter
):
"""Update HPO clinical filter use for a case."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
user_obj = store.user(current_user.email)
link = url_for("cases.case", institute_id=institute_id, case_name=case_name)
store.update_clinical_filter_hpo(
institute_obj, case_obj, user_obj, link, hpo_clinical_filter
)
def vcf2cytosure(store, institute_id, case_name, individual_id):
"""vcf2cytosure CGH file for inidividual."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
for individual in case_obj["individuals"]:
if individual["individual_id"] == individual_id:
individual_obj = individual
return (individual_obj["display_name"], individual_obj["vcf2cytosure"])
def gene_variants(store, variants_query, institute_id, page=1, per_page=50):
"""Pre-process list of variants."""
# We need to call variants_collection.count_documents here
variant_count = variants_query.count()
skip_count = per_page * max(page - 1, 0)
    more_variants = variant_count > (skip_count + per_page)
variant_res = variants_query.skip(skip_count).limit(per_page)
my_institutes = set(inst["_id"] for inst in user_institutes(store, current_user))
variants = []
for variant_obj in variant_res:
# Populate variant case_display_name
variant_case_obj = store.case(case_id=variant_obj["case_id"])
if not variant_case_obj:
# A variant with missing case was encountered
continue
case_display_name = variant_case_obj.get("display_name")
variant_obj["case_display_name"] = case_display_name
# hide other institutes for now
other_institutes = set([variant_case_obj.get("owner")])
other_institutes.update(set(variant_case_obj.get("collaborators", [])))
if my_institutes.isdisjoint(other_institutes):
# If the user does not have access to the information we skip it
continue
genome_build = variant_case_obj.get("genome_build", "37")
if genome_build not in ["37", "38"]:
genome_build = "37"
# Update the HGNC symbols if they are not set
variant_genes = variant_obj.get("genes")
if variant_genes is not None:
for gene_obj in variant_genes:
                # If there is no hgnc id there is nothing we can do
if not gene_obj["hgnc_id"]:
continue
# Else we collect the gene object and check the id
if (
gene_obj.get("hgnc_symbol") is None
or gene_obj.get("description") is None
):
hgnc_gene = store.hgnc_gene(gene_obj["hgnc_id"], build=genome_build)
if not hgnc_gene:
continue
gene_obj["hgnc_symbol"] = hgnc_gene["hgnc_symbol"]
gene_obj["description"] = hgnc_gene["description"]
# Populate variant HGVS and predictions
gene_ids = []
gene_symbols = []
hgvs_c = []
hgvs_p = []
variant_genes = variant_obj.get("genes")
if variant_genes is not None:
functional_annotation = ""
for gene_obj in variant_genes:
hgnc_id = gene_obj["hgnc_id"]
gene_symbol = gene(store, hgnc_id)["symbol"]
gene_ids.append(hgnc_id)
gene_symbols.append(gene_symbol)
hgvs_nucleotide = "-"
# gather HGVS info from gene transcripts
transcripts_list = gene_obj.get("transcripts")
for transcript_obj in transcripts_list:
                    if transcript_obj.get("is_canonical") is True:
hgvs_nucleotide = str(
transcript_obj.get("coding_sequence_name")
)
hgvs_protein = str(transcript_obj.get("protein_sequence_name"))
hgvs_c.append(hgvs_nucleotide)
hgvs_p.append(hgvs_protein)
if len(gene_symbols) == 1:
if hgvs_p[0] != "None":
hgvs = hgvs_p[0]
elif hgvs_c[0] != "None":
hgvs = hgvs_c[0]
else:
hgvs = "-"
variant_obj["hgvs"] = hgvs
# populate variant predictions for display
variant_obj.update(predictions(variant_genes))
variants.append(variant_obj)
return {"variants": variants, "more_variants": more_variants}
def multiqc(store, institute_id, case_name):
"""Find MultiQC report for the case."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
return dict(institute=institute_obj, case=case_obj)
def get_sanger_unevaluated(store, institute_id, user_id):
"""Get all variants for an institute having Sanger validations ordered but still not evaluated
Args:
store(scout.adapter.MongoAdapter)
institute_id(str)
Returns:
unevaluated: a list that looks like this: [ {'case1': [varID_1, varID_2, .., varID_n]}, {'case2' : [varID_1, varID_2, .., varID_n]} ],
where the keys are case_ids and the values are lists of variants with Sanger ordered but not yet validated
"""
# Retrieve a list of ids for variants with Sanger ordered grouped by case from the 'event' collection
# This way is much faster than querying over all variants in all cases of an institute
sanger_ordered_by_case = store.sanger_ordered(institute_id, user_id)
unevaluated = []
# for each object where key==case and value==[variant_id with Sanger ordered]
for item in sanger_ordered_by_case:
case_id = item["_id"]
# Get the case to collect display name
case_obj = store.case(case_id=case_id)
if not case_obj: # the case might have been removed
continue
case_display_name = case_obj.get("display_name")
# List of variant document ids
varid_list = item["vars"]
unevaluated_by_case = {}
unevaluated_by_case[case_display_name] = []
for var_id in varid_list:
# For each variant with sanger validation ordered
variant_obj = store.variant(document_id=var_id, case_id=case_id)
# Double check that Sanger was ordered (and not canceled) for the variant
if (
variant_obj is None
or variant_obj.get("sanger_ordered") is None
or variant_obj.get("sanger_ordered") is False
):
continue
validation = variant_obj.get("validation", "not_evaluated")
# Check that the variant is not evaluated
if validation in ["True positive", "False positive"]:
continue
unevaluated_by_case[case_display_name].append(variant_obj["_id"])
# If for a case there is at least one Sanger validation to evaluate add the object to the unevaluated objects list
if len(unevaluated_by_case[case_display_name]) > 0:
unevaluated.append(unevaluated_by_case)
return unevaluated
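# --- Illustrative sketch, not part of the original module ---
# Consumes the documented return shape of get_sanger_unevaluated(), i.e. a list
# of {case_display_name: [variant_id, ...]} dictionaries; arguments are assumptions.
def _count_sanger_unevaluated(store, institute_id, user_id):
    """Return the total number of Sanger-ordered variants awaiting evaluation."""
    unevaluated = get_sanger_unevaluated(store, institute_id, user_id)
    return sum(len(var_ids) for item in unevaluated for var_ids in item.values())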
def mme_add(
store,
user_obj,
case_obj,
add_gender,
add_features,
add_disorders,
genes_only,
mme_base_url,
mme_accepts,
mme_token,
):
"""Add a patient to MatchMaker server
Args:
store(adapter.MongoAdapter)
user_obj(dict) a scout user object (to be added as matchmaker contact)
case_obj(dict) a scout case object
add_gender(bool) if True case gender will be included in matchmaker
add_features(bool) if True HPO features will be included in matchmaker
add_disorders(bool) if True OMIM diagnoses will be included in matchmaker
genes_only(bool) if True only genes and not variants will be shared
mme_base_url(str) base url of the MME server
mme_accepts(str) request content accepted by MME server
mme_token(str) auth token of the MME server
Returns:
submitted_info(dict) info submitted to MatchMaker and its responses
"""
if not mme_base_url or not mme_accepts or not mme_token:
return "Please check that Matchmaker connection parameters are valid"
url = "".join([mme_base_url, "/patient/add"])
features = [] # this is the list of HPO terms
disorders = [] # this is the list of OMIM diagnoses
g_features = []
# create contact dictionary
contact_info = {
"name": user_obj["name"],
"href": "".join(["mailto:", user_obj["email"]]),
"institution": "Scout software user, Science For Life Laboratory, Stockholm, Sweden",
}
if add_features: # create features dictionaries
features = hpo_terms(case_obj)
if add_disorders: # create OMIM disorders dictionaries
disorders = omim_terms(case_obj)
# send a POST request and collect response for each affected individual in case
server_responses = []
submitted_info = {
"contact": contact_info,
"sex": add_gender,
"features": features,
"disorders": disorders,
"genes_only": genes_only,
"patient_id": [],
}
for individual in case_obj.get("individuals"):
if not individual["phenotype"] in [
2,
"affected",
]: # include only affected individuals
continue
patient = {
"contact": contact_info,
"id": ".".join(
[case_obj["_id"], individual.get("individual_id")]
            ),  # This is a required field for MME
"label": ".".join(
[case_obj["display_name"], individual.get("display_name")]
),
"features": features,
"disorders": disorders,
}
if add_gender:
if individual["sex"] == "1":
patient["sex"] = "MALE"
else:
patient["sex"] = "FEMALE"
if case_obj.get("suspects"):
g_features = genomic_features(
store, case_obj, individual.get("display_name"), genes_only
)
patient["genomicFeatures"] = g_features
# send add request to server and capture response
resp = matchmaker_request(
url=url,
token=mme_token,
method="POST",
content_type=mme_accepts,
accept="application/json",
data={"patient": patient},
)
server_responses.append(
{
"patient": patient,
"message": resp.get("message"),
"status_code": resp.get("status_code"),
}
)
submitted_info["server_responses"] = server_responses
return submitted_info
def mme_delete(case_obj, mme_base_url, mme_token):
"""Delete all affected samples for a case from MatchMaker
Args:
case_obj(dict) a scout case object
mme_base_url(str) base url of the MME server
mme_token(str) auth token of the MME server
Returns:
server_responses(list): a list of object of this type:
{
'patient_id': patient_id
'message': server_message,
'status_code': server_status_code
}
"""
server_responses = []
if not mme_base_url or not mme_token:
return "Please check that Matchmaker connection parameters are valid"
# for each patient of the case in matchmaker
for patient in case_obj["mme_submission"]["patients"]:
# send delete request to server and capture server's response
patient_id = patient["id"]
url = "".join([mme_base_url, "/patient/delete/", patient_id])
resp = matchmaker_request(url=url, token=mme_token, method="DELETE")
server_responses.append(
{
"patient_id": patient_id,
"message": resp.get("message"),
"status_code": resp.get("status_code"),
}
)
return server_responses
def mme_matches(case_obj, institute_obj, mme_base_url, mme_token):
"""Show Matchmaker submission data for a sample and eventual matches.
Args:
case_obj(dict): a scout case object
institute_obj(dict): an institute object
mme_base_url(str) base url of the MME server
mme_token(str) auth token of the MME server
Returns:
data(dict): data to display in the html template
"""
data = {"institute": institute_obj, "case": case_obj, "server_errors": []}
matches = {}
# loop over the submitted samples and get matches from the MatchMaker server
if not case_obj.get("mme_submission"):
return None
for patient in case_obj["mme_submission"]["patients"]:
patient_id = patient["id"]
matches[patient_id] = None
url = "".join([mme_base_url, "/matches/", patient_id])
server_resp = matchmaker_request(url=url, token=mme_token, method="GET")
if "status_code" in server_resp: # the server returned a valid response
# and this will be a list of match objects sorted by desc date
pat_matches = []
if server_resp.get("matches"):
pat_matches = parse_matches(patient_id, server_resp["matches"])
matches[patient_id] = pat_matches
else:
LOG.warning(
"Server returned error message: {}".format(server_resp["message"])
)
data["server_errors"].append(server_resp["message"])
data["matches"] = matches
return data
def mme_match(
case_obj, match_type, mme_base_url, mme_token, nodes=None, mme_accepts=None
):
"""Initiate a MatchMaker match against either other Scout patients or external nodes
Args:
case_obj(dict): a scout case object already submitted to MME
match_type(str): 'internal' or 'external'
mme_base_url(str): base url of the MME server
mme_token(str): auth token of the MME server
mme_accepts(str): request content accepted by MME server (only for internal matches)
Returns:
matches(list): a list of eventual matches
"""
query_patients = []
server_responses = []
url = None
# list of patient dictionaries is required for internal matching
query_patients = case_obj["mme_submission"]["patients"]
if match_type == "internal":
url = "".join([mme_base_url, "/match"])
for patient in query_patients:
json_resp = matchmaker_request(
url=url,
token=mme_token,
method="POST",
content_type=mme_accepts,
accept=mme_accepts,
data={"patient": patient},
)
resp_obj = {
"server": "Local MatchMaker node",
"patient_id": patient["id"],
"results": json_resp.get("results"),
"status_code": json_resp.get("status_code"),
"message": json_resp.get("message"), # None if request was successful
}
server_responses.append(resp_obj)
else: # external matching
# external matching requires only patient ID
query_patients = [patient["id"] for patient in query_patients]
node_ids = [node["id"] for node in nodes]
if match_type in node_ids: # match is against a specific external node
node_ids = [match_type]
# Match every affected patient
for patient in query_patients:
# Against every node
for node in node_ids:
url = "".join(
[mme_base_url, "/match/external/", patient, "?node=", node]
)
json_resp = matchmaker_request(url=url, token=mme_token, method="POST")
resp_obj = {
"server": node,
"patient_id": patient,
"results": json_resp.get("results"),
"status_code": json_resp.get("status_code"),
"message": json_resp.get(
"message"
), # None if request was successful
}
server_responses.append(resp_obj)
return server_responses
| 36.962795 | 146 | 0.626765 |
0f877ada96c534c44c3c89a226e9baa62d6b005b | 2,387 | py | Python | pysc2/agents/myAgent/myAgent_15_BIC_DDPG_2/decisionMaker/level_1/level_1.py | Hotpotfish/pysc2 | 3d7f7ffc01a50ab69d435b65c892cd0bc11265a8 | ["Apache-2.0"] | null | null | null | pysc2/agents/myAgent/myAgent_15_BIC_DDPG_2/decisionMaker/level_1/level_1.py | Hotpotfish/pysc2 | 3d7f7ffc01a50ab69d435b65c892cd0bc11265a8 | ["Apache-2.0"] | null | null | null | pysc2/agents/myAgent/myAgent_15_BIC_DDPG_2/decisionMaker/level_1/level_1.py | Hotpotfish/pysc2 | 3d7f7ffc01a50ab69d435b65c892cd0bc11265a8 | ["Apache-2.0"] | null | null | null |
from pysc2.agents.myAgent.myAgent_15_BIC_DDPG_2.config import config
from pysc2.agents.myAgent.myAgent_15_BIC_DDPG_2.decisionMaker.level_1.DQN_for_level_1 import DQN
from pysc2.agents.myAgent.myAgent_15_BIC_DDPG_2.decisionMaker.decision_maker import decision_maker
import pysc2.agents.myAgent.myAgent_15_BIC_DDPG_2.smart_actions as sa
from pysc2.agents.myAgent.myAgent_15_BIC_DDPG_2.tools import handcraft_function
class level_1():
def __init__(self):
self.DataShape = (None, config.MAP_SIZE, config.MAP_SIZE, 39)
self.top_decision_maker = decision_maker(
DQN(config.MU, config.SIGMA, config.LEARING_RATE, len(sa.controllers), 0, self.DataShape, 'top_decision_maker'))
    # Retraining mode: no external model needs to be loaded
def train_action(self, obs):
self.top_decision_maker.current_state = handcraft_function.get_all_observation(obs)
if self.top_decision_maker.previous_action is not None:
self.top_decision_maker.network.perceive(self.top_decision_maker.previous_state,
self.top_decision_maker.previous_action,
self.top_decision_maker.previous_reward,
self.top_decision_maker.current_state,
obs.last())
controller_number = self.top_decision_maker.network.egreedy_action(self.top_decision_maker.current_state)
self.top_decision_maker.previous_reward = obs.reward
self.top_decision_maker.previous_state = self.top_decision_maker.current_state
self.top_decision_maker.previous_action = controller_number
return controller_number
def test_action(self, obs):
self.top_decision_maker.current_state = handcraft_function.get_all_observation(obs)
return self.top_decision_maker.network.action(self.top_decision_maker.current_state)
def train_network(self, modelSavePath):
self.top_decision_maker.network.train_Q_network(modelSavePath)
def load_model(self, modelLoadPath):
self.top_decision_maker.network.restoreModel(modelLoadPath)
print('level_1 load complete!')
def save_model(self, modelSavePath, episode):
self.top_decision_maker.network.saveModel(modelSavePath, episode)
print('level_1 episode %d save complete!' % (episode))
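# --- Illustrative sketch, not part of the original module ---
# One possible way to drive level_1 from an agent step loop; `obs`,
# `model_save_path` and `episode` are assumptions supplied by the caller.
def _example_step(controller, obs, model_save_path, episode):
    controller_number = controller.train_action(obs)  # pick a sub-controller
    controller.train_network(model_save_path)         # one training update
    if obs.last():
        controller.save_model(model_save_path, episode)
    return controller_number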
| 55.511628 | 124 | 0.71638 |
6cc15869a6fd1c4225777070bd76e5c58a11220c | 10,704 | py | Python | planet/scripts/util.py | dakcarto/planet-client-python | fc151c29f2f5dd41e183768afab2814c117e7497 | ["Apache-2.0"] | null | null | null | planet/scripts/util.py | dakcarto/planet-client-python | fc151c29f2f5dd41e183768afab2814c117e7497 | ["Apache-2.0"] | null | null | null | planet/scripts/util.py | dakcarto/planet-client-python | fc151c29f2f5dd41e183768afab2814c117e7497 | ["Apache-2.0"] | null | null | null |
# Copyright 2017 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
from itertools import chain
import json
import logging
import re
from os import path
import sys
import tempfile
import textwrap
import threading
import time
import warnings
import click
from click import termui
from requests.packages.urllib3 import exceptions as urllib3exc
from planet import api
from planet.api import filters
def _split(value):
'''return input split on any whitespace or comma'''
return re.split(r'\s+|,', value)
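# Illustrative examples of the helper above (added for clarity, not in the original;
# the item type strings are just sample values):
#   _split('PSScene4Band,REOrthoTile')  -> ['PSScene4Band', 'REOrthoTile']
#   _split('PSScene4Band REOrthoTile')  -> ['PSScene4Band', 'REOrthoTile']
#   _split('a, b')                      -> ['a', '', 'b']  (adjacent ',' and space give an empty item)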
# monkey patch warnings module to hide InsecurePlatformWarning - the warning
# notes 'may cause certain SSL connections to fail' so it doesn't seem to
# introduce any vulnerabilities
# we capture the warning if present and present this if any SSLError is caught
# just in case this configuration is an issue
_insecure_warning = []
showwarning = warnings.showwarning
def hack(message, category, filename, lineno):
if category is urllib3exc.InsecurePlatformWarning:
if len(_insecure_warning) == 0:
_insecure_warning.append(message)
return
showwarning(message, category, filename, lineno)
warnings.showwarning = hack
def and_filter_from_opts(opts):
'''build an AND filter from the provided opts dict as passed to a command
from the filter_options decorator. Assumes all dict values are lists of
filter dict constructs.'''
return filters.and_filter(*list(chain.from_iterable([
o for o in opts.values() if o]
)))
def check_writable(dirpath):
try:
tempfile.NamedTemporaryFile(dir=dirpath).close()
except OSError:
return False
# in windows with a vagrant ro-mount, this was raised instead
except IOError:
return False
return True
def filter_from_opts(**kw):
    '''Build an AND filter from the provided kwargs, defaulting to an
empty 'and' filter (@todo: API workaround) if nothing is provided.
If the 'filter_json' argument is provided, this will be assumed to contain
a filter specification and will be anded with other filters. If the
'filter_json' is a search, the search filter value will be used.
All kw values should be tuple or list
'''
filter_in = kw.pop('filter_json', None)
active = and_filter_from_opts(kw)
if filter_in:
filter_in = filter_in.get('filter', filter_in)
if len(active['config']) > 0:
active = filters.and_filter(active, filter_in)
else:
active = filter_in
return active
def search_req_from_opts(**kw):
# item_type will be list of lists - flatten
item_types = chain.from_iterable(kw.pop('item_type'))
name = kw.pop('name', '')
interval = kw.pop('interval', '')
filt = filter_from_opts(**kw)
return filters.build_search_request(
filt, item_types, name=name, interval=interval)
def call_and_wrap(func, *args, **kw):
'''call the provided function and wrap any API exception with a click
exception. this means no stack trace is visible to the user but instead
a (hopefully) nice message is provided.
note: could be a decorator but didn't play well with click
'''
try:
return func(*args, **kw)
except api.exceptions.APIException as ex:
click_exception(ex)
except urllib3exc.SSLError:
# see monkey patch above re InsecurePlatformWarning
if _insecure_warning:
click.echo(click.style(str(_insecure_warning[0]), fg='red'))
raise
def click_exception(ex):
if type(ex) is api.exceptions.APIException:
raise click.ClickException('Unexpected response: %s' % str(ex))
msg = "%s: %s" % (type(ex).__name__, str(ex))
raise click.ClickException(msg)
def echo_json_response(response, pretty, limit=None, ndjson=False):
'''Wrapper to echo JSON with optional 'pretty' printing. If pretty is not
    provided explicitly and stdout is a terminal (and not redirected or piped),
the default will be to indent and sort keys'''
indent = None
sort_keys = False
nl = False
if not ndjson and (pretty or (pretty is None and sys.stdout.isatty())):
indent = 2
sort_keys = True
nl = True
try:
if ndjson and hasattr(response, 'items_iter'):
items = response.items_iter(limit)
for item in items:
click.echo(json.dumps(item))
elif not ndjson and hasattr(response, 'json_encode'):
response.json_encode(click.get_text_stream('stdout'), limit=limit,
indent=indent, sort_keys=sort_keys)
else:
res = response.get_raw()
res = json.dumps(json.loads(res), indent=indent,
sort_keys=sort_keys)
click.echo(res)
if nl:
click.echo()
except IOError as ioe:
# hide scary looking broken pipe stack traces
raise click.ClickException(str(ioe))
def read(value, split=False):
'''Get the value of an option interpreting as a file implicitly or
explicitly and falling back to the value if not explicitly specified.
If the value is '@name', then a file must exist with name and the returned
value will be the contents of that file. If the value is '@-' or '-', then
stdin will be read and returned as the value. Finally, if a file exists
with the provided value, that file will be read. Otherwise, the value
will be returned.
'''
v = str(value)
retval = value
if v[0] == '@' or v == '-':
fname = '-' if v == '-' else v[1:]
try:
with click.open_file(fname) as fp:
if not fp.isatty():
retval = fp.read()
else:
retval = None
# @todo better to leave as IOError and let caller handle it
# to better report in context of call (e.g. the option/type)
except IOError as ioe:
# if explicit and problems, raise
if v[0] == '@':
raise click.ClickException(str(ioe))
elif path.exists(v) and path.isfile(v):
with click.open_file(v) as fp:
retval = fp.read()
if retval and split and type(retval) != tuple:
retval = _split(retval.strip())
return retval
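# --- Illustrative sketch, not part of the original module ---
# Demonstrates the three lookup rules documented in read(); 'query.json' is a
# hypothetical file name.
def _read_examples():
    literal = read('{"item_types": ["PSScene4Band"]}')  # no such file -> returned as-is
    explicit = read('@query.json')                      # must exist, else ClickException
    implicit = read('query.json')                       # file contents if it exists, else the value
    return literal, explicit, implicit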
class _BaseOutput(object):
refresh_rate = 1
def _report_complete(self, item, asset, path=None):
msg = {
'item': item['id'],
'asset': asset['type'],
'location': path or asset['location']
}
# cancel() allows report log to persist for both ANSI & regular output
self.cancel()
click.echo(json.dumps(msg))
def __init__(self, thread, dl):
self._thread = thread
self._timer = None
self._dl = dl
self._running = False
dl.on_complete = self._report_complete
def _schedule(self):
if self._thread.is_alive() and self._running:
self._timer = threading.Timer(self.refresh_rate, self._run)
self._timer.start()
return True
def _run(self, exit=False):
if self._running:
self._output(self._dl.stats())
if not exit and self._running and not self._schedule():
self._run(True)
def start(self):
self._running = True
self._run()
def cancel(self):
self._running = False
self._timer and self._timer.cancel()
class Output(_BaseOutput):
def _output(self, stats):
logging.info('%s', stats)
class AnsiOutput(_BaseOutput):
def __init__(self, *args, **kw):
_BaseOutput.__init__(self, *args, **kw)
self._start = time.time()
# log msg ring buffer
self._records = deque(maxlen=100)
self._lock = threading.Lock()
self._stats = {}
# highjack the root handler, remove existing and replace with one
# that feeds our ring buffer
h = logging.Handler()
root = logging.getLogger('')
h.formatter = root.handlers[0].formatter
h.emit = self._emit
root.handlers = (h,)
self._handler = h
def start(self):
click.clear()
_BaseOutput.start(self)
def _emit(self, record):
with self._lock:
self._records.append(self._handler.format(record))
self._do_output()
def _output(self, stats):
with self._lock:
self._stats.update(stats)
self._do_output()
def _do_output(self):
# renders a terminal like:
# highlighted status rows
# ....
#
# scrolling log output
# ...
width, height = click.termui.get_terminal_size()
wrapper = textwrap.TextWrapper(width=width)
self._stats['elapsed'] = '%d' % (time.time() - self._start)
stats = ['%s: %s' % (k, v) for k, v in sorted(self._stats.items())]
stats = wrapper.wrap(''.join([s.ljust(25) for s in stats]))
remaining = height - len(stats) - 2
stats = [s.ljust(width) for s in stats]
lidx = max(0, len(self._records) - remaining)
loglines = []
while remaining > 0 and lidx < len(self._records):
wrapped = wrapper.wrap(self._records[lidx])
while remaining and wrapped:
loglines.append(wrapped.pop(0))
remaining -= 1
lidx += 1
        # clear/cursor-to-1,1/highlight
click.echo(u'\u001b[2J\u001b[1;1H\u001b[30;47m' + '\n'.join(stats)
# unhighlight
+ u'\u001b[39;49m\n' + '\n'.join(loglines))
def downloader_output(dl, disable_ansi=False):
thread = threading.current_thread()
# do fancy output if we can or not explicitly disabled
if sys.stdout.isatty() and not disable_ansi and not termui.WIN:
return AnsiOutput(thread, dl)
# work around for lack of nice output for downloader on windows:
# unless told to be quiet, set logging higher to get some output
# @todo fallback to simpler 'UI' when isatty on win
if termui.WIN and not disable_ansi:
logging.getLogger('').setLevel(logging.INFO)
return Output(thread, dl)
| 33.45 | 78 | 0.63537 |
3a5ef0ae473daaa1a8153aff9d2c3a07022a3b19 | 6,382 | py | Python | release/scripts/addons/io_mesh_ply/export_ply.py | noorbeast/BlenderSource | 65ebecc5108388965678b04b43463b85f6c69c1d | ["Naumen", "Condor-1.1", "MS-PL"] | 2 | 2019-03-20T13:10:46.000Z | 2019-05-15T20:00:31.000Z | engine/2.80/scripts/addons/io_mesh_ply/export_ply.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | ["Unlicense"] | null | null | null | engine/2.80/scripts/addons/io_mesh_ply/export_ply.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | ["Unlicense"] | null | null | null |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
"""
This script exports Stanford PLY files from Blender. It supports normals,
colors, and texture coordinates per face or per vertex.
Only one mesh can be exported at a time.
"""
import bpy
import os
def save_mesh(
filepath,
mesh,
use_normals=True,
use_uv_coords=True,
use_colors=True,
):
def rvec3d(v):
return round(v[0], 6), round(v[1], 6), round(v[2], 6)
def rvec2d(v):
return round(v[0], 6), round(v[1], 6)
file = open(filepath, "w", encoding="utf8", newline="\n")
fw = file.write
    # Be sure tessellated loop triangles are available!
if not mesh.loop_triangles and mesh.polygons:
mesh.calc_loop_triangles()
has_uv = bool(mesh.uv_layers)
has_vcol = bool(mesh.vertex_colors)
if not has_uv:
use_uv_coords = False
if not has_vcol:
use_colors = False
if not use_uv_coords:
has_uv = False
if not use_colors:
has_vcol = False
if has_uv:
active_uv_layer = mesh.uv_layers.active
if not active_uv_layer:
use_uv_coords = False
has_uv = False
else:
active_uv_layer = active_uv_layer.data
if has_vcol:
active_col_layer = mesh.vertex_colors.active
if not active_col_layer:
use_colors = False
has_vcol = False
else:
active_col_layer = active_col_layer.data
# in case
color = uvcoord = uvcoord_key = normal = normal_key = None
mesh_verts = mesh.vertices # save a lookup
    ply_verts = []  # list of (vertex index, normal, uv, color) tuples
# vdict = {} # (index, normal, uv) -> new index
vdict = [{} for i in range(len(mesh_verts))]
ply_faces = [[] for f in range(len(mesh.loop_triangles))]
vert_count = 0
for i, f in enumerate(mesh.loop_triangles):
smooth = not use_normals or f.use_smooth
if not smooth:
normal = f.normal[:]
normal_key = rvec3d(normal)
if has_uv:
uv = [active_uv_layer[l].uv[:] for l in f.loops]
if has_vcol:
col = [active_col_layer[l].color[:] for l in f.loops]
pf = ply_faces[i]
for j, vidx in enumerate(f.vertices):
v = mesh_verts[vidx]
if smooth:
normal = v.normal[:]
normal_key = rvec3d(normal)
if has_uv:
uvcoord = uv[j][0], uv[j][1]
uvcoord_key = rvec2d(uvcoord)
if has_vcol:
color = col[j]
color = (
int(color[0] * 255.0),
int(color[1] * 255.0),
int(color[2] * 255.0),
int(color[3] * 255.0),
)
key = normal_key, uvcoord_key, color
vdict_local = vdict[vidx]
pf_vidx = vdict_local.get(key) # Will be None initially
if pf_vidx is None: # same as vdict_local.has_key(key)
pf_vidx = vdict_local[key] = vert_count
ply_verts.append((vidx, normal, uvcoord, color))
vert_count += 1
pf.append(pf_vidx)
fw("ply\n")
fw("format ascii 1.0\n")
fw("comment Created by Blender %s - "
"www.blender.org, source file: %r\n" %
(bpy.app.version_string, os.path.basename(bpy.data.filepath)))
fw("element vertex %d\n" % len(ply_verts))
fw("property float x\n"
"property float y\n"
"property float z\n")
if use_normals:
fw("property float nx\n"
"property float ny\n"
"property float nz\n")
if use_uv_coords:
fw("property float s\n"
"property float t\n")
if use_colors:
fw("property uchar red\n"
"property uchar green\n"
"property uchar blue\n"
"property uchar alpha\n")
fw("element face %d\n" % len(mesh.loop_triangles))
fw("property list uchar uint vertex_indices\n")
fw("end_header\n")
for i, v in enumerate(ply_verts):
fw("%.6f %.6f %.6f" % mesh_verts[v[0]].co[:]) # co
if use_normals:
fw(" %.6f %.6f %.6f" % v[1]) # no
if use_uv_coords:
fw(" %.6f %.6f" % v[2]) # uv
if use_colors:
fw(" %u %u %u %u" % v[3]) # col
fw("\n")
for pf in ply_faces:
if len(pf) == 3:
fw("3 %d %d %d\n" % tuple(pf))
else:
fw("4 %d %d %d %d\n" % tuple(pf))
file.close()
print("writing %r done" % filepath)
return {'FINISHED'}
def save(
operator,
context,
filepath="",
use_mesh_modifiers=True,
use_normals=True,
use_uv_coords=True,
use_colors=True,
global_matrix=None
):
obj = context.active_object
if global_matrix is None:
from mathutils import Matrix
global_matrix = Matrix()
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode='OBJECT')
if use_mesh_modifiers and obj.modifiers:
mesh = obj.to_mesh(context.depsgraph, True)
else:
mesh = obj.data.copy()
if not mesh:
raise Exception("Error, could not get mesh data from active object")
mesh.transform(global_matrix @ obj.matrix_world)
if use_normals:
mesh.calc_normals()
ret = save_mesh(filepath, mesh,
use_normals=use_normals,
use_uv_coords=use_uv_coords,
use_colors=use_colors,
)
bpy.data.meshes.remove(mesh)
return ret
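# --- Illustrative sketch, not part of the original add-on ---
# A typical invocation from within Blender's Python console; the file path is
# an assumption, and `operator` is unused by save(), so None is passed here.
#
#   import bpy
#   from io_mesh_ply import export_ply
#   export_ply.save(None, bpy.context, filepath="/tmp/mesh.ply",
#                   use_mesh_modifiers=True, use_normals=True,
#                   use_uv_coords=True, use_colors=True)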
| 28.238938 | 76 | 0.573018 |
715788c394886804e8834078fdbe03a0870797e7 | 14,481 | py | Python | delve/pca_layers.py | JustinShenk/delve | 55bec2cf6c7f34971c386de15e005edb91e2a64a | ["MIT"] | 3 | 2018-08-19T06:54:19.000Z | 2019-02-26T12:31:17.000Z | delve/pca_layers.py | JustinShenk/delve | 55bec2cf6c7f34971c386de15e005edb91e2a64a | ["MIT"] | 6 | 2018-06-14T18:05:58.000Z | 2018-12-27T14:04:05.000Z | delve/pca_layers.py | JustinShenk/delve | 55bec2cf6c7f34971c386de15e005edb91e2a64a | ["MIT"] | 3 | 2018-08-23T12:41:06.000Z | 2018-11-29T10:12:30.000Z |
from typing import Tuple
import numpy as np
import torch
from torch.nn import Module
from torch.nn.functional import interpolate
from .logger import log
global num
def rvs(dim=3) -> np.ndarray:
"""Create random orthonormal matrix of size ``dim``.
.. note::
Yanked from hpaulj's implementation of SciPy's :func:`scipy.stats.special_ortho_group` in Numpy at https://stackoverflow.com/questions/38426349/how-to-create-random-orthonormal-matrix-in-python-numpy which is from the paper:
Stewart, G.W., "The efficient generation of random orthogonal
matrices with an application to condition estimators", SIAM Journal
on Numerical Analysis, 17(3), pp. 403-409, 1980.
"""
random_state = np.random
H = np.eye(dim)
D = np.ones((dim, ))
for n in range(1, dim):
x = random_state.normal(size=(dim - n + 1, ))
D[n - 1] = np.sign(x[0])
x[0] -= D[n - 1] * np.sqrt((x * x).sum())
# Householder transformation
Hx = (np.eye(dim - n + 1) - 2. * np.outer(x, x) / (x * x).sum())
mat = np.eye(dim)
mat[n - 1:, n - 1:] = Hx
H = np.dot(H, mat)
# Fix the last sign such that the determinant is 1
D[-1] = (-1)**(1 - (dim % 2)) * D.prod()
# Equivalent to np.dot(np.diag(D), H) but faster, apparently
H = (D * H.T).T
return H
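# --- Illustrative sketch, not part of the original module ---
# Quick numerical check of the property rvs() is meant to provide: the returned
# matrix is orthonormal, so H @ H.T is (up to float error) the identity.
def _check_rvs_orthonormal(dim: int = 5) -> bool:
    H = rvs(dim)
    return bool(np.allclose(H @ H.T, np.eye(dim)))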
def change_all_pca_layer_thresholds_and_inject_random_directions(
threshold: float,
network: Module,
verbose: bool = False,
device='cpu',
include_names: bool = False) -> Tuple[list, list, list]:
in_dims = []
fs_dims = []
sat = []
names = []
lc = {'lin': 0, 'conv': 0}
for module in network.modules():
if isinstance(module, LinearPCALayer):
module.threshold = threshold
fake_base = rvs(module.fs_dim)[:, :module.in_dim]
in_dims.append(module.in_dim)
fs_dims.append(module.fs_dim)
sat.append(module.sat)
fake_projection = fake_base @ fake_base.T
module.transformation_matrix.data = torch.from_numpy(
fake_projection.astype('float32')).to(device)
names.append(f'Linear-{lc["lin"]}')
lc["lin"] += 1
if verbose:
log.info(
f'Changed threshold for layer {module} to {threshold}')
elif isinstance(module, Conv2DPCALayer):
module.threshold = threshold
in_dims.append(module.in_dim)
fs_dims.append(module.fs_dim)
sat.append(module.sat)
fake_base = rvs(module.fs_dim)[:, :module.in_dim]
fake_projection = fake_base @ fake_base.T
module.transformation_matrix.data = torch.from_numpy(
fake_projection.astype('float32')).to(device)
weight = torch.nn.Parameter(
module.transformation_matrix.unsqueeze(2).unsqueeze(3))
module.convolution.weight = weight
names.append(f'Conv-{lc["conv"]}')
lc['conv'] += 1
if verbose:
log.info(
f'Changed threshold for layer {module} to {threshold}')
if include_names:
return sat, in_dims, fs_dims, names
return sat, in_dims, fs_dims
def change_all_pca_layer_thresholds(threshold: float,
network: Module,
verbose: bool = False):
in_dims = []
fs_dims = []
sat = []
names = []
lc = {'lin': 0, 'conv': 0}
for module in network.modules():
if isinstance(module, Conv2DPCALayer) or isinstance(
module, LinearPCALayer):
module.threshold = threshold
in_dims.append(module.in_dim)
fs_dims.append(module.fs_dim)
sat.append(module.sat)
if isinstance(module, Conv2DPCALayer):
names.append(f'Conv-{lc["conv"]}')
lc['conv'] += 1
else:
names.append(f"Lin-{lc['lin']}")
lc["lin"] += 1
if verbose:
log.info(
f'Changed threshold for layer {module} to {threshold}')
return sat, in_dims, fs_dims, names
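# --- Illustrative sketch, not part of the original module ---
# Minimal use of the helper above; `model` is assumed to be a torch.nn.Module
# that contains LinearPCALayer / Conv2DPCALayer instances with computed PCA stats.
def _log_layer_saturation(model: Module, threshold: float = 0.99) -> None:
    sat, in_dims, fs_dims, names = change_all_pca_layer_thresholds(threshold, model)
    for name, s, d_in, d_fs in zip(names, sat, in_dims, fs_dims):
        log.info('%s: saturation %s%% (%s of %s dims)', name, s, d_in, d_fs)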
class LinearPCALayer(Module):
"""Eigenspace of the covariance matrix generated in TorchCovarianceMatrix with
equation :eq:`covariance`.
"""
num = 0
def __init__(self,
in_features: int,
threshold: float = .99,
keepdim: bool = True,
verbose: bool = False,
gradient_epoch_start: int = 20,
centering: bool = True):
super(LinearPCALayer, self).__init__()
self.register_buffer('eigenvalues',
torch.zeros(in_features, dtype=torch.float64))
self.register_buffer(
'eigenvectors',
torch.zeros((in_features, in_features), dtype=torch.float64))
self.register_buffer('_threshold',
torch.Tensor([threshold]).type(torch.float64))
self.register_buffer(
'sum_squares',
torch.zeros((in_features, in_features), dtype=torch.float64))
self.register_buffer('seen_samples', torch.zeros(1,
dtype=torch.float64))
self.register_buffer('running_sum',
torch.zeros(in_features, dtype=torch.float64))
self.register_buffer('mean',
torch.zeros(in_features, dtype=torch.float32))
self.keepdim: bool = keepdim
self.verbose: bool = verbose
self.pca_computed: bool = True
self.gradient_epoch = gradient_epoch_start
self.epoch = 0
self.name = f'pca{LinearPCALayer.num}'
LinearPCALayer.num += 1
self._centering = centering
self.data_dtype = None
def is_floating_point(self):
return False
@property
def threshold(self) -> float:
return self._threshold
@threshold.setter
def threshold(self, threshold: float) -> None:
self._threshold.data = torch.Tensor([threshold]).type(
torch.float64).to(self.threshold.device)
self._compute_pca_matrix()
@property
def centering(self):
return self._centering
@centering.setter
def centering(self, centring: bool):
self._centering = centring
self._compute_pca_matrix()
def _update_autorcorrelation(self, x: torch.Tensor) -> None:
if self.data_dtype is None:
self.data_dtype = x.dtype
x = x.type(torch.float64)
# log.info(x.dtype)
self.sum_squares.data += torch.matmul(x.transpose(0, 1), x)
self.running_sum += x.sum(dim=0)
self.seen_samples.data += x.shape[0]
def _compute_autorcorrelation(self) -> torch.Tensor:
tlen = self.seen_samples
cov_mtx = self.sum_squares
cov_mtx = cov_mtx / tlen
avg = self.running_sum / tlen
if self.centering:
avg_mtx = torch.ger(avg, avg)
cov_mtx = cov_mtx - avg_mtx
return cov_mtx
def _compute_eigenspace(self):
self.eigenvalues.data, self.eigenvectors.data = self._compute_autorcorrelation(
).symeig(True) #.type(self.data_dtype)
self.eigenvalues.data, idx = self.eigenvalues.sort(descending=True)
        # correct numerical error, matrix must be positively semi-definite
self.eigenvalues[self.eigenvalues < 0] = 0
self.eigenvectors.data = self.eigenvectors[:, idx]
def _reset_autorcorrelation(self):
self.sum_squares.data = torch.zeros(self.sum_squares.shape,
dtype=torch.float64).to(
self.sum_squares.device)
self.seen_samples.data = torch.zeros(self.seen_samples.shape,
dtype=torch.float64).to(
self.sum_squares.device)
self.running_sum.data = torch.zeros(self.running_sum.shape,
dtype=torch.float64).to(
self.sum_squares.device)
def _compute_pca_matrix(self):
if self.verbose:
log.info('computing autorcorrelation for Linear')
#log.info('Mean pre-activation vector:', self.mean)
percentages = self.eigenvalues.cumsum(0) / self.eigenvalues.sum()
eigen_space = self.eigenvectors[:, percentages < self.threshold]
if eigen_space.shape[1] == 0:
eigen_space = self.eigenvectors[:, :1]
log.info(
f'Detected singularity defaulting to single dimension {eigen_space.shape}'
)
elif self.threshold - (
percentages[percentages < self.threshold][-1]) > 0.02:
log.info(
f'Highest cumvar99 is {percentages[percentages < self.threshold][-1]}, extending eigenspace by one dimension for eigenspace of {eigen_space.shape}'
)
eigen_space = self.eigenvectors[:, :eigen_space.shape[1] + 1]
sat = round((eigen_space.shape[1] / self.eigenvalues.shape[0]) * 100,
4)
fs_dim = eigen_space.shape[0]
in_dim = eigen_space.shape[1]
if self.verbose:
log.info(
f'Saturation: {round(eigen_space.shape[1] / self.eigenvalues.shape[0], 4)}%\n'
f'Eigenspace has shape {eigen_space.shape}')
self.transformation_matrix: torch.Tensor = eigen_space.matmul(
eigen_space.t()).type(torch.float32)
self.reduced_transformation_matrix: torch.Tensor = eigen_space.type(
torch.float32)
self.sat, self.in_dim, self.fs_dim = sat, in_dim, fs_dim
def forward(self, x):
if self.training:
self.pca_computed = False
self._update_autorcorrelation(x)
return x
else:
if not self.pca_computed:
self._compute_autorcorrelation()
self._compute_eigenspace()
self._compute_pca_matrix()
self.pca_computed = True
self._reset_autorcorrelation()
self.epoch += 1
if self.keepdim:
if not self.centering:
return x @ self.transformation_matrix.t()
else:
self.mean = self.mean.to(x.device)
self.transformation_matrix = self.transformation_matrix.to(
x.device)
return ((x - self.mean)
@ self.transformation_matrix.t()) + self.mean
else:
if not self.centering:
return x @ self.reduced_transformation_matrix
else:
return ((x - self.mean)
@ self.reduced_transformation_matrix) + self.mean
class Conv2DPCALayer(LinearPCALayer):
"""Compute PCA of Conv2D layer"""
def __init__(self,
in_filters,
threshold: float = 0.99,
verbose: bool = True,
gradient_epoch_start: int = 20,
centering: bool = False,
downsampling: int = None):
super(Conv2DPCALayer,
self).__init__(centering=centering,
in_features=in_filters,
threshold=threshold,
keepdim=True,
verbose=verbose,
gradient_epoch_start=gradient_epoch_start)
if verbose:
log.info('Added Conv2D PCA Layer')
self.convolution = torch.nn.Conv2d(in_channels=in_filters,
out_channels=in_filters,
kernel_size=1,
stride=1,
bias=True)
self.mean_subtracting_convolution = torch.nn.Conv2d(
in_channels=in_filters,
out_channels=in_filters,
kernel_size=1,
stride=1,
bias=True)
self.mean_subtracting_convolution.weight = torch.nn.Parameter(
torch.zeros((in_filters, in_filters)).unsqueeze(2).unsqueeze(3))
self.downsampling = downsampling
def _compute_pca_matrix(self):
if self.verbose:
log.info('computing autorcorrelation for Conv2D')
super()._compute_pca_matrix()
        # unsqueeze the matrix into DxDx1x1 in order to make it behave like a 1x1 convolution
weight = torch.nn.Parameter(
self.transformation_matrix.unsqueeze(2).unsqueeze(3))
self.convolution.weight = weight
self.mean_subtracting_convolution.weight = torch.nn.Parameter(
torch.zeros_like(
self.transformation_matrix).unsqueeze(2).unsqueeze(3))
if self.centering:
self.convolution.bias = torch.nn.Parameter(
self.mean.type(torch.float32))
self.mean_subtracting_convolution.bias = torch.nn.Parameter(
-self.mean.type(torch.float32))
else:
self.convolution.bias = torch.nn.Parameter(
torch.zeros_like(self.mean))
self.mean_subtracting_convolution.bias = torch.nn.Parameter(
torch.zeros_like(self.mean))
def forward(self, x):
if self.training:
self.pca_computed = False
if self.downsampling is not None:
x1 = interpolate(x, size=self.downsampling, mode='nearest')
else:
x1 = x
swapped: torch.Tensor = x1.permute([1, 0, 2, 3])
flattened: torch.Tensor = swapped.flatten(1)
reshaped_batch: torch.Tensor = flattened.permute([1, 0])
self._update_autorcorrelation(reshaped_batch)
return x
else:
if not self.pca_computed:
self._compute_autorcorrelation()
self._compute_eigenspace()
self._compute_pca_matrix()
self._reset_autorcorrelation()
self.pca_computed = True
x1 = self.mean_subtracting_convolution(x)
x = x + x1
return self.convolution(x)
| 40.225 | 232 | 0.560597 |
1f445feb5a0e101dd54ba153d62cf1379478205f | 22,310 | py | Python | tests/integration/test_storage_s3/test.py | mcspring/ClickHouse | 08f713f177f950c2f675c2c75d1261c91066888c | ["Apache-2.0"] | 3 | 2021-03-06T01:52:26.000Z | 2021-10-01T15:13:46.000Z | tests/integration/test_storage_s3/test.py | mcspring/ClickHouse | 08f713f177f950c2f675c2c75d1261c91066888c | ["Apache-2.0"] | null | null | null | tests/integration/test_storage_s3/test.py | mcspring/ClickHouse | 08f713f177f950c2f675c2c75d1261c91066888c | ["Apache-2.0"] | 1 | 2021-02-20T16:00:52.000Z | 2021-02-20T16:00:52.000Z |
import gzip
import json
import logging
import os
import io
import random
import threading
import time
import helpers.client
import pytest
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())
# Creates S3 bucket for tests and allows anonymous read-write access to it.
def prepare_s3_bucket(cluster):
# Allows read-write access for bucket without authorization.
bucket_read_write_policy = {"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetBucketLocation",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::root/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::root/*"
}
]}
minio_client = cluster.minio_client
minio_client.set_bucket_policy(cluster.minio_bucket, json.dumps(bucket_read_write_policy))
cluster.minio_restricted_bucket = "{}-with-auth".format(cluster.minio_bucket)
if minio_client.bucket_exists(cluster.minio_restricted_bucket):
minio_client.remove_bucket(cluster.minio_restricted_bucket)
minio_client.make_bucket(cluster.minio_restricted_bucket)
def put_s3_file_content(cluster, bucket, filename, data):
buf = io.BytesIO(data)
cluster.minio_client.put_object(bucket, filename, buf, len(data))
# Returns content of given S3 file as string.
def get_s3_file_content(cluster, bucket, filename, decode=True):
    # type: (ClickHouseCluster, str, str, bool) -> str
data = cluster.minio_client.get_object(bucket, filename)
data_str = b""
for chunk in data.stream():
data_str += chunk
if decode:
return data_str.decode()
return data_str
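# --- Illustrative sketch, not part of the original test module ---
# Round-trips raw bytes through MinIO using the two helpers above; shown only
# to illustrate their signatures (`data` must be bytes for put_s3_file_content).
def _roundtrip_s3_file(cluster, bucket, filename, data):
    put_s3_file_content(cluster, bucket, filename, data)
    return get_s3_file_content(cluster, bucket, filename, decode=False) == data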
@pytest.fixture(scope="module")
def cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance("restricted_dummy", main_configs=["configs/config_for_test_remote_host_filter.xml"],
with_minio=True)
cluster.add_instance("dummy", with_minio=True, main_configs=["configs/defaultS3.xml"])
cluster.add_instance("s3_max_redirects", with_minio=True, main_configs=["configs/defaultS3.xml"], user_configs=["configs/s3_max_redirects.xml"])
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
prepare_s3_bucket(cluster)
logging.info("S3 bucket created")
run_s3_mock(cluster)
yield cluster
finally:
cluster.shutdown()
def run_query(instance, query, stdin=None, settings=None):
# type: (ClickHouseInstance, str, object, dict) -> str
logging.info("Running query '{}'...".format(query))
result = instance.query(query, stdin=stdin, settings=settings)
logging.info("Query finished")
return result
# Test simple put.
@pytest.mark.parametrize("maybe_auth,positive", [
("", True),
("'minio','minio123',", True),
("'wrongid','wrongkey',", False)
])
def test_put(cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
values_csv = "1,2,3\n3,2,1\n78,43,45\n"
filename = "test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') values {}".format(
cluster.minio_host, cluster.minio_port, bucket, filename, maybe_auth, table_format, values)
try:
run_query(instance, put_query)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert values_csv == get_s3_file_content(cluster, bucket, filename)
# Test put no data to S3.
@pytest.mark.parametrize("auth", [
"'minio','minio123',"
])
def test_empty_put(cluster, auth):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
create_empty_table_query = """
CREATE TABLE empty_table (
{}
) ENGINE = Null()
""".format(table_format)
run_query(instance, create_empty_table_query)
filename = "empty_put_test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') select * from empty_table".format(
cluster.minio_host, cluster.minio_port, bucket, filename, auth, table_format)
run_query(instance, put_query)
try:
run_query(instance, "select count(*) from s3('http://{}:{}/{}/{}', {}'CSV', '{}')".format(
cluster.minio_host, cluster.minio_port, bucket, filename, auth, table_format))
        assert False, "Query should have failed."
    except helpers.client.QueryRuntimeException as e:
        assert str(e).find("The specified key does not exist") != -1
# Test put values in CSV format.
@pytest.mark.parametrize("maybe_auth,positive", [
("", True),
("'minio','minio123',", True),
("'wrongid','wrongkey',", False)
])
def test_put_csv(cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
cluster.minio_host, cluster.minio_port, bucket, filename, maybe_auth, table_format)
csv_data = "8,9,16\n11,18,13\n22,14,2\n"
try:
run_query(instance, put_query, stdin=csv_data)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
        assert csv_data == get_s3_file_content(cluster, bucket, filename)
# Test put and get with S3 server redirect.
def test_put_get_with_redirect(cluster):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
values_csv = "1,1,1\n1,1,1\n11,11,11\n"
filename = "test.csv"
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, table_format, values)
run_query(instance, query)
assert values_csv == get_s3_file_content(cluster, bucket, filename)
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/{}', 'CSV', '{}')".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, table_format)
stdout = run_query(instance, query)
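    # The query adds column1*column2*column3 as a fourth column, so the last row is expected
    # to end with 11 * 11 * 11 = 1331.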
assert list(map(str.split, stdout.splitlines())) == [
["1", "1", "1", "1"],
["1", "1", "1", "1"],
["11", "11", "11", "1331"],
]
# Test put with restricted S3 server redirect.
def test_put_with_zero_redirect(cluster):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket
instance = cluster.instances["s3_max_redirects"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
filename = "test.csv"
# Should work without redirect
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_host, cluster.minio_port, bucket, filename, table_format, values)
run_query(instance, query)
# Should not work with redirect
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, table_format, values)
exception_raised = False
try:
run_query(instance, query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
def test_put_get_with_globs(cluster):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
for i in range(10):
for j in range(10):
path = "{}_{}/{}.csv".format(i, random.choice(['a', 'b', 'c', 'd']), j)
max_path = max(path, max_path)
values = "({},{},{})".format(i, j, i + j)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_host, cluster.minio_port, bucket, path, table_format, values)
run_query(instance, query)
query = "select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from s3('http://{}:{}/{}/*_{{a,b,c,d}}/%3f.csv', 'CSV', '{}')".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, table_format)
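    # Each of the 100 files written above holds one row (i, j, i + j) with i, j in 0..9, so the
    # expected sums are 450, 450 and 900; '%3f' is the URL-encoded '?' single-character wildcard.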
assert run_query(instance, query).splitlines() == [
"450\t450\t900\t0.csv\t{bucket}/{max_path}".format(bucket=bucket, max_path=max_path)]
# Test multipart put.
@pytest.mark.parametrize("maybe_auth,positive", [
("", True),
# ("'minio','minio123',",True), Redirect with credentials not working with nginx.
("'wrongid','wrongkey',", False)
])
def test_multipart_put(cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
# Minimum size of part is 5 Mb for Minio.
# See: https://github.com/minio/minio/blob/master/docs/minio-limits.md
min_part_size_bytes = 5 * 1024 * 1024
csv_size_bytes = int(min_part_size_bytes * 1.5) # To have 2 parts.
one_line_length = 6 # 3 digits, 2 commas, 1 line separator.
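    # Roughly 7.5 MiB of "x,y,z\n" rows (6 bytes each, ~1.3M rows) exceeds the 5 MiB minimum part
    # size, so the upload should be split into at least two multipart PUT requests.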
# Generate data having size more than one part
int_data = [[1, 2, 3] for i in range(csv_size_bytes // one_line_length)]
csv_data = "".join(["{},{},{}\n".format(x, y, z) for x, y, z in int_data])
assert len(csv_data) > min_part_size_bytes
filename = "test_multipart.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, maybe_auth, table_format)
try:
run_query(instance, put_query, stdin=csv_data, settings={'s3_min_upload_part_size': min_part_size_bytes,
's3_max_single_part_upload_size': 0})
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
        # Use proxy access logs to count number of parts uploaded to Minio.
        proxy_logs = cluster.get_container_logs("proxy1")  # type: str
        assert proxy_logs.count("PUT /{}/{}".format(bucket, filename)) >= 2
        assert csv_data == get_s3_file_content(cluster, bucket, filename)
def test_remote_host_filter(cluster):
instance = cluster.instances["restricted_dummy"]
format = "column1 UInt32, column2 UInt32, column3 UInt32"
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/test.csv', 'CSV', '{}')".format(
"invalid_host", cluster.minio_port, cluster.minio_bucket, format)
assert "not allowed in config.xml" in instance.query_and_get_error(query)
other_values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
query = "insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format(
"invalid_host", cluster.minio_port, cluster.minio_bucket, format, other_values)
assert "not allowed in config.xml" in instance.query_and_get_error(query)
@pytest.mark.parametrize("s3_storage_args", [
"''", # 1 arguments
"'','','','','',''" # 6 arguments
])
def test_wrong_s3_syntax(cluster, s3_storage_args):
instance = cluster.instances["dummy"] # type: ClickHouseInstance
expected_err_msg = "Code: 42" # NUMBER_OF_ARGUMENTS_DOESNT_MATCH
query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3({})".format(s3_storage_args)
assert expected_err_msg in instance.query_and_get_error(query)
# https://en.wikipedia.org/wiki/One_Thousand_and_One_Nights
def test_s3_glob_scheherazade(cluster):
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
values = "(1, 1, 1)"
nights_per_job = 1001 // 30
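    # The 1001 tale files are written concurrently, 1001 // 30 = 33 files per worker thread
    # (so roughly 30 threads in total).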
jobs = []
for night in range(0, 1001, nights_per_job):
def add_tales(start, end):
for i in range(start, end):
path = "night_{}/tale.csv".format(i)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_host, cluster.minio_port, bucket, path, table_format, values)
run_query(instance, query)
jobs.append(threading.Thread(target=add_tales, args=(night, min(night + nights_per_job, 1001))))
jobs[-1].start()
for job in jobs:
job.join()
query = "select count(), sum(column1), sum(column2), sum(column3) from s3('http://{}:{}/{}/night_*/tale.csv', 'CSV', '{}')".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, table_format)
assert run_query(instance, query).splitlines() == ["1001\t1001\t1001\t1001"]
def run_s3_mock(cluster):
logging.info("Starting s3 mock")
container_id = cluster.get_container_id('resolver')
current_dir = os.path.dirname(__file__)
cluster.copy_file_to_container(container_id, os.path.join(current_dir, "s3_mock", "mock_s3.py"), "mock_s3.py")
cluster.exec_in_container(container_id, ["python", "mock_s3.py"], detach=True)
# Wait for S3 mock start
for attempt in range(10):
ping_response = cluster.exec_in_container(cluster.get_container_id('resolver'),
["curl", "-s", "http://resolver:8080/"], nothrow=True)
if ping_response != 'OK':
if attempt == 9:
assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response)
else:
time.sleep(1)
else:
break
logging.info("S3 mock started")
def test_custom_auth_headers(cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format(
bucket=cluster.minio_restricted_bucket,
file=filename,
table_format=table_format)
instance = cluster.instances["dummy"] # type: ClickHouseInstance
result = run_query(instance, get_query)
assert result == '1\t2\t3\n'
def test_custom_auth_headers_exclusion(cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = f"SELECT * FROM s3('http://resolver:8080/{cluster.minio_restricted_bucket}/restricteddirectory/{filename}', 'CSV', '{table_format}')"
instance = cluster.instances["dummy"] # type: ClickHouseInstance
with pytest.raises(helpers.client.QueryRuntimeException) as ei:
result = run_query(instance, get_query)
print(result)
assert ei.value.returncode == 243
assert '403 Forbidden' in ei.value.stderr
def test_infinite_redirect(cluster):
bucket = "redirected"
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format(
bucket=bucket,
file=filename,
table_format=table_format)
instance = cluster.instances["dummy"] # type: ClickHouseInstance
exception_raised = False
try:
run_query(instance, get_query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
@pytest.mark.parametrize("extension,method", [
("bin", "gzip"),
("gz", "auto")
])
def test_storage_s3_get_gzip(cluster, extension, method):
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"]
filename = f"test_get_gzip.{extension}"
name = "test_get_gzip"
data = [
"Sophia Intrieri,55",
"Jack Taylor,71",
"Christopher Silva,66",
"Clifton Purser,35",
"Richard Aceuedo,43",
"Lisa Hensley,31",
"Alice Wehrley,1",
"Mary Farmer,47",
"Samara Ramirez,19",
"Shirley Lloyd,51",
"Santos Cowger,0",
"Richard Mundt,88",
"Jerry Gonzalez,15",
"Angela James,10",
"Norman Ortega,33",
""
]
buf = io.BytesIO()
compressed = gzip.GzipFile(fileobj=buf, mode="wb")
compressed.write(("\n".join(data)).encode())
compressed.close()
put_s3_file_content(cluster, bucket, filename, buf.getvalue())
try:
run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
'http://{cluster.minio_host}:{cluster.minio_port}/{bucket}/{filename}',
'CSV',
'{method}')""")
        assert run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["565"]
finally:
run_query(instance, f"DROP TABLE {name}")
def test_storage_s3_put_uncompressed(cluster):
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"]
filename = "test_put_uncompressed.bin"
name = "test_put_uncompressed"
data = [
"'Gloria Thompson',99",
"'Matthew Tang',98",
"'Patsy Anderson',23",
"'Nancy Badillo',93",
"'Roy Hunt',5",
"'Adam Kirk',51",
"'Joshua Douds',28",
"'Jolene Ryan',0",
"'Roxanne Padilla',50",
"'Howard Roberts',41",
"'Ricardo Broughton',13",
"'Roland Speer',83",
"'Cathy Cohan',58",
"'Kathie Dawson',100",
"'Gregg Mcquistion',11",
]
try:
run_query(instance, "CREATE TABLE {} (name String, id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format(
name, cluster.minio_host, cluster.minio_port, bucket, filename))
run_query(instance, "INSERT INTO {} VALUES ({})".format(name, "),(".join(data)))
        assert run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["753"]
uncompressed_content = get_s3_file_content(cluster, bucket, filename)
assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 753
finally:
run_query(instance, f"DROP TABLE {name}")
@pytest.mark.parametrize("extension,method", [
("bin", "gzip"),
("gz", "auto")
])
def test_storage_s3_put_gzip(cluster, extension, method):
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"]
filename = f"test_put_gzip.{extension}"
name = "test_put_gzip"
data = [
"'Joseph Tomlinson',5",
"'Earnest Essary',44",
"'Matha Pannell',24",
"'Michael Shavers',46",
"'Elias Groce',38",
"'Pamela Bramlet',50",
"'Lewis Harrell',49",
"'Tamara Fyall',58",
"'George Dixon',38",
"'Alice Walls',49",
"'Paula Mais',24",
"'Myrtle Pelt',93",
"'Sylvia Naffziger',18",
"'Amanda Cave',83",
"'Yolanda Joseph',89"
]
try:
run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
'http://{cluster.minio_host}:{cluster.minio_port}/{bucket}/{filename}',
'CSV',
'{method}')""")
run_query(instance, "INSERT INTO {} VALUES ({})".format(name, "),(".join(data)))
        assert run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["708"]
buf = io.BytesIO(get_s3_file_content(cluster, bucket, filename, decode=False))
f = gzip.GzipFile(fileobj=buf, mode="rb")
uncompressed_content = f.read().decode()
assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 708
finally:
run_query(instance, f"DROP TABLE {name}")
| 39.209139 | 155 | 0.603765 |
e0dc7089ed1da621a13a12a1eb40c2b2a41a7d33 | 3,858 | py | Python | test/acceptance/integration/catalog/get_abstract_environment_details/steps.py | hmunfru/fiware-paas | dd808e986f5463dcbb85370b295404f167838ea1 | ["Apache-2.0"] | null | null | null | test/acceptance/integration/catalog/get_abstract_environment_details/steps.py | hmunfru/fiware-paas | dd808e986f5463dcbb85370b295404f167838ea1 | ["Apache-2.0"] | null | null | null | test/acceptance/integration/catalog/get_abstract_environment_details/steps.py | hmunfru/fiware-paas | dd808e986f5463dcbb85370b295404f167838ea1 | ["Apache-2.0"] | 2 | 2016-08-22T16:03:25.000Z | 2018-03-05T23:28:55.000Z |
# -*- coding: utf-8 -*-
# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
from lettuce import step, world
from lettuce_tools.dataset_utils.dataset_utils import DatasetUtils
from tools import http
from tools import environment_request
from tools.tier import Tier
from tools.constants import NAME, DESCRIPTION, PRODUCTS, NETWORKS, PAAS,\
TIER_IMAGE
dataset_utils = DatasetUtils()
@step(u'the paas manager is up and properly configured')
def the_paas_manager_is_up_and_properly_configured(step):
pass # Nothing to do here, the set up should be done by external means
@step(u'a list of tiers has been defined with data:')
def a_list_of_tiers_has_been_defined_with_data(step):
world.tiers = []
for row in step.hashes:
data = dataset_utils.prepare_data(row)
tier = Tier(data.get(NAME), world.config[PAAS][TIER_IMAGE])
tier.parse_and_add_products(data.get(PRODUCTS))
tier.parse_and_add_networks(data.get(NETWORKS))
world.tiers.append(tier)
@step(u'an abstract environment has already been created with data:')
def an_abstract_environment_has_already_been_created_with_data(step):
data = dataset_utils.prepare_data(step.hashes[0])
world.env_requests.add_abstract_environment(data.get(NAME), data.get(DESCRIPTION))
@step(u'an abstract environment has already been created with the previous tiers and data:')
def an_abstract_environment_has_already_been_created_with_the_previous_tiers_and_data(step):
data = dataset_utils.prepare_data(step.hashes[0])
world.env_requests.add_abstract_environment(data.get(NAME), data.get(DESCRIPTION), world.tiers)
@step(u'there is no abstract environment with name "([^"]*)" already created')
def there_is_no_abstract_environment_with_name_already_created(step, name):
world.env_requests.delete_abstract_environment(name) # Just in case it exists
@step(u'I request the details of the abstract environment with name "([^"]*)"')
def i_request_the_details_of_the_abstract_environment_with_name(step, name):
name = dataset_utils.generate_fixed_length_param(name)
world.env_requests.get_abstract_environment(name)
@step(u'I receive an? "([^"]*)" response with data:')
def i_receive_a_response_of_type_with_data(step, response_type):
status_code = http.status_codes[response_type]
data = dataset_utils.prepare_data(step.hashes[0])
environment_request.check_get_environment_response(world.response, status_code,
data.get(NAME), data.get(DESCRIPTION))
@step(u'I receive an? "([^"]*)" response with the previous tiers and data:')
def i_receive_a_response_of_type_with_the_previous_tiers_and_data(step, response_type):
status_code = http.status_codes[response_type]
data = dataset_utils.prepare_data(step.hashes[0])
environment_request.check_get_environment_response(world.response, status_code,
data.get(NAME), data.get(DESCRIPTION),
world.tiers)
@step(u'I receive an? "([^"]*)" response$')
def i_receive_a_response_of_type(step, response_type):
status_code = http.status_codes[response_type]
environment_request.check_get_environment_response(world.response, status_code)
| 41.042553 | 99 | 0.77268 |
0927bc831acd81a3356aaddf04eb95d4b4283f5e | 4,438 | py | Python | tests/integration/widgets/test_toggle.py | jeisch/bokeh | 6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1 | ["BSD-3-Clause"] | 1 | 2020-03-21T04:11:51.000Z | 2020-03-21T04:11:51.000Z | tests/integration/widgets/test_toggle.py | jeisch/bokeh | 6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1 | ["BSD-3-Clause"] | 2 | 2021-05-08T11:43:21.000Z | 2021-05-10T19:16:43.000Z | tests/integration/widgets/test_toggle.py | jeisch/bokeh | 6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1 | ["BSD-3-Clause"] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh.core.enums import ButtonType
from bokeh.layouts import column
from bokeh.models import Circle, ColumnDataSource, CustomAction, CustomJS, Plot, Range1d, Toggle
from bokeh._testing.util.selenium import RECORD
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.bokeh",
)
@pytest.mark.integration
@pytest.mark.selenium
class Test_Toggle(object):
def test_displays_label(self, bokeh_model_page):
button = Toggle(label="label", css_classes=["foo"])
page = bokeh_model_page(button)
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
assert button.text == "label"
@pytest.mark.parametrize('typ', list(ButtonType))
def test_displays_button_type(self, typ, bokeh_model_page):
button = Toggle(button_type=typ, css_classes=["foo"])
page = bokeh_model_page(button)
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
assert typ in button.get_attribute('class')
def test_server_on_click_round_trip(self, bokeh_server_page):
def modify_doc(doc):
source = ColumnDataSource(dict(x=[1, 2], y=[1, 1]))
plot = Plot(plot_height=400, plot_width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
plot.add_glyph(source, Circle(x='x', y='y', size=20))
plot.add_tools(CustomAction(callback=CustomJS(args=dict(s=source), code=RECORD("data", "s.data"))))
button = Toggle(css_classes=['foo'])
def cb(value):
if value:
source.data=dict(x=[10, 20], y=[10, 10])
else:
source.data=dict(x=[100, 200], y=[100, 100])
button.on_click(cb)
doc.add_root(column(button, plot))
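        # Each click flips Toggle.active, so the recorded source data is expected to alternate
        # between the (10, 20) and the (100, 200) variants on successive clicks.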
page = bokeh_server_page(modify_doc)
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
button.click()
page.click_custom_action()
results = page.results
assert results == {'data': {'x': [10, 20], 'y': [10, 10]}}
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
button.click()
page.click_custom_action()
results = page.results
assert results == {'data': {'x': [100, 200], 'y': [100, 100]}}
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
button.click()
page.click_custom_action()
results = page.results
assert results == {'data': {'x': [10, 20], 'y': [10, 10]}}
# XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
#assert page.has_no_console_errors()
# XXX (bev) Toggle does not register to process ButtonClick events
def test_js_on_click_executes(self, bokeh_model_page):
button = Toggle(css_classes=['foo'])
button.js_on_click(CustomJS(code=RECORD("value", "cb_obj.active")))
page = bokeh_model_page(button)
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
button.click()
results = page.results
assert results == {'value': True}
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
button.click()
results = page.results
assert results == {'value': False}
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
button.click()
results = page.results
assert results == {'value': True}
assert page.has_no_console_errors()
| 34.403101 | 116 | 0.54822 |
68892d21ec6ae208e5b299a236f2e780e8957752 | 1,142 | py | Python | ext_modules/_maix_nn/example/load_forward_sobel_edge_camera.py | znstj/MaixPy3 | f672b2049b668a5a72ad249933cf9a009760799e | ["MIT"] | 93 | 2021-01-12T01:56:06.000Z | 2022-03-30T12:52:01.000Z | ext_modules/_maix_nn/example/load_forward_sobel_edge_camera.py | JasperG1998/MaixPy3 | b36800b8d6aebf55018894c215c23a73d2fe406d | ["MIT"] | 29 | 2021-02-04T10:37:26.000Z | 2022-03-20T15:10:55.000Z | ext_modules/_maix_nn/example/load_forward_sobel_edge_camera.py | JasperG1998/MaixPy3 | b36800b8d6aebf55018894c215c23a73d2fe406d | ["MIT"] | 25 | 2021-01-25T18:10:09.000Z | 2022-03-31T13:55:36.000Z |
from maix import nn
from PIL import Image, ImageDraw
import numpy as np
import time
from maix import display, camera
camera.config(size=(224, 224))
model = {
"param": "/root/models/sobel_int8.param",
"bin": "/root/models/sobel_int8.bin"
}
input_size = (224, 224, 3)
output_size = (222, 222, 3)
options = {
"model_type": "awnn",
"inputs": {
"input0": input_size
},
"outputs": {
"output0": output_size
},
"mean": [127.5, 127.5, 127.5],
"norm": [0.0078125, 0.0078125, 0.0078125],
}
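# With mean 127.5 and norm 0.0078125 (= 1/128), each input pixel x is presumably normalized as
# (x - mean) * norm, i.e. the 0..255 range is mapped to roughly [-1, 1] before inference.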
print("-- load model:", model)
m = nn.load(model, opt=options)
print("-- load ok")
while 1:
img = camera.capture()
if not img:
time.sleep(0.01)
continue
print("-- read image ok")
print("-- forward model with image as input")
out = m.forward(img, quantize=True, layout="hwc")
# print("-- read image ok")
# out = out.reshape(222, 222, 3)
print("-- out:", out.shape, out.dtype)
out = out.astype(np.float32).reshape(output_size)
out = (np.abs(out) * 255 / out.max()).astype(np.uint8)
img2 = Image.fromarray(out, mode="RGB")
display.show(img2)
| 23.791667 | 58 | 0.600701 |
f9d8bb640c32d2a7a807d931f7028bf57171d837 | 12,296 | py | Python | tests/components/sensor/test_device_condition.py | ymanton/home-assistant | 274cf232692396a6f0359a45d949ff94b681af52 | ["Apache-2.0"] | 5 | 2020-09-17T10:48:51.000Z | 2021-11-22T00:08:17.000Z | tests/components/sensor/test_device_condition.py | ymanton/home-assistant | 274cf232692396a6f0359a45d949ff94b681af52 | ["Apache-2.0"] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | tests/components/sensor/test_device_condition.py | ymanton/home-assistant | 274cf232692396a6f0359a45d949ff94b681af52 | ["Apache-2.0"] | 2 | 2020-12-09T02:21:27.000Z | 2021-08-07T04:58:01.000Z |
"""The test for sensor device automation."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.sensor import DOMAIN
from homeassistant.components.sensor.device_condition import ENTITY_CONDITIONS
from homeassistant.const import CONF_PLATFORM, STATE_UNKNOWN
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.testing_config.custom_components.test.sensor import DEVICE_CLASSES
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a sensor."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
for device_class in DEVICE_CLASSES:
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES[device_class].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": condition["type"],
"device_id": device_entry.id,
"entity_id": platform.ENTITIES[device_class].entity_id,
}
for device_class in DEVICE_CLASSES
for condition in ENTITY_CONDITIONS[device_class]
if device_class != "none"
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert conditions == expected_conditions
async def test_get_condition_capabilities(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a sensor condition."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES["battery"].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
expected_capabilities = {
"extra_fields": [
{
"description": {"suffix": "%"},
"name": "above",
"optional": True,
"type": "float",
},
{
"description": {"suffix": "%"},
"name": "below",
"optional": True,
"type": "float",
},
]
}
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert len(conditions) == 1
for condition in conditions:
capabilities = await async_get_device_automation_capabilities(
hass, "condition", condition
)
assert capabilities == expected_capabilities
async def test_get_condition_capabilities_none(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a sensor condition."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
conditions = [
{
"condition": "device",
"device_id": "8770c43885354d5fa27604db6817f63f",
"domain": "sensor",
"entity_id": "sensor.beer",
"type": "is_battery_level",
},
{
"condition": "device",
"device_id": "8770c43885354d5fa27604db6817f63f",
"domain": "sensor",
"entity_id": platform.ENTITIES["none"].entity_id,
"type": "is_battery_level",
},
]
expected_capabilities = {}
for condition in conditions:
capabilities = await async_get_device_automation_capabilities(
hass, "condition", condition
)
assert capabilities == expected_capabilities
async def test_if_state_not_above_below(hass, calls, caplog):
"""Test for bad value conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_battery_level",
}
],
"action": {"service": "test.automation"},
}
]
},
)
assert "must contain at least one of below, above" in caplog.text
async def test_if_state_above(hass, calls):
"""Test for value conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_battery_level",
"above": 10,
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
}
]
},
)
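    # The data_template above combines trigger.platform and trigger.event.event_type, so a
    # passing condition is expected to record the payload "event - test_event1" checked below.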
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 9)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 11)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "event - test_event1"
async def test_if_state_below(hass, calls):
"""Test for value conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_battery_level",
"below": 10,
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 11)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 9)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "event - test_event1"
async def test_if_state_between(hass, calls):
"""Test for value conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_battery_level",
"above": 10,
"below": 20,
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 9)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 11)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "event - test_event1"
hass.states.async_set(sensor1.entity_id, 21)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set(sensor1.entity_id, 19)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "event - test_event1"
| 33.142857 | 87 | 0.557092 |
8ebe5b2741b77537b46b8057d9aa9c36dc99aeec | 17,952 | py | Python | ppocr/postprocess/rec_postprocess.py | maxberezov/PaddleOCR | 5ecd1447ad80b1e22a0126ce8bc845ffcd5ae773 | ["Apache-2.0"] | 2 | 2021-08-11T03:49:31.000Z | 2021-08-11T03:50:32.000Z | ppocr/postprocess/rec_postprocess.py | maxberezov/PaddleOCR | 5ecd1447ad80b1e22a0126ce8bc845ffcd5ae773 | ["Apache-2.0"] | 1 | 2021-07-19T10:41:33.000Z | 2021-07-19T13:38:34.000Z | ppocr/postprocess/rec_postprocess.py | maxberezov/PaddleOCR | 5ecd1447ad80b1e22a0126ce8bc845ffcd5ae773 | ["Apache-2.0"] | 1 | 2021-09-08T08:01:24.000Z | 2021-09-08T08:01:24.000Z |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import string
import paddle
from paddle.nn import functional as F
class BaseRecLabelDecode(object):
""" Convert between text-label and text-index """
def __init__(self,
character_dict_path=None,
character_type='ch',
use_space_char=False):
support_character_type = [
'ch', 'en', 'EN_symbol', 'french', 'german', 'japan', 'korean',
'it', 'xi', 'pu', 'ru', 'ar', 'ta', 'ug', 'fa', 'ur', 'rs', 'oc',
'rsc', 'bg', 'uk', 'be', 'te', 'ka', 'chinese_cht', 'hi', 'mr',
'ne', 'EN', 'latin', 'arabic', 'cyrillic', 'devanagari'
]
        assert character_type in support_character_type, "Only {} are supported now but got {}".format(
            support_character_type, character_type)
self.beg_str = "sos"
self.end_str = "eos"
if character_type == "en":
self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz"
dict_character = list(self.character_str)
elif character_type == "EN_symbol":
# same with ASTER setting (use 94 char).
self.character_str = string.printable[:-6]
dict_character = list(self.character_str)
elif character_type in support_character_type:
self.character_str = []
assert character_dict_path is not None, "character_dict_path should not be None when character_type is {}".format(
character_type)
with open(character_dict_path, "rb") as fin:
lines = fin.readlines()
for line in lines:
line = line.decode('utf-8').strip("\n").strip("\r\n")
self.character_str.append(line)
if use_space_char:
self.character_str.append(" ")
dict_character = list(self.character_str)
else:
raise NotImplementedError
self.character_type = character_type
dict_character = self.add_special_char(dict_character)
self.dict = {}
for i, char in enumerate(dict_character):
self.dict[char] = i
self.character = dict_character
def add_special_char(self, dict_character):
return dict_character
def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
""" convert text-index into text-label. """
result_list = []
ignored_tokens = self.get_ignored_tokens()
batch_size = len(text_index)
for batch_idx in range(batch_size):
char_list = []
conf_list = []
for idx in range(len(text_index[batch_idx])):
if text_index[batch_idx][idx] in ignored_tokens:
continue
if is_remove_duplicate:
# only for predict
if idx > 0 and text_index[batch_idx][idx - 1] == text_index[
batch_idx][idx]:
continue
char_list.append(self.character[int(text_index[batch_idx][
idx])])
if text_prob is not None:
conf_list.append(text_prob[batch_idx][idx])
else:
conf_list.append(1)
text = ''.join(char_list)
result_list.append((text, np.mean(conf_list)))
return result_list
def get_ignored_tokens(self):
return [0] # for ctc blank
class CTCLabelDecode(BaseRecLabelDecode):
""" Convert between text-label and text-index """
def __init__(self,
character_dict_path=None,
character_type='ch',
use_space_char=False,
**kwargs):
super(CTCLabelDecode, self).__init__(character_dict_path,
character_type, use_space_char)
def __call__(self, preds, label=None, *args, **kwargs):
if isinstance(preds, paddle.Tensor):
preds = preds.numpy()
preds_idx = preds.argmax(axis=2)
preds_prob = preds.max(axis=2)
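        # Greedy CTC decoding: decode() drops consecutive repeated indices and then the 'blank'
        # token (index 0), e.g. with characters ['blank', 'a', 'b'] the index sequence
        # [1, 1, 0, 1, 2, 2] decodes to "aab".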
text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True)
if label is None:
return text
label = self.decode(label)
return text, label
def add_special_char(self, dict_character):
dict_character = ['blank'] + dict_character
return dict_character
class DistillationCTCLabelDecode(CTCLabelDecode):
"""
    Convert between text-label and text-index
"""
def __init__(self,
character_dict_path=None,
character_type='ch',
use_space_char=False,
model_name=["student"],
key=None,
**kwargs):
super(DistillationCTCLabelDecode, self).__init__(
character_dict_path, character_type, use_space_char)
if not isinstance(model_name, list):
model_name = [model_name]
self.model_name = model_name
self.key = key
def __call__(self, preds, label=None, *args, **kwargs):
output = dict()
for name in self.model_name:
pred = preds[name]
if self.key is not None:
pred = pred[self.key]
output[name] = super().__call__(pred, label=label, *args, **kwargs)
return output
class AttnLabelDecode(BaseRecLabelDecode):
""" Convert between text-label and text-index """
def __init__(self,
character_dict_path=None,
character_type='ch',
use_space_char=False,
**kwargs):
super(AttnLabelDecode, self).__init__(character_dict_path,
character_type, use_space_char)
def add_special_char(self, dict_character):
self.beg_str = "sos"
self.end_str = "eos"
dict_character = dict_character
dict_character = [self.beg_str] + dict_character + [self.end_str]
return dict_character
def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
""" convert text-index into text-label. """
result_list = []
ignored_tokens = self.get_ignored_tokens()
[beg_idx, end_idx] = self.get_ignored_tokens()
batch_size = len(text_index)
for batch_idx in range(batch_size):
char_list = []
conf_list = []
for idx in range(len(text_index[batch_idx])):
if text_index[batch_idx][idx] in ignored_tokens:
continue
if int(text_index[batch_idx][idx]) == int(end_idx):
break
if is_remove_duplicate:
# only for predict
if idx > 0 and text_index[batch_idx][idx - 1] == text_index[
batch_idx][idx]:
continue
char_list.append(self.character[int(text_index[batch_idx][
idx])])
if text_prob is not None:
conf_list.append(text_prob[batch_idx][idx])
else:
conf_list.append(1)
text = ''.join(char_list)
result_list.append((text, np.mean(conf_list)))
return result_list
def __call__(self, preds, label=None, *args, **kwargs):
"""
text = self.decode(text)
if label is None:
return text
else:
label = self.decode(label, is_remove_duplicate=False)
return text, label
"""
if isinstance(preds, paddle.Tensor):
preds = preds.numpy()
preds_idx = preds.argmax(axis=2)
preds_prob = preds.max(axis=2)
text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
if label is None:
return text
label = self.decode(label, is_remove_duplicate=False)
return text, label
def get_ignored_tokens(self):
beg_idx = self.get_beg_end_flag_idx("beg")
end_idx = self.get_beg_end_flag_idx("end")
return [beg_idx, end_idx]
def get_beg_end_flag_idx(self, beg_or_end):
if beg_or_end == "beg":
idx = np.array(self.dict[self.beg_str])
elif beg_or_end == "end":
idx = np.array(self.dict[self.end_str])
else:
            assert False, "unsupported type %s in get_beg_end_flag_idx" \
% beg_or_end
return idx
class SRNLabelDecode(BaseRecLabelDecode):
""" Convert between text-label and text-index """
def __init__(self,
character_dict_path=None,
character_type='en',
use_space_char=False,
**kwargs):
super(SRNLabelDecode, self).__init__(character_dict_path,
character_type, use_space_char)
self.max_text_length = kwargs.get('max_text_length', 25)
def __call__(self, preds, label=None, *args, **kwargs):
pred = preds['predict']
char_num = len(self.character_str) + 2
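        # The +2 accounts for the 'sos' and 'eos' tokens appended by add_special_char.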
if isinstance(pred, paddle.Tensor):
pred = pred.numpy()
pred = np.reshape(pred, [-1, char_num])
preds_idx = np.argmax(pred, axis=1)
preds_prob = np.max(pred, axis=1)
preds_idx = np.reshape(preds_idx, [-1, self.max_text_length])
preds_prob = np.reshape(preds_prob, [-1, self.max_text_length])
text = self.decode(preds_idx, preds_prob)
if label is None:
text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
return text
label = self.decode(label)
return text, label
def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
""" convert text-index into text-label. """
result_list = []
ignored_tokens = self.get_ignored_tokens()
batch_size = len(text_index)
for batch_idx in range(batch_size):
char_list = []
conf_list = []
for idx in range(len(text_index[batch_idx])):
if text_index[batch_idx][idx] in ignored_tokens:
continue
if is_remove_duplicate:
# only for predict
if idx > 0 and text_index[batch_idx][idx - 1] == text_index[
batch_idx][idx]:
continue
char_list.append(self.character[int(text_index[batch_idx][
idx])])
if text_prob is not None:
conf_list.append(text_prob[batch_idx][idx])
else:
conf_list.append(1)
text = ''.join(char_list)
result_list.append((text, np.mean(conf_list)))
return result_list
def add_special_char(self, dict_character):
dict_character = dict_character + [self.beg_str, self.end_str]
return dict_character
def get_ignored_tokens(self):
beg_idx = self.get_beg_end_flag_idx("beg")
end_idx = self.get_beg_end_flag_idx("end")
return [beg_idx, end_idx]
def get_beg_end_flag_idx(self, beg_or_end):
if beg_or_end == "beg":
idx = np.array(self.dict[self.beg_str])
elif beg_or_end == "end":
idx = np.array(self.dict[self.end_str])
else:
            assert False, "unsupported type %s in get_beg_end_flag_idx" \
% beg_or_end
return idx
class TableLabelDecode(object):
""" """
def __init__(self,
character_dict_path,
**kwargs):
list_character, list_elem = self.load_char_elem_dict(character_dict_path)
list_character = self.add_special_char(list_character)
list_elem = self.add_special_char(list_elem)
self.dict_character = {}
self.dict_idx_character = {}
for i, char in enumerate(list_character):
self.dict_idx_character[i] = char
self.dict_character[char] = i
self.dict_elem = {}
self.dict_idx_elem = {}
for i, elem in enumerate(list_elem):
self.dict_idx_elem[i] = elem
self.dict_elem[elem] = i
def load_char_elem_dict(self, character_dict_path):
list_character = []
list_elem = []
with open(character_dict_path, "rb") as fin:
lines = fin.readlines()
substr = lines[0].decode('utf-8').strip("\n").strip("\r\n").split("\t")
character_num = int(substr[0])
elem_num = int(substr[1])
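        # The first line is a "<character_num>\t<elem_num>" header; it is followed by
        # character_num character lines and then elem_num structure-element lines (e.g. '<td>').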
for cno in range(1, 1 + character_num):
character = lines[cno].decode('utf-8').strip("\n").strip("\r\n")
list_character.append(character)
for eno in range(1 + character_num, 1 + character_num + elem_num):
elem = lines[eno].decode('utf-8').strip("\n").strip("\r\n")
list_elem.append(elem)
return list_character, list_elem
def add_special_char(self, list_character):
self.beg_str = "sos"
self.end_str = "eos"
list_character = [self.beg_str] + list_character + [self.end_str]
return list_character
def __call__(self, preds):
structure_probs = preds['structure_probs']
loc_preds = preds['loc_preds']
if isinstance(structure_probs,paddle.Tensor):
structure_probs = structure_probs.numpy()
if isinstance(loc_preds,paddle.Tensor):
loc_preds = loc_preds.numpy()
structure_idx = structure_probs.argmax(axis=2)
structure_probs = structure_probs.max(axis=2)
structure_str, structure_pos, result_score_list, result_elem_idx_list = self.decode(structure_idx,
structure_probs, 'elem')
res_html_code_list = []
res_loc_list = []
batch_num = len(structure_str)
for bno in range(batch_num):
res_loc = []
for sno in range(len(structure_str[bno])):
text = structure_str[bno][sno]
if text in ['<td>', '<td']:
pos = structure_pos[bno][sno]
res_loc.append(loc_preds[bno, pos])
res_html_code = ''.join(structure_str[bno])
res_loc = np.array(res_loc)
res_html_code_list.append(res_html_code)
res_loc_list.append(res_loc)
return {'res_html_code': res_html_code_list, 'res_loc': res_loc_list, 'res_score_list': result_score_list,
'res_elem_idx_list': result_elem_idx_list,'structure_str_list':structure_str}
def decode(self, text_index, structure_probs, char_or_elem):
"""convert text-label into text-index.
"""
if char_or_elem == "char":
current_dict = self.dict_idx_character
else:
current_dict = self.dict_idx_elem
ignored_tokens = self.get_ignored_tokens('elem')
beg_idx, end_idx = ignored_tokens
result_list = []
result_pos_list = []
result_score_list = []
result_elem_idx_list = []
batch_size = len(text_index)
for batch_idx in range(batch_size):
char_list = []
elem_pos_list = []
elem_idx_list = []
score_list = []
for idx in range(len(text_index[batch_idx])):
tmp_elem_idx = int(text_index[batch_idx][idx])
if idx > 0 and tmp_elem_idx == end_idx:
break
if tmp_elem_idx in ignored_tokens:
continue
char_list.append(current_dict[tmp_elem_idx])
elem_pos_list.append(idx)
score_list.append(structure_probs[batch_idx, idx])
elem_idx_list.append(tmp_elem_idx)
result_list.append(char_list)
result_pos_list.append(elem_pos_list)
result_score_list.append(score_list)
result_elem_idx_list.append(elem_idx_list)
return result_list, result_pos_list, result_score_list, result_elem_idx_list
def get_ignored_tokens(self, char_or_elem):
beg_idx = self.get_beg_end_flag_idx("beg", char_or_elem)
end_idx = self.get_beg_end_flag_idx("end", char_or_elem)
return [beg_idx, end_idx]
def get_beg_end_flag_idx(self, beg_or_end, char_or_elem):
if char_or_elem == "char":
if beg_or_end == "beg":
idx = self.dict_character[self.beg_str]
elif beg_or_end == "end":
idx = self.dict_character[self.end_str]
else:
                assert False, "Unsupported type %s in get_beg_end_flag_idx of char" \
% beg_or_end
elif char_or_elem == "elem":
if beg_or_end == "beg":
idx = self.dict_elem[self.beg_str]
elif beg_or_end == "end":
idx = self.dict_elem[self.end_str]
else:
                assert False, "Unsupported type %s in get_beg_end_flag_idx of elem" \
% beg_or_end
else:
            assert False, "Unsupported type %s in char_or_elem" \
% char_or_elem
return idx
| 39.282276 | 126 | 0.575925 |
b5868a1a0eb89b66e805111fcce4058d00366d34 | 11,725 | py | Python | basetrainer/pruning/nni_pruning.py | PanJinquan/pytorch-base-trainer | 37799c948f72b2f9d3771ff469e06cdbff4a1d07 | ["MIT"] | 11 | 2022-01-18T10:07:52.000Z | 2022-03-16T02:40:31.000Z | basetrainer/pruning/nni_pruning.py | PanJinquan/pytorch-base-trainer | 37799c948f72b2f9d3771ff469e06cdbff4a1d07 | ["MIT"] | null | null | null | basetrainer/pruning/nni_pruning.py | PanJinquan/pytorch-base-trainer | 37799c948f72b2f9d3771ff469e06cdbff4a1d07 | ["MIT"] | 1 | 2022-01-26T06:31:29.000Z | 2022-01-26T06:31:29.000Z |
# -*-coding: utf-8 -*-
"""
@Author : panjq
@E-mail : pan_jinquan@163.com
@Date : 2021-12-09 19:16:19
"""
import os
import copy
import torch
import torch.nn as nn
import torch.onnx
import torch.nn.functional as F
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.compression.pytorch.speedup import ModelSpeedup
from nni.algorithms.compression.pytorch import pruning
from nni.compression.pytorch import apply_compression_results
def model_pruning(model: nn.Module,
input_size=[1, 3, 128, 128],
sparsity=0.2,
prune_mod="L1",
reuse=False,
speedup=True,
output_prune="pruning_output",
mask_file="",
dependency_aware=True,
device="cpu",
verbose=False,
**kwargs):
"""
    Prune and compress a model with NNI.
    https://github.com/microsoft/nni/blob/master/docs/en_US/Compression/compression_pipeline_example.ipynb
    Uses a filter pruner (e.g. L1FilterPruner) to prune the model.
    Note that if you use a compressor that requires an optimizer, you must pass a fresh optimizer
    rather than the one you have already used, because NNI might modify it; such a modified
    optimizer cannot be used for finetuning.
    Usage:
        model = build_model()
        model = model_pruning(model,input_size,sparsity=0.2)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        for epoch in range(0, epochs):
            trainer(model, optimizer, criterion, epoch)
            evaluator(model)
        torch.save(model.state_dict(), "model_pruning.pth")
    :param model: PyTorch model
    :param input_size: model input shape [batch-size, channel, H, W]
    :param sparsity: target pruning sparsity; the larger the value, the sparser the model and the
        lower the compute cost; 0.5 means 50% of the filters are pruned
    :param reuse: whether to reproduce a previous pruning result
        False: run pruning (pruning + ModelSpeedup) and generate a mask file (mask_naive_l1filter.pth)
        True : reproduce a previous pruning result; a mask_file must be provided
    :param speedup: whether to speed up (physically shrink) the model
    :param output_prune: output directory for the pruning artifacts
    :param mask_file: mask file required when reuse=True; by default it is saved under the
        output_prune directory (mask_naive_l1filter.pth)
    :param dependency_aware: dependency-aware mode, see https://nni.readthedocs.io/zh/stable/Compression/DependencyAware.html
    :param device:
    :param config: pruning configuration used to specify which layers to prune;
        if op_names is not given, all layers are pruned by default
        config_list = [{'sparsity': sparsity,'op_types': ['Conv2d'],'op_names': ['conv1']}]
    :return:
"""
info = ""
model = model.to(device)
if not os.path.exists(output_prune): os.makedirs(output_prune)
prune_file = os.path.join(output_prune, 'pruned_naive_{}filter.pth'.format(prune_mod))
onnx_file = os.path.join(output_prune, 'pruned_naive_{}filter.onnx'.format(prune_mod))
mask_file = os.path.join(output_prune, 'mask_naive_{}filter.pth'.format(prune_mod)) if not mask_file else mask_file
dummy_input = torch.randn(input_size).to(device)
    # FLOPs and parameter count of the original model
flops, params, _ = count_flops_params(model, dummy_input, verbose=verbose)
info += f"origin-Model FLOPs {flops / 1e6:.2f}M, Params {params / 1e6:.2f}M\n"
    # Run pruning; this generates the mask file (mask_naive_l1filter.pth)
if not reuse:
"""
Choose a pruner and pruning
use l1filter pruner to prune the model
Note that if you use a compressor that need you to pass a optimizer,
you need a new optimizer instead of you have used above, because NNI might modify the optimizer.
And of course this modified optimizer can not be used in finetuning.
"""
if prune_mod.lower() == "Level".lower():
config = [{'sparsity': sparsity, 'op_types': ['Conv2d']}]
pruner = pruning.LevelPruner(model, config)
elif prune_mod.lower() == "L1".lower():
# op_types : Only Conv2d is supported in L1FilterPruner.
# config = [{'sparsity': sparsity, 'op_types': ['Conv2d'], "exclude": False}]
config = [{'sparsity': sparsity, 'op_types': ['Conv2d']}]
pruner = pruning.L1FilterPruner(model, config, dependency_aware, dummy_input=dummy_input)
elif prune_mod.lower() == "L2".lower():
# op_types : Only Conv2d is supported in L2FilterPruner.
config = [{'sparsity': sparsity, 'op_types': ['Conv2d']}]
pruner = pruning.L2FilterPruner(model, config, dependency_aware, dummy_input=dummy_input)
elif prune_mod.lower() == "FPGM".lower():
# op_types : Only Conv2d is supported in FPGM Pruner
config = [{'sparsity': sparsity, 'op_types': ['Conv2d']}]
pruner = pruning.FPGMPruner(model, config, dependency_aware, dummy_input=dummy_input)
elif prune_mod.lower() == "Slim".lower():
# op_types : Only BatchNorm2d is supported in Slim Pruner.
config = [{'sparsity': sparsity, 'op_types': ['BatchNorm2d']}]
pruner = pruning.SlimPruner(model,
config,
optimizer=None,
trainer=None,
criterion=None,
sparsifying_training_epochs=10)
        elif prune_mod.lower() == "ActivationMeanRank".lower():
            # op_types : Only Conv2d is supported in ActivationMeanRankFilterPruner.
            config = [{'sparsity': sparsity, 'op_types': ['Conv2d']}]
            # Depending on the NNI version, this pruner may also require optimizer/trainer/criterion.
            pruner = pruning.ActivationMeanRankFilterPruner(model, config)
else:
raise Exception("Error prune_mod:{}".format(prune_mod))
# compress the model, the mask will be updated.
pruner.compress()
# pruner.get_pruned_weights()
# use a dummy input to apply the sparsify.
out = model(dummy_input)
        # FLOPs and parameter count of the pruned model
flops, params, _ = count_flops_params(model, dummy_input, verbose=verbose)
info += f"pruner-Model FLOPs {flops / 1e6:.2f}M, Params {params / 1e6:.2f}M\n"
# export the sparsified and mask model
pruner.export_model(model_path=prune_file, mask_path=mask_file,
onnx_path=onnx_file, input_shape=dummy_input.shape,
device=device,
opset_version=11)
# speedup the model with provided weight mask.If you use a wrapped model, don't forget to unwrap it.
pruner._unwrap_model()
        # Applying the mask makes the model smaller and reduces inference latency
# apply_compression_results(model, mask_file, device)
if speedup:
if not os.path.exists(mask_file): raise Exception("not found mask file:{}".format(mask_file))
print("load mask file to speed up:{}".format(mask_file))
speed_up = ModelSpeedup(model, dummy_input=dummy_input, masks_file=mask_file)
speed_up.speedup_model()
out = model(dummy_input)
        # FLOPs and parameter count after speedup
flops, params, _ = count_flops_params(model, dummy_input, verbose=verbose)
info += f"speedup-Model FLOPs {flops / 1e6:.2f}M, Params {params / 1e6:.2f}M\n"
# finetune the model to recover the accuracy.
model = model.to(device)
print(info)
return model
def nni_model_pruning_test(model, input_size=[1, 3, 416, 416], sparsity=0.5, output="output", device="cpu"):
"""
https://zhuanlan.zhihu.com/p/382638682
:param model:
:param input_size:
:param sparsity:
:param output:
:param device:
:return:
"""
from ptflops.flops_counter import get_model_complexity_info
if not os.path.exists(output):
os.makedirs(output)
config = [{
'sparsity': sparsity,
'op_types': ['Conv2d']
# 'op_types': ['BatchNorm2d']
}]
dummy_input = torch.randn(input_size).to(device)
#
origin_path = os.path.join(output, 'origin.pth')
onnx_path = os.path.join(output, 'origin.onnx')
torch.save(model.state_dict(), origin_path)
torch.onnx.export(model,
dummy_input,
onnx_path,
do_constant_folding=True,
verbose=False,
export_params=True,
opset_version=11,
input_names=['input'],
output_names=['output'])
tmp_model = copy.deepcopy(model).to(device)
out_tensor = tmp_model(dummy_input)
flops, params, _ = count_flops_params(tmp_model, dummy_input, verbose=True)
print(f"Model FLOPs {flops / 1e6:.2f}M, Params {params / 1e6:.2f}M")
pruner = pruning.FPGMPruner(tmp_model, config)
pruner.compress()
flops, params, _ = count_flops_params(tmp_model, dummy_input, verbose=True)
print(f"Model FLOPs {flops / 1e6:.2f}M, Params {params / 1e6:.2f}M")
pruned_model_path = os.path.join(output, 'slim_pruned.pth')
pruned_model_mask = os.path.join(output, 'slim_pruned_mask.pth')
pruned_model_onnx = os.path.join(output, 'slim_pruned.onnx')
pruner.export_model(model_path=pruned_model_path, mask_path=pruned_model_mask,
onnx_path=pruned_model_onnx, input_shape=dummy_input.shape,
device=device,
opset_version=11)
tmp_model = copy.deepcopy(model).to(device)
print('model pruned done.')
# apply_compression_results
apply_compression_results(tmp_model, masks_file=pruned_model_mask)
print('apply_compression_results')
out_tensor = tmp_model(dummy_input)
# Speedup
m_speedup = ModelSpeedup(tmp_model, dummy_input, masks_file=pruned_model_mask)
m_speedup.speedup_model()
out_tensor = tmp_model(dummy_input)
print('speedup_model ')
# # print(model)
slim_speedup_path = os.path.join(output, 'slim_speedup_model.pth')
slim_speedup_onnx = os.path.join(output, 'slim_speedup_model.onnx')
torch.save(tmp_model.state_dict(), slim_speedup_path)
torch.onnx.export(tmp_model, dummy_input, slim_speedup_onnx, verbose=False, opset_version=11)
print('pruned model exported.')
flops, params, _ = count_flops_params(tmp_model, dummy_input, verbose=True)
print(f"Model FLOPs {flops / 1e6:.2f}M, Params {params / 1e6:.2f}M")
class SimpleModel(nn.Module):
def __init__(self):
super(SimpleModel, self).__init__()
self.conv1 = nn.Conv2d(3, 32, 3)
self.conv2 = nn.Conv2d(32, 64, 3)
self.conv3 = nn.Conv2d(64, 128, 3)
self.fc = nn.Linear(128, 256)
self.classifier = nn.Linear(256, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = nn.functional.adaptive_avg_pool2d(x, 1).reshape(x.shape[0], -1)
x = self.fc(x)
x = self.classifier(x)
return x
if __name__ == "__main__":
from torchvision.models.resnet import resnet50, resnet18
from torchvision.models.squeezenet import SqueezeNet
from torchvision.models.mobilenet import MobileNetV2
from segment.models import build_model
from libs.performance import performance
device = "cuda:0"
batch_size = 2
width_mult = 1.0
num_classes = 20
input_size = [1, 3, 224, 224]
net_type = 'modnet_v2'
model = resnet18(pretrained=True)
# model = SimpleModel()
# model = MobileNetV2()
# model = build_model.get_models(net_type, input_size, num_classes, width_mult=width_mult, pretrained=False)
model.eval()
inputs = torch.randn(input_size)
    model = model.to(device)
    inputs = inputs.to(device)
out = model(inputs)
performance.model_performance(model, inputs)
prune_model = copy.deepcopy(model)
prune_model = model_pruning(prune_model, input_size=input_size, sparsity=0.9, dependency_aware=True, device=device)
performance.model_performance(model, inputs)
performance.model_performance(prune_model, inputs)
| 44.078947
| 119
| 0.640256
|
da64d5599ffc0cd156084f86fe521e5a3f4482ee
| 5,519
|
py
|
Python
|
tensorcv/experiment/experiment_spec.py
|
afcarl/tensorcv
|
6545a48722dafb5c1cead3ed4f810ebcbef2c597
|
[
"MIT"
] | 1
|
2019-04-22T16:37:36.000Z
|
2019-04-22T16:37:36.000Z
|
tensorcv/experiment/experiment_spec.py
|
afcarl/tensorcv
|
6545a48722dafb5c1cead3ed4f810ebcbef2c597
|
[
"MIT"
] | null | null | null |
tensorcv/experiment/experiment_spec.py
|
afcarl/tensorcv
|
6545a48722dafb5c1cead3ed4f810ebcbef2c597
|
[
"MIT"
] | null | null | null |
import os
import json
from configparser import ConfigParser, ExtendedInterpolation
class ExperimentSpec(object):
def __init__(self, config_path):
assert os.path.exists(config_path), '{} not exists.'.format(config_path)
self.config = ConfigParser(
delimiters='=',
interpolation=ExtendedInterpolation())
self.config.read(config_path)
@property
def exp_dir(self):
return self.config.get('env', 'exp_dir')
@property
def data_dir(self):
default = os.path.join(self.exp_dir, 'data')
return self.config.get('env', 'data_dir', fallback=default)
@property
def log_dir(self):
default = os.path.join(self.exp_dir, 'log')
return self.config.get('env', 'log_dir', fallback=default)
@property
def model_dir(self):
default = os.path.join(self.exp_dir, 'model')
return self.config.get('env', 'model_dir', fallback=default)
@property
def eval_dir(self):
default = os.path.join(self.exp_dir, 'eval')
return self.config.get('env', 'eval_dir', fallback=default)
@property
def train_data(self):
return self.config.get('data', 'train_data', fallback='')
@property
def validation_data(self):
return self.config.get('data', 'validation_data', fallback='')
@property
def test_data(self):
raw_string = self.config.get('data', 'test_data', fallback='')
test_data = {}
for testset in raw_string.split('\n'):
if testset:
name, path = testset.split()
test_data[name] = path
return test_data
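    # Illustrative config layout assumed by test_data (names and paths are placeholders):
    #   [data]
    #   test_data = valset /path/to/val.record
    #       testset /path/to/test.record
    # Each non-empty line is "<name> <path>", yielding {"valset": ..., "testset": ...}.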
@property
def image_height(self):
return self.config.getint('data', 'image_height')
@property
def image_width(self):
return self.config.getint('data', 'image_width')
@property
def image_channels(self):
return self.config.getint('data', 'image_channels')
@property
def image_format(self):
return self.config.get('data', 'image_format', fallback='jpeg')
@property
def batch_size(self):
return self.config.getint('data', 'batch_size')
@property
def shuffle_buffer_size(self):
return self.config.getint('data', 'shuffle_buffer_size', fallback=10000)
@property
def prefetch_batches(self):
return self.config.getint('data', 'prefetch_batches', fallback=20)
@property
def num_data_processes(self):
return self.config.getint('data', 'num_data_processes', fallback=10)
@property
def dataset_type(self):
return self.config.get('data', 'dataset_type')
@property
def dataset_params(self):
params = self.config.get('data', 'dataset_params', fallback='{}')
return json.loads(params)
@property
def net(self):
return self.config.get('train', 'net')
@property
def net_params(self):
params = self.config.get('train', 'net_params', fallback='{}')
return json.loads(params)
@property
def loss(self):
return self.config.get('train', 'loss')
@property
def loss_params(self):
params = self.config.get('train', 'loss_params', fallback='{}')
return json.loads(params)
@property
def predictions(self):
return self.config.get('train', 'predictions')
@property
def predictions_params(self):
params = self.config.get('train', 'predictions_params', fallback='{}')
return json.loads(params)
@property
def metrics(self):
return self.config.get('train', 'metrics')
@property
def metrics_params(self):
params = self.config.get('train', 'metrics_params', fallback='{}')
return json.loads(params)
@property
def lr_policy(self):
return self.config.get('train', 'lr_policy')
@property
def lr_policy_params(self):
params = self.config.get('train', 'lr_policy_params', fallback='{}')
return json.loads(params)
@property
def optimizer(self):
return self.config.get('train', 'optimizer')
@property
def optimizer_params(self):
params = self.config.get('train', 'optimizer_params', fallback='{}')
return json.loads(params)
@property
def summary(self):
return self.config.get('train', 'summary')
@property
def summary_params(self):
params = self.config.get('train', 'summary_params', fallback='{}')
return json.loads(params)
@property
def max_steps(self):
return self.config.getint('train', 'max_step', fallback=None)
@property
def summary_steps(self):
return self.config.getint('train', 'summary_steps', fallback=100)
@property
def model_save_steps(self):
return self.config.getint('train', 'model_save_steps', fallback=1000)
@property
def transfer_checkpoint(self):
return self.config.get('train', 'transfer_checkpoint', fallback="")
@property
def transfer_params(self):
params = self.config.get('train', 'transfer_params', fallback='{}')
return json.loads(params)
@property
def model_step(self):
return self.config.get('evaluate', 'model_step')
@property
def predict_saver_type(self):
return self.config.get('evaluate', 'predict_saver_type')
@property
def predict_saver_params(self):
params = self.config.get('evaluate', 'predict_saver_params', fallback='{}')
return json.loads(params)
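# Minimal usage sketch (the config path comes from the command line and is not part of
# the original module; any option referenced by a property must exist in that file):
if __name__ == "__main__":
    import sys
    spec = ExperimentSpec(sys.argv[1])
    print("exp_dir:", spec.exp_dir)
    print("net:", spec.net, spec.net_params)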
| 28.595855
| 83
| 0.633629
|
ea3604bbd8fab7946db1572dc0454d77187e2125
| 10,206
|
py
|
Python
|
scan.py
|
boost/bucket-antivirus-function
|
6eb93406e28f81a4c612f0dec29670451e0c5589
|
[
"Apache-2.0"
] | null | null | null |
scan.py
|
boost/bucket-antivirus-function
|
6eb93406e28f81a4c612f0dec29670451e0c5589
|
[
"Apache-2.0"
] | null | null | null |
scan.py
|
boost/bucket-antivirus-function
|
6eb93406e28f81a4c612f0dec29670451e0c5589
|
[
"Apache-2.0"
] | 1
|
2020-07-16T12:47:24.000Z
|
2020-07-16T12:47:24.000Z
|
# -*- coding: utf-8 -*-
# Upside Travel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
from urllib.parse import unquote_plus
from distutils.util import strtobool
import sys
import boto3
import botocore
import clamav
import metrics
from common import AV_DEFINITION_S3_BUCKET
from common import AV_DEFINITION_S3_PREFIX
from common import AV_DELETE_INFECTED_FILES
from common import AV_PROCESS_ORIGINAL_VERSION_ONLY
from common import AV_SCAN_START_METADATA
from common import AV_SCAN_START_SNS_ARN
from common import AV_SIGNATURE_METADATA
from common import AV_STATUS_CLEAN
from common import AV_STATUS_INFECTED
from common import AV_STATUS_METADATA
from common import AV_STATUS_SNS_ARN
from common import AV_STATUS_SNS_PUBLISH_CLEAN
from common import AV_STATUS_SNS_PUBLISH_INFECTED
from common import AV_TIMESTAMP_METADATA
from common import create_dir
from common import get_timestamp
def event_object(event, event_source="s3"):
# SNS events are slightly different
if event_source.upper() == "SNS":
event = json.loads(event["Records"][0]["Sns"]["Message"])
# Break down the record
records = event["Records"]
if len(records) == 0:
raise Exception("No records found in event!")
record = records[0]
s3_obj = record["s3"]
# Get the bucket name
if "bucket" not in s3_obj:
raise Exception("No bucket found in event!")
bucket_name = s3_obj["bucket"].get("name", None)
# Get the key name
if "object" not in s3_obj:
raise Exception("No key found in event!")
key_name = s3_obj["object"].get("key", None)
if key_name:
key_name = unquote_plus(key_name)
# Ensure both bucket and key exist
if (not bucket_name) or (not key_name):
raise Exception("Unable to retrieve object from event.\n{}".format(event))
# Create and return the object
s3 = boto3.resource("s3")
return s3.Object(bucket_name, key_name)
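# For reference, the minimal S3 event shape consumed by event_object() is
# (bucket and key values are placeholders):
#   {"Records": [{"s3": {"bucket": {"name": "my-bucket"},
#                        "object": {"key": "path/to/file"}}}]}
# When EVENT_SOURCE is "SNS", the same document arrives JSON-encoded in
# event["Records"][0]["Sns"]["Message"].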
def verify_s3_object_version(s3, s3_object):
# validate that we only process the original version of a file, if asked to do so
# security check to disallow processing of a new (possibly infected) object version
# while a clean initial version is getting processed
# downstream services may consume latest version by mistake and get the infected version instead
bucket_versioning = s3.BucketVersioning(s3_object.bucket_name)
if bucket_versioning.status == "Enabled":
bucket = s3.Bucket(s3_object.bucket_name)
versions = list(bucket.object_versions.filter(Prefix=s3_object.key))
if len(versions) > 1:
raise Exception(
"Detected multiple object versions in %s.%s, aborting processing"
% (s3_object.bucket_name, s3_object.key)
)
else:
# misconfigured bucket, left with no or suspended versioning
raise Exception(
"Object versioning is not enabled in bucket %s" % s3_object.bucket_name
)
def get_local_path(s3_object, local_prefix):
return os.path.join(local_prefix, s3_object.bucket_name, s3_object.key)
def delete_s3_object(s3_object):
try:
s3_object.delete()
except Exception:
raise Exception(
"Failed to delete infected file: %s.%s"
% (s3_object.bucket_name, s3_object.key)
)
else:
print("Infected file deleted: %s.%s" % (s3_object.bucket_name, s3_object.key))
def set_av_metadata(s3_object, scan_result, scan_signature, timestamp):
content_type = s3_object.content_type
metadata = s3_object.metadata
metadata[AV_SIGNATURE_METADATA] = scan_signature
metadata[AV_STATUS_METADATA] = scan_result
metadata[AV_TIMESTAMP_METADATA] = timestamp
s3_object.copy(
{"Bucket": s3_object.bucket_name, "Key": s3_object.key},
ExtraArgs={
"ContentType": content_type,
"Metadata": metadata,
"MetadataDirective": "REPLACE",
},
)
def set_av_tags(s3_client, s3_object, scan_result, scan_signature, timestamp):
try:
curr_tags = s3_client.get_object_tagging(
Bucket=s3_object.bucket_name, Key=s3_object.key
)["TagSet"]
new_tags = copy.copy(curr_tags)
for tag in curr_tags:
if tag["Key"] in [
AV_SIGNATURE_METADATA,
AV_STATUS_METADATA,
AV_TIMESTAMP_METADATA,
]:
new_tags.remove(tag)
new_tags.append({"Key": AV_SIGNATURE_METADATA, "Value": scan_signature})
new_tags.append({"Key": AV_STATUS_METADATA, "Value": scan_result})
new_tags.append({"Key": AV_TIMESTAMP_METADATA, "Value": timestamp})
s3_client.put_object_tagging(
Bucket=s3_object.bucket_name, Key=s3_object.key, Tagging={"TagSet": new_tags}
)
except botocore.exceptions.ClientError as error:
# Error on file missing is 'AccessDenied' due to our specific permissions set
if error.response['Error']['Code'] == 'AccessDenied':
print("Exiting early, the object has been deleted.")
sys.exit()
else:
raise error
def sns_start_scan(sns_client, s3_object, scan_start_sns_arn, timestamp):
message = {
"bucket": s3_object.bucket_name,
"key": s3_object.key,
"version": s3_object.version_id,
AV_SCAN_START_METADATA: True,
AV_TIMESTAMP_METADATA: timestamp,
}
sns_client.publish(
TargetArn=scan_start_sns_arn,
Message=json.dumps({"default": json.dumps(message)}),
MessageStructure="json",
)
def sns_scan_results(
sns_client, s3_object, sns_arn, scan_result, scan_signature, timestamp
):
# Don't publish if scan_result is CLEAN and CLEAN results should not be published
if scan_result == AV_STATUS_CLEAN and not str_to_bool(AV_STATUS_SNS_PUBLISH_CLEAN):
return
# Don't publish if scan_result is INFECTED and INFECTED results should not be published
if scan_result == AV_STATUS_INFECTED and not str_to_bool(
AV_STATUS_SNS_PUBLISH_INFECTED
):
return
message = {
"bucket": s3_object.bucket_name,
"key": s3_object.key,
"version": s3_object.version_id,
AV_SIGNATURE_METADATA: scan_signature,
AV_STATUS_METADATA: scan_result,
AV_TIMESTAMP_METADATA: get_timestamp(),
}
sns_client.publish(
TargetArn=sns_arn,
Message=json.dumps({"default": json.dumps(message)}),
MessageStructure="json",
MessageAttributes={
AV_STATUS_METADATA: {"DataType": "String", "StringValue": scan_result},
AV_SIGNATURE_METADATA: {
"DataType": "String",
"StringValue": scan_signature,
},
},
)
def lambda_handler(event, context):
s3 = boto3.resource("s3")
s3_client = boto3.client("s3")
sns_client = boto3.client("sns")
# Get some environment variables
ENV = os.getenv("ENV", "")
EVENT_SOURCE = os.getenv("EVENT_SOURCE", "S3")
start_time = get_timestamp()
print("Script starting at %s\n" % (start_time))
s3_object = event_object(event, event_source=EVENT_SOURCE)
if str_to_bool(AV_PROCESS_ORIGINAL_VERSION_ONLY):
verify_s3_object_version(s3, s3_object)
# Publish the start time of the scan
if AV_SCAN_START_SNS_ARN not in [None, ""]:
start_scan_time = get_timestamp()
sns_start_scan(sns_client, s3_object, AV_SCAN_START_SNS_ARN, start_scan_time)
file_path = get_local_path(s3_object, "/tmp")
create_dir(os.path.dirname(file_path))
try:
print("Trying to download %s" % file_path)
s3_object.download_file(file_path)
except botocore.exceptions.ClientError as error:
if error.response['Error']['Code'] == 'NoSuchKey':
print("Exiting early, the object has been deleted.")
sys.exit()
to_download = clamav.update_defs_from_s3(
s3_client, AV_DEFINITION_S3_BUCKET, AV_DEFINITION_S3_PREFIX
)
for download in to_download.values():
s3_path = download["s3_path"]
local_path = download["local_path"]
print("Downloading definition file %s from s3://%s" % (local_path, s3_path))
s3.Bucket(AV_DEFINITION_S3_BUCKET).download_file(s3_path, local_path)
print("Downloading definition file %s complete!" % (local_path))
scan_result, scan_signature = clamav.scan_file(file_path)
print(
"Scan of s3://%s resulted in %s\n"
% (os.path.join(s3_object.bucket_name, s3_object.key), scan_result)
)
result_time = get_timestamp()
# Set the properties on the object with the scan results
if "AV_UPDATE_METADATA" in os.environ:
set_av_metadata(s3_object, scan_result, scan_signature, result_time)
set_av_tags(s3_client, s3_object, scan_result, scan_signature, result_time)
# Publish the scan results
if AV_STATUS_SNS_ARN not in [None, ""]:
sns_scan_results(
sns_client,
s3_object,
AV_STATUS_SNS_ARN,
scan_result,
scan_signature,
result_time,
)
metrics.send(
env=ENV, bucket=s3_object.bucket_name, key=s3_object.key, status=scan_result
)
# Delete downloaded file to free up room on re-usable lambda function container
try:
os.remove(file_path)
except OSError:
pass
if str_to_bool(AV_DELETE_INFECTED_FILES) and scan_result == AV_STATUS_INFECTED:
delete_s3_object(s3_object)
stop_scan_time = get_timestamp()
print("Script finished at %s\n" % stop_scan_time)
def str_to_bool(s):
return bool(strtobool(str(s)))
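# Local smoke-test sketch (not part of the original module): builds a fake S3 event and
# calls lambda_handler directly. Bucket/key names are placeholders; real AWS credentials,
# ClamAV binaries and the environment variables from common.py are still required.
if __name__ == "__main__":
    fake_event = {
        "Records": [
            {"s3": {"bucket": {"name": "example-bucket"},
                    "object": {"key": "uploads/example.txt"}}}
        ]
    }
    lambda_handler(fake_event, None)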
| 35.193103
| 100
| 0.682932
|
aa7e60ec7b058e42ac43d6738ad9551b078a3702
| 1,961
|
py
|
Python
|
files/programs/Chungle.py
|
ArezalGame89/ChungOS
|
62c45a6ab80712d8a863fa9604d3e6519e7e04d1
|
[
"MIT"
] | null | null | null |
files/programs/Chungle.py
|
ArezalGame89/ChungOS
|
62c45a6ab80712d8a863fa9604d3e6519e7e04d1
|
[
"MIT"
] | 1
|
2022-02-20T15:07:17.000Z
|
2022-02-20T15:08:11.000Z
|
files/programs/Chungle.py
|
ArezalGame89/ChungOS
|
62c45a6ab80712d8a863fa9604d3e6519e7e04d1
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtWebEngineWidgets import *
from PyQt5.QtCore import *
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.browser = QWebEngineView()
self.browser.setUrl(QUrl("https://google.com"))
self.setCentralWidget(self.browser)
self.showMaximized()
navbar = QToolBar()
self.addToolBar(navbar)
back_btn = QAction("Back", self)
back_btn.triggered.connect(self.browser.back)
navbar.addAction(back_btn)
forward_btn = QAction("Foward", self)
forward_btn.triggered.connect(self.browser.forward)
navbar.addAction(forward_btn)
reload_btn = QAction("Reload", self)
reload_btn.triggered.connect(self.browser.reload)
navbar.addAction(reload_btn)
home_btn = QAction("Home", self)
home_btn.triggered.connect(self.navigate_home)
navbar.addAction(home_btn)
self.url_bar = QLineEdit()
self.url_bar.returnPressed.connect(self.nav_url)
navbar.addWidget(self.url_bar)
def navigate_home(self):
self.browser.setUrl(QUrl("https://google.com"))
    def nav_url(self):
        url = self.url_bar.text()
        # "https://" is 8 characters, so a fixed-length slice comparison never matched;
        # use startswith to avoid double-prefixing URLs that already include the scheme.
        if not url.startswith("https://"):
            self.browser.setUrl(QUrl("https://" + url))
        else:
            self.browser.setUrl(QUrl(url))
for i in range(2):
print(url)
app = QApplication(sys.argv)
QApplication.setApplicationName("Chungle")
window = MainWindow()
app.exec_()
| 44.568182
| 71
| 0.515043
|
2c82daa106423a89d59dfe63c1b36cb4f9ab02c2
| 275
|
py
|
Python
|
src/11/simple_authentication_of_clients/client.py
|
tuanavu/python-gitbook
|
948a05e065b0f40afbfd22f697dff16238163cde
|
[
"MIT"
] | 14
|
2017-05-20T04:06:46.000Z
|
2022-01-23T06:48:45.000Z
|
src/11/simple_authentication_of_clients/client.py
|
tuanavu/python-gitbook
|
948a05e065b0f40afbfd22f697dff16238163cde
|
[
"MIT"
] | 1
|
2021-06-10T20:17:55.000Z
|
2021-06-10T20:17:55.000Z
|
src/11/simple_authentication_of_clients/client.py
|
tuanavu/python-gitbook
|
948a05e065b0f40afbfd22f697dff16238163cde
|
[
"MIT"
] | 15
|
2017-03-29T17:57:33.000Z
|
2021-08-24T02:20:08.000Z
|
from socket import socket, AF_INET, SOCK_STREAM
from auth import client_authenticate
secret_key = b'peekaboo'
s = socket(AF_INET, SOCK_STREAM)
s.connect(('localhost', 18000))
client_authenticate(s, secret_key)
s.send(b'Hello World')
resp = s.recv(1024)
print('Got:', resp)
| 22.916667
| 47
| 0.763636
|
de3e06e3f9a2d0cacc1a74e9f6b98371f4a1fa52
| 1,680
|
py
|
Python
|
test/functional/feature_blocksdir.py
|
patentcoin/patentcoin
|
5dd4ba6cf18946be6be0f564952a55358c81fe95
|
[
"MIT"
] | 1
|
2021-12-11T19:14:32.000Z
|
2021-12-11T19:14:32.000Z
|
test/functional/feature_blocksdir.py
|
Cryptogits/patentcoin
|
5dd4ba6cf18946be6be0f564952a55358c81fe95
|
[
"MIT"
] | null | null | null |
test/functional/feature_blocksdir.py
|
Cryptogits/patentcoin
|
5dd4ba6cf18946be6be0f564952a55358c81fe95
|
[
"MIT"
] | 2
|
2020-06-27T10:26:01.000Z
|
2021-12-11T15:33:50.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the blocksdir option.
"""
import os
import shutil
from test_framework.test_framework import PatentcoinTestFramework, initialize_datadir
class BlocksdirTest(PatentcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
self.stop_node(0)
assert os.path.isdir(os.path.join(self.nodes[0].datadir, self.chain, "blocks"))
assert not os.path.isdir(os.path.join(self.nodes[0].datadir, "blocks"))
shutil.rmtree(self.nodes[0].datadir)
initialize_datadir(self.options.tmpdir, 0, self.chain)
self.log.info("Starting with nonexistent blocksdir ...")
blocksdir_path = os.path.join(self.options.tmpdir, 'blocksdir')
self.nodes[0].assert_start_raises_init_error(["-blocksdir=" + blocksdir_path], 'Error: Specified blocks directory "{}" does not exist.'.format(blocksdir_path))
os.mkdir(blocksdir_path)
self.log.info("Starting with existing blocksdir ...")
self.start_node(0, ["-blocksdir=" + blocksdir_path])
self.log.info("mining blocks..")
self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
assert os.path.isfile(os.path.join(blocksdir_path, self.chain, "blocks", "blk00000.dat"))
assert os.path.isdir(os.path.join(self.nodes[0].datadir, self.chain, "blocks", "index"))
if __name__ == '__main__':
BlocksdirTest().main()
| 43.076923
| 167
| 0.704167
|
87c01afcd8ba8eb6a6f92139952ede0a633c53b1
| 6,915
|
py
|
Python
|
backend/test_2_29699/settings.py
|
crowdbotics-apps/test-2-29699
|
985498b8dd29302ef219edbd95b7766a7f57622a
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/test_2_29699/settings.py
|
crowdbotics-apps/test-2-29699
|
985498b8dd29302ef219edbd95b7766a7f57622a
|
[
"FTL",
"AML",
"RSA-MD"
] | 29
|
2021-08-15T22:23:51.000Z
|
2022-03-13T17:39:18.000Z
|
backend/test_2_29699/settings.py
|
crowdbotics-apps/test-2-29699
|
985498b8dd29302ef219edbd95b7766a7f57622a
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
Django settings for test_2_29699 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_2_29699.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_2_29699.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
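# Example (placeholder credentials): setting DATABASE_URL overrides the SQLite default,
# e.g. DATABASE_URL=postgres://user:password@localhost:5432/app_db is parsed by env.db().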
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| 29.551282
| 112
| 0.730441
|
b06ffa1c4f2d8b7d26f684635663410cc43278e6
| 7,365
|
py
|
Python
|
ee/clickhouse/queries/event_query.py
|
rightlyip/posthog
|
c00ad7a2b02df68930ca332675fc04ce4ed83a60
|
[
"MIT"
] | null | null | null |
ee/clickhouse/queries/event_query.py
|
rightlyip/posthog
|
c00ad7a2b02df68930ca332675fc04ce4ed83a60
|
[
"MIT"
] | null | null | null |
ee/clickhouse/queries/event_query.py
|
rightlyip/posthog
|
c00ad7a2b02df68930ca332675fc04ce4ed83a60
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod
from typing import Any, Dict, List, Tuple, Union
from ee.clickhouse.materialized_columns.columns import ColumnName
from ee.clickhouse.models.cohort import format_person_query, format_precalculated_cohort_query, is_precalculated_query
from ee.clickhouse.models.property import get_property_string_expr, parse_prop_clauses
from ee.clickhouse.models.util import PersonPropertiesMode
from ee.clickhouse.queries.column_optimizer import ColumnOptimizer
from ee.clickhouse.queries.groups_join_query import GroupsJoinQuery
from ee.clickhouse.queries.person_query import ClickhousePersonQuery
from ee.clickhouse.queries.util import parse_timestamps
from ee.clickhouse.sql.person import GET_TEAM_PERSON_DISTINCT_IDS
from posthog.models import Cohort, Filter, Property
from posthog.models.filters.path_filter import PathFilter
from posthog.models.filters.retention_filter import RetentionFilter
from posthog.models.filters.session_recordings_filter import SessionRecordingsFilter
class ClickhouseEventQuery(metaclass=ABCMeta):
DISTINCT_ID_TABLE_ALIAS = "pdi"
PERSON_TABLE_ALIAS = "person"
EVENT_TABLE_ALIAS = "e"
_filter: Union[Filter, PathFilter, RetentionFilter, SessionRecordingsFilter]
_team_id: int
_column_optimizer: ColumnOptimizer
_person_query: ClickhousePersonQuery
_should_join_distinct_ids = False
_should_join_persons = False
_should_round_interval = False
_extra_fields: List[ColumnName]
_extra_person_fields: List[ColumnName]
def __init__(
self,
filter: Union[Filter, PathFilter, RetentionFilter, SessionRecordingsFilter],
team_id: int,
round_interval=False,
should_join_distinct_ids=False,
should_join_persons=False,
# Extra events/person table columns to fetch since parent query needs them
extra_fields: List[ColumnName] = [],
extra_person_fields: List[ColumnName] = [],
**kwargs,
) -> None:
self._filter = filter
self._team_id = team_id
self._column_optimizer = ColumnOptimizer(self._filter, self._team_id)
self._person_query = ClickhousePersonQuery(
self._filter, self._team_id, self._column_optimizer, extra_fields=extra_person_fields
)
self.params: Dict[str, Any] = {
"team_id": self._team_id,
}
self._should_join_distinct_ids = should_join_distinct_ids
self._should_join_persons = should_join_persons
self._extra_fields = extra_fields
self._extra_person_fields = extra_person_fields
if not self._should_join_distinct_ids:
self._determine_should_join_distinct_ids()
if not self._should_join_persons:
self._determine_should_join_persons()
self._should_round_interval = round_interval
@abstractmethod
def get_query(self) -> Tuple[str, Dict[str, Any]]:
pass
@abstractmethod
def _determine_should_join_distinct_ids(self) -> None:
pass
def _get_disintct_id_query(self) -> str:
if self._should_join_distinct_ids:
return f"""
INNER JOIN ({GET_TEAM_PERSON_DISTINCT_IDS}) AS {self.DISTINCT_ID_TABLE_ALIAS}
ON events.distinct_id = {self.DISTINCT_ID_TABLE_ALIAS}.distinct_id
"""
else:
return ""
def _determine_should_join_persons(self) -> None:
if self._person_query.is_used:
self._should_join_distinct_ids = True
self._should_join_persons = True
return
# :KLUDGE: The following is mostly making sure if cohorts are included as well.
# Can be simplified significantly after https://github.com/PostHog/posthog/issues/5854
if any(self._should_property_join_persons(prop) for prop in self._filter.properties):
self._should_join_distinct_ids = True
self._should_join_persons = True
return
if any(
self._should_property_join_persons(prop) for entity in self._filter.entities for prop in entity.properties
):
self._should_join_distinct_ids = True
self._should_join_persons = True
return
def _should_property_join_persons(self, prop: Property) -> bool:
return prop.type == "cohort" and self._does_cohort_need_persons(prop)
def _does_cohort_need_persons(self, prop: Property) -> bool:
try:
cohort: Cohort = Cohort.objects.get(pk=prop.value, team_id=self._team_id)
except Cohort.DoesNotExist:
return False
if is_precalculated_query(cohort):
return True
if cohort.is_static:
return True
for group in cohort.groups:
if group.get("properties"):
return True
return False
def _get_person_query(self) -> Tuple[str, Dict]:
if self._should_join_persons:
person_query, params = self._person_query.get_query()
return (
f"""
INNER JOIN ({person_query}) {self.PERSON_TABLE_ALIAS}
ON {self.PERSON_TABLE_ALIAS}.id = {self.DISTINCT_ID_TABLE_ALIAS}.person_id
""",
params,
)
else:
return "", {}
def _get_groups_query(self) -> Tuple[str, Dict]:
return GroupsJoinQuery(self._filter, self._team_id, self._column_optimizer).get_join_query()
def _get_date_filter(self) -> Tuple[str, Dict]:
parsed_date_from, parsed_date_to, date_params = parse_timestamps(filter=self._filter, team_id=self._team_id)
query = f"""
{parsed_date_from}
{parsed_date_to}
"""
return query, date_params
def _get_props(self, filters: List[Property]) -> Tuple[str, Dict]:
final = []
params: Dict[str, Any] = {}
for idx, prop in enumerate(filters):
if prop.type == "cohort":
person_id_query, cohort_filter_params = self._get_cohort_subquery(prop)
params = {**params, **cohort_filter_params}
final.append(f"AND {person_id_query}")
else:
filter_query, filter_params = parse_prop_clauses(
[prop],
self._team_id,
prepend=f"global_{idx}",
allow_denormalized_props=True,
person_properties_mode=PersonPropertiesMode.EXCLUDE,
)
final.append(filter_query)
params.update(filter_params)
return " ".join(final), params
def _get_cohort_subquery(self, prop) -> Tuple[str, Dict[str, Any]]:
try:
cohort: Cohort = Cohort.objects.get(pk=prop.value, team_id=self._team_id)
except Cohort.DoesNotExist:
return "0 = 11", {} # If cohort doesn't exist, nothing can match
is_precalculated = is_precalculated_query(cohort)
person_id_query, cohort_filter_params = (
format_precalculated_cohort_query(
cohort.pk, 0, custom_match_field=f"{self.DISTINCT_ID_TABLE_ALIAS}.person_id"
)
if is_precalculated
else format_person_query(cohort, 0, custom_match_field=f"{self.DISTINCT_ID_TABLE_ALIAS}.person_id")
)
return person_id_query, cohort_filter_params
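# Illustrative sketch of how a concrete subclass plugs into this base class (the real
# subclasses live elsewhere in the codebase; the query below is a made-up minimal example):
#
#   class MyEventQuery(ClickhouseEventQuery):
#       def _determine_should_join_distinct_ids(self) -> None:
#           self._should_join_distinct_ids = True
#
#       def get_query(self) -> Tuple[str, Dict[str, Any]]:
#           date_query, date_params = self._get_date_filter()
#           prop_query, prop_params = self._get_props(self._filter.properties)
#           query = (
#               f"SELECT count() FROM events {self.EVENT_TABLE_ALIAS} "
#               f"{self._get_disintct_id_query()} "
#               f"WHERE team_id = %(team_id)s {date_query} {prop_query}"
#           )
#           return query, {**self.params, **date_params, **prop_params}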
| 39.175532
| 118
| 0.670468
|
cd2ffca9e92a676c7372d0529e65f4813f91beb3
| 2,967
|
py
|
Python
|
gyro.py
|
martyni/astro_bird
|
2d6fb8a2071f9a89700af6ddb9bee8925139bfba
|
[
"MIT"
] | null | null | null |
gyro.py
|
martyni/astro_bird
|
2d6fb8a2071f9a89700af6ddb9bee8925139bfba
|
[
"MIT"
] | null | null | null |
gyro.py
|
martyni/astro_bird
|
2d6fb8a2071f9a89700af6ddb9bee8925139bfba
|
[
"MIT"
] | null | null | null |
from sense_hat import SenseHat
from random import choice
import time
GREEN = (0, 255, 0)
YELLOW = (255, 255, 0)
BLUE = (0, 0, 255)
RED = (255, 0, 0)
WHITE = (255,255,255)
NOTHING = (0,0,0)
PINK = (255,105, 180)
class Screen(object):
def __init__(self, width=8, height=8):
self.s = SenseHat()
self.s.low_light = True
self.width = width
self.height = height
        self.buffer = [NOTHING for _ in range(self.width * self.height)]
self.buffer_original = list(self.buffer)
self.score = 0
self.button_presses = {}
def colour_pixel(self, x, y, colour):
try:
self.buffer[ y * self.width + x ] = colour
except:
pass
def draw_frame(self):
self.s.set_pixels(self.buffer)
def reset_buffer(self):
self.buffer = list(self.buffer_original)
def get_button_presses(self):
button_presses_now = {event.direction : event.action for event in self.s.stick.get_events()}
self.button_presses.update(button_presses_now)
class Sprite(object):
def __init__(self,screen, position=[0,4], height=1, width=1, colour=YELLOW):
self.screen = screen
self.position = list(position)
self.colour = colour
self.height = height
self.width = width
def draw_sprite(self, custom_size=None, custom_position=None):
if custom_size is not None:
width,height = custom_size
else:
width,height = self.width, self.height
if custom_position is not None:
position = custom_position
else:
position = self.position
for i in range(width):
for j in range(height):
x = position[0] + i
y = position[1] + j
self.screen.colour_pixel(x, y, self.colour)
print('drew {}, {}'.format(self.position, self.colour))
def update(self):
pass
class Ball(Sprite):
def update(self):
acceleration = self.screen.s.get_accelerometer_raw()
x = acceleration['x']
y = acceleration['y']
z = acceleration['z']
x=round(x, 0)
y=round(y, 0)
z=round(z, 0)
self.position[0] += int(x)
self.position[1] += int(y)
overflow_array = [self.width, self.height]
for pos, value in enumerate(self.position):
max_value = 8 - overflow_array[pos]
if self.position[pos] > max_value:
self.position[pos] = max_value
if self.position[pos] < 0:
self.position[pos] = 0
print("x={0}, y={1}, z={2}".format(x, y, z))
print("x={0}, y={1}, z={2}".format( self.position[0], self.position[1], z))
def main():
the_screen = Screen()
ball = Ball(the_screen, width=2, height=2,position=[4,4])
sprites = [ ball ]
speed = 0
game = True
while game:
for sprite in sprites:
sprite.update()
sprite.draw_sprite()
the_screen.draw_frame()
the_screen.reset_buffer()
time.sleep(speed)
if __name__ == "__main__":
main()
| 26.256637
| 124
| 0.61847
|
8e91a64535e50e749981bf8fa4041c02fa020d58
| 891
|
py
|
Python
|
test/test_v1_load_balancer_ingress.py
|
RyanSiu1995/argocd-python-client
|
2e8f097fe09f247a46ac70692241a93d1acd076a
|
[
"MIT"
] | 1
|
2021-11-20T13:37:43.000Z
|
2021-11-20T13:37:43.000Z
|
test/test_v1_load_balancer_ingress.py
|
RyanSiu1995/argocd-python-client
|
2e8f097fe09f247a46ac70692241a93d1acd076a
|
[
"MIT"
] | null | null | null |
test/test_v1_load_balancer_ingress.py
|
RyanSiu1995/argocd-python-client
|
2e8f097fe09f247a46ac70692241a93d1acd076a
|
[
"MIT"
] | null | null | null |
"""
Consolidate Services
Description of all APIs # noqa: E501
The version of the OpenAPI document: version not set
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import argocd_python_client
from argocd_python_client.model.v1_port_status import V1PortStatus
globals()['V1PortStatus'] = V1PortStatus
from argocd_python_client.model.v1_load_balancer_ingress import V1LoadBalancerIngress
class TestV1LoadBalancerIngress(unittest.TestCase):
"""V1LoadBalancerIngress unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1LoadBalancerIngress(self):
"""Test V1LoadBalancerIngress"""
# FIXME: construct object with mandatory attributes with example values
# model = V1LoadBalancerIngress() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.447368
| 85
| 0.72615
|
68c79f1e9c929a054f88178ac359e3e4c1d48169
| 143
|
py
|
Python
|
codewars/8kyu/doha22/kata8/getting_age/getting_age.py
|
doha22/Training_one
|
0cd7cf86c7da0f6175834146296b763d1841766b
|
[
"MIT"
] | null | null | null |
codewars/8kyu/doha22/kata8/getting_age/getting_age.py
|
doha22/Training_one
|
0cd7cf86c7da0f6175834146296b763d1841766b
|
[
"MIT"
] | 2
|
2019-01-22T10:53:42.000Z
|
2019-01-31T08:02:48.000Z
|
codewars/8kyu/doha22/kata8/getting_age/getting_age.py
|
doha22/Training_one
|
0cd7cf86c7da0f6175834146296b763d1841766b
|
[
"MIT"
] | 13
|
2019-01-22T10:37:42.000Z
|
2019-01-25T13:30:43.000Z
|
def get_age(age):
for s in age.split():
if s.isdigit():
return int(s)
def get_age2(age):
return int(age[0])
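# Quick check (illustrative):
#   get_age("4 years old")  -> 4   (returns the first whitespace-separated integer token)
#   get_age2("4 years old") -> 4   (only works when the age is a single leading digit)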
| 14.3
| 26
| 0.517483
|
91514cd1e001a7f9d61afe68e13dc624c37c4b2a
| 48,882
|
py
|
Python
|
test/test_exception_scope.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
test/test_exception_scope.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
test/test_exception_scope.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. 
| `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is like` ` not like` | | `container-status` | `is` ` is not` | | `containers` | `are` | | `criticality-tag` | `is` ` is not` ` is greater than` ` is less than` ` is applied` ` is not applied` | | `custom-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `cve` | `is` ` is not` ` contains` ` does not contain` | | `cvss-access-complexity` | `is` ` is not` | | `cvss-authentication-required` | `is` ` is not` | | `cvss-access-vector` | `is` ` is not` | | `cvss-availability-impact` | `is` ` is not` | | `cvss-confidentiality-impact` | `is` ` is not` | | `cvss-integrity-impact` | `is` ` is not` | | `cvss-v3-confidentiality-impact` | `is` ` is not` | | `cvss-v3-integrity-impact` | `is` ` is not` | | `cvss-v3-availability-impact` | `is` ` is not` | | `cvss-v3-attack-vector` | `is` ` is not` | | `cvss-v3-attack-complexity` | `is` ` is not` | | `cvss-v3-user-interaction` | `is` ` is not` | | `cvss-v3-privileges-required` | `is` ` is not` | | `host-name` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is empty` ` is not empty` ` is like` ` not like` | | `host-type` | `in` ` not in` | | `ip-address` | `is` ` is not` ` in range` ` not in range` ` is like` ` not like` | | `ip-address-type` | `in` ` not in` | | `last-scan-date` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `location-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is earlier than` | | `open-ports` | `is` ` is not` ` in range` | | `operating-system` | `contains` ` does not contain` ` is empty` ` is not empty` | | `owner-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is not` ` in range` ` greater than` ` less than` | | `service-name` | `contains` ` does not contain` | | `site-id` | `in` ` not in` | | `software` | `contains` ` does not contain` | | `vAsset-cluster` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | 
`vAsset-datacenter` | `is` ` is not` | | `vAsset-host-name` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-power-state` | `in` ` not in` | | `vAsset-resource-pool-path` | `contains` ` does not contain` | | `vulnerability-assessed` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `vulnerability-category` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` | | `vulnerability-cvss-v3-score` | `is` ` is not` | | `vulnerability-cvss-score` | `is` ` is not` ` in range` ` is greater than` ` is less than` | | `vulnerability-exposures` | `includes` ` does not include` | | `vulnerability-title` | `contains` ` does not contain` ` is` ` is not` ` starts with` ` ends with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `string` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
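    For example, the paging and sorting parameters described above can be combined in a single request with any HTTP client. The following is a minimal sketch using the Python `requests` package; the console hostname, the credentials, the `/api/3/assets` endpoint, and the `riskScore` sort property are illustrative assumptions rather than part of this specification.
    ```python
    import requests
    # Page 3 (zero-based index 2), 10 records per page, sorted by risk score descending.
    response = requests.get(
        'https://console.example.com/api/3/assets',
        params={'page': 2, 'size': 10, 'sort': 'riskScore,DESC'},
        auth=('username', 'password'),
    )
    body = response.json()
    # The paged envelope exposes the resources plus page metadata and hypermedia links.
    for resource in body['resources']:
        print(resource['links'][0]['href'])  # every resource carries at least a 'self' link
    print(body['page']['totalResources'], 'resources across', body['page']['totalPages'], 'pages')
    ```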
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.exception_scope import ExceptionScope # noqa: E501
from swagger_client.rest import ApiException
class TestExceptionScope(unittest.TestCase):
"""ExceptionScope unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testExceptionScope(self):
"""Test ExceptionScope"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.exception_scope.ExceptionScope() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 1,192.243902
| 48,043
| 0.490303
|
9ee977281904c3a63b825f00a4d85a7cd762110c
| 917
|
py
|
Python
|
backend/database/settings/development.py
|
JMSoler7/database
|
b90326a8f3929d1bad9a810fcbe91d9bb2c3d5f4
|
[
"MIT"
] | null | null | null |
backend/database/settings/development.py
|
JMSoler7/database
|
b90326a8f3929d1bad9a810fcbe91d9bb2c3d5f4
|
[
"MIT"
] | null | null | null |
backend/database/settings/development.py
|
JMSoler7/database
|
b90326a8f3929d1bad9a810fcbe91d9bb2c3d5f4
|
[
"MIT"
] | null | null | null |
import os
from database.settings.common import * # NOQA F401,F403
from database.settings.common import SITE_PACKAGE_NAME
from database.settings.common import BASE_DIR
from database.settings.common import INSTALLED_APPS
from database.settings.common import MIDDLEWARE
from database.settings.common import LOGGING
from database.settings.common import REST_FRAMEWORK
SECRET_KEY = 'AIXO_ES_UNA_CLAU_DE_DEVELOPMENT'
DEBUG = True
ALLOWED_HOSTS = ['*']
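# Development-only tooling: enable django-debug-toolbar (application and middleware).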
INSTALLED_APPS += ['debug_toolbar']
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware']
LOGGING['loggers']['']['handlers'] = ['stdout']
LOGGING['loggers'][SITE_PACKAGE_NAME]['level'] = 'DEBUG'
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] += (
'rest_framework.authentication.BasicAuthentication',)
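# Point django-webpack-loader at the local webpack build output and its stats file.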
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'dist/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
}
}
| 33.962963
| 67
| 0.76663
|
d57eb15ea86195774d84437c0cc542fe5ba507ea
| 511
|
py
|
Python
|
vtcff/_codec_avc.py
|
rtmigo/vtcff_py
|
e26a5e55ea455b10995932dccd319c1f7fc28385
|
[
"MIT"
] | null | null | null |
vtcff/_codec_avc.py
|
rtmigo/vtcff_py
|
e26a5e55ea455b10995932dccd319c1f7fc28385
|
[
"MIT"
] | null | null | null |
vtcff/_codec_avc.py
|
rtmigo/vtcff_py
|
e26a5e55ea455b10995932dccd319c1f7fc28385
|
[
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: (c) 2021 Artёm IG <github.com/rtmigo>
# SPDX-License-Identifier: MIT
from typing import Optional, Iterable, Tuple
from ._codec import VideoCodec
from ._codec_avc_preset import VcPreset
class Avc(VideoCodec):
    def __init__(self, preset: Optional[VcPreset] = None):
self.preset: Optional[VcPreset] = preset
def args(self) -> Iterable[Tuple[str, str]]:
yield "-codec:v", "libx264"
if self.preset is not None:
yield "-preset", str(self.preset.value)
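# Usage sketch (illustrative, not part of the original module): the generated
# pairs can be flattened into an ffmpeg argument list, e.g.
#     list(Avc().args()) == [('-codec:v', 'libx264')]
# and constructing Avc with a VcPreset member additionally yields a
# ('-preset', '<preset value>') pair. Which members VcPreset defines lives in
# ._codec_avc_preset and is not assumed here.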
| 26.894737
| 63
| 0.686888
|
2d05b0a21007de064483c38cdab66625ccea36cb
| 579
|
py
|
Python
|
opentmi_client/utils/__init__.py
|
OpenTMI/opentmi-pyclient
|
034c539d36fe13a2d6538ea421e4c01f00f5687d
|
[
"MIT"
] | null | null | null |
opentmi_client/utils/__init__.py
|
OpenTMI/opentmi-pyclient
|
034c539d36fe13a2d6538ea421e4c01f00f5687d
|
[
"MIT"
] | 36
|
2018-06-18T10:03:58.000Z
|
2022-03-30T00:16:31.000Z
|
opentmi_client/utils/__init__.py
|
OpenTMI/opentmi-pyclient
|
034c539d36fe13a2d6538ea421e4c01f00f5687d
|
[
"MIT"
] | 1
|
2019-04-17T08:49:24.000Z
|
2019-04-17T08:49:24.000Z
|
"""
Collect all utils APIs
"""
from opentmi_client.utils.tools import is_object_id
from opentmi_client.utils.tools import resolve_host, resolve_token
from opentmi_client.utils.tools import archive_files
from opentmi_client.utils.tools import remove_empty_from_dict
from opentmi_client.utils.logger import get_logger
from opentmi_client.utils.exceptions import OpentmiException
from opentmi_client.utils.exceptions import TransportException
from opentmi_client.utils.Query import Query, Find, Distinct
from opentmi_client.utils.decorators import setter_rules, requires_logged_in
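# The re-exports above form the package's public helper surface: generic tools
# (id checks, host/token resolution, archiving, dict cleanup), logging,
# exceptions, query builders, and decorators.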
| 44.538462
| 76
| 0.870466
|
e5ebb4bea71ef32e49e516fd60e6e56c20cd5fd1
| 383
|
py
|
Python
|
test/test_processor.py
|
eurotech/prom2teams
|
8554be07071518e7a736f0d0d7a974b97a1061c5
|
[
"MIT"
] | null | null | null |
test/test_processor.py
|
eurotech/prom2teams
|
8554be07071518e7a736f0d0d7a974b97a1061c5
|
[
"MIT"
] | null | null | null |
test/test_processor.py
|
eurotech/prom2teams
|
8554be07071518e7a736f0d0d7a974b97a1061c5
|
[
"MIT"
] | null | null | null |
import prom2teams.processor
def test_processor():
with open("test/test_processor/template.j2", 'r') as template_file:
template = template_file.read()
processor = prom2teams.processor.Processor(template, ["http://localhost:18080/"])
with open("test/test_processor/alert.json", 'r') as alert_file:
alert = alert_file.read()
processor.process(alert)
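# Note: this test only verifies that Processor.process() runs without raising;
# it makes no assertions about the processed result.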
| 29.461538
| 85
| 0.704961
|
a0c6a3e6d5889f217418c2a238fa431257825fd6
| 7,638
|
py
|
Python
|
IPython/kernel/client.py
|
chebee7i/ipython
|
85b169fa3afc3d374973295c7f1409ededddbaca
|
[
"BSD-3-Clause-Clear"
] | 26
|
2018-02-14T23:52:58.000Z
|
2021-08-16T13:50:03.000Z
|
IPython/kernel/client.py
|
chebee7i/ipython
|
85b169fa3afc3d374973295c7f1409ededddbaca
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
IPython/kernel/client.py
|
chebee7i/ipython
|
85b169fa3afc3d374973295c7f1409ededddbaca
|
[
"BSD-3-Clause-Clear"
] | 10
|
2018-08-13T19:38:39.000Z
|
2020-04-19T03:02:00.000Z
|
"""Base class to manage the interaction with a running kernel
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import zmq
# Local imports
from IPython.config.configurable import LoggingConfigurable
from IPython.utils.traitlets import (
Any, Instance, Type,
)
from .zmq.session import Session
from .channels import (
ShellChannel, IOPubChannel,
HBChannel, StdInChannel,
)
from .clientabc import KernelClientABC
from .connect import ConnectionFileMixin
#-----------------------------------------------------------------------------
# Main kernel client class
#-----------------------------------------------------------------------------
class KernelClient(LoggingConfigurable, ConnectionFileMixin):
"""Communicates with a single kernel on any host via zmq channels.
There are four channels associated with each kernel:
* shell: for request/reply calls to the kernel.
* iopub: for the kernel to publish results to frontends.
* hb: for monitoring the kernel's heartbeat.
* stdin: for frontends to reply to raw_input calls in the kernel.
The methods of the channels are exposed as methods of the client itself
(KernelClient.execute, complete, history, etc.).
See the channels themselves for documentation of these methods.
"""
# The PyZMQ Context to use for communication with the kernel.
context = Instance(zmq.Context)
def _context_default(self):
return zmq.Context.instance()
# The Session to use for communication with the kernel.
session = Instance(Session)
def _session_default(self):
return Session(parent=self)
# The classes to use for the various channels
shell_channel_class = Type(ShellChannel)
iopub_channel_class = Type(IOPubChannel)
stdin_channel_class = Type(StdInChannel)
hb_channel_class = Type(HBChannel)
# Protected traits
_shell_channel = Any
_iopub_channel = Any
_stdin_channel = Any
_hb_channel = Any
#--------------------------------------------------------------------------
# Channel proxy methods
#--------------------------------------------------------------------------
def _get_msg(channel, *args, **kwargs):
return channel.get_msg(*args, **kwargs)
def get_shell_msg(self, *args, **kwargs):
"""Get a message from the shell channel"""
return self.shell_channel.get_msg(*args, **kwargs)
def get_iopub_msg(self, *args, **kwargs):
"""Get a message from the iopub channel"""
return self.iopub_channel.get_msg(*args, **kwargs)
def get_stdin_msg(self, *args, **kwargs):
"""Get a message from the stdin channel"""
return self.stdin_channel.get_msg(*args, **kwargs)
#--------------------------------------------------------------------------
# Channel management methods
#--------------------------------------------------------------------------
def start_channels(self, shell=True, iopub=True, stdin=True, hb=True):
"""Starts the channels for this kernel.
This will create the channels if they do not exist and then start
them (their activity runs in a thread). If port numbers of 0 are
being used (random ports) then you must first call
:method:`start_kernel`. If the channels have been stopped and you
call this, :class:`RuntimeError` will be raised.
"""
if shell:
self.shell_channel.start()
for method in self.shell_channel.proxy_methods:
setattr(self, method, getattr(self.shell_channel, method))
if iopub:
self.iopub_channel.start()
for method in self.iopub_channel.proxy_methods:
setattr(self, method, getattr(self.iopub_channel, method))
if stdin:
self.stdin_channel.start()
for method in self.stdin_channel.proxy_methods:
setattr(self, method, getattr(self.stdin_channel, method))
self.shell_channel.allow_stdin = True
else:
self.shell_channel.allow_stdin = False
if hb:
self.hb_channel.start()
def stop_channels(self):
"""Stops all the running channels for this kernel.
This stops their event loops and joins their threads.
"""
if self.shell_channel.is_alive():
self.shell_channel.stop()
if self.iopub_channel.is_alive():
self.iopub_channel.stop()
if self.stdin_channel.is_alive():
self.stdin_channel.stop()
if self.hb_channel.is_alive():
self.hb_channel.stop()
@property
def channels_running(self):
"""Are any of the channels created and running?"""
return (self.shell_channel.is_alive() or self.iopub_channel.is_alive() or
self.stdin_channel.is_alive() or self.hb_channel.is_alive())
@property
def shell_channel(self):
"""Get the shell channel object for this kernel."""
if self._shell_channel is None:
url = self._make_url('shell')
self.log.debug("connecting shell channel to %s", url)
self._shell_channel = self.shell_channel_class(
self.context, self.session, url
)
return self._shell_channel
@property
def iopub_channel(self):
"""Get the iopub channel object for this kernel."""
if self._iopub_channel is None:
url = self._make_url('iopub')
self.log.debug("connecting iopub channel to %s", url)
self._iopub_channel = self.iopub_channel_class(
self.context, self.session, url
)
return self._iopub_channel
@property
def stdin_channel(self):
"""Get the stdin channel object for this kernel."""
if self._stdin_channel is None:
url = self._make_url('stdin')
self.log.debug("connecting stdin channel to %s", url)
self._stdin_channel = self.stdin_channel_class(
self.context, self.session, url
)
return self._stdin_channel
@property
def hb_channel(self):
"""Get the hb channel object for this kernel."""
if self._hb_channel is None:
url = self._make_url('hb')
self.log.debug("connecting heartbeat channel to %s", url)
self._hb_channel = self.hb_channel_class(
self.context, self.session, url
)
return self._hb_channel
def is_alive(self):
"""Is the kernel process still running?"""
if self._hb_channel is not None:
            # We did not start the kernel from this client, so use the
            # heartbeat channel to check whether it is still running.
return self._hb_channel.is_beating()
else:
# no heartbeat and not local, we can't tell if it's running,
# so naively return True
return True
#-----------------------------------------------------------------------------
# ABC Registration
#-----------------------------------------------------------------------------
KernelClientABC.register(KernelClient)
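#-----------------------------------------------------------------------------
# Usage sketch (illustrative only; not part of the original module)
#-----------------------------------------------------------------------------
def _example_client_usage(connection_file='kernel-1234.json'):
    """Hedged sketch of driving an already running kernel.
    The connection-file path is a hypothetical example. ``load_connection_file``
    comes from ConnectionFileMixin, and ``execute`` is proxied from the shell
    channel once the channels have been started.
    """
    client = KernelClient(connection_file=connection_file)
    client.load_connection_file()
    client.start_channels()
    try:
        client.execute("print('hello from the kernel')")
        return client.get_shell_msg(timeout=5)
    finally:
        client.stop_channels()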
| 36.898551
| 81
| 0.569259
|
1ef52dda36be3c428e8e73130f9cf7914e738d8b
| 37,443
|
py
|
Python
|
flytekit/core/promise.py
|
latchbio/flaightkit
|
365e339cea21965680a9252b91085f6a5c3207d5
|
[
"Apache-2.0"
] | null | null | null |
flytekit/core/promise.py
|
latchbio/flaightkit
|
365e339cea21965680a9252b91085f6a5c3207d5
|
[
"Apache-2.0"
] | null | null | null |
flytekit/core/promise.py
|
latchbio/flaightkit
|
365e339cea21965680a9252b91085f6a5c3207d5
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
import collections
import dataclasses
import typing
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union
from typing_extensions import Protocol
from flytekit.common import constants as _common_constants
from flytekit.common.exceptions import user as _user_exceptions
from flytekit.core import context_manager as _flyte_context
from flytekit.core import interface as flyte_interface
from flytekit.core import type_engine
from flytekit.core.context_manager import BranchEvalMode, ExecutionState, FlyteContext, FlyteContextManager
from flytekit.core.interface import Interface
from flytekit.core.node import Node
from flytekit.core.type_engine import DataclassTransformer, DictTransformer, ListTransformer, TypeEngine
from flytekit.models import interface as _interface_models
from flytekit.models import literals as _literal_models
from flytekit.models import literals as _literals_models
from flytekit.models import types as _type_models
from flytekit.models import types as type_models
from flytekit.models.core import workflow as _workflow_model
from flytekit.models.literals import Primitive
_type = type
def translate_inputs_to_literals(
ctx: FlyteContext,
incoming_values: Dict[str, Any],
flyte_interface_types: Dict[str, _interface_models.Variable],
native_types: Dict[str, type],
) -> Dict[str, _literal_models.Literal]:
"""
The point of this function is to extract out Literals from a collection of either Python native values (which would
be converted into Flyte literals) or Promises (the literals in which would just get extracted).
When calling a task inside a workflow, a user might do something like this.
def my_wf(in1: int) -> int:
a = task_1(in1=in1)
b = task_2(in1=5, in2=a)
return b
If this is the case, when task_2 is called in local workflow execution, we'll need to translate the Python native
literal 5 to a Flyte literal.
More interesting is this:
def my_wf(in1: int, in2: int) -> int:
a = task_1(in1=in1)
b = task_2(in1=5, in2=[a, in2])
return b
Here, in task_2, during execution we'd have a list of Promises. We have to make sure to give task2 a Flyte
LiteralCollection (Flyte's name for list), not a Python list of Flyte literals.
This helper function is used both when sorting out inputs to a task, as well as outputs of a function.
:param ctx: Context needed in case a non-primitive literal needs to be translated to a Flyte literal (like a file)
:param incoming_values: This is a map of your task's input or wf's output kwargs basically
:param flyte_interface_types: One side of an :py:class:`flytekit.models.interface.TypedInterface` basically.
:param native_types: Map to native Python type.
"""
def extract_value(
ctx: FlyteContext, input_val: Any, val_type: type, flyte_literal_type: _type_models.LiteralType
) -> _literal_models.Literal:
if flyte_literal_type.sum is not None:
for s in flyte_literal_type.sum.summands:
try:
return extract_value(ctx, input_val, val_type, s)
                except Exception:
continue
raise Exception(f"Could not extract value for sum type: '{flyte_literal_type}' from '{input_val}'")
if input_val is None:
return _literal_models.Literal(scalar=_literal_models.Scalar(none_type=_literal_models.Void()))
elif isinstance(input_val, list):
if flyte_literal_type.collection_type is None:
raise Exception(f"Not a collection type {flyte_literal_type} but got a list {input_val}")
try:
sub_type = ListTransformer.get_sub_type(val_type)
except ValueError:
if len(input_val) == 0:
raise
sub_type = type(input_val[0])
literals = [extract_value(ctx, v, sub_type, flyte_literal_type.collection_type) for v in input_val]
return _literal_models.Literal(collection=_literal_models.LiteralCollection(literals=literals))
elif isinstance(input_val, dict):
if (
flyte_literal_type.map_value_type is None
and flyte_literal_type.simple != _type_models.SimpleType.STRUCT
):
raise Exception(f"Not a map type {flyte_literal_type} but got a map {input_val}")
k_type, sub_type = DictTransformer.get_dict_types(val_type)
if flyte_literal_type.simple == _type_models.SimpleType.STRUCT:
return TypeEngine.to_literal(ctx, input_val, type(input_val), flyte_literal_type)
else:
literals = {
k: extract_value(ctx, v, sub_type, flyte_literal_type.map_value_type) for k, v in input_val.items()
}
return _literal_models.Literal(map=_literal_models.LiteralMap(literals=literals))
elif isinstance(input_val, Promise):
# In the example above, this handles the "in2=a" type of argument
return input_val.val
elif isinstance(input_val, VoidPromise):
raise AssertionError(
f"Outputs of a non-output producing task {input_val.task_name} cannot be passed to another task."
)
elif isinstance(input_val, tuple):
            raise AssertionError(
                "Tuples are not a supported type for individual values in Flyte - got a tuple -"
                f" {input_val}. If using a named tuple in an inner task, please de-reference the "
                "actual attribute that you want to use. For example, with NamedTuple('OP', x=int), "
                "return v.x instead of v, even if it has a single element"
)
elif dataclasses.is_dataclass(input_val):
if flyte_literal_type.record is None:
raise Exception(f"Got a dataclass for a non-record type: '{input_val}' expected '{flyte_literal_type}'")
res = {}
for f in dataclasses.fields(input_val):
res[f.name] = extract_value(
ctx, getattr(input_val, f.name), f.type, flyte_literal_type.record.field_types[f.name]
)
return _literal_models.Literal(record=_literal_models.Record(fields=res))
else:
# This handles native values, the 5 example
return TypeEngine.to_literal(ctx, input_val, val_type, flyte_literal_type)
if incoming_values is None:
raise ValueError("Incoming values cannot be None, must be a dict")
result = {} # So as to not overwrite the input_kwargs
for k, v in incoming_values.items():
if k not in flyte_interface_types:
raise ValueError(f"Received unexpected keyword argument {k}")
var = flyte_interface_types[k]
t = native_types[k]
result[k] = extract_value(ctx, v, t, var.type)
return result
def get_primitive_val(prim: Primitive) -> Any:
if prim.integer:
return prim.integer
if prim.datetime:
return prim.datetime
if prim.boolean:
return prim.boolean
if prim.duration:
return prim.duration
if prim.string_value:
return prim.string_value
return prim.float_value
class ConjunctionOps(Enum):
AND = "and"
OR = "or"
class ComparisonOps(Enum):
EQ = "=="
NE = "!="
GT = ">"
GE = ">="
LT = "<"
LE = "<="
_comparators = {
ComparisonOps.EQ: lambda x, y: x == y,
ComparisonOps.NE: lambda x, y: x != y,
ComparisonOps.GT: lambda x, y: x > y,
ComparisonOps.GE: lambda x, y: x >= y,
ComparisonOps.LT: lambda x, y: x < y,
ComparisonOps.LE: lambda x, y: x <= y,
}
class ComparisonExpression(object):
"""
ComparisonExpression refers to an expression of the form (lhs operator rhs), where lhs and rhs are operands
    and operator can be any comparison operator such as <, >, <=, >=, ==, !=
"""
def __init__(self, lhs: Union["Promise", Any], op: ComparisonOps, rhs: Union["Promise", Any]):
self._op = op
self._lhs = None
self._rhs = None
if isinstance(lhs, Promise):
self._lhs = lhs
if lhs.is_ready:
if lhs.val.scalar is None or lhs.val.scalar.primitive is None:
raise ValueError("Only primitive values can be used in comparison")
if isinstance(rhs, Promise):
self._rhs = rhs
if rhs.is_ready:
if rhs.val.scalar is None or rhs.val.scalar.primitive is None:
raise ValueError("Only primitive values can be used in comparison")
if self._lhs is None:
self._lhs = type_engine.TypeEngine.to_literal(FlyteContextManager.current_context(), lhs, type(lhs), None)
if self._rhs is None:
self._rhs = type_engine.TypeEngine.to_literal(FlyteContextManager.current_context(), rhs, type(rhs), None)
@property
def rhs(self) -> Union["Promise", _literal_models.Literal]:
return self._rhs
@property
def lhs(self) -> Union["Promise", _literal_models.Literal]:
return self._lhs
@property
def op(self) -> ComparisonOps:
return self._op
def eval(self) -> bool:
if isinstance(self.lhs, Promise):
lhs = self.lhs.eval()
else:
lhs = get_primitive_val(self.lhs.scalar.primitive)
if isinstance(self.rhs, Promise):
rhs = self.rhs.eval()
else:
rhs = get_primitive_val(self.rhs.scalar.primitive)
return _comparators[self.op](lhs, rhs)
def __and__(self, other):
return ConjunctionExpression(lhs=self, op=ConjunctionOps.AND, rhs=other)
def __or__(self, other):
return ConjunctionExpression(lhs=self, op=ConjunctionOps.OR, rhs=other)
def __bool__(self):
        raise ValueError(
            "Cannot perform truth value testing;"
            " this is a limitation in Python. For logical `and`/`or`, use `&`/`|` (bitwise) instead."
            f" Expr: {self}"
)
def __repr__(self):
return f"Comp({self._lhs} {self._op.value} {self._rhs})"
class ConjunctionExpression(object):
"""
A Conjunction Expression is an expression of the form either (A and B) or (A or B).
    where A, B are two expressions (comparisons or conjunctions) and (and, or) are logical truth operators.
A conjunctionExpression evaluates to True or False depending on the logical operator and the truth values of
each of the expressions A & B
"""
def __init__(
self,
lhs: Union[ComparisonExpression, "ConjunctionExpression"],
op: ConjunctionOps,
rhs: Union[ComparisonExpression, "ConjunctionExpression"],
):
self._lhs = lhs
self._rhs = rhs
self._op = op
@property
def rhs(self) -> Union[ComparisonExpression, "ConjunctionExpression"]:
return self._rhs
@property
def lhs(self) -> Union[ComparisonExpression, "ConjunctionExpression"]:
return self._lhs
@property
def op(self) -> ConjunctionOps:
return self._op
def eval(self) -> bool:
l_eval = self.lhs.eval()
if self.op == ConjunctionOps.AND and l_eval is False:
return False
if self.op == ConjunctionOps.OR and l_eval is True:
return True
r_eval = self.rhs.eval()
if self.op == ConjunctionOps.AND:
return l_eval and r_eval
return l_eval or r_eval
def __and__(self, other: Union[ComparisonExpression, "ConjunctionExpression"]):
return ConjunctionExpression(lhs=self, op=ConjunctionOps.AND, rhs=other)
def __or__(self, other: Union[ComparisonExpression, "ConjunctionExpression"]):
return ConjunctionExpression(lhs=self, op=ConjunctionOps.OR, rhs=other)
def __bool__(self):
        raise ValueError(
            "Cannot perform truth value testing;"
            " this is a limitation in Python. For logical `and`/`or`, use `&`/`|` (bitwise) instead. Refer to PEP 335."
)
def __repr__(self):
return f"( {self._lhs} {self._op} {self._rhs} )"
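# Illustrative sketch (not original code): comparison expressions are built with
# the rich-comparison operators on Promises and combined with the bitwise
# operators, never with Python's `and`/`or` (those trigger __bool__ and raise).
# Given two ready, primitive-valued promises `a` and `b`:
#     expr = (a == 5) & (b > 3)   # ConjunctionExpression with ConjunctionOps.AND
#     expr.eval()                 # -> bool; the right-hand side is skipped when
#                                 #    the left-hand side already decides the result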
# TODO: The NodeOutput object, which this Promise wraps, has an sdk_type. Since we're no longer using sdk types,
# we should consider adding a literal type to this object as well for downstream checking when Bindings are created.
class Promise(object):
"""
This object is a wrapper and exists for three main reasons. Let's assume we're dealing with a task like ::
@task
def t1() -> (int, str): ...
#. Handling the duality between compilation and local execution - when the task function is run in a local execution
mode inside a workflow function, a Python integer and string are produced. When the task is being compiled as
part of the workflow, the task call creates a Node instead, and the task returns two Promise objects that
point to that Node.
#. One needs to be able to call ::
x = t1().with_overrides(...)
If the task returns an integer or a ``(int, str)`` tuple like ``t1`` above, calling ``with_overrides`` on the
result would throw an error. This Promise object adds that.
#. Assorted handling for conditionals.
"""
# TODO: Currently, NodeOutput we're creating is the slimmer core package Node class, but since only the
# id is used, it's okay for now. Let's clean all this up though.
def __init__(self, var: str, val: Union[NodeOutput, _literal_models.Literal]):
self._var = var
self._promise_ready = True
self._val = val
if val and isinstance(val, NodeOutput):
self._ref = val
self._promise_ready = False
self._val = None
def __hash__(self):
return hash(id(self))
def with_var(self, new_var: str) -> Promise:
if self.is_ready:
return Promise(var=new_var, val=self.val)
return Promise(var=new_var, val=self.ref)
@property
def is_ready(self) -> bool:
"""
Returns if the Promise is READY (is not a reference and the val is actually ready)
Usage:
p = Promise(...)
...
if p.is_ready():
print(p.val)
else:
print(p.ref)
"""
return self._promise_ready
@property
def val(self) -> _literal_models.Literal:
"""
        If the promise is ready then this holds the actual evaluated value in Flyte's type system
"""
return self._val
@property
def ref(self) -> NodeOutput:
"""
If the promise is NOT READY / Incomplete, then it maps to the origin node that owns the promise
"""
return self._ref
@property
def var(self) -> str:
"""
Name of the variable bound with this promise
"""
return self._var
def eval(self) -> Any:
if not self._promise_ready or self._val is None:
raise ValueError("Cannot Eval with incomplete promises")
if self.val.scalar is None or self.val.scalar.primitive is None:
raise ValueError("Eval can be invoked for primitive types only")
return get_primitive_val(self.val.scalar.primitive)
def is_(self, v: bool) -> ComparisonExpression:
return ComparisonExpression(self, ComparisonOps.EQ, v)
def is_false(self) -> ComparisonExpression:
return self.is_(False)
def is_true(self):
return self.is_(True)
def __eq__(self, other) -> ComparisonExpression:
return ComparisonExpression(self, ComparisonOps.EQ, other)
def __ne__(self, other) -> ComparisonExpression:
return ComparisonExpression(self, ComparisonOps.NE, other)
def __gt__(self, other) -> ComparisonExpression:
return ComparisonExpression(self, ComparisonOps.GT, other)
def __ge__(self, other) -> ComparisonExpression:
return ComparisonExpression(self, ComparisonOps.GE, other)
def __lt__(self, other) -> ComparisonExpression:
return ComparisonExpression(self, ComparisonOps.LT, other)
def __le__(self, other) -> ComparisonExpression:
return ComparisonExpression(self, ComparisonOps.LE, other)
def __bool__(self):
        raise ValueError(
            "Flytekit does not support unary expressions or truth value testing on Promises;"
            " this is a limitation in Python. For logical `and`/`or`, use `&`/`|` (bitwise) instead."
)
def __and__(self, other):
raise ValueError("Cannot perform Logical AND of Promise with other")
def __or__(self, other):
raise ValueError("Cannot perform Logical OR of Promise with other")
def with_overrides(self, *args, **kwargs):
if not self.is_ready:
# TODO, this should be forwarded, but right now this results in failure and we want to test this behavior
print(f"Forwarding to node {self.ref.node.id}")
self.ref.node.with_overrides(*args, **kwargs)
return self
def __repr__(self):
if self._promise_ready:
return f"Resolved({self._var}={self._val})"
return f"Promise(node:{self.ref.node_id}.{self._var})"
def __str__(self):
return str(self.__repr__())
def create_native_named_tuple(
ctx: FlyteContext, promises: Union[Promise, typing.List[Promise]], entity_interface: Interface
) -> Optional[Tuple]:
"""
    Creates and returns a named tuple with all variables that match the expected named outputs. This makes
it possible to run things locally and expect a more native behavior, i.e. address elements of a named tuple
by name.
"""
if entity_interface is None:
raise ValueError("Interface of the entity is required to generate named outputs")
if promises is None:
return None
if isinstance(promises, Promise):
v = [v for k, v in entity_interface.outputs.items()][0] # get output native type
return TypeEngine.to_python_value(ctx, promises.val, v)
if len(promises) == 0:
return None
named_tuple_name = "DefaultNamedTupleOutput"
if entity_interface.output_tuple_name:
named_tuple_name = entity_interface.output_tuple_name
outputs = {}
for p in promises:
if not isinstance(p, Promise):
            raise AssertionError(
                "Workflow outputs can only be promises that are returned by tasks. Found a value of "
f"type {type(p)}. Workflows cannot return local variables or constants."
)
outputs[p.var] = TypeEngine.to_python_value(ctx, p.val, entity_interface.outputs[p.var])
# Should this class be part of the Interface?
t = collections.namedtuple(named_tuple_name, list(outputs.keys()))
return t(**outputs)
# To create a class that is a named tuple, we might have to create namedtuplemeta and manipulate the tuple
def create_task_output(
promises: Optional[Union[List[Promise], Promise]], entity_interface: Optional[Interface] = None
) -> Optional[Union[Tuple[Promise], Promise]]:
# TODO: Add VoidPromise here to simplify things at call site. Consider returning for [] below as well instead of
# raising an exception.
if promises is None:
return None
if isinstance(promises, Promise):
return promises
if len(promises) == 0:
        raise Exception(
            "This function should not be called with an empty list. It should have been handled with a "
"VoidPromise at this function's call-site."
)
if len(promises) == 1:
if not entity_interface:
return promises[0]
# See transform_signature_to_interface for more information, we're using the existence of a name as a proxy
# for the user having specified a one-element typing.NamedTuple, which means we should _not_ extract it. We
# should still return a tuple but it should be one of ours.
if not entity_interface.output_tuple_name:
return promises[0]
# More than one promise, let us wrap it into a tuple
# Start with just the var names in the promises
variables = [p.var for p in promises]
# These should be OrderedDicts so it should be safe to iterate over the keys.
if entity_interface:
variables = [k for k in entity_interface.outputs.keys()]
named_tuple_name = "DefaultNamedTupleOutput"
if entity_interface and entity_interface.output_tuple_name:
named_tuple_name = entity_interface.output_tuple_name
# Should this class be part of the Interface?
class Output(collections.namedtuple(named_tuple_name, variables)):
def with_overrides(self, *args, **kwargs):
val = self.__getattribute__(self._fields[0])
val.with_overrides(*args, **kwargs)
return self
@property
def ref(self):
for p in promises:
if p.ref:
return p.ref
return None
def runs_before(self, other: Any):
"""
This function is just here to allow local workflow execution to run. See the corresponding function in
flytekit.core.node.Node for more information. Local workflow execution in the manual ``create_node``
paradigm is already determined by the order in which the nodes were created.
"""
# TODO: If possible, add a check and raise an Exception if create_node was not called in the correct order.
return self
def __rshift__(self, other: Any):
# See comment for runs_before
return self
return Output(*promises)
def binding_data_from_python_std(
ctx: _flyte_context.FlyteContext,
expected_literal_type: _type_models.LiteralType,
t_value: Any,
t_value_type: type,
) -> _literals_models.BindingData:
# This handles the case where the given value is the output of another task
if isinstance(t_value, Promise):
if not t_value.is_ready:
return _literals_models.BindingData(promise=t_value.ref)
elif isinstance(t_value, VoidPromise):
raise AssertionError(
f"Cannot pass output from task {t_value.task_name} that produces no outputs to a downstream task"
)
elif isinstance(t_value, list):
if expected_literal_type.collection_type is None:
raise AssertionError(f"this should be a list and it is not: {type(t_value)} vs {expected_literal_type}")
sub_type = ListTransformer.get_sub_type(t_value_type)
collection = _literals_models.BindingDataCollection(
bindings=[
binding_data_from_python_std(ctx, expected_literal_type.collection_type, t, sub_type) for t in t_value
]
)
return _literals_models.BindingData(collection=collection)
elif dataclasses.is_dataclass(t_value):
fields: Dict[str, _literals_models.BindingData] = {}
for f in dataclasses.fields(t_value):
            lit_type = TypeEngine.to_literal_type(f.type)
            fields[f.name] = binding_data_from_python_std(ctx, lit_type, getattr(t_value, f.name), f.type)
return _literal_models.BindingData(record=_literal_models.BindingRecord(fields=fields))
elif isinstance(t_value, dict):
if (
expected_literal_type.map_value_type is None
and expected_literal_type.simple != _type_models.SimpleType.STRUCT
):
raise AssertionError(
f"this should be a Dictionary type and it is not: {type(t_value)} vs {expected_literal_type}"
)
k_type, v_type = DictTransformer.get_dict_types(t_value_type)
if expected_literal_type.simple == _type_models.SimpleType.STRUCT:
lit = TypeEngine.to_literal(ctx, t_value, type(t_value), expected_literal_type)
return _literals_models.BindingData(scalar=lit.scalar)
else:
m = _literals_models.BindingDataMap(
bindings={
k: binding_data_from_python_std(ctx, expected_literal_type.map_value_type, v, v_type)
for k, v in t_value.items()
}
)
return _literals_models.BindingData(map=m)
elif isinstance(t_value, tuple):
        raise AssertionError(
            "Tuples are not a supported type for individual values in Flyte - got a tuple -"
            f" {t_value}. If using a named tuple in an inner task, please de-reference the "
            "actual attribute that you want to use. For example, with NamedTuple('OP', x=int), "
            "return v.x instead of v, even if it has a single element"
)
if hasattr(t_value_type, "__origin__") and t_value_type.__origin__ == Union:
if expected_literal_type.sum is None:
raise AssertionError(
f"Expected type is not a sum type: '{expected_literal_type}' (python type '{t_value_type}')"
)
for t in t_value_type.__args__:
try:
typ = None
ltyp = TypeEngine.to_literal_type(t)
for (
s
) in expected_literal_type.sum.summands: # todo(maximsmol): O(n^2) algo, not sure if Type is hashable
if ltyp != s:
continue
typ = s
if typ is None:
continue
return binding_data_from_python_std(ctx, typ, t_value, t)
except ValueError as e:
if "not supported" not in str(e):
raise e
continue
raise ValueError(f"Could not find suitable union instantiation: '{t_value}' ('{t_value_type}')")
# This is the scalar case - e.g. my_task(in1=5)
scalar = TypeEngine.to_literal(ctx, t_value, t_value_type, expected_literal_type).scalar
return _literals_models.BindingData(scalar=scalar)
def binding_from_python_std(
ctx: _flyte_context.FlyteContext,
var_name: str,
expected_literal_type: _type_models.LiteralType,
t_value: typing.Any,
t_value_type: type,
) -> _literals_models.Binding:
binding_data = binding_data_from_python_std(ctx, expected_literal_type, t_value, t_value_type)
return _literals_models.Binding(var=var_name, binding=binding_data)
def to_binding(p: Promise) -> _literals_models.Binding:
return _literals_models.Binding(var=p.var, binding=_literals_models.BindingData(promise=p.ref))
class VoidPromise(object):
"""
This object is returned for tasks that do not return any outputs (declared interface is empty)
VoidPromise cannot be interacted with and does not allow comparisons or any operations
"""
def __init__(self, task_name: str):
self._task_name = task_name
def runs_before(self, *args, **kwargs):
"""
This is a placeholder and should do nothing. It is only here to enable local execution of workflows
where a task returns nothing.
"""
def __rshift__(self, *args, **kwargs):
... # See runs_before
@property
def task_name(self):
return self._task_name
def __eq__(self, other):
raise AssertionError(f"Task {self._task_name} returns nothing, NoneType return cannot be used")
def __and__(self, other):
raise AssertionError(f"Task {self._task_name} returns nothing, NoneType return cannot be used")
def __or__(self, other):
raise AssertionError(f"Task {self._task_name} returns nothing, NoneType return cannot be used")
def __le__(self, other):
raise AssertionError(f"Task {self._task_name} returns nothing, NoneType return cannot be used")
def __ge__(self, other):
raise AssertionError(f"Task {self._task_name} returns nothing, NoneType return cannot be used")
def __gt__(self, other):
raise AssertionError(f"Task {self._task_name} returns nothing, NoneType return cannot be used")
def __lt__(self, other):
raise AssertionError(f"Task {self._task_name} returns nothing, NoneType return cannot be used")
def __add__(self, other):
raise AssertionError(f"Task {self._task_name} returns nothing, NoneType return cannot be used")
def __cmp__(self, other):
raise AssertionError(f"Task {self._task_name} returns nothing, NoneType return cannot be used")
def __bool__(self):
raise AssertionError(f"Task {self._task_name} returns nothing, NoneType return cannot be used")
def __mod__(self, other):
raise AssertionError(f"Task {self._task_name} returns nothing, NoneType return cannot be used")
def __xor__(self, other):
raise AssertionError(f"Task {self._task_name} returns nothing, NoneType return cannot be used")
def __str__(self):
raise AssertionError(f"Task {self._task_name} returns nothing, NoneType return cannot be used")
def __repr__(self):
raise AssertionError(f"Task {self._task_name} returns nothing, NoneType return cannot be used")
class NodeOutput(type_models.OutputReference):
def __init__(self, node: Node, var: str):
"""
:param node:
:param var: The name of the variable this NodeOutput references
"""
self._node = node
super(NodeOutput, self).__init__(self._node.id, var)
@property
def node_id(self):
"""
Override the underlying node_id property to refer to SdkNode.
:rtype: Text
"""
return self.node.id
@property
def node(self) -> Node:
"""Return Node object."""
return self._node
def __repr__(self) -> str:
s = f"Node({self.node if self.node.id is not None else None}:{self.var})"
return s
class SupportsNodeCreation(Protocol):
@property
def name(self) -> str:
...
@property
def python_interface(self) -> flyte_interface.Interface:
...
def construct_node_metadata(self) -> _workflow_model.NodeMetadata:
...
def create_and_link_node(
ctx: FlyteContext,
entity: SupportsNodeCreation,
**kwargs,
):
"""
This method is used to generate a node with bindings. This is not used in the execution path.
"""
if ctx.compilation_state is None:
raise _user_exceptions.FlyteAssertion("Cannot create node when not compiling...")
used_inputs = set()
bindings = []
interface = entity.python_interface
typed_interface = flyte_interface.transform_interface_to_typed_interface(interface)
# Mypy needs some extra help to believe that `typed_interface` will not be `None`
assert typed_interface is not None
for k in sorted(interface.inputs):
var = typed_interface.inputs[k]
if k not in kwargs:
raise _user_exceptions.FlyteAssertion("Input was not specified for: {} of type {}".format(k, var.type))
v = kwargs[k]
# This check ensures that tuples are not passed into a function, as tuples are not supported by Flyte
# Usually a Tuple will indicate that multiple outputs from a previous task were accidentally passed
# into the function.
if isinstance(v, tuple):
            raise AssertionError(
                f"Variable({k}) for function({entity.name}) cannot receive a multi-valued tuple {v}."
                " Check whether the predecessor function is returning more than one value."
)
bindings.append(
binding_from_python_std(
ctx, var_name=k, expected_literal_type=var.type, t_value=v, t_value_type=interface.inputs[k]
)
)
used_inputs.add(k)
extra_inputs = used_inputs ^ set(kwargs.keys())
if len(extra_inputs) > 0:
raise _user_exceptions.FlyteAssertion(
"Too many inputs were specified for the interface. Extra inputs were: {}".format(extra_inputs)
)
# Detect upstream nodes
# These will be our core Nodes until we can amend the Promise to use NodeOutputs that reference our Nodes
upstream_nodes = list(
set(
[
input_val.ref.node
for input_val in kwargs.values()
if isinstance(input_val, Promise) and input_val.ref.node_id != _common_constants.GLOBAL_INPUT_NODE_ID
]
)
)
flytekit_node = Node(
# TODO: Better naming, probably a derivative of the function name.
id=f"{ctx.compilation_state.prefix}n{len(ctx.compilation_state.nodes)}",
metadata=entity.construct_node_metadata(),
bindings=sorted(bindings, key=lambda b: b.var),
upstream_nodes=upstream_nodes,
flyte_entity=entity,
)
ctx.compilation_state.add_node(flytekit_node)
if len(typed_interface.outputs) == 0:
return VoidPromise(entity.name)
# Create a node output object for each output, they should all point to this node of course.
node_outputs = []
for output_name, output_var_model in typed_interface.outputs.items():
# TODO: If node id gets updated later, we have to make sure to update the NodeOutput model's ID, which
# is currently just a static str
node_outputs.append(Promise(output_name, NodeOutput(node=flytekit_node, var=output_name)))
# Don't print this node here: it can crash because sdk_node._upstream_node_ids might be None, and the IDL code will break.
return create_task_output(node_outputs, interface)
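# --- Illustrative note, not part of the original source ---
# With an empty compilation prefix the generated node ids are "n0", "n1", ... in
# creation order (see the id f-string above), and bindings are sorted by variable
# name, so the compiled node is deterministic regardless of the keyword-argument
# order used at the call site.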
class LocallyExecutable(Protocol):
def local_execute(self, ctx: FlyteContext, **kwargs) -> Union[Tuple[Promise], Promise, VoidPromise]:
...
def flyte_entity_call_handler(entity: Union[SupportsNodeCreation, LocallyExecutable], *args, **kwargs):
"""
This function is the call handler for tasks, workflows, and launch plans (which redirects to the underlying
workflow). The logic is the same for all three, but we did not want to create a base class, hence this separate
method. When one of these entities is called (i.e. its __call__ is invoked), there are three things we may do:
#. Compilation Mode - this happens when the function is called as part of a workflow (potentially
dynamic task?). Instead of running the user function, produce promise objects and create a node.
#. Workflow Execution Mode - when a workflow is being run locally. Even though workflows are functions
and everything should be able to be passed through naturally, we'll want to wrap output values of the
function into objects, so that potential .with_cpu or other ancillary functions can be attached to do
nothing. Subsequent tasks will have to know how to unwrap these. If by chance a non-Flyte task uses a
task output as an input, things probably will fail pretty obviously.
#. Start a local execution - This means that we're not already in a local workflow execution, which means that
we should expect inputs to be native Python values and that we should return Python native values.
"""
# Sanity checks
# Only keyword args allowed
if len(args) > 0:
raise _user_exceptions.FlyteAssertion(
f"When calling tasks, only keyword args are supported. "
f"Aborting execution as detected {len(args)} positional args {args}"
)
# Make sure arguments are part of interface
for k, v in kwargs.items():
if k not in entity.python_interface.inputs:
raise ValueError(f"Received unexpected keyword argument {k}")
ctx = FlyteContextManager.current_context()
if ctx.compilation_state is not None and ctx.compilation_state.mode == 1:
return create_and_link_node(ctx, entity=entity, **kwargs)
elif ctx.execution_state is not None and ctx.execution_state.mode == ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION:
if ctx.execution_state.branch_eval_mode == BranchEvalMode.BRANCH_SKIPPED:
if len(entity.python_interface.inputs) > 0 or len(entity.python_interface.outputs) > 0:
output_names = list(entity.python_interface.outputs.keys())
if len(output_names) == 0:
return VoidPromise(entity.name)
vals = [Promise(var, None) for var in output_names]
return create_task_output(vals, entity.python_interface)
else:
return None
return entity.local_execute(ctx, **kwargs)
else:
with FlyteContextManager.with_context(
ctx.with_execution_state(
ctx.new_execution_state().with_params(mode=ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION)
)
) as child_ctx:
result = entity.local_execute(child_ctx, **kwargs)
expected_outputs = len(entity.python_interface.outputs)
if expected_outputs == 0:
if result is None or isinstance(result, VoidPromise):
return None
else:
raise Exception(f"Workflow local execution expected 0 outputs but something received {result}")
if (1 < expected_outputs == len(result)) or (result is not None and expected_outputs == 1):
return create_native_named_tuple(ctx, result, entity.python_interface)
raise ValueError(
f"Expected outputs and actual outputs do not match. Result {result}. "
f"Python interface: {entity.python_interface}"
)
| 40.218045 | 120 | 0.660257 |
ea87d07a557ceafc0a1084e64399446a45e55ae1 | 1,918 | py | Python | vscode/extensions/ms-python.python-2020.2.64397/pythonFiles/lib/python/new_ptvsd/no_wheels/ptvsd/server/options.py | Adespinoza/dotfiles | e2509402a7fd2623a3ea401b6f9fcbf6a372fc60 | ["CC0-1.0"] | null | null | null | vscode/extensions/ms-python.python-2020.2.64397/pythonFiles/lib/python/new_ptvsd/no_wheels/ptvsd/server/options.py | Adespinoza/dotfiles | e2509402a7fd2623a3ea401b6f9fcbf6a372fc60 | ["CC0-1.0"] | 8 | 2020-07-19T23:39:31.000Z | 2022-02-27T01:38:46.000Z | vscode/extensions/ms-python.python-2020.2.64397/pythonFiles/lib/python/new_ptvsd/wheels/ptvsd/server/options.py | Adespinoza/dotfiles | e2509402a7fd2623a3ea401b6f9fcbf6a372fc60 | ["CC0-1.0"] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
"""Global server options that are set via command line, environment variables,
or configuration files.
"""
target_kind = None
"""One of: None, 'file', 'module', 'code', or 'pid'.
"""
target = None
"""Specifies what to debug.
If target_kind is None, then target is None, indicating that the current process
is the one that is initiating debugger attach to itself.
If target_kind is 'file', then target is a path to the file to run.
If target_kind is 'module', then target is the qualified name of the module to run.
If target_kind is 'code', then target is the code to run.
If target_kind is 'pid', then target is the process ID to attach to.
"""
host = "127.0.0.1"
"""Name or IP address of the network interface used by ptvsd.server. If runing in server
mode, this is the interface on which it listens for incoming connections. If running
in client mode, this is the interface to which it connects.
"""
port = 5678
"""Port number used by ptvsd.server. If running in server mode, this is the port on which it
listens for incoming connections. If running in client mode, this is the port to which it
connects.
"""
client = False
"""If True, this instance of ptvsd is operating in client mode - i.e. it connects
to the IDE, instead of waiting for an incoming connection from the IDE.
"""
wait = False
"""If True, wait until the debugger is connected before running any code."
"""
multiprocess = True
"""Whether this ptvsd instance is running in multiprocess mode, detouring creation
of new processes and enabling debugging for them.
"""
client_access_token = None
"""Access token to authenticate with the adapter."""
| 33.068966 | 93 | 0.73097 |
2ff26365e68a68eda44c728b29962e4d0f90a546 | 1,067 | py | Python | test_remove_duplicate_letter.py | brigitteunger/katas | 3f9af88fe5d98753360457084741f573c863dc25 | ["MIT"] | null | null | null | test_remove_duplicate_letter.py | brigitteunger/katas | 3f9af88fe5d98753360457084741f573c863dc25 | ["MIT"] | null | null | null | test_remove_duplicate_letter.py | brigitteunger/katas | 3f9af88fe5d98753360457084741f573c863dc25 | ["MIT"] | null | null | null |
import unittest
import string
from collections import Counter
class Solution:
def removeDuplicateLetters(self, s: str) -> str:
stack = []
counter = Counter(s)
visited = {key: False for key in counter.keys()}
for letter in s:
counter[letter] -= 1
if visited[letter]:
continue
while stack and letter < stack[-1] and counter[stack[-1]] > 0:
visited[stack[-1]] = False
stack.pop()
stack.append(letter)
visited[letter] = True
return "".join(stack)
class TestRemoveDuplicateLetters(unittest.TestCase):
def setUp(self):
self.sol = Solution()
def testRemoveDuplicateLetters_1(self):
s = "bcabc"
word = self.sol.removeDuplicateLetters(s)
self.assertEqual(word, "abc")
def testRemoveDuplicateLetters_2(self):
s = "cbacdcbc"
word = self.sol.removeDuplicateLetters(s)
self.assertEqual(word, "acdb")
if __name__ == "__main__":
unittest.main()
| 23.711111 | 74 | 0.585754 |