Dataset schema (one row per source file):

| column | type | min | max |
|---|---|---|---|
| hexsha | string (length) | 40 | 40 |
| size | int64 | 3 | 1.03M |
| ext | string (10 classes) | | |
| lang | string (1 class) | | |
| max_stars_repo_path | string (length) | 3 | 972 |
| max_stars_repo_name | string (length) | 6 | 130 |
| max_stars_repo_head_hexsha | string (length) | 40 | 78 |
| max_stars_repo_licenses | list (length) | 1 | 10 |
| max_stars_count | int64, nullable | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | string (length), nullable | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | string (length), nullable | 24 | 24 |
| max_issues_repo_path | string (length) | 3 | 972 |
| max_issues_repo_name | string (length) | 6 | 130 |
| max_issues_repo_head_hexsha | string (length) | 40 | 78 |
| max_issues_repo_licenses | list (length) | 1 | 10 |
| max_issues_count | int64, nullable | 1 | 116k |
| max_issues_repo_issues_event_min_datetime | string (length), nullable | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | string (length), nullable | 24 | 24 |
| max_forks_repo_path | string (length) | 3 | 972 |
| max_forks_repo_name | string (length) | 6 | 130 |
| max_forks_repo_head_hexsha | string (length) | 40 | 78 |
| max_forks_repo_licenses | list (length) | 1 | 10 |
| max_forks_count | int64, nullable | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | string (length), nullable | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | string (length), nullable | 24 | 24 |
| content | string (length) | 3 | 1.03M |
| avg_line_length | float64 | 1.13 | 941k |
| max_line_length | int64 | 2 | 941k |
| alphanum_fraction | float64 | 0 | 1 |
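A minimal, illustrative sketch (not part of the dump): assuming the rows above have been loaded into a pandas DataFrame named rows with exactly these column names, they could be summarized per file extension like this.

import pandas as pd

def summarize(rows: pd.DataFrame) -> pd.DataFrame:
    # Group the file records by extension and report counts plus mean size and line statistics.
    return rows.groupby('ext').agg(
        n_files=('hexsha', 'count'),
        mean_size=('size', 'mean'),
        mean_avg_line_length=('avg_line_length', 'mean'),
        mean_alphanum_fraction=('alphanum_fraction', 'mean'),
    )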
hexsha: aef808c706cbf6f11a9c1272fa0254cb60087342 | size: 325 | ext: py | lang: Python

| group | repo | head_hexsha | path | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | ewen-lbh/pychemin | 3e41b9a13a5517e55151e6874932ce92bfd9fa53 | pychemin/constants.py | ["MIT"] | 1 | 2020-04-15T07:11:21.000Z | 2020-04-15T07:11:21.000Z |
| max_issues | ewen-lbh/pychemin | 3e41b9a13a5517e55151e6874932ce92bfd9fa53 | pychemin/constants.py | ["MIT"] | 1 | 2020-03-26T23:54:30.000Z | 2020-03-26T23:57:41.000Z |
| max_forks | ewen-lbh/pychemin | 3e41b9a13a5517e55151e6874932ce92bfd9fa53 | pychemin/constants.py | ["MIT"] | 1 | 2020-03-23T16:51:09.000Z | 2020-03-23T16:51:09.000Z |

content:
from os import path
GODSPEED_MODE = False
DEBUG_MODE = True
BASE_DIR = path.dirname(path.dirname(__file__))
STAT_NAMES = {
'hp': "Vie",
'speed': "Vitesse",
'reputation': "Réputation",
'food': "Niveau d'alimentation",
'strength': "Force",
'smart': "Intelligence",
"name": "Nom"
}
NON_NUMERIC_STATS = ['name']
avg_line_length: 18.055556 | max_line_length: 47 | alphanum_fraction: 0.658462

hexsha: f10747c541aee9e19fd6b862021398a13217fd34 | size: 5,761 | ext: py | lang: Python

| group | repo | head_hexsha | path | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | aman-gupta-1995/Machine-Learning-Mindware | 8b3050720711730520683c89949e3dbdfb168961 | mindware/components/ensemble/unnamed_ensemble.py | ["MIT"] | 27 | 2021-07-19T09:03:34.000Z | 2022-03-31T06:19:23.000Z |
| max_issues | aman-gupta-1995/Machine-Learning-Mindware | 8b3050720711730520683c89949e3dbdfb168961 | mindware/components/ensemble/unnamed_ensemble.py | ["MIT"] | 4 | 2021-07-15T12:17:10.000Z | 2022-01-26T17:16:58.000Z |
| max_forks | aman-gupta-1995/Machine-Learning-Mindware | 8b3050720711730520683c89949e3dbdfb168961 | mindware/components/ensemble/unnamed_ensemble.py | ["MIT"] | 17 | 2020-05-12T20:24:50.000Z | 2021-07-11T03:31:38.000Z |

content:
import numpy as np
import pandas as pd
import scipy.spatial
from sklearn.metrics._scorer import _BaseScorer
from mindware.components.utils.constants import CLS_TASKS
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import accuracy_score
def choose_base_models_regression(predictions, labels, num_model):
base_mask = [0] * len(predictions)
dif = predictions - labels
dif[dif > 0] = 1
dif[dif < 0] = -1
    # Calculate the distance between each pair of models.
dist = scipy.spatial.distance.cdist(dif, dif)
total_dist = np.sum(dist, 1)
    # Select the models that have the largest total distance to the other models.
selected_models = total_dist.argsort()[-num_model:]
for model in selected_models:
base_mask[model] = 1
return base_mask
def choose_base_models_classification(predictions, num_model, interval=20):
num_class = predictions.shape[2]
num_total_models = predictions.shape[0]
base_mask = [0] * len(predictions)
bucket = np.arange(interval + 1) / interval
bucket[0] -= 1e-8
bucket[-1] += 1e-8
distribution = []
for prediction in predictions:
freq_array = []
for i in range(num_class):
class_i = prediction[:, i]
group = pd.cut(class_i, bucket, right=False)
counts = group.value_counts()
freq = list(counts / counts.sum())
freq_array += freq
# TODO: Debug inf output
# print(prediction)
# print(freq_array)
distribution.append(freq_array) # Shape: (num_total_models,20*num_class)
distribution = np.array(distribution)
# Apply the clustering algorithm
model = AgglomerativeClustering(n_clusters=num_model, linkage="complete")
cluster = model.fit(distribution)
"""
Select models which are the most nearest to the clustering center
selected_models = []
"""
for cluster_label in range(num_model):
cluster_center = np.zeros(distribution.shape[1])
count = 0
"""
Averaging the distribution which belong the same clustering class
and then get the corresponding distribution center
"""
for i in range(num_total_models):
if cluster.labels_[i] == cluster_label:
count += 1
cluster_center += distribution[i]
cluster_center = cluster_center / count
distances = np.sqrt(np.sum(np.asarray(cluster_center - distribution) ** 2, axis=1))
selected_model = distances.argmin()
base_mask[selected_model] = 1
return base_mask
def calculate_weights(predictions, labels, base_mask):
num_total_models = predictions.shape[0]
num_samples = predictions.shape[1]
weights = np.zeros((num_samples, num_total_models))
for i in range(num_total_models):
if base_mask[i] != 0:
predicted_labels = np.argmax(predictions[i], 1)
acc = accuracy_score(predicted_labels, labels)
            model_weight = 0.5 * np.log(acc / (1 - acc))  # a single scalar weight from the model's accuracy
            shannon_ent = -1.0 * np.sum(predictions[i] * np.log2(predictions[i]), 1)  # shape: (num_samples,)
            confidence = 1 / np.exp(shannon_ent)
            model_weight = model_weight * confidence  # per-sample weights of the current model, shape: (num_samples,)
            weights[:, i] = model_weight
return weights
def calculate_weights_simple(predictions, labels, base_mask):
num_total_models = predictions.shape[0]
weights = [0] * num_total_models
for i in range(num_total_models):
if base_mask[i] != 0:
predicted_labels = np.argmax(predictions[i], 1)
acc = accuracy_score(predicted_labels, labels)
model_weight = 0.5 * np.log(acc / (1 - acc)) # a concrete value
weights[i] = model_weight
return weights
class UnnamedEnsemble:
def __init__(
self,
ensemble_size: int,
task_type: int,
metric: _BaseScorer,
random_state: np.random.RandomState = None,
):
self.ensemble_size = ensemble_size
self.task_type = task_type
self.metric = metric
self.random_state = random_state
self.base_model_mask = None
self.weights_ = None
def fit(self, predictions, labels):
"""
        :param predictions: probability predictions for classification. Shape: (num_models, num_samples, num_class)
:param labels: Shape: (num_samples,)
:return: self
"""
if self.task_type in CLS_TASKS: # If classification
            self.base_model_mask = choose_base_models_classification(predictions, self.ensemble_size)
self.weights_ = calculate_weights(predictions, labels, self.base_model_mask)
else:
pass
return self
def predict(self, predictions):
predictions = np.asarray(predictions)
# if predictions.shape[0] == len(self.weights_),
# predictions include those of zero-weight models.
if predictions.shape[0] == len(self.weights_):
return np.average(predictions, axis=0, weights=self.weights_)
# if prediction model.shape[0] == len(non_null_weights),
# predictions do not include those of zero-weight models.
elif predictions.shape[0] == np.count_nonzero(self.weights_):
non_null_weights = [w for w in self.weights_ if w > 0]
return np.average(predictions, axis=0, weights=non_null_weights)
# If none of the above applies, then something must have gone wrong.
else:
raise ValueError("The dimensions of ensemble predictions"
" and ensemble weights do not match!")
avg_line_length: 38.152318 | max_line_length: 111 | alphanum_fraction: 0.646242

hexsha: de6a9e447f2b8a947d8e58117aa0dcdb4b85ec7b | size: 36,975 | ext: py | lang: Python

| group | repo | head_hexsha | path | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | chetakks/ddsp | 72923ec557e208a6e4374b7b5dfb6d871130807e | ddsp/core.py | ["Apache-2.0"] | null | null | null |
| max_issues | chetakks/ddsp | 72923ec557e208a6e4374b7b5dfb6d871130807e | ddsp/core.py | ["Apache-2.0"] | null | null | null |
| max_forks | chetakks/ddsp | 72923ec557e208a6e4374b7b5dfb6d871130807e | ddsp/core.py | ["Apache-2.0"] | null | null | null |

content:
# Copyright 2020 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Library of functions for differentiable digital signal processing (DDSP)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from typing import Any, Dict, Text, TypeVar
import gin
import numpy as np
from scipy import fftpack
import tensorflow.compat.v2 as tf
Number = TypeVar('Number', int, float, np.ndarray, tf.Tensor)
# Utility Functions ------------------------------------------------------------
def tf_float32(x):
"""Ensure array/tensor is a float32 tf.Tensor."""
if isinstance(x, tf.Tensor):
return tf.cast(x, dtype=tf.float32) # This is a no-op if x is float32.
else:
return tf.convert_to_tensor(x, tf.float32)
def make_iterable(x):
"""Wrap in a list if not iterable, return empty list if None."""
if x is None:
return []
else:
return x if isinstance(x, collections.Iterable) else [x]
def nested_lookup(nested_key: Text,
nested_dict: Dict[Text, Any],
delimiter: Text = '/') -> tf.Tensor:
"""Returns the value of a nested dict according to a parsed input string.
Args:
nested_key: String of the form "key/key/key...".
nested_dict: Nested dictionary.
delimiter: String that splits the nested keys.
Returns:
value: Value of the key from the nested dictionary.
"""
# Parse the input string.
keys = nested_key.split(delimiter)
# Return the nested value.
value = nested_dict
for key in keys:
value = value[key]
return value
def midi_to_hz(notes: Number) -> Number:
"""TF-compatible midi_to_hz function."""
notes = tf_float32(notes)
return 440.0 * (2.0**((notes - 69.0) / 12.0))
def hz_to_midi(frequencies: Number) -> Number:
"""TF-compatible hz_to_midi function."""
frequencies = tf_float32(frequencies)
log2 = lambda x: tf.math.log(x) / tf.math.log(2.0)
notes = 12.0 * (log2(frequencies) - log2(440.0)) + 69.0
# Map 0 Hz to MIDI 0 (Replace -inf with 0.)
cond = tf.equal(notes, -np.inf)
notes = tf.where(cond, 0.0, notes)
return notes
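# Illustrative addition (not part of the original file): a quick numeric check
# of the MIDI/Hz mappings above. A4 (MIDI 69) maps to 440 Hz, one octave up
# (MIDI 81) doubles the frequency, and the two functions invert each other.
def _example_midi_hz_roundtrip():
  a4_hz = midi_to_hz(69.0)     # -> 440.0
  a5_hz = midi_to_hz(81.0)     # -> 880.0
  a4_midi = hz_to_midi(440.0)  # -> 69.0
  return a4_hz, a5_hz, a4_midi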
def unit_to_midi(unit: Number,
midi_min: Number = 20.0,
midi_max: Number = 90.0,
clip: bool = False) -> Number:
"""Map the unit interval [0, 1] to MIDI notes."""
unit = tf.clip_by_value(unit, 0.0, 1.0) if clip else unit
return midi_min + (midi_max - midi_min) * unit
def midi_to_unit(midi: Number,
midi_min: Number = 20.0,
midi_max: Number = 90.0,
clip: bool = False) -> Number:
"""Map MIDI notes to the unit interval [0, 1]."""
unit = (midi - midi_min) / (midi_max - midi_min)
return tf.clip_by_value(unit, 0.0, 1.0) if clip else unit
def unit_to_hz(unit: Number,
hz_min: Number,
hz_max: Number,
clip: bool = False) -> Number:
"""Map unit interval [0, 1] to [hz_min, hz_max], scaling logarithmically."""
midi = unit_to_midi(unit,
midi_min=hz_to_midi(hz_min),
midi_max=hz_to_midi(hz_max),
clip=clip)
return midi_to_hz(midi)
def hz_to_unit(hz: Number,
hz_min: Number,
hz_max: Number,
clip: bool = False) -> Number:
"""Map [hz_min, hz_max] to unit interval [0, 1], scaling logarithmically."""
midi = hz_to_midi(hz)
return midi_to_unit(midi,
midi_min=hz_to_midi(hz_min),
midi_max=hz_to_midi(hz_max),
clip=clip)
def resample(inputs: tf.Tensor,
n_timesteps: int,
method: Text = 'linear',
add_endpoint: bool = True) -> tf.Tensor:
"""Interpolates a tensor from n_frames to n_timesteps.
Args:
inputs: Framewise 1-D, 2-D, 3-D, or 4-D Tensor. Shape [n_frames],
[batch_size, n_frames], [batch_size, n_frames, channels], or
[batch_size, n_frames, n_freq, channels].
n_timesteps: Time resolution of the output signal.
method: Type of resampling, must be in ['nearest', 'linear', 'cubic',
      'window']. Linear and cubic are typical bilinear and bicubic interpolation.
'window' uses overlapping windows (only for upsampling) which is smoother
for amplitude envelopes with large frame sizes.
add_endpoint: Hold the last timestep for an additional step as the endpoint.
Then, n_timesteps is divided evenly into n_frames segments. If false, use
the last timestep as the endpoint, producing (n_frames - 1) segments with
each having a length of n_timesteps / (n_frames - 1).
Returns:
Interpolated 1-D, 2-D, 3-D, or 4-D Tensor. Shape [n_timesteps],
[batch_size, n_timesteps], [batch_size, n_timesteps, channels], or
[batch_size, n_timesteps, n_freqs, channels].
Raises:
ValueError: If method is 'window' and input is 4-D.
ValueError: If method is not one of 'nearest', 'linear', 'cubic', or
'window'.
"""
inputs = tf_float32(inputs)
is_1d = len(inputs.shape) == 1
is_2d = len(inputs.shape) == 2
is_4d = len(inputs.shape) == 4
# Ensure inputs are at least 3d.
if is_1d:
inputs = inputs[tf.newaxis, :, tf.newaxis]
elif is_2d:
inputs = inputs[:, :, tf.newaxis]
def _image_resize(method):
"""Closure around tf.image.resize."""
# Image resize needs 4-D input. Add/remove extra axis if not 4-D.
outputs = inputs[:, :, tf.newaxis, :] if not is_4d else inputs
outputs = tf.compat.v1.image.resize(outputs,
[n_timesteps, outputs.shape[2]],
method=method,
align_corners=not add_endpoint)
return outputs[:, :, 0, :] if not is_4d else outputs
# Perform resampling.
if method == 'nearest':
outputs = _image_resize(tf.compat.v1.image.ResizeMethod.NEAREST_NEIGHBOR)
elif method == 'linear':
outputs = _image_resize(tf.compat.v1.image.ResizeMethod.BILINEAR)
elif method == 'cubic':
outputs = _image_resize(tf.compat.v1.image.ResizeMethod.BICUBIC)
elif method == 'window':
outputs = upsample_with_windows(inputs, n_timesteps, add_endpoint)
else:
raise ValueError('Method ({}) is invalid. Must be one of {}.'.format(
method, "['nearest', 'linear', 'cubic', 'window']"))
# Return outputs to the same dimensionality of the inputs.
if is_1d:
outputs = outputs[0, :, 0]
elif is_2d:
outputs = outputs[:, :, 0]
return outputs
def upsample_with_windows(inputs: tf.Tensor,
n_timesteps: int,
add_endpoint: bool = True) -> tf.Tensor:
"""Upsample a series of frames using using overlapping hann windows.
Good for amplitude envelopes.
Args:
inputs: Framewise 3-D tensor. Shape [batch_size, n_frames, n_channels].
n_timesteps: The time resolution of the output signal.
add_endpoint: Hold the last timestep for an additional step as the endpoint.
Then, n_timesteps is divided evenly into n_frames segments. If false, use
the last timestep as the endpoint, producing (n_frames - 1) segments with
each having a length of n_timesteps / (n_frames - 1).
Returns:
Upsampled 3-D tensor. Shape [batch_size, n_timesteps, n_channels].
Raises:
ValueError: If input does not have 3 dimensions.
ValueError: If attempting to use function for downsampling.
ValueError: If n_timesteps is not divisible by n_frames (if add_endpoint is
true) or n_frames - 1 (if add_endpoint is false).
"""
inputs = tf_float32(inputs)
if len(inputs.shape) != 3:
raise ValueError('Upsample_with_windows() only supports 3 dimensions, '
'not {}.'.format(inputs.shape))
# Mimic behavior of tf.image.resize.
# For forward (not endpointed), hold value for last interval.
if add_endpoint:
inputs = tf.concat([inputs, inputs[:, -1:, :]], axis=1)
n_frames = int(inputs.shape[1])
n_intervals = (n_frames - 1)
if n_frames >= n_timesteps:
    raise ValueError('Upsample with windows cannot be used for downsampling. '
                     'More input frames ({}) than output timesteps ({}).'.format(
                         n_frames, n_timesteps))
if n_timesteps % n_intervals != 0.0:
minus_one = '' if add_endpoint else ' - 1'
raise ValueError(
        'For upsampling, the target number of timesteps must be divisible '
'by the number of input frames{}. (timesteps:{}, frames:{}, '
'add_endpoint={}).'.format(minus_one, n_timesteps, n_frames,
add_endpoint))
# Constant overlap-add, half overlapping windows.
hop_size = n_timesteps // n_intervals
window_length = 2 * hop_size
window = tf.signal.hann_window(window_length) # [window]
# Transpose for overlap_and_add.
x = tf.transpose(inputs, perm=[0, 2, 1]) # [batch_size, n_channels, n_frames]
# Broadcast multiply.
# Add dimension for windows [batch_size, n_channels, n_frames, window].
x = x[:, :, :, tf.newaxis]
window = window[tf.newaxis, tf.newaxis, tf.newaxis, :]
x_windowed = (x * window)
x = tf.signal.overlap_and_add(x_windowed, hop_size)
# Transpose back.
x = tf.transpose(x, perm=[0, 2, 1]) # [batch_size, n_timesteps, n_channels]
# Trim the rise and fall of the first and last window.
return x[:, hop_size:-hop_size, :]
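# Illustrative addition (not part of the original file): a minimal sketch of
# upsampling a frame-wise envelope to audio rate. The shapes are assumptions
# chosen so that n_timesteps divides evenly into the frame intervals, as
# required above (10 frames with add_endpoint=True gives 10 intervals).
def _example_upsample_with_windows():
  amps = tf.random.uniform([1, 10, 1])           # [batch, n_frames, n_channels]
  env = upsample_with_windows(amps, n_timesteps=1000)
  return env                                     # shape [1, 1000, 1]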
def log_scale(x, min_x, max_x):
"""Scales a -1 to 1 value logarithmically between min and max."""
x = tf_float32(x)
x = (x + 1.0) / 2.0 # Scale [-1, 1] to [0, 1]
return tf.exp((1.0 - x) * tf.math.log(min_x) + x * tf.math.log(max_x))
@gin.register
def exp_sigmoid(x, exponent=10.0, max_value=2.0, threshold=1e-7):
"""Exponentiated Sigmoid pointwise nonlinearity.
Bounds input to [threshold, max_value] with slope given by exponent.
Args:
x: Input tensor.
exponent: In nonlinear regime (away from x=0), the output varies by this
factor for every change of x by 1.0.
max_value: Limiting value at x=inf.
    threshold: Limiting value at x=-inf. Stabilizes training when outputs are
pushed to 0.
Returns:
A tensor with pointwise nonlinearity applied.
"""
x = tf_float32(x)
return max_value * tf.nn.sigmoid(x)**tf.math.log(exponent) + threshold
@gin.register
def sym_exp_sigmoid(x, width=8.0):
"""Symmetrical version of exp_sigmoid centered at (0, 1e-7)."""
x = tf_float32(x)
return exp_sigmoid(width * (tf.abs(x)/2.0 - 1.0))
# Additive Synthesizer ---------------------------------------------------------
def remove_above_nyquist(frequency_envelopes: tf.Tensor,
amplitude_envelopes: tf.Tensor,
sample_rate: int = 16000) -> tf.Tensor:
"""Set amplitudes for oscillators above nyquist to 0.
Args:
frequency_envelopes: Sample-wise oscillator frequencies (Hz). Shape
[batch_size, n_samples, n_sinusoids].
amplitude_envelopes: Sample-wise oscillator amplitude. Shape [batch_size,
n_samples, n_sinusoids].
    sample_rate: Sample rate in samples per second.
Returns:
amplitude_envelopes: Sample-wise filtered oscillator amplitude.
Shape [batch_size, n_samples, n_sinusoids].
"""
frequency_envelopes = tf_float32(frequency_envelopes)
amplitude_envelopes = tf_float32(amplitude_envelopes)
amplitude_envelopes = tf.where(
tf.greater_equal(frequency_envelopes, sample_rate / 2.0),
tf.zeros_like(amplitude_envelopes), amplitude_envelopes)
return amplitude_envelopes
def oscillator_bank(frequency_envelopes: tf.Tensor,
amplitude_envelopes: tf.Tensor,
sample_rate: int = 16000,
sum_sinusoids: bool = True) -> tf.Tensor:
"""Generates audio from sample-wise frequencies for a bank of oscillators.
Args:
frequency_envelopes: Sample-wise oscillator frequencies (Hz). Shape
[batch_size, n_samples, n_sinusoids].
amplitude_envelopes: Sample-wise oscillator amplitude. Shape [batch_size,
n_samples, n_sinusoids].
    sample_rate: Sample rate in samples per second.
sum_sinusoids: Add up audio from all the sinusoids.
Returns:
wav: Sample-wise audio. Shape [batch_size, n_samples, n_sinusoids] if
sum_sinusoids=False, else shape is [batch_size, n_samples].
"""
frequency_envelopes = tf_float32(frequency_envelopes)
amplitude_envelopes = tf_float32(amplitude_envelopes)
# Don't exceed Nyquist.
amplitude_envelopes = remove_above_nyquist(frequency_envelopes,
amplitude_envelopes,
sample_rate)
# Change Hz to radians per sample.
omegas = frequency_envelopes * (2.0 * np.pi) # rad / sec
omegas = omegas / float(sample_rate) # rad / sample
# Accumulate phase and synthesize.
phases = tf.cumsum(omegas, axis=1)
wavs = tf.sin(phases)
audio = amplitude_envelopes * wavs # [mb, n_samples, n_sinusoids]
if sum_sinusoids:
audio = tf.reduce_sum(audio, axis=-1) # [mb, n_samples]
return audio
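# Illustrative addition (not part of the original file): a minimal sketch that
# renders one second of a 440 Hz sine tone with the oscillator bank above.
# The constant envelopes are assumptions made for the sake of the example.
def _example_oscillator_bank():
  n_samples, sample_rate = 16000, 16000
  f0_hz = 440.0 * tf.ones([1, n_samples, 1])  # sample-wise frequency envelope
  amp = 0.5 * tf.ones([1, n_samples, 1])      # sample-wise amplitude envelope
  return oscillator_bank(f0_hz, amp, sample_rate=sample_rate)  # shape [1, 16000]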
def get_harmonic_frequencies(frequencies: tf.Tensor,
n_harmonics: int) -> tf.Tensor:
"""Create integer multiples of the fundamental frequency.
Args:
frequencies: Fundamental frequencies (Hz). Shape [batch_size, :, 1].
n_harmonics: Number of harmonics.
Returns:
harmonic_frequencies: Oscillator frequencies (Hz).
Shape [batch_size, :, n_harmonics].
"""
frequencies = tf_float32(frequencies)
f_ratios = tf.linspace(1.0, float(n_harmonics), int(n_harmonics))
f_ratios = f_ratios[tf.newaxis, tf.newaxis, :]
harmonic_frequencies = frequencies * f_ratios
return harmonic_frequencies
def harmonic_synthesis(frequencies: tf.Tensor,
amplitudes: tf.Tensor,
harmonic_shifts: tf.Tensor = None,
harmonic_distribution: tf.Tensor = None,
n_samples: int = 64000,
sample_rate: int = 16000,
amp_resample_method: Text = 'window') -> tf.Tensor:
"""Generate audio from frame-wise monophonic harmonic oscillator bank.
Args:
frequencies: Frame-wise fundamental frequency in Hz. Shape [batch_size,
n_frames, 1].
amplitudes: Frame-wise oscillator peak amplitude. Shape [batch_size,
n_frames, 1].
harmonic_shifts: Harmonic frequency variations (Hz), zero-centered. Total
frequency of a harmonic is equal to (frequencies * harmonic_number * (1 +
harmonic_shifts)). Shape [batch_size, n_frames, n_harmonics].
harmonic_distribution: Harmonic amplitude variations, ranged zero to one.
Total amplitude of a harmonic is equal to (amplitudes *
harmonic_distribution). Shape [batch_size, n_frames, n_harmonics].
n_samples: Total length of output audio. Interpolates and crops to this.
sample_rate: Sample rate.
amp_resample_method: Mode with which to resample amplitude envelopes.
Returns:
audio: Output audio. Shape [batch_size, n_samples, 1]
"""
frequencies = tf_float32(frequencies)
amplitudes = tf_float32(amplitudes)
if harmonic_distribution is not None:
harmonic_distribution = tf_float32(harmonic_distribution)
n_harmonics = int(harmonic_distribution.shape[-1])
elif harmonic_shifts is not None:
harmonic_shifts = tf_float32(harmonic_shifts)
n_harmonics = int(harmonic_shifts.shape[-1])
else:
n_harmonics = 1
# Create harmonic frequencies [batch_size, n_frames, n_harmonics].
harmonic_frequencies = get_harmonic_frequencies(frequencies, n_harmonics)
if harmonic_shifts is not None:
harmonic_frequencies *= (1.0 + harmonic_shifts)
# Create harmonic amplitudes [batch_size, n_frames, n_harmonics].
if harmonic_distribution is not None:
harmonic_amplitudes = amplitudes * harmonic_distribution
else:
harmonic_amplitudes = amplitudes
# Create sample-wise envelopes.
frequency_envelopes = resample(harmonic_frequencies, n_samples) # cycles/sec
amplitude_envelopes = resample(harmonic_amplitudes, n_samples,
method=amp_resample_method)
# Synthesize from harmonics [batch_size, n_samples].
audio = oscillator_bank(frequency_envelopes,
amplitude_envelopes,
sample_rate=sample_rate)
return audio
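# Illustrative addition (not part of the original file): a minimal sketch of
# frame-wise harmonic synthesis. The note, envelope, and flat harmonic
# distribution are assumptions; 250 frames upsample evenly to 64000 samples.
def _example_harmonic_synthesis():
  f0_hz = 261.63 * tf.ones([1, 250, 1])                          # frame-wise fundamental (~C4)
  amps = tf.linspace(1.0, 0.0, 250)[tf.newaxis, :, tf.newaxis]   # decaying amplitude envelope
  harmonic_distribution = tf.ones([1, 250, 10]) / 10.0           # 10 equal-weight harmonics
  return harmonic_synthesis(f0_hz, amps,
                            harmonic_distribution=harmonic_distribution,
                            n_samples=64000, sample_rate=16000)  # shape [1, 64000]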
# Wavetable Synthesizer --------------------------------------------------------
def linear_lookup(phase: tf.Tensor,
wavetables: tf.Tensor) -> tf.Tensor:
"""Lookup from wavetables with linear interpolation.
Args:
phase: The instantaneous phase of the base oscillator, ranging from 0 to
1.0. This gives the position to lookup in the wavetable.
Shape [batch_size, n_samples, 1].
wavetables: Wavetables to be read from on lookup. Shape [batch_size,
n_samples, n_wavetable] or [batch_size, n_wavetable].
Returns:
The resulting audio from linearly interpolated lookup of the wavetables at
each point in time. Shape [batch_size, n_samples].
"""
phase, wavetables = tf_float32(phase), tf_float32(wavetables)
# Add a time dimension if not present.
if len(wavetables.shape) == 2:
wavetables = wavetables[:, tf.newaxis, :]
# Add a wavetable dimension if not present.
if len(phase.shape) == 2:
phase = phase[:, :, tf.newaxis]
# Add first sample to end of wavetable for smooth linear interpolation
# between the last point in the wavetable and the first point.
wavetables = tf.concat([wavetables, wavetables[..., 0:1]], axis=-1)
n_wavetable = int(wavetables.shape[-1])
# Get a phase value for each point on the wavetable.
phase_wavetables = tf.linspace(0.0, 1.0, n_wavetable)
# Get pair-wise distances from the oscillator phase to each wavetable point.
# Axes are [batch, time, n_wavetable].
phase_distance = tf.abs((phase - phase_wavetables[tf.newaxis, tf.newaxis, :]))
# Put distance in units of wavetable samples.
phase_distance *= n_wavetable - 1
# Weighting for interpolation.
# Distance is > 1.0 (and thus weights are 0.0) for all but nearest neighbors.
weights = tf.nn.relu(1.0 - phase_distance)
weighted_wavetables = weights * wavetables
# Interpolated audio from summing the weighted wavetable at each timestep.
return tf.reduce_sum(weighted_wavetables, axis=-1)
def wavetable_synthesis(frequencies: tf.Tensor,
amplitudes: tf.Tensor,
wavetables: tf.Tensor,
n_samples: int = 64000,
sample_rate: int = 16000):
"""Monophonic wavetable synthesizer.
Args:
frequencies: Frame-wise frequency in Hertz of the fundamental oscillator.
Shape [batch_size, n_frames, 1].
amplitudes: Frame-wise amplitude envelope to apply to the oscillator. Shape
[batch_size, n_frames, 1].
wavetables: Frame-wise wavetables from which to lookup. Shape
[batch_size, n_wavetable] or [batch_size, n_frames, n_wavetable].
n_samples: Total length of output audio. Interpolates and crops to this.
    sample_rate: Number of samples per second.
Returns:
audio: Audio at the frequency and amplitude of the inputs, with harmonics
given by the wavetable. Shape [batch_size, n_samples].
"""
wavetables = tf_float32(wavetables)
# Create sample-wise envelopes.
amplitude_envelope = resample(amplitudes, n_samples, method='window')[:, :, 0]
frequency_envelope = resample(frequencies, n_samples) # cycles / sec
# Create intermediate wavetables.
wavetable_shape = wavetables.shape.as_list()
if len(wavetable_shape) == 3 and wavetable_shape[1] > 1:
wavetables = resample(wavetables, n_samples)
# Accumulate phase (in cycles which range from 0.0 to 1.0).
phase_velocity = frequency_envelope / float(sample_rate) # cycles / sample
# Note: Cumsum accumulates _very_ small errors at float32 precision.
# On the order of milli-Hertz.
phase = tf.cumsum(phase_velocity, axis=1, exclusive=True) % 1.0
# Synthesize with linear lookup.
audio = linear_lookup(phase, wavetables)
# Modulate with amplitude envelope.
audio *= amplitude_envelope
return audio
def variable_length_delay(phase: tf.Tensor,
audio: tf.Tensor,
max_length: int = 512) -> tf.Tensor:
"""Delay audio by a time-vaying amount using linear interpolation.
Useful for modulation effects such as vibrato, chorus, and flanging.
Args:
    phase: The normalized instantaneous length of the delay, ranging from 0 to
1.0. This corresponds to a delay of 0 to max_length samples. Shape
[batch_size, n_samples, 1].
audio: Audio signal to be delayed. Shape [batch_size, n_samples].
    max_length: Maximum delay in samples.
Returns:
The delayed audio signal. Shape [batch_size, n_samples].
"""
phase, audio = tf_float32(phase), tf_float32(audio)
# Make causal by zero-padding audio up front.
audio = tf.pad(audio, [(0, 0), (max_length - 1, 0)])
# Cut audio up into frames of max_length.
frames = tf.signal.frame(audio,
frame_length=max_length,
frame_step=1,
pad_end=False)
# Reverse frames so that [0, 1] phase corresponds to [0, max_length] delay.
frames = frames[..., ::-1]
# Read audio from the past frames.
return linear_lookup(phase, frames)
# Time-varying convolution -----------------------------------------------------
def get_fft_size(frame_size: int, ir_size: int, power_of_2: bool = True) -> int:
"""Calculate final size for efficient FFT.
Args:
frame_size: Size of the audio frame.
ir_size: Size of the convolving impulse response.
power_of_2: Constrain to be a power of 2. If False, allow other 5-smooth
numbers. TPU requires power of 2, while GPU is more flexible.
Returns:
fft_size: Size for efficient FFT.
"""
convolved_frame_size = ir_size + frame_size - 1
if power_of_2:
# Next power of 2.
fft_size = int(2**np.ceil(np.log2(convolved_frame_size)))
else:
fft_size = int(fftpack.helper.next_fast_len(convolved_frame_size))
return fft_size
def crop_and_compensate_delay(audio: tf.Tensor, audio_size: int, ir_size: int,
padding: Text,
delay_compensation: int) -> tf.Tensor:
"""Crop audio output from convolution to compensate for group delay.
Args:
audio: Audio after convolution. Tensor of shape [batch, time_steps].
audio_size: Initial size of the audio before convolution.
ir_size: Size of the convolving impulse response.
    padding: Either 'valid' or 'same'. For 'same' the final output will be the
same size as the input audio (audio_timesteps). For 'valid' the audio is
extended to include the tail of the impulse response (audio_timesteps +
ir_timesteps - 1).
delay_compensation: Samples to crop from start of output audio to compensate
for group delay of the impulse response. If delay_compensation < 0 it
defaults to automatically calculating a constant group delay of the
windowed linear phase filter from frequency_impulse_response().
Returns:
Tensor of cropped and shifted audio.
Raises:
ValueError: If padding is not either 'valid' or 'same'.
"""
# Crop the output.
if padding == 'valid':
crop_size = ir_size + audio_size - 1
elif padding == 'same':
crop_size = audio_size
else:
raise ValueError('Padding must be \'valid\' or \'same\', instead '
'of {}.'.format(padding))
# Compensate for the group delay of the filter by trimming the front.
# For an impulse response produced by frequency_impulse_response(),
# the group delay is constant because the filter is linear phase.
total_size = int(audio.shape[-1])
crop = total_size - crop_size
start = ((ir_size - 1) // 2 -
1 if delay_compensation < 0 else delay_compensation)
end = crop - start
return audio[:, start:-end]
def fft_convolve(audio: tf.Tensor,
impulse_response: tf.Tensor,
padding: Text = 'same',
delay_compensation: int = -1) -> tf.Tensor:
"""Filter audio with frames of time-varying impulse responses.
Time-varying filter. Given audio [batch, n_samples], and a series of impulse
responses [batch, n_frames, n_impulse_response], splits the audio into frames,
applies filters, and then overlap-and-adds audio back together.
Applies non-windowed non-overlapping STFT/ISTFT to efficiently compute
convolution for large impulse response sizes.
Args:
audio: Input audio. Tensor of shape [batch, audio_timesteps].
impulse_response: Finite impulse response to convolve. Can either be a 2-D
Tensor of shape [batch, ir_size], or a 3-D Tensor of shape [batch,
ir_frames, ir_size]. A 2-D tensor will apply a single linear
time-invariant filter to the audio. A 3-D Tensor will apply a linear
time-varying filter. Automatically chops the audio into equally shaped
blocks to match ir_frames.
    padding: Either 'valid' or 'same'. For 'same' the final output will be the
same size as the input audio (audio_timesteps). For 'valid' the audio is
extended to include the tail of the impulse response (audio_timesteps +
ir_timesteps - 1).
delay_compensation: Samples to crop from start of output audio to compensate
for group delay of the impulse response. If delay_compensation is less
than 0 it defaults to automatically calculating a constant group delay of
the windowed linear phase filter from frequency_impulse_response().
Returns:
audio_out: Convolved audio. Tensor of shape
[batch, audio_timesteps + ir_timesteps - 1] ('valid' padding) or shape
[batch, audio_timesteps] ('same' padding).
Raises:
ValueError: If audio and impulse response have different batch size.
ValueError: If audio cannot be split into evenly spaced frames. (i.e. the
number of impulse response frames is on the order of the audio size and
not a multiple of the audio size.)
"""
audio, impulse_response = tf_float32(audio), tf_float32(impulse_response)
# Add a frame dimension to impulse response if it doesn't have one.
ir_shape = impulse_response.shape.as_list()
if len(ir_shape) == 2:
impulse_response = impulse_response[:, tf.newaxis, :]
ir_shape = impulse_response.shape.as_list()
# Get shapes of audio and impulse response.
batch_size_ir, n_ir_frames, ir_size = ir_shape
batch_size, audio_size = audio.shape.as_list()
# Validate that batch sizes match.
if batch_size != batch_size_ir:
raise ValueError('Batch size of audio ({}) and impulse response ({}) must '
'be the same.'.format(batch_size, batch_size_ir))
# Cut audio into frames.
frame_size = int(np.ceil(audio_size / n_ir_frames))
hop_size = frame_size
audio_frames = tf.signal.frame(audio, frame_size, hop_size, pad_end=True)
# Check that number of frames match.
n_audio_frames = int(audio_frames.shape[1])
if n_audio_frames != n_ir_frames:
raise ValueError(
'Number of Audio frames ({}) and impulse response frames ({}) do not '
'match. For small hop size = ceil(audio_size / n_ir_frames), '
'number of impulse response frames must be a multiple of the audio '
'size.'.format(n_audio_frames, n_ir_frames))
# Pad and FFT the audio and impulse responses.
fft_size = get_fft_size(frame_size, ir_size, power_of_2=True)
audio_fft = tf.signal.rfft(audio_frames, [fft_size])
ir_fft = tf.signal.rfft(impulse_response, [fft_size])
# Multiply the FFTs (same as convolution in time).
audio_ir_fft = tf.multiply(audio_fft, ir_fft)
# Take the IFFT to resynthesize audio.
audio_frames_out = tf.signal.irfft(audio_ir_fft)
audio_out = tf.signal.overlap_and_add(audio_frames_out, hop_size)
# Crop and shift the output audio.
return crop_and_compensate_delay(audio_out, audio_size, ir_size, padding,
delay_compensation)
# Filter Design ----------------------------------------------------------------
def apply_window_to_impulse_response(impulse_response: tf.Tensor,
window_size: int = 0,
causal: bool = False) -> tf.Tensor:
"""Apply a window to an impulse response and put in causal form.
Args:
impulse_response: A series of impulse responses frames to window, of shape
[batch, n_frames, ir_size].
window_size: Size of the window to apply in the time domain. If window_size
is less than 1, it defaults to the impulse_response size.
    causal: Impulse response input is in causal form (peak in the middle).
Returns:
impulse_response: Windowed impulse response in causal form, with last
dimension cropped to window_size if window_size is greater than 0 and less
than ir_size.
"""
impulse_response = tf_float32(impulse_response)
# If IR is in causal form, put it in zero-phase form.
if causal:
impulse_response = tf.signal.fftshift(impulse_response, axes=-1)
# Get a window for better time/frequency resolution than rectangular.
# Window defaults to IR size, cannot be bigger.
ir_size = int(impulse_response.shape[-1])
if (window_size <= 0) or (window_size > ir_size):
window_size = ir_size
window = tf.signal.hann_window(window_size)
# Zero pad the window and put in in zero-phase form.
padding = ir_size - window_size
if padding > 0:
half_idx = (window_size + 1) // 2
window = tf.concat([window[half_idx:],
tf.zeros([padding]),
window[:half_idx]], axis=0)
else:
window = tf.signal.fftshift(window, axes=-1)
# Apply the window, to get new IR (both in zero-phase form).
window = tf.broadcast_to(window, impulse_response.shape)
impulse_response = window * tf.math.real(impulse_response)
# Put IR in causal form and trim zero padding.
if padding > 0:
first_half_start = (ir_size - (half_idx - 1)) + 1
second_half_end = half_idx + 1
impulse_response = tf.concat([impulse_response[..., first_half_start:],
impulse_response[..., :second_half_end]],
axis=-1)
else:
impulse_response = tf.signal.fftshift(impulse_response, axes=-1)
return impulse_response
def frequency_impulse_response(magnitudes: tf.Tensor,
window_size: int = 0) -> tf.Tensor:
"""Get windowed impulse responses using the frequency sampling method.
Follows the approach in:
https://ccrma.stanford.edu/~jos/sasp/Windowing_Desired_Impulse_Response.html
Args:
magnitudes: Frequency transfer curve. Float32 Tensor of shape [batch,
n_frames, n_frequencies] or [batch, n_frequencies]. The frequencies of the
      last dimension are ordered as [0, f_nyquist / (n_frames - 1), ...,
f_nyquist], where f_nyquist is (sample_rate / 2). Automatically splits the
audio into equally sized frames to match frames in magnitudes.
window_size: Size of the window to apply in the time domain. If window_size
is less than 1, it defaults to the impulse_response size.
Returns:
impulse_response: Time-domain FIR filter of shape
[batch, frames, window_size] or [batch, window_size].
Raises:
ValueError: If window size is larger than fft size.
"""
# Get the IR (zero-phase form).
magnitudes = tf.complex(magnitudes, tf.zeros_like(magnitudes))
impulse_response = tf.signal.irfft(magnitudes)
# Window and put in causal form.
impulse_response = apply_window_to_impulse_response(impulse_response,
window_size)
return impulse_response
def sinc(x, threshold=1e-20):
"""Normalized zero phase version (peak at zero)."""
x = tf_float32(x)
x = tf.where(tf.abs(x) < threshold, threshold * tf.ones_like(x), x)
x = np.pi * x
return tf.sin(x) / x
def sinc_impulse_response(cutoff_frequency, window_size=512, sample_rate=None):
"""Get a sinc impulse response for a set of low-pass cutoff frequencies.
Args:
cutoff_frequency: Frequency cutoff for low-pass sinc filter. If the
sample_rate is given, cutoff_frequency is in Hertz. If sample_rate is
None, cutoff_frequency is normalized ratio (frequency/nyquist) in the
range [0, 1.0]. Shape [batch_size, n_time, 1].
window_size: Size of the Hamming window to apply to the impulse.
sample_rate: Optionally provide the sample rate.
Returns:
impulse_response: A series of impulse responses. Shape
[batch_size, n_time, (window_size // 2) * 2 + 1].
"""
# Convert frequency to samples/sample_rate [0, Nyquist] -> [0, 1].
if sample_rate is not None:
cutoff_frequency *= 2.0 / float(sample_rate)
# Create impulse response axis.
half_size = window_size // 2
full_size = half_size * 2 + 1
idx = tf.range(-half_size, half_size + 1.0, dtype=tf.float32)
idx = idx[tf.newaxis, tf.newaxis, :]
# Compute impulse response.
impulse_response = sinc(cutoff_frequency * idx)
# Window the impulse response.
window = tf.signal.hamming_window(full_size)
window = tf.broadcast_to(window, impulse_response.shape)
impulse_response = window * tf.math.real(impulse_response)
# Normalize for unity gain.
impulse_response /= tf.reduce_sum(impulse_response, axis=-1, keepdims=True)
return impulse_response
def frequency_filter(audio: tf.Tensor,
magnitudes: tf.Tensor,
window_size: int = 0,
padding: Text = 'same') -> tf.Tensor:
"""Filter audio with a finite impulse response filter.
Args:
audio: Input audio. Tensor of shape [batch, audio_timesteps].
magnitudes: Frequency transfer curve. Float32 Tensor of shape [batch,
n_frames, n_frequencies] or [batch, n_frequencies]. The frequencies of the
      last dimension are ordered as [0, f_nyquist / (n_frames - 1), ...,
f_nyquist], where f_nyquist is (sample_rate / 2). Automatically splits the
audio into equally sized frames to match frames in magnitudes.
window_size: Size of the window to apply in the time domain. If window_size
is less than 1, it is set as the default (n_frequencies).
    padding: Either 'valid' or 'same'. For 'same' the final output will be the
same size as the input audio (audio_timesteps). For 'valid' the audio is
extended to include the tail of the impulse response (audio_timesteps +
window_size - 1).
Returns:
Filtered audio. Tensor of shape
[batch, audio_timesteps + window_size - 1] ('valid' padding) or shape
[batch, audio_timesteps] ('same' padding).
"""
impulse_response = frequency_impulse_response(magnitudes,
window_size=window_size)
return fft_convolve(audio, impulse_response, padding=padding)
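# Illustrative addition (not part of the original file): a minimal sketch of
# time-varying FIR filtering with the helper above. The random audio and
# transfer curves are placeholders; 100 frames split 16000 samples evenly.
def _example_frequency_filter():
  audio = tf.random.normal([1, 16000])          # [batch, audio_timesteps]
  magnitudes = tf.random.uniform([1, 100, 65])  # [batch, n_frames, n_frequencies]
  return frequency_filter(audio, magnitudes)    # shape [1, 16000] with 'same' padding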
def sinc_filter(audio: tf.Tensor,
cutoff_frequency: tf.Tensor,
window_size: int = 512,
sample_rate: int = None,
padding: Text = 'same') -> tf.Tensor:
"""Filter audio with sinc low-pass filter.
Args:
audio: Input audio. Tensor of shape [batch, audio_timesteps].
cutoff_frequency: Frequency cutoff for low-pass sinc filter. If the
sample_rate is given, cutoff_frequency is in Hertz. If sample_rate is
None, cutoff_frequency is normalized ratio (frequency/nyquist) in the
range [0, 1.0]. Shape [batch_size, n_time, 1].
window_size: Size of the Hamming window to apply to the impulse.
sample_rate: Optionally provide the sample rate.
    padding: Either 'valid' or 'same'. For 'same' the final output will be the
same size as the input audio (audio_timesteps). For 'valid' the audio is
extended to include the tail of the impulse response (audio_timesteps +
window_size - 1).
Returns:
Filtered audio. Tensor of shape
[batch, audio_timesteps + window_size - 1] ('valid' padding) or shape
[batch, audio_timesteps] ('same' padding).
"""
impulse_response = sinc_impulse_response(cutoff_frequency,
window_size=window_size,
sample_rate=sample_rate)
return fft_convolve(audio, impulse_response, padding=padding)
avg_line_length: 39.503205 | max_line_length: 80 | alphanum_fraction: 0.674537

hexsha: 83cf4caf850199baf381e818b63a9e974810224c | size: 2,240 | ext: py | lang: Python

| group | repo | head_hexsha | path | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | jonathanbethel/OasisLMF | 7dfbbd237580d36f4b209a5e4494d21d7950e668 | oasislmf/utils/data.py | ["BSD-3-Clause"] | 1 | 2019-08-10T18:53:53.000Z | 2019-08-10T18:53:53.000Z |
| max_issues | jonathanbethel/OasisLMF | 7dfbbd237580d36f4b209a5e4494d21d7950e668 | oasislmf/utils/data.py | ["BSD-3-Clause"] | null | null | null |
| max_forks | jonathanbethel/OasisLMF | 7dfbbd237580d36f4b209a5e4494d21d7950e668 | oasislmf/utils/data.py | ["BSD-3-Clause"] | null | null | null |

content:
# -*- coding: utf-8 -*-
__all__ = [
'get_dataframe'
]
import builtins
import io

import pandas as pd
import six
from .exceptions import OasisException
def get_dataframe(
src_fp=None,
src_type='csv',
src_buf=None,
src_data=None,
float_precision='high',
lowercase_cols=True,
index_col=True,
non_na_cols=(),
col_dtypes={},
sort_col=None,
sort_ascending=None
):
if not (src_fp or src_buf or src_data is not None):
raise OasisException(
'A CSV or JSON file path or a string buffer of such a file or an '
'appropriate data structure or dataframe must be provided'
)
df = None
if src_fp and src_type == 'csv':
df = pd.read_csv(src_fp, float_precision=float_precision)
elif src_buf and src_type == 'csv':
df = pd.read_csv(io.StringIO(src_buf), float_precision=float_precision)
elif src_fp and src_type == 'json':
df = pd.read_json(src_fp, precise_float=(True if float_precision == 'high' else False))
elif src_buf and src_type == 'json':
df = pd.read_json(io.StringIO(src_buf), precise_float=(True if float_precision == 'high' else False))
    elif src_data is not None and isinstance(src_data, (list, pd.DataFrame)):
df = pd.DataFrame(data=src_data, dtype=object)
if lowercase_cols:
df.columns = df.columns.str.lower()
if index_col:
df['index'] = list(range(len(df)))
if non_na_cols:
_non_na_cols = tuple(col.lower() for col in non_na_cols) if lowercase_cols else non_na_cols
df.dropna(subset=_non_na_cols, inplace=True)
if col_dtypes:
_col_dtypes = {
(k.lower() if lowercase_cols else k):(getattr(builtins, v) if v in ('int', 'bool', 'float', 'str',) else v) for k, v in six.iteritems(col_dtypes)
}
for col, dtype in six.iteritems(_col_dtypes):
df[col] = df[col].astype(dtype) if dtype != int else df[col].astype(object)
if sort_col:
_sort_col = sort_col.lower() if lowercase_cols else sort_col
sort_ascending = sort_ascending if sort_ascending is not None else True
df.sort_values(_sort_col, axis=0, ascending=sort_ascending, inplace=True)
return df
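# Illustrative addition (not part of the original module): a minimal usage
# sketch with a hypothetical in-memory CSV buffer; the column names and
# values below are invented for the example.
def _example_get_dataframe():
    csv_buf = 'LocNumber,PerilCode,TIV\n1,WTC,100000\n2,,250000\n'
    return get_dataframe(
        src_buf=csv_buf,
        src_type='csv',
        non_na_cols=('PerilCode',),  # drop rows with a missing peril code
        sort_col='TIV',
        sort_ascending=False,
    )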
avg_line_length: 32 | max_line_length: 157 | alphanum_fraction: 0.658036

hexsha: 79e25b4e2cad92fa6af52bf9ac75948b90fc0dae | size: 1,149 | ext: py | lang: Python

| group | repo | head_hexsha | path | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | nova/api/validation/extra_specs/accel.py | ["Apache-2.0"] | 1,874 | 2015-01-04T05:18:34.000Z | 2022-03-31T03:30:28.000Z |
| max_issues | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | nova/api/validation/extra_specs/accel.py | ["Apache-2.0"] | 40 | 2015-04-13T02:32:42.000Z | 2022-02-16T02:28:06.000Z |
| max_forks | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | nova/api/validation/extra_specs/accel.py | ["Apache-2.0"] | 1,996 | 2015-01-04T15:11:51.000Z | 2022-03-31T11:03:13.000Z |

content:
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Validators for ``accel`` namespaced extra specs."""
from nova.api.validation.extra_specs import base
EXTRA_SPEC_VALIDATORS = [
base.ExtraSpecValidator(
name='accel:device_profile',
description=(
'The name of a device profile to configure for the instance. '
'A device profile may be viewed as a "flavor for devices".'
),
value={
'type': str,
'description': 'A name of a device profile.',
},
),
]
def register():
return EXTRA_SPEC_VALIDATORS
avg_line_length: 31.054054 | max_line_length: 75 | alphanum_fraction: 0.684073

hexsha: 0c61f96660218c9a26c3aebe80ffc0e0501133bf | size: 1,931 | ext: bzl | lang: Python

| group | repo | head_hexsha | path | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | kriswuollett/rules_nodejs | 5798eeeda78c8acc2ebc2f24a41aca33164a972f | packages/labs/package.bzl | ["Apache-2.0"] | 645 | 2017-08-22T22:18:51.000Z | 2022-03-31T11:50:53.000Z |
| max_issues | bolitt/rules_nodejs | ba9f82103c6122bb316614734489e44552d3d266 | packages/labs/package.bzl | ["Apache-2.0"] | 2,172 | 2017-08-26T23:52:39.000Z | 2022-03-31T23:51:29.000Z |
| max_forks | bolitt/rules_nodejs | ba9f82103c6122bb316614734489e44552d3d266 | packages/labs/package.bzl | ["Apache-2.0"] | 570 | 2017-08-24T19:57:44.000Z | 2022-03-29T12:09:04.000Z |

content:
"Install toolchain dependencies"
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@build_bazel_rules_nodejs//:index.bzl", "yarn_install")
load(":mock_io_bazel_rules_closure.bzl", "mock_io_bazel_rules_closure")
def npm_bazel_labs_dependencies():
"""
Fetch our transitive dependencies.
If the user wants to get a different version of these, they can just fetch it
from their WORKSPACE before calling this function, or not call this function at all.
"""
_maybe(
http_archive,
name = "com_github_grpc_grpc_web",
sha256 = "8d9b1e9b839a5254aa79cb4068b05fdb6e1de5637c1b8551f95144159a4801f2",
strip_prefix = "grpc-web-1.2.0",
urls = [
"https://github.com/grpc/grpc-web/archive/1.2.0.tar.gz",
],
)
_maybe(
http_archive,
name = "rules_proto",
sha256 = "66bfdf8782796239d3875d37e7de19b1d94301e8972b3cbd2446b332429b4df1",
strip_prefix = "rules_proto-4.0.0",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0.tar.gz",
"https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0.tar.gz",
],
)
_maybe(
mock_io_bazel_rules_closure,
name = "io_bazel_rules_closure",
)
yarn_install(
name = "build_bazel_rules_typescript_grpc_web_compiletime_deps",
package_json = Label("//packages/labs/grpc_web:package.json"),
yarn_lock = Label("//packages/labs/grpc_web:yarn.lock"),
        # Do not symlink node_modules, because when this is used in downstream repos we
        # should not create node_modules folders in the external repository; that is
        # not supported by managed_directories.
symlink_node_modules = False,
)
def _maybe(repo_rule, name, **kwargs):
if name not in native.existing_rules():
repo_rule(name = name, **kwargs)
avg_line_length: 35.759259 | max_line_length: 106 | alphanum_fraction: 0.674262

hexsha: db854c8f461fb59876d0a50c15a4f523f98e0f90 | size: 685 | ext: py | lang: Python

| group | repo | head_hexsha | path | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | toffi9/djangocms-text-ckeditor | 175a1a444de8ca1ba4742196cb83150d45b5c505 | djangocms_text_ckeditor/migrations/0001_initial.py | ["BSD-3-Clause"] | null | null | null |
| max_issues | toffi9/djangocms-text-ckeditor | 175a1a444de8ca1ba4742196cb83150d45b5c505 | djangocms_text_ckeditor/migrations/0001_initial.py | ["BSD-3-Clause"] | null | null | null |
| max_forks | toffi9/djangocms-text-ckeditor | 175a1a444de8ca1ba4742196cb83150d45b5c505 | djangocms_text_ckeditor/migrations/0001_initial.py | ["BSD-3-Clause"] | null | null | null |

content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '__first__'),
]
operations = [
migrations.CreateModel(
name='Text',
fields=[
('cmsplugin_ptr', models.OneToOneField(serialize=False, parent_link=True, auto_created=True, to='cms.CMSPlugin', primary_key=True, on_delete=models.CASCADE)),
('body', models.TextField(verbose_name='body')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
avg_line_length: 26.346154 | max_line_length: 174 | alphanum_fraction: 0.557664

hexsha: 9bd223ad59a85b6979573779593eb820af5680e9 | size: 25,722 | ext: py | lang: Python

| group | repo | head_hexsha | path | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations/_route_tables_operations.py | ["MIT"] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z |
| max_issues | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations/_route_tables_operations.py | ["MIT"] | null | null | null |
| max_forks | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations/_route_tables_operations.py | ["MIT"] | null | null | null |

content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteTablesOperations:
"""RouteTablesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.RouteTable":
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "models.RouteTable",
**kwargs
) -> "models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RouteTable')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
parameters: "models.RouteTable",
**kwargs
) -> AsyncLROPoller["models.RouteTable"]:
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route table operation.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.RouteTable
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_09_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
route_table_name: str,
parameters: "models.TagsObject",
**kwargs
) -> "models.RouteTable":
"""Updates a route table tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to update route table tags.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.RouteTableListResult"]:
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["models.RouteTableListResult"]:
"""Gets all route tables in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'} # type: ignore
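# --- Editor's usage sketch (not part of the generated client) --------------
# A hedged example of how the operations above are typically driven. It
# assumes an already-authenticated async NetworkManagementClient (api-version
# 2019-09-01) exposing this operations class as ``client.route_tables``; the
# resource group and route table names are placeholders, and ``models`` is
# the matching models module passed in by the caller.
async def _ensure_route_table_example(client, models):
    poller = await client.route_tables.begin_create_or_update(
        resource_group_name="example-rg",        # placeholder name
        route_table_name="example-rt",           # placeholder name
        parameters=models.RouteTable(location="westeurope"),
    )
    route_table = await poller.result()          # wait for the LRO to finish
    # list() returns an AsyncItemPaged that is consumed with ``async for``.
    async for item in client.route_tables.list("example-rg"):
        print(item.name)
    return route_table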
| 48.168539 | 191 | 0.662623 |
20033f3af563f76a3b614bfb0621a81378596936 | 3,001 | py | Python | tensorflow/python/keras/layers/preprocessing/benchmarks/category_vocab_list_varlen_benchmark.py | AdamHillier/tensorflow | 6780ebf4858a56fd0745f03fa5a61b249559f3cd | ["Apache-2.0"] | 4 | 2016-07-14T15:15:05.000Z | 2017-03-02T15:17:22.000Z | tensorflow/python/keras/layers/preprocessing/benchmarks/category_vocab_list_varlen_benchmark.py | AdamHillier/tensorflow | 6780ebf4858a56fd0745f03fa5a61b249559f3cd | ["Apache-2.0"] | 1 | 2021-03-23T03:25:15.000Z | 2021-03-23T03:25:15.000Z | tensorflow/python/keras/layers/preprocessing/benchmarks/category_vocab_list_varlen_benchmark.py | AdamHillier/tensorflow | 6780ebf4858a56fd0745f03fa5a61b249559f3cd | ["Apache-2.0"] | 5 | 2016-11-07T21:17:45.000Z | 2020-05-31T00:16:59.000Z |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of vocabulary columns from lists with varying-length inputs."""
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.eager.def_function import function as tf_function
from tensorflow.python.feature_column import feature_column_v2 as fcv2
from tensorflow.python.feature_column import sequence_feature_column
from tensorflow.python.framework import dtypes as dt
from tensorflow.python.keras.layers.preprocessing import string_lookup
from tensorflow.python.keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
from tensorflow.python.platform import test as tf_test
# This is required as of 3/2021 because otherwise we drop into graph mode.
v2_compat.enable_v2_behavior()
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
def embedding_varlen(batch_size, max_length):
"""Benchmark a variable-length embedding."""
# Data and constants.
vocab = fc_bm.create_vocabulary(32768)
data = fc_bm.create_string_data(
max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.15)
# Keras implementation
model = keras.Sequential()
model.add(
keras.Input(
shape=(max_length,), name="data", ragged=True, dtype=dt.string))
model.add(string_lookup.StringLookup(vocabulary=vocab, mask_token=None))
# FC implementation
fc = sequence_feature_column.sequence_categorical_column_with_vocabulary_list(
key="data", vocabulary_list=vocab, num_oov_buckets=1)
# Wrap the FC implementation in a tf.function for a fair comparison
@tf_function()
def fc_fn(tensors):
fc.transform_feature(fcv2.FeatureTransformationCache(tensors), None)
# Benchmark runs
keras_data = {"data": data}
k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)
fc_data = {"data": data.to_sparse()}
fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)
return k_avg_time, fc_avg_time
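# Editor's sketch (not part of the benchmark): what the Keras branch above
# does for one small batch, using the same internal StringLookup layer. The
# exact integer assigned to each token (and to the OOV bucket) depends on
# the TF version, so no specific output values are asserted here.
def _string_lookup_demo():
    demo_vocab = ["apple", "banana", "cherry"]
    layer = string_lookup.StringLookup(vocabulary=demo_vocab, mask_token=None)
    # In-vocabulary strings map to their vocabulary indices; "durian" falls
    # into the out-of-vocabulary bucket.
    return layer([["apple", "cherry", "durian"]])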
class BenchmarkLayer(fc_bm.LayerBenchmark):
"""Benchmark the layer forward pass."""
def benchmark_layer(self):
for batch in BATCH_SIZES:
name = "vocab_list|varlen|batch_%s" % batch
k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)
self.report(name, k_time, f_time, NUM_REPEATS)
if __name__ == "__main__":
tf_test.main()
| 37.5125 | 101 | 0.752416 |
fc6b6ad2f54a3f40536088de8066ad188b6b9d2d | 1,379 | py | Python | aoc2020/day13/day13_part2.py | GetPastTheMonkey/advent-of-code | db80be6d87baba4d5315cc69276905c55762da86 | ["MIT"] | 1 | 2019-09-15T16:37:24.000Z | 2019-09-15T16:37:24.000Z | aoc2020/day13/day13_part2.py | GetPastTheMonkey/advent-of-code | db80be6d87baba4d5315cc69276905c55762da86 | ["MIT"] | null | null | null | aoc2020/day13/day13_part2.py | GetPastTheMonkey/advent-of-code | db80be6d87baba4d5315cc69276905c55762da86 | ["MIT"] | null | null | null |
from functools import reduce
from typing import List
from utils import get_input_lines
mods = []
rems = []
for offset, bus_id in enumerate(list(get_input_lines(__file__))[1].split(",")):
if bus_id == "x":
continue
bus_id = int(bus_id)
# Use the Chinese Remainder Theorem: we need (X + offset) % bus_id == 0,
# i.e. X ≡ -offset (mod bus_id), so take k * bus_id - offset with k chosen
# so that the right-hand side is non-negative.
rhs = bus_id - offset if offset > 0 else 0
# Correct right hand side if it is too small
while rhs < 0:
rhs += bus_id
mods.append(bus_id)
rems.append(rhs)
def modular_inverse(a: int, b: int) -> int:
# Source: https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers
t, new_t = 0, 1
r, new_r = b, a
while new_r != 0:
quotient = r // new_r
t, new_t = new_t, t - quotient * new_t
r, new_r = new_r, r - quotient * new_r
if r > 1:
raise ValueError(f"Integer {a} is not invertible modulo {b}")
if t < 0:
t += b
return t
def chinese_remainder(remainders: List[int], modulos: List[int]) -> int:
solution = 0
big_m = reduce(lambda x1, x2: x1 * x2, modulos)
for a_i, m_i in zip(remainders, modulos):
big_m_i = big_m // m_i
solution += a_i * big_m_i * modular_inverse(big_m_i, m_i)
return solution % big_m
print(chinese_remainder(rems, mods))
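# Editor's self-check (not part of the original solution): the helpers above
# on hand-verifiable numbers. modular_inverse(3, 7) == 5 since 3 * 5 = 15 ≡ 1
# (mod 7), and the smallest x with x ≡ 2 (mod 3) and x ≡ 3 (mod 5) is 8.
assert modular_inverse(3, 7) == 5
assert chinese_remainder([2, 3], [3, 5]) == 8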
| 24.625 | 96 | 0.626541 |
7f7c1463679c4037d288e4ed5bb43d073696a252 | 2,601 | py | Python | src/namegen/driver_memory.py | MuistotKartalla/muistot-namegen | 97d0608db4e29636f127ddc7349d95c1e99c52e3 | ["MIT"] | null | null | null | src/namegen/driver_memory.py | MuistotKartalla/muistot-namegen | 97d0608db4e29636f127ddc7349d95c1e99c52e3 | ["MIT"] | null | null | null | src/namegen/driver_memory.py | MuistotKartalla/muistot-namegen | 97d0608db4e29636f127ddc7349d95c1e99c52e3 | ["MIT"] | 1 | 2022-03-23T13:14:49.000Z | 2022-03-23T13:14:49.000Z |
import base64
import collections
import gzip
import json
import typing
from .driver import Driver
from .utils import hash_file, read_initial_values, generate_next, get_random, get_state_file, get_word_file, lcm
class Item:
__slots__ = ['start', 'end', 'value']
start: str
end: str
value: int
def __init__(self, start: str, end: str, value: int):
self.start = start
self.end = end
self.value = value
def next(self):
self.value = generate_next(self.value)
return self
def to_dict(self):
return {
'start': self.start,
'end': self.end,
'value': self.value
}
@staticmethod
def from_dict(item: typing.Dict) -> 'Item':
return Item(start=item['start'], end=item['end'], value=int(item['value']))
class MemoryDriver(Driver):
items: typing.Deque[Item] = collections.deque()
def to_json(self) -> str:
return base64.b64encode(gzip.compress(json.dumps(dict(
items=list(map(lambda i: i.to_dict(), self.items))
)).encode('utf-8'))).decode('ascii')
def from_json(self, f: typing.TextIO):
self.items.clear()
self.items.extend(map(
lambda o: Item.from_dict(o),
json.loads(gzip.decompress(base64.b64decode(f.readline().strip().encode('ascii'))))['items']
))
def load_state(self):
try:
with open(get_state_file(), 'r') as f:
file_hash = f.readline().strip()
if file_hash == hash_file(get_word_file()):
self.from_json(f)
return True
except FileNotFoundError:
pass
return False
def start(self):
if not self.load_state():
starts, ends = read_initial_values()
i = 0
j = 0
cnt = 0
max_s = len(starts) - 1
max_e = len(ends) - 1
while cnt != lcm(max_s, max_e):
self.items.append(Item(start=starts[i], end=ends[j], value=get_random()))
i = i + 1 if i < max_s else 0
j = j + 1 if j < max_e else 0
cnt += 1
def stop(self):
with open(get_state_file(), 'w') as f:
f.write(hash_file(get_word_file()))
f.write('\n')
f.write(self.to_json())
def generate(self):
item = self.items.popleft()
try:
return f'{item.start}{item.end}#{item.value:04d}'
finally:
self.items.append(item.next())
__all__ = ['MemoryDriver']
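# Editor's usage sketch (not part of the package): how the driver above is
# meant to be exercised, assuming the word list resolved by get_word_file()
# exists on disk. start() builds (or reloads) the start/end combinations,
# generate() returns strings like 'SomeStartSomeEnd#0042', and stop()
# persists the deque via get_state_file() so numbering survives restarts.
if __name__ == '__main__':
    driver = MemoryDriver()
    driver.start()
    print([driver.generate() for _ in range(3)])
    driver.stop()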
| 27.967742 | 112 | 0.546328 |
83f77381424d84160ae5897ca820bf7cd327d70b | 10,857 | py | Python | src/models/wisenet_base/packages/DEXTR/train_pascal.py | JanAlexanderPersonal/covid19_weak_supervision | 5599e48c9945f1e08a2731740bc8f6e44a031703 | ["Apache-2.0"] | 7 | 2020-07-22T19:48:52.000Z | 2021-08-06T13:43:21.000Z | src/models/wisenet_base/packages/DEXTR/train_pascal.py | JanAlexanderPersonal/covid19_weak_supervision | 5599e48c9945f1e08a2731740bc8f6e44a031703 | ["Apache-2.0"] | 1 | 2021-03-06T15:57:21.000Z | 2021-03-06T15:57:21.000Z | src/models/wisenet_base/packages/DEXTR/train_pascal.py | JanAlexanderPersonal/covid19_weak_supervision | 5599e48c9945f1e08a2731740bc8f6e44a031703 | ["Apache-2.0"] | 1 | 2021-02-09T02:16:21.000Z | 2021-02-09T02:16:21.000Z |
import sys; sys.path.append("../../_EXTRAS"); import misc as ms
import socket
import timeit
from datetime import datetime
import scipy.misc as sm
from collections import OrderedDict
import glob
# PyTorch includes
import torch.optim as optim
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.nn.functional import upsample
# Tensorboard include
from tensorboardX import SummaryWriter  # re-enabled: "writer" is used in the training loop below
# Custom includes
from dataloaders.combine_dbs import CombineDBs as combine_dbs
import dataloaders.pascal as pascal
import dataloaders.sbd as sbd
from dataloaders import custom_transforms as tr
import networks.deeplab_resnet as resnet
from layers.loss import class_balanced_cross_entropy_loss
from dataloaders.helpers import *
# Set gpu_id to -1 to run in CPU mode, otherwise set the id of the corresponding gpu
gpu_id = 0
device = torch.device("cuda:"+str(gpu_id) if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
print('Using GPU: {} '.format(gpu_id))
# Setting parameters
use_sbd = False
nEpochs = 100 # Number of epochs for training
resume_epoch = 0 # Default is 0, change if want to resume
p = OrderedDict() # Parameters to include in report
classifier = 'psp' # Head classifier to use
p['trainBatch'] = 5 # Training batch size
testBatch = 5 # Testing batch size
useTest = 1 # See evolution of the test set when training?
nTestInterval = 10 # Run on test set every nTestInterval epochs
snapshot = 20 # Store a model every snapshot epochs
relax_crop = 50 # Enlarge the bounding box by relax_crop pixels
nInputChannels = 4 # Number of input channels (RGB + heatmap of extreme points)
zero_pad_crop = True # Insert zero padding when cropping the image
p['nAveGrad'] = 1 # Average the gradient of several iterations
p['lr'] = 1e-8 # Learning rate
p['wd'] = 0.0005 # Weight decay
p['momentum'] = 0.9 # Momentum
# Results and model directories (a new directory is generated for every run)
save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__)))
exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1]
if resume_epoch == 0:
runs = sorted(glob.glob(os.path.join(save_dir_root, 'run_*')))
run_id = int(runs[-1].split('_')[-1]) + 1 if runs else 0
else:
run_id = 0
save_dir = os.path.join(save_dir_root, 'run_' + str(run_id))
if not os.path.exists(os.path.join(save_dir, 'models')):
os.makedirs(os.path.join(save_dir, 'models'))
# Network definition
modelName = 'dextr_pascal'
net = resnet.resnet101(1, pretrained=True,
nInputChannels=nInputChannels, classifier=classifier)
if resume_epoch == 0:
print("Initializing from pretrained Deeplab-v2 model")
else:
print("Initializing weights from: {}".format(
os.path.join(save_dir, 'models', modelName + '_epoch-' + str(resume_epoch - 1) + '.pth')))
net.load_state_dict(
torch.load(os.path.join(save_dir, 'models', modelName + '_epoch-' + str(resume_epoch - 1) + '.pth'),
map_location=lambda storage, loc: storage))
train_params = [{'params': resnet.get_1x_lr_params(net), 'lr': p['lr']},
{'params': resnet.get_10x_lr_params(net), 'lr': p['lr'] * 10}]
net.to(device)
# Training the network
if resume_epoch != nEpochs:
# Logging into Tensorboard
log_dir = os.path.join(save_dir, 'models', datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
writer = SummaryWriter(log_dir=log_dir)  # re-enabled so the writer.add_scalar calls below work
# Use the following optimizer
optimizer = optim.SGD(train_params, lr=p['lr'], momentum=p['momentum'], weight_decay=p['wd'])
p['optimizer'] = str(optimizer)
# Preparation of the data loaders
composed_transforms_tr = transforms.Compose([
tr.RandomHorizontalFlip(),
tr.ScaleNRotate(rots=(-20, 20), scales=(.75, 1.25)),
tr.CropFromMask(crop_elems=('image', 'gt'), relax=relax_crop, zero_pad=zero_pad_crop),
tr.FixedResize(resolutions={'crop_image': (512, 512), 'crop_gt': (512, 512)}),
tr.ExtremePoints(sigma=10, pert=5, elem='crop_gt'),
tr.ToImage(norm_elem='extreme_points'),
tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
tr.ToTensor()])
composed_transforms_ts = transforms.Compose([
tr.CropFromMask(crop_elems=('image', 'gt'), relax=relax_crop, zero_pad=zero_pad_crop),
tr.FixedResize(resolutions={'crop_image': (512, 512), 'crop_gt': (512, 512)}),
tr.ExtremePoints(sigma=10, pert=0, elem='crop_gt'),
tr.ToImage(norm_elem='extreme_points'),
tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
tr.ToTensor()])
voc_train = pascal.VOCSegmentation(split='train', transform=composed_transforms_tr)
voc_val = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts)
if use_sbd:
sbd = sbd.SBDSegmentation(split=['train', 'val'], transform=composed_transforms_tr, retname=True)
db_train = combine_dbs([voc_train, sbd], excluded=[voc_val])
else:
db_train = voc_train
p['dataset_train'] = str(db_train)
p['transformations_train'] = [str(tran) for tran in composed_transforms_tr.transforms]
p['dataset_test'] = str(db_train)
p['transformations_test'] = [str(tran) for tran in composed_transforms_ts.transforms]
trainloader = DataLoader(db_train, batch_size=p['trainBatch'], shuffle=True, num_workers=2)
testloader = DataLoader(voc_val, batch_size=testBatch, shuffle=False, num_workers=2)
generate_param_report(os.path.join(save_dir, exp_name + '.txt'), p)
# Train variables
num_img_tr = len(trainloader)
num_img_ts = len(testloader)
running_loss_tr = 0.0
running_loss_ts = 0.0
aveGrad = 0
print("Training Network")
# Main Training and Testing Loop
for epoch in range(resume_epoch, nEpochs):
start_time = timeit.default_timer()
net.train()
for ii, sample_batched in enumerate(trainloader):
inputs, gts = sample_batched['concat'], sample_batched['crop_gt']
# Forward-Backward of the mini-batch
inputs.requires_grad_()
inputs, gts = inputs.to(device), gts.to(device)
output = net.forward(inputs)
output = upsample(output, size=(512, 512), mode='bilinear', align_corners=True)
# Compute the losses, side outputs and fuse
loss = class_balanced_cross_entropy_loss(output, gts, size_average=False, batch_average=True)
running_loss_tr += loss.item()
# Print stuff
if ii % num_img_tr == num_img_tr - 1:
running_loss_tr = running_loss_tr / num_img_tr
writer.add_scalar('data/total_loss_epoch', running_loss_tr, epoch)
print('[Epoch: %d, numImages: %5d]' % (epoch, ii*p['trainBatch']+inputs.data.shape[0]))
print('Loss: %f' % running_loss_tr)
running_loss_tr = 0
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time)+"\n")
# Backward the averaged gradient
loss /= p['nAveGrad']
loss.backward()
aveGrad += 1
# Update the weights once in p['nAveGrad'] forward passes
if aveGrad % p['nAveGrad'] == 0:
writer.add_scalar('data/total_loss_iter', loss.item(), ii + num_img_tr * epoch)
optimizer.step()
optimizer.zero_grad()
aveGrad = 0
# Save the model
if (epoch % snapshot) == snapshot - 1 and epoch != 0:
torch.save(net.state_dict(), os.path.join(save_dir, 'models', modelName + '_epoch-' + str(epoch) + '.pth'))
# One testing epoch
if useTest and epoch % nTestInterval == (nTestInterval - 1):
net.eval()
with torch.no_grad():
for ii, sample_batched in enumerate(testloader):
inputs, gts = sample_batched['concat'], sample_batched['crop_gt']
# Forward pass of the mini-batch
inputs, gts = inputs.to(device), gts.to(device)
output = net.forward(inputs)
output = upsample(output, size=(512, 512), mode='bilinear', align_corners=True)
# Compute the losses, side outputs and fuse
loss = class_balanced_cross_entropy_loss(output, gts, size_average=False)
running_loss_ts += loss.item()
# Print stuff
if ii % num_img_ts == num_img_ts - 1:
running_loss_ts = running_loss_ts / num_img_ts
print('[Epoch: %d, numImages: %5d]' % (epoch, ii*testBatch+inputs.data.shape[0]))
writer.add_scalar('data/test_loss_epoch', running_loss_ts, epoch)
print('Loss: %f' % running_loss_ts)
running_loss_ts = 0
writer.close()
# Generate result of the validation images
net.eval()
composed_transforms_ts = transforms.Compose([
tr.CropFromMask(crop_elems=('image', 'gt'), relax=relax_crop, zero_pad=zero_pad_crop),
tr.FixedResize(resolutions={'gt': None, 'crop_image': (512, 512), 'crop_gt': (512, 512)}),
tr.ExtremePoints(sigma=10, pert=0, elem='crop_gt'),
tr.ToImage(norm_elem='extreme_points'),
tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
tr.ToTensor()])
db_test = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts, retname=True)
testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1)
save_dir_res = os.path.join(save_dir, 'Results')
if not os.path.exists(save_dir_res):
os.makedirs(save_dir_res)
print('Testing Network')
with torch.no_grad():
# Main Testing Loop
for ii, sample_batched in enumerate(testloader):
inputs, gts, metas = sample_batched['concat'], sample_batched['gt'], sample_batched['meta']
# Forward of the mini-batch
inputs = inputs.to(device)
outputs = net.forward(inputs)
outputs = upsample(outputs, size=(512, 512), mode='bilinear', align_corners=True)
outputs = outputs.to(torch.device('cpu'))
for jj in range(int(inputs.size()[0])):
pred = np.transpose(outputs.data.numpy()[jj, :, :, :], (1, 2, 0))
pred = 1 / (1 + np.exp(-pred))
pred = np.squeeze(pred)
gt = tens2image(gts[jj, :, :, :])
bbox = get_bbox(gt, pad=relax_crop, zero_pad=zero_pad_crop)
result = crop2fullmask(pred, bbox, gt, zero_pad=zero_pad_crop, relax=relax_crop)
# Save the result, attention to the index jj
sm.imsave(os.path.join(save_dir_res, metas['image'][jj] + '-' + metas['object'][jj] + '.png'), result)
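# Editor's illustration (not part of the original script): conceptually, the
# 4th input channel consumed by the network is a Gaussian heatmap of the
# extreme points, produced above by tr.ExtremePoints(sigma=10, ...) and
# concatenated to the RGB crop by tr.ConcatInputs. A minimal stand-alone
# version of that heatmap looks roughly like this:
import numpy as np
def _extreme_point_heatmap(height, width, points, sigma=10.0):
    """Return a (height, width) map with one Gaussian bump per (x, y) point."""
    yy, xx = np.mgrid[0:height, 0:width]
    heatmap = np.zeros((height, width), dtype=np.float32)
    for px, py in points:
        heatmap = np.maximum(
            heatmap, np.exp(-((xx - px) ** 2 + (yy - py) ** 2) / (2.0 * sigma ** 2)))
    return heatmap
# A 4-channel input is then e.g. np.dstack([rgb_crop, 255.0 * heatmap]).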
| 43.25498 | 119 | 0.651653 |
5817d99fc9ab5cb79fbdf899c02f0a7221c6cd73 | 14,776 | py | Python | tests/vfs/zip_file_entry.py | jaegeral/dfvfs | 606d09bf4de0b5dcf20d1dddff879dfed5c93640 | ["Apache-2.0"] | null | null | null | tests/vfs/zip_file_entry.py | jaegeral/dfvfs | 606d09bf4de0b5dcf20d1dddff879dfed5c93640 | ["Apache-2.0"] | null | null | null | tests/vfs/zip_file_entry.py | jaegeral/dfvfs | 606d09bf4de0b5dcf20d1dddff879dfed5c93640 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the file entry implementation using the zipfile."""
import unittest
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from dfvfs.vfs import zip_file_entry
from dfvfs.vfs import zip_file_system
from tests import test_lib as shared_test_lib
class ZIPFileEntryTest(shared_test_lib.BaseTestCase):
"""Tests the ZIP extracted file entry."""
# pylint: disable=protected-access
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_path = self._GetTestFilePath(['syslog.zip'])
self._SkipIfPathNotExists(test_path)
self._os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
self._zip_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/', parent=self._os_path_spec)
self._file_system = zip_file_system.ZipFileSystem(
self._resolver_context, self._zip_path_spec)
self._file_system.Open()
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def testInitialize(self):
"""Test the __init__ function."""
file_entry = zip_file_entry.ZipFileEntry(
self._resolver_context, self._file_system, self._zip_path_spec,
is_virtual=True)
self.assertIsNotNone(file_entry)
# TODO: add tests for _GetDirectory function.
def testGetStat(self):
"""Tests the _GetStat function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
stat_object = file_entry._GetStat()
self.assertIsNotNone(stat_object)
self.assertEqual(stat_object.type, stat_object.TYPE_FILE)
self.assertEqual(stat_object.size, 1247)
self.assertEqual(stat_object.mode, 256)
self.assertEqual(stat_object.mtime, 1343141124)
# TODO: re-enable when dfdatetime updates are committed
# self.assertEqual(stat_object.mtime_nano, None)
def testGetStatAttribute(self):
"""Tests the _GetStatAttribute function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
stat_attribute = file_entry._GetStatAttribute()
self.assertIsNotNone(stat_attribute)
self.assertEqual(stat_attribute.mode, 0o100400)
self.assertEqual(stat_attribute.size, 1247)
self.assertEqual(stat_attribute.type, stat_attribute.TYPE_FILE)
# TODO: add tests for _GetSubFileEntries
def testAccessTime(self):
"""Test the access_time property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNone(file_entry.access_time)
def testChangeTime(self):
"""Test the change_time property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNone(file_entry.change_time)
def testCreationTime(self):
"""Test the creation_time property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNone(file_entry.creation_time)
def testDataStreams(self):
"""Test the data_streams property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_data_streams, 1)
data_stream_names = []
for data_stream in file_entry.data_streams:
data_stream_names.append(data_stream.name)
self.assertEqual(data_stream_names, [''])
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_data_streams, 0)
data_stream_names = []
for data_stream in file_entry.data_streams:
data_stream_names.append(data_stream.name)
self.assertEqual(data_stream_names, [])
def testModificationTime(self):
"""Test the modification_time property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry.modification_time)
def testName(self):
"""Test the name property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, 'syslog')
def testSize(self):
"""Test the size property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.size, 1247)
def testSubFileEntries(self):
"""Test the sub_file_entries property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_sub_file_entries, 2)
self._assertSubFileEntries(file_entry, ['syslog', 'wtmp.1'])
# Test on a zip file that has missing directory entries.
test_path = self._GetTestFilePath(['missing_directory_entries.zip'])
self._SkipIfPathNotExists(test_path)
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/', parent=test_os_path_spec)
file_system = zip_file_system.ZipFileSystem(
self._resolver_context, path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self._assertSubFileEntries(file_entry, ['folder'])
# The "folder" folder is a missing directory entry but should still
# be found due to the files found inside the directory.
sub_file_entry = next(file_entry.sub_file_entries)
self.assertTrue(sub_file_entry.IsVirtual())
self._assertSubFileEntries(sub_file_entry, ['syslog', 'wtmp.1'])
def testGetDataStream(self):
"""Tests the GetDataStream function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
data_stream_name = ''
data_stream = file_entry.GetDataStream(data_stream_name)
self.assertIsNotNone(data_stream)
self.assertEqual(data_stream.name, data_stream_name)
data_stream = file_entry.GetDataStream('bogus')
self.assertIsNone(data_stream)
def testGetParentFileEntry(self):
"""Tests the GetParentFileEntry function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
parent_file_entry = file_entry.GetParentFileEntry()
self.assertIsNotNone(parent_file_entry)
self.assertEqual(parent_file_entry.name, '')
# TODO: add tests for GetZipInfo function.
def testIsAllocated(self):
"""Test the IsAllocated function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertTrue(file_entry.IsAllocated())
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertTrue(file_entry.IsAllocated())
def testIsDevice(self):
"""Test the IsDevice function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsDevice())
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsDevice())
def testIsDirectory(self):
"""Test the IsDirectory function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsDirectory())
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertTrue(file_entry.IsDirectory())
def testIsFile(self):
"""Test the IsFile function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertTrue(file_entry.IsFile())
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsFile())
def testIsLink(self):
"""Test the IsLink function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsLink())
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsLink())
def testIsPipe(self):
"""Test the IsPipe function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsPipe())
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsPipe())
def testIsRoot(self):
"""Test the IsRoot function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsRoot())
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertTrue(file_entry.IsRoot())
def testIsSocket(self):
"""Test the IsSocket function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsSocket())
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsSocket())
def testIsVirtual(self):
"""Test the IsVirtual function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/syslog',
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsVirtual())
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_ZIP, location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertTrue(file_entry.IsVirtual())
# TODO: add tests for GetZipInfo function.
if __name__ == '__main__':
unittest.main()
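# Editor's sketch (not part of the test module): the same path-spec chaining
# the tests exercise, outside the unittest harness. 'syslog.zip' in setUp is
# just a test fixture; any ZIP archive path works the same way.
def _open_zip_member(zip_path, member='/syslog'):
    resolver_context = context.Context()
    os_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=zip_path)
    root_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_ZIP, location='/', parent=os_spec)
    file_system = zip_file_system.ZipFileSystem(resolver_context, root_spec)
    file_system.Open()
    member_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_ZIP, location=member, parent=os_spec)
    # Returns a ZipFileEntry, or None if the archive member does not exist.
    return file_system.GetFileEntryByPathSpec(member_spec)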
| 36.756219 | 80 | 0.754331 |
fba7a147c3bce36fc30172b39cb7379ac8ba2cfb | 16,090 | py | Python | datalad/distributed/tests/test_create_sibling_gitlab.py | AKSoo/datalad | dbc34478980c808a86b5531316c986abac953e37 | ["MIT"] | null | null | null | datalad/distributed/tests/test_create_sibling_gitlab.py | AKSoo/datalad | dbc34478980c808a86b5531316c986abac953e37 | ["MIT"] | 1 | 2020-12-01T20:13:51.000Z | 2020-12-01T20:13:51.000Z | datalad/distributed/tests/test_create_sibling_gitlab.py | jwodder/datalad | 2b92a764fdc64b750dad68eb51c817218a1ec153 | ["MIT"] | null | null | null |
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test create publication target on gitlab"""
import os
# this must import ok with and without gitlab
from datalad.api import (
Dataset,
create,
create_sibling_gitlab,
)
from datalad.tests.utils import (
assert_raises,
assert_repo_status,
assert_result_count,
assert_status,
eq_,
with_tempfile,
)
from datalad.utils import chpwd
def _get_nested_collections(path):
ds = Dataset(path).create()
c1 = ds.create(ds.pathobj / 'subdir' / 'collection1')
c1s1 = c1.create('sub1')
c1s2 = c1.create('sub2')
c2 = ds.create('collection2')
c2s1 = c2.create('sub1')
c2s11 = c2s1.create('deepsub1')
ds.save(recursive=True)
assert_repo_status(ds.path)
# return a catalog
return dict(
root=ds,
c1=c1,
c1s1=c1s2,
c1s2=c1s2,
c2=c2,
c2s1=c2s1,
c2s11=c2s11,
)
# doesn't actually need gitlab and exercises most of the decision logic
@with_tempfile
def test_dryrun(path):
ctlg = _get_nested_collections(path)
# no site config -> error
assert_raises(ValueError, ctlg['root'].create_sibling_gitlab)
# single project vs multi-dataset call
assert_raises(
ValueError,
ctlg['root'].create_sibling_gitlab,
site='site', project='one', recursive=True)
assert_raises(
ValueError,
ctlg['root'].create_sibling_gitlab,
site='site', project='one', path=['one', 'two'])
# explicit site, no path constraints; fails for lack of a project path config
res = ctlg['root'].create_sibling_gitlab(
dry_run=True, on_failure='ignore',
site='dummy',
)
assert_result_count(res, 1)
assert_result_count(
res, 1, path=ctlg['root'].path, type='dataset', status='error',
site='dummy', sibling='dummy',
)
# now a working, fully manual call
for p in (None, ctlg['root'].path):
res = ctlg['root'].create_sibling_gitlab(
dry_run=True, on_failure='ignore',
site='dummy', project='here',
path=p,
)
assert_result_count(res, 1)
assert_result_count(
res, 1, path=ctlg['root'].path, type='dataset', status='ok',
site='dummy', sibling='dummy', project='here',
)
# now configure a default gitlab site
ctlg['root'].config.set('datalad.gitlab-default-site', 'theone')
# we don't need to specify one anymore, but we can still customize
# the sibling name
res = ctlg['root'].create_sibling_gitlab(
dry_run=True, on_failure='ignore',
name='ursula', project='here',
)
assert_result_count(res, 1)
assert_result_count(
res, 1, path=ctlg['root'].path, type='dataset', status='ok',
site='theone', sibling='ursula', project='here',
)
# now configure a sibling name for this site
ctlg['root'].config.set('datalad.gitlab-theone-siblingname', 'dieter')
# and another one for another site
ctlg['root'].config.set('datalad.gitlab-otherone-siblingname', 'ulf')
# no need to specify 'name' anymore
res = ctlg['root'].create_sibling_gitlab(
dry_run=True, on_failure='ignore',
project='here',
)
assert_result_count(
res, 1, path=ctlg['root'].path, type='dataset', status='ok',
site='theone', sibling='dieter', project='here',
)
# properly switches the name based on site
res = ctlg['root'].create_sibling_gitlab(
dry_run=True, on_failure='ignore',
site='otherone', project='here',
)
assert_result_count(
res, 1, path=ctlg['root'].path, type='dataset', status='ok',
site='otherone', sibling='ulf', project='here',
)
# reports notneeded on existing='skip' with an existing remote
ctlg['root'].repo.add_remote('dieter', 'http://example.com')
res = ctlg['root'].create_sibling_gitlab(
dry_run=True, on_failure='ignore',
project='here', existing='skip',
)
assert_result_count(
res, 1, path=ctlg['root'].path, type='dataset', status='notneeded',
site='theone', sibling='dieter',
)
ctlg['root'].repo.remove_remote('dieter')
# lastly, configure a project path
ctlg['root'].config.set('datalad.gitlab-theone-project', 'secret')
# now we can drive it blind
res = ctlg['root'].create_sibling_gitlab(dry_run=True)
assert_result_count(
res, 1, path=ctlg['root'].path, type='dataset', status='ok',
site='theone', sibling='dieter', project='secret',
)
# we can make use of the config in the base dataset to drive
# calls on subdatasets: use -d plus a path
res = ctlg['root'].create_sibling_gitlab(path='subdir', dry_run=True)
# only a single result, doesn't touch the parent
assert_result_count(res, 1)
assert_result_count(
res, 1, path=ctlg['c1'].path, type='dataset', status='ok',
site='theone', sibling='dieter',
# hierarchical setup: directories become groups,
# which implies each dataset is in its own group;
# the project itself is placed at '_repo_' to give URLs like
# http://site/dir/dir/dir/_repo_.git
# as a balance between readability and name conflict minimization
project='secret/{}/_repo_'.format(
ctlg['c1'].pathobj.relative_to(ctlg['root'].pathobj).as_posix()),
)
# we get the same result with an explicit layout request
expl_res = ctlg['root'].create_sibling_gitlab(
path='subdir', layout='hierarchy', dry_run=True)
eq_(res, expl_res)
# layout can be configured too, "collection" is "flat" in a group
ctlg['root'].config.set('datalad.gitlab-theone-layout', 'collection')
res = ctlg['root'].create_sibling_gitlab(
path='subdir', dry_run=True)
assert_result_count(
res, 1, path=ctlg['c1'].path, type='dataset', status='ok',
# http://site/group/dir--dir--dir--name.git
project='secret/{}'.format(str(
ctlg['c1'].pathobj.relative_to(ctlg['root'].pathobj)).replace(
os.sep, '--')),
)
# make sure the reference dataset does not conflict with its group in this
# case
res = ctlg['root'].create_sibling_gitlab(dry_run=True)
assert_result_count(
res, 1, path=ctlg['root'].path, type='dataset', status='ok',
project='secret/_repo_')
# "flat" does GitHub-style
ctlg['root'].config.set('datalad.gitlab-theone-layout', 'flat')
res = ctlg['root'].create_sibling_gitlab(
path='subdir', dry_run=True)
assert_result_count(
res, 1, path=ctlg['c1'].path, type='dataset', status='ok',
# http://site/base--dir--dir--dir--name.git
project='secret--{}'.format(str(
ctlg['c1'].pathobj.relative_to(ctlg['root'].pathobj)).replace(
os.sep, '--')),
)
# the results do not depend on explicitly given datasets, if we just enter
# the parent dataset we get the same results
with chpwd(str(ctlg['root'].pathobj / 'subdir')):
rel_res = create_sibling_gitlab(path=os.curdir, dry_run=True)
eq_(res, rel_res)
# and again the same results if we are in a subdataset and point to a parent
# dataset as a reference and config provider
with chpwd(ctlg['c1'].path):
rel_res = create_sibling_gitlab(
dataset=ctlg['root'].path, path=os.curdir, dry_run=True)
eq_(res, rel_res)
# blows on unknown layout
ctlg['root'].config.unset('datalad.gitlab-theone-layout')
assert_raises(
ValueError,
ctlg['root'].create_sibling_gitlab, layout='funny', dry_run=True)
# and finally recursion
res = ctlg['root'].create_sibling_gitlab(recursive=True, dry_run=True)
# one result per dataset
assert_result_count(res, len(ctlg))
# verbose check of target layout (easier to see target pattern for humans)
# default layout: hierarchy
eq_(
sorted(r['project'] for r in res),
[
'secret',
'secret/collection2/_repo_',
'secret/collection2/sub1/_repo_',
'secret/collection2/sub1/deepsub1/_repo_',
'secret/subdir/collection1/_repo_',
'secret/subdir/collection1/sub1/_repo_',
'secret/subdir/collection1/sub2/_repo_',
]
)
res = ctlg['root'].create_sibling_gitlab(
recursive=True, layout='collection', dry_run=True)
assert_result_count(res, len(ctlg))
eq_(
sorted(r['project'] for r in res),
[
'secret/_repo_',
'secret/collection2',
'secret/collection2--sub1',
'secret/collection2--sub1--deepsub1',
'secret/subdir--collection1',
'secret/subdir--collection1--sub1',
'secret/subdir--collection1--sub2',
],
)
res = ctlg['root'].create_sibling_gitlab(
recursive=True, layout='flat', dry_run=True)
assert_result_count(res, len(ctlg))
eq_(
sorted(r['project'] for r in res),
[
'secret',
'secret--collection2',
'secret--collection2--sub1',
'secret--collection2--sub1--deepsub1',
'secret--subdir--collection1',
'secret--subdir--collection1--sub1',
'secret--subdir--collection1--sub2',
],
)
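# Editor's illustration (not part of the test suite): the project paths
# asserted above follow this rough mapping from a subdataset's relative path
# to a GitLab project path ('secret' plays the role of the configured root
# project). This only mirrors the expectations in test_dryrun; the
# authoritative logic lives in datalad's create_sibling_gitlab itself.
def _illustrate_layout(base, relpath, layout):
    flat = relpath.replace('/', '--')
    if layout == 'hierarchy':
        # every directory becomes a group, the repository sits at '_repo_'
        return '{}/{}/_repo_'.format(base, relpath) if relpath else base
    if layout == 'collection':
        # one group for the root dataset, flat project names inside it
        return '{}/{}'.format(base, flat) if relpath else '{}/_repo_'.format(base)
    if layout == 'flat':
        # GitHub-style: everything mangled into a single project name
        return '{}--{}'.format(base, flat) if relpath else base
    raise ValueError(layout)
# e.g. _illustrate_layout('secret', 'collection2/sub1', 'hierarchy')
#   -> 'secret/collection2/sub1/_repo_'
#      _illustrate_layout('secret', 'collection2/sub1', 'collection')
#   -> 'secret/collection2--sub1'
#      _illustrate_layout('secret', 'collection2/sub1', 'flat')
#   -> 'secret--collection2--sub1'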
class _FakeGitLab(object):
def __init__(self, site):
pass
class _NewProjectGitLab(_FakeGitLab):
def get_project(self, path):
return None
def create_project(self, path, description=None):
return dict(
http_url_to_repo='http://example.com',
ssh_url_to_repo='example.com',
description=description,
)
class _ExistingProjectGitLab(_FakeGitLab):
def get_project(self, path):
return dict(
http_url_to_repo='http://example.com',
ssh_url_to_repo='example.com',
)
class _ExistingProjectOtherURLGitLab(_FakeGitLab):
def get_project(self, path):
return dict(
http_url_to_repo='http://example2.com',
ssh_url_to_repo='example2.com',
)
class _CreateFailureGitLab(_FakeGitLab):
def get_project(self, path):
return None
def create_project(self, path, description=None):
raise RuntimeError
@with_tempfile
def test_fake_gitlab(path):
from unittest.mock import patch
ds = Dataset(path).create()
with patch("datalad.distributed.create_sibling_gitlab.GitLabSite", _NewProjectGitLab):
res = ds.create_sibling_gitlab(site='dummy', project='here', description='thisisit')
assert_result_count(res, 2)
# GitLab success
assert_result_count(
res, 1, action='create_sibling_gitlab', path=path, type='dataset',
site='dummy', sibling='dummy', project='here', description='thisisit',
project_attributes={
'http_url_to_repo': 'http://example.com',
'ssh_url_to_repo': 'example.com',
'description': 'thisisit'
},
status='ok')
assert_result_count(
res, 1, action='configure-sibling', path=path, name='dummy',
url='http://example.com', status='ok')
# test sibling name conflicts
with patch("datalad.distributed.create_sibling_gitlab.GitLabSite", _ExistingProjectGitLab):
res = ds.create_sibling_gitlab(path=ds.path, site='dummy',
project='here', existing='skip')
assert_result_count(res, 1)
assert_result_count(
res, 0, action='create_sibling_gitlab',
message=['already has a configured sibling "%s"', "dummy"],
path=path,
refds=path,
site='dummy', sibling='dummy',
status='notneeded',
type='dataset'
)
# a sibling name conflict with existing='error' should yield an error
with patch("datalad.distributed.create_sibling_gitlab.GitLabSite", _ExistingProjectGitLab):
res = ds.create_sibling_gitlab(path=ds.path, site='dummy',
project='here', existing='skip')
assert_result_count(res, 1)
assert_result_count(
res, 0, action='create_sibling_gitlab',
message=['already has a configured sibling "%s"', "dummy"],
path=path,
refds=path,
site='dummy', sibling='dummy',
status='error',
type='dataset'
)
# try recreation, the sibling is already configured, same setup, no error
with patch("datalad.distributed.create_sibling_gitlab.GitLabSite",
_ExistingProjectGitLab):
res = ds.create_sibling_gitlab(path=ds.path, site='dummy',
project='here', existing='reconfigure')
assert_result_count(
res, 1, action='configure-sibling', path=path, name='dummy',
url='http://example.com', status='ok')
# but error when the name differs
res = ds.create_sibling_gitlab(
site='dummy', project='here', name='othername', on_failure='ignore')
assert_result_count(res, 1)
assert_result_count(
res, 1, action='create_sibling_gitlab', path=path,
site='dummy', sibling='othername', project='here',
project_attributes={
'http_url_to_repo': 'http://example.com',
'ssh_url_to_repo': 'example.com'
},
status='error')
with patch("datalad.distributed.create_sibling_gitlab.GitLabSite", _CreateFailureGitLab):
assert_status(
'error',
ds.create_sibling_gitlab(site='dummy', project='here', on_failure='ignore')
)
# new sibling, ssh access
with patch("datalad.distributed.create_sibling_gitlab.GitLabSite", _NewProjectGitLab):
res = ds.create_sibling_gitlab(site='sshsite', project='here', access='ssh')
assert_result_count(res, 2)
assert_result_count(
res, 1, action='create_sibling_gitlab', path=path, type='dataset',
site='sshsite', sibling='sshsite', project='here',
project_attributes={
'http_url_to_repo': 'http://example.com',
'ssh_url_to_repo': 'example.com',
'description': None
},
status='ok')
assert_result_count(
res, 1, action='configure-sibling', path=path, name='sshsite',
url='example.com', status='ok')
with patch("datalad.distributed.create_sibling_gitlab.GitLabSite",
_ExistingProjectOtherURLGitLab):
res = ds.create_sibling_gitlab(site='sshsite', project='here',
access='ssh', on_failure='ignore',
name='sshsite2')
assert_result_count(res, 1)
assert_result_count(
res, 0, action='create_sibling_gitlab',
message=["There is already a project at '%s' on site '%s', "
"but no sibling with name '%s' is configured, "
"maybe use --existing=reconfigure", "here", "sshsite",
"sshsite2"],
path=path,
refds=path,
site='sshsite', sibling='sshsite2', project='here',
project_attributes={
'http_url_to_repo': 'http://example2.com',
'ssh_url_to_repo': 'example2.com'
},
status='error',
type='dataset')
# same goes for switching the access type without --reconfigure
assert_status(
'error',
ds.create_sibling_gitlab(site='sshsite', project='here',
access='http', on_failure='ignore')
)
| 38.127962
| 95
| 0.600435
|
e904fc15dac021e1d3ef7996c3f9d6c5ba9f9160
| 5,518
|
py
|
Python
|
builder/builder.py
|
dfm/builder
|
c237559d6c8169f4c9b25119f46ee1a2eb08272f
|
[
"MIT"
] | null | null | null |
builder/builder.py
|
dfm/builder
|
c237559d6c8169f4c9b25119f46ee1a2eb08272f
|
[
"MIT"
] | null | null | null |
builder/builder.py
|
dfm/builder
|
c237559d6c8169f4c9b25119f46ee1a2eb08272f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
__all__ = ["Library", "build_ext"]
import os
import re
import glob
import logging
from functools import wraps
from distutils.version import StrictVersion
try:
from setuptools.command.build_ext import build_ext as _build_ext
except ImportError:
from distutils.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
def build_extension(self, ext):
include_dirs = ext.include_dirs + self.compiler.include_dirs
library_dirs = ext.library_dirs + self.compiler.library_dirs
libs = list(ext.libraries)
ext.libraries = []
for lib in libs:
if not hasattr(lib, "find_include"):
                ext.libraries.append(lib)
continue
ext.include_dirs += lib.find_include(hint=include_dirs)[1]
lds, libs = lib.find_libraries(hint=library_dirs)
ext.library_dirs += lds
ext.libraries += libs
ext.extra_compile_args += lib.extra_compile_args()
_build_ext.build_extension(self, ext)
class _cached(object):
def __init__(self, k):
self.k = k
def __call__(self, f):
@wraps(f)
def wrapped(s, *args, **kwargs):
v = getattr(s, self.k)
if v is None:
v = f(s, *args, **kwargs)
setattr(s, self.k, v)
return v
return wrapped
class Library(object):
name = None
header_pattern = None
library_pattern = None
library_name = None
version_header = None
version_re_list = []
include_dirs = [
"/usr/local/include",
"/usr/local/homebrew/include",
"/opt/local/var/macports/software",
"/opt/local/include",
"/usr/include",
"/usr/include/local",
]
library_dirs = [
"/usr/local/lib",
"/usr/local/homebrew/lib",
"/opt/local/lib",
"/usr/lib",
]
dependencies = []
def __init__(self, min_version=None, required=True):
assert self.name is not None, "Subclasses must have a name!"
self.min_version = min_version
self.required = required
self._include = None
self._libraries = None
@_cached("_include")
def find_include(self, hint=None, verbose=True):
# Find the include directories for all the dependencies.
include_dirs = [d for dep in self.dependencies
for d in dep.find_include()[1]]
if self.header_pattern is None:
return None, include_dirs
# Loop over the possible search directories and look for the header.
search_dirs = [] if hint is None else hint
search_dirs += self.include_dirs
for d in search_dirs:
fns = glob.glob(os.path.join(d, self.header_pattern))
if not len(fns):
continue
version = self.get_version(d)
if (self.min_version is not None and
StrictVersion(version) < StrictVersion(self.min_version)):
logging.warn("Found previous version of {0} in {1}"
.format(self.name, d))
continue
if verbose:
if version is None:
print("Found {0} in {1}".format(self.name, d))
else:
print("Found {0} in {1} [{2}]".format(self.name, d,
version))
return version, include_dirs + [d]
if self.required:
raise RuntimeError("Couldn't find required headers for {0}"
.format(self.name))
else:
logging.warn("Couldn't find headers for {0}".format(self.name))
return None, include_dirs
@_cached("_libraries")
def find_libraries(self, hint=None, verbose=True):
# Find the include directories for all the dependencies.
libraries, library_dirs = [], []
for dep in self.dependencies:
dirs, libs = dep.find_libraries()
library_dirs += dirs
libraries += libs
if self.library_pattern is None:
return library_dirs, libraries
# Loop over the possible search directories and look for the header.
search_dirs = [] if hint is None else hint
search_dirs += self.library_dirs
for d in search_dirs:
fns = glob.glob(os.path.join(d, self.library_pattern))
if not len(fns):
continue
if verbose:
print("Found {0} library in {1}".format(self.name, d))
return library_dirs + [d], libraries + [self.library_name]
if self.required:
raise RuntimeError("Couldn't find required library {0}"
.format(self.name))
else:
logging.warn("Couldn't find library {0}".format(self.name))
return library_dirs, libraries
def get_version(self, d):
if self.version_header is None:
return None
fn = os.path.join(d, self.version_header)
if not os.path.exists(fn):
raise RuntimeError("The version header {0} doesn't exist"
.format(self.version_header))
txt = open(fn, "r").read()
v = [re.findall(pattern, txt)[0] for pattern in self.version_re_list]
return ".".join(v)
def extra_compile_args(self):
return []
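# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). It assumes a
# header-only Eigen 3 install under a standard prefix such as
# /usr/include/eigen3; the class name, glob patterns and version regexes are
# hypothetical and only show how Library is meant to be subclassed and handed
# to build_ext through Extension(libraries=[...]).
class _EigenExample(Library):
    name = "eigen"
    # header-only library: no library_pattern / library_name required
    header_pattern = os.path.join("eigen3", "Eigen", "Core")
    version_header = os.path.join("eigen3", "Eigen", "src", "Core", "util",
                                  "Macros.h")
    version_re_list = [
        r"#define EIGEN_WORLD_VERSION (\d+)",
        r"#define EIGEN_MAJOR_VERSION (\d+)",
        r"#define EIGEN_MINOR_VERSION (\d+)",
    ]
if __name__ == "__main__":
    # `python builder.py` prints the detected version and include dirs, e.g.
    # ('3.4.0', [..., '/usr/include']) -- or (None, [...]) when not installed.
    print(_EigenExample(min_version="3.2.0", required=False).find_include())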
| 32.269006
| 78
| 0.570859
|
67d2cb668d6117de7a354e4880195ad8546e89fd
| 13,250
|
py
|
Python
|
rayml/automl/engine/engine_base.py
|
gcode-ai/rayml
|
92c4f3c6041f465fee27a6c03bd7959c4ef21124
|
[
"BSD-3-Clause"
] | null | null | null |
rayml/automl/engine/engine_base.py
|
gcode-ai/rayml
|
92c4f3c6041f465fee27a6c03bd7959c4ef21124
|
[
"BSD-3-Clause"
] | null | null | null |
rayml/automl/engine/engine_base.py
|
gcode-ai/rayml
|
92c4f3c6041f465fee27a6c03bd7959c4ef21124
|
[
"BSD-3-Clause"
] | null | null | null |
"""Base class for rayml engines."""
import sys
import time
import traceback
from abc import ABC, abstractmethod
from collections import OrderedDict
import numpy as np
import pandas as pd
import woodwork as ww
from rayml.automl.utils import tune_binary_threshold
from rayml.exceptions import PipelineScoreError
from rayml.preprocessing import split_data
from rayml.problem_types import (
is_binary,
is_classification,
is_multiclass,
is_time_series,
)
class EngineComputation(ABC):
"""Wrapper around the result of a (possibly asynchronous) engine computation."""
@abstractmethod
def get_result(self):
"""Gets the computation result. Will block until the computation is finished.
Raises Exception: If computation fails. Returns traceback.
"""
@abstractmethod
def done(self):
"""Whether the computation is done."""
@abstractmethod
def cancel(self):
"""Cancel the computation."""
class JobLogger:
"""Mimic the behavior of a python logging.Logger but stores all messages rather than actually logging them.
This is used during engine jobs so that log messages are recorded
after the job completes. This is desired so that all of the messages
for a single job are grouped together in the log.
"""
def __init__(self):
self.logs = []
def info(self, msg):
"""Store message at the info level."""
self.logs.append(("info", msg))
def debug(self, msg):
"""Store message at the debug level."""
self.logs.append(("debug", msg))
def warning(self, msg):
"""Store message at the warning level."""
self.logs.append(("warning", msg))
def error(self, msg):
"""Store message at the error level."""
self.logs.append(("error", msg))
def write_to_logger(self, logger):
"""Write all the messages to the logger, first in, first out (FIFO) order."""
        logger_method = {
            "info": logger.info,
            "debug": logger.debug,
            "warning": logger.warning,
            "error": logger.error,
        }
for level, message in self.logs:
method = logger_method[level]
method(message)
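# Usage sketch (illustrative only, not part of the original module): an engine
# hands a JobLogger to a job, the job records messages while it runs, and the
# stored messages are flushed to a real logger in FIFO order once the job
# completes, so a single job's output stays grouped together in the log.
#   import logging
#   job_log = JobLogger()
#   job_log.info("starting evaluation")
#   job_log.debug("fold 0 finished")
#   job_log.write_to_logger(logging.getLogger(__name__))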
class EngineBase(ABC):
"""Base class for rayml engines."""
@staticmethod
def setup_job_log():
"""Set up logger for job."""
return JobLogger()
@abstractmethod
def submit_evaluation_job(self, automl_config, pipeline, X, y):
"""Submit job for pipeline evaluation during AutoMLSearch."""
@abstractmethod
def submit_training_job(self, automl_config, pipeline, X, y):
"""Submit job for pipeline training."""
@abstractmethod
def submit_scoring_job(
self, automl_config, pipeline, X, y, objectives, X_train=None, y_train=None
):
"""Submit job for pipeline scoring."""
def train_pipeline(pipeline, X, y, automl_config, schema=True):
"""Train a pipeline and tune the threshold if necessary.
Args:
pipeline (PipelineBase): Pipeline to train.
X (pd.DataFrame): Features to train on.
y (pd.Series): Target to train on.
automl_config (AutoMLSearch): The AutoMLSearch object, used to access config and the error callback.
schema (bool): Whether to use the schemas for X and y. Defaults to True.
Returns:
pipeline (PipelineBase): A trained pipeline instance.
"""
X_threshold_tuning = None
y_threshold_tuning = None
if automl_config.X_schema and schema:
X.ww.init(schema=automl_config.X_schema)
if automl_config.y_schema and schema:
y.ww.init(schema=automl_config.y_schema)
threshold_tuning_objective = automl_config.objective
if (
is_binary(automl_config.problem_type)
and automl_config.optimize_thresholds
and automl_config.objective.score_needs_proba
and automl_config.alternate_thresholding_objective is not None
):
# use the alternate_thresholding_objective
threshold_tuning_objective = automl_config.alternate_thresholding_objective
if (
automl_config.optimize_thresholds
and pipeline.can_tune_threshold_with_objective(threshold_tuning_objective)
):
test_size_ = (
pipeline.forecast_horizon / len(X)
if is_time_series(automl_config.problem_type)
else 0.2
)
X, X_threshold_tuning, y, y_threshold_tuning = split_data(
X,
y,
pipeline.problem_type,
test_size=test_size_,
random_seed=pipeline.random_seed,
)
cv_pipeline = pipeline.clone()
cv_pipeline.fit(X, y)
tune_binary_threshold(
cv_pipeline,
threshold_tuning_objective,
cv_pipeline.problem_type,
X_threshold_tuning,
y_threshold_tuning,
X,
y,
)
return cv_pipeline
def train_and_score_pipeline(
pipeline, automl_config, full_X_train, full_y_train, logger
):
"""Given a pipeline, config and data, train and score the pipeline and return the CV or TV scores.
Args:
pipeline (PipelineBase): The pipeline to score.
automl_config (AutoMLSearch): The AutoMLSearch object, used to access config and the error callback.
full_X_train (pd.DataFrame): Training features.
full_y_train (pd.Series): Training target.
logger: Logger object to write to.
Raises:
Exception: If there are missing target values in the training set after data split.
Returns:
tuple of three items: First - A dict containing cv_score_mean, cv_scores, training_time and a cv_data structure with details.
Second - The pipeline class we trained and scored. Third - the job logger instance with all the recorded messages.
"""
start = time.time()
cv_data = []
logger.info("\tStarting cross validation")
# Encode target for classification problems so that we can support float targets. This is okay because we only use split to get the indices to split on
if is_classification(automl_config.problem_type):
y_mapping = {
original_target: encoded_target
for (encoded_target, original_target) in enumerate(
full_y_train.value_counts().index
)
}
full_y_train = ww.init_series(full_y_train.map(y_mapping))
cv_pipeline = pipeline
for i, (train, valid) in enumerate(
automl_config.data_splitter.split(full_X_train, full_y_train)
):
logger.debug(f"\t\tTraining and scoring on fold {i}")
X_train, X_valid = full_X_train.ww.iloc[train], full_X_train.ww.iloc[valid]
y_train, y_valid = full_y_train.ww.iloc[train], full_y_train.ww.iloc[valid]
if is_binary(automl_config.problem_type) or is_multiclass(
automl_config.problem_type
):
diff_train = set(np.setdiff1d(full_y_train, y_train))
diff_valid = set(np.setdiff1d(full_y_train, y_valid))
diff_string = (
f"Missing target values in the training set after data split: {diff_train}. "
if diff_train
else ""
)
diff_string += (
f"Missing target values in the validation set after data split: {diff_valid}."
if diff_valid
else ""
)
if diff_string:
raise Exception(diff_string)
objectives_to_score = [
automl_config.objective
] + automl_config.additional_objectives
try:
logger.debug(f"\t\t\tFold {i}: starting training")
cv_pipeline = train_pipeline(
pipeline, X_train, y_train, automl_config, schema=False
)
logger.debug(f"\t\t\tFold {i}: finished training")
if (
automl_config.optimize_thresholds
and is_binary(automl_config.problem_type)
and cv_pipeline.threshold is not None
):
logger.debug(
f"\t\t\tFold {i}: Optimal threshold found ({cv_pipeline.threshold:.3f})"
)
logger.debug(f"\t\t\tFold {i}: Scoring trained pipeline")
scores = cv_pipeline.score(
X_valid,
y_valid,
objectives=objectives_to_score,
X_train=X_train,
y_train=y_train,
)
logger.debug(
f"\t\t\tFold {i}: {automl_config.objective.name} score: {scores[automl_config.objective.name]:.3f}"
)
score = scores[automl_config.objective.name]
except Exception as e:
if automl_config.error_callback is not None:
automl_config.error_callback(
exception=e,
traceback=traceback.format_tb(sys.exc_info()[2]),
automl=automl_config,
fold_num=i,
pipeline=pipeline,
)
if isinstance(e, PipelineScoreError):
nan_scores = {objective: np.nan for objective in e.exceptions}
scores = {**nan_scores, **e.scored_successfully}
scores = OrderedDict(
{
o.name: scores[o.name]
for o in [automl_config.objective]
+ automl_config.additional_objectives
}
)
score = scores[automl_config.objective.name]
else:
score = np.nan
scores = OrderedDict(
zip(
[n.name for n in automl_config.additional_objectives],
[np.nan] * len(automl_config.additional_objectives),
)
)
ordered_scores = OrderedDict()
ordered_scores.update({automl_config.objective.name: score})
ordered_scores.update(scores)
ordered_scores.update({"# Training": y_train.shape[0]})
ordered_scores.update({"# Validation": y_valid.shape[0]})
evaluation_entry = {
"all_objective_scores": ordered_scores,
"mean_cv_score": score,
"binary_classification_threshold": None,
}
if (
is_binary(automl_config.problem_type)
and cv_pipeline is not None
and cv_pipeline.threshold is not None
):
evaluation_entry["binary_classification_threshold"] = cv_pipeline.threshold
cv_data.append(evaluation_entry)
training_time = time.time() - start
cv_scores = pd.Series([fold["mean_cv_score"] for fold in cv_data])
cv_score_mean = cv_scores.mean()
logger.info(
f"\tFinished cross validation - mean {automl_config.objective.name}: {cv_score_mean:.3f}"
)
return {
"scores": {
"cv_data": cv_data,
"training_time": training_time,
"cv_scores": cv_scores,
"cv_score_mean": cv_score_mean,
},
"pipeline": cv_pipeline,
"logger": logger,
}
def evaluate_pipeline(pipeline, automl_config, X, y, logger):
"""Function submitted to the submit_evaluation_job engine method.
Args:
pipeline (PipelineBase): The pipeline to score.
automl_config (AutoMLConfig): The AutoMLSearch object, used to access config and the error callback.
X (pd.DataFrame): Training features.
y (pd.Series): Training target.
logger: Logger object to write to.
Returns:
tuple of three items: First - A dict containing cv_score_mean, cv_scores, training_time and a cv_data structure with details.
Second - The pipeline class we trained and scored. Third - the job logger instance with all the recorded messages.
"""
logger.info(f"{pipeline.name}:")
X.ww.init(schema=automl_config.X_schema)
y.ww.init(schema=automl_config.y_schema)
return train_and_score_pipeline(
pipeline,
automl_config=automl_config,
full_X_train=X,
full_y_train=y,
logger=logger,
)
def score_pipeline(
pipeline, X, y, objectives, X_train=None, y_train=None, X_schema=None, y_schema=None
):
"""Wrap around pipeline.score method to make it easy to score pipelines with dask.
Args:
pipeline (PipelineBase): The pipeline to score.
X (pd.DataFrame): Features to score on.
y (pd.Series): Target used to calculate scores.
objectives (list[ObjectiveBase]): List of objectives to score on.
X_train (pd.DataFrame): Training features. Used for feature engineering in time series.
y_train (pd.Series): Training target. Used for feature engineering in time series.
X_schema (ww.TableSchema): Schema for features. Defaults to None.
y_schema (ww.ColumnSchema): Schema for columns. Defaults to None.
Returns:
dict: Dictionary object containing pipeline scores.
"""
if X_schema:
X.ww.init(schema=X_schema)
if y_schema:
y.ww.init(schema=y_schema)
return pipeline.score(X, y, objectives, X_train=X_train, y_train=y_train)
| 36.501377
| 155
| 0.627245
|
4b10eadb1e16e846fd746e33b83f1ea1db03512e
| 747
|
py
|
Python
|
examples/simple_configure.py
|
mikimn/arggo
|
79d7b0e7c391e9e1e8cf4621db95382ad9fd074e
|
[
"MIT"
] | 2
|
2021-05-26T17:29:50.000Z
|
2021-05-29T10:19:10.000Z
|
examples/simple_configure.py
|
mikimn/arggo
|
79d7b0e7c391e9e1e8cf4621db95382ad9fd074e
|
[
"MIT"
] | 10
|
2021-03-12T14:33:34.000Z
|
2021-05-23T14:48:45.000Z
|
examples/simple_configure.py
|
mikimn/arggo
|
79d7b0e7c391e9e1e8cf4621db95382ad9fd074e
|
[
"MIT"
] | 1
|
2021-09-22T18:03:29.000Z
|
2021-09-22T18:03:29.000Z
|
import os
from dataclasses import dataclass
import sys
sys.path.append(os.getcwd())
sys.path.append("../")
import arggo
from arggo.dataclass_utils import parser_field
@dataclass
class Arguments:
name: str = parser_field(help="The user's name.")
should_greet: bool = parser_field(help="Whether or not I should greet the user")
@arggo.configure(parser_argument_index=1, logging_dir="my_logs")
def greet_user(count: int, args: Arguments):
numeral = {1: "st", 2: "nd", 3: "rd"}
numeral = numeral[count] if count in numeral else "th"
if args.should_greet:
print(f"Greetings for the {count}{numeral} time, {args.name}!")
def main():
for i in range(4):
greet_user(i)
if __name__ == "__main__":
main()
| 23.34375
| 84
| 0.689424
|
e9e5d231a58d8bc436dd443594fc4f4142b6eb7e
| 3,728
|
py
|
Python
|
zaza/openstack/utilities/exceptions.py
|
gnuoy/zaza-openstack-tests
|
0546e01b627d7e0a785ef801e88743480e94cbed
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
zaza/openstack/utilities/exceptions.py
|
gnuoy/zaza-openstack-tests
|
0546e01b627d7e0a785ef801e88743480e94cbed
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
zaza/openstack/utilities/exceptions.py
|
gnuoy/zaza-openstack-tests
|
0546e01b627d7e0a785ef801e88743480e94cbed
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module of exceptions that zaza may raise."""
class MissingOSAthenticationException(Exception):
"""Exception when some data needed to authenticate is missing."""
pass
class CloudInitIncomplete(Exception):
"""Cloud init has not completed properly."""
pass
class SSHFailed(Exception):
"""SSH failed."""
pass
class NeutronAgentMissing(Exception):
"""Agent binary does not appear in the Neutron agent list."""
pass
class NeutronBGPSpeakerMissing(Exception):
"""No BGP speaker appeared on agent."""
pass
class ApplicationNotFound(Exception):
"""Application not found in machines."""
def __init__(self, application):
"""Create Application not found exception.
:param application: Name of the application
:type application: string
:returns: ApplicationNotFound Exception
"""
msg = ("{} application was not found in machines.".
format(application))
super(ApplicationNotFound, self).__init__(msg)
class SeriesNotFound(Exception):
"""Series not found in status."""
pass
class OSVersionNotFound(Exception):
"""OS Version not found."""
pass
class ReleasePairNotFound(Exception):
"""Release pair was not found in OPENSTACK_RELEASES_PAIRS."""
pass
class KeystoneAuthorizationStrict(Exception):
"""Authorization/Policy too strict."""
pass
class KeystoneAuthorizationPermissive(Exception):
"""Authorization/Policy too permissive."""
pass
class KeystoneWrongTokenProvider(Exception):
"""A token was issued from the wrong token provider."""
pass
class KeystoneKeyRepositoryError(Exception):
"""Error in key repository.
    This may be caused by issues with one of:
- incomplete or missing data in `key_repository` in leader storage
- synchronization of keys to non-leader units
- rotation of keys
"""
pass
class ProcessNameCountMismatch(Exception):
"""Count of process names doesn't match."""
pass
class ProcessNameMismatch(Exception):
"""Name of processes doesn't match."""
pass
class PIDCountMismatch(Exception):
"""PID's count doesn't match."""
pass
class ProcessIdsFailed(Exception):
"""Process ID lookup failed."""
pass
class UnitNotFound(Exception):
"""Unit not found in actual dict."""
pass
class UnitCountMismatch(Exception):
"""Count of unit doesn't match."""
pass
class UbuntuReleaseNotFound(Exception):
"""Ubuntu release not found in list."""
pass
class ServiceNotFound(Exception):
"""Service not found on unit."""
pass
class CephPoolNotFound(Exception):
"""Ceph pool not found."""
pass
class CephPoolNotConfigured(Exception):
"""Ceph pool not configured properly."""
pass
class NovaGuestMigrationFailed(Exception):
"""Nova guest migration failed."""
pass
class NovaGuestRestartFailed(Exception):
"""Nova guest restart failed."""
pass
class PolicydError(Exception):
"""Policyd override failed."""
pass
class CACERTNotFound(Exception):
"""Could not find cacert."""
pass
| 19.316062
| 74
| 0.69367
|
1a818f4925dc80ebd9045151a8a3fded973a1002
| 1,829
|
py
|
Python
|
backend/api/v1/aggregator/helpers.py
|
bcgov-c/wally
|
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
|
[
"Apache-2.0"
] | null | null | null |
backend/api/v1/aggregator/helpers.py
|
bcgov-c/wally
|
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
|
[
"Apache-2.0"
] | null | null | null |
backend/api/v1/aggregator/helpers.py
|
bcgov-c/wally
|
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
|
[
"Apache-2.0"
] | null | null | null |
import math
import json
import pyproj
from geojson import dumps, FeatureCollection, Feature, Point
from shapely.geometry import mapping
from api.config import GWELLS_API_URL
from api.v1.aggregator.schema import ExternalAPIRequest, GWELLSAPIParams, json_to_geojson
EARTH_RADIUS = 6378137
MAX_LATITUDE = 85.0511287798
transform_4326_3005 = pyproj.Transformer.from_proj(
pyproj.Proj(init='epsg:4326'),
pyproj.Proj(init='epsg:3005')
).transform
transform_3005_4326 = pyproj.Transformer.from_proj(
pyproj.Proj(init='epsg:3005'),
pyproj.Proj(init='epsg:4326')
).transform
transform_4326_4140 = pyproj.Transformer.from_proj(
pyproj.Proj(init='epsg:4326'),
pyproj.Proj(init='epsg:4140')
).transform
transform_4140_4326 = pyproj.Transformer.from_proj(
pyproj.Proj(init='epsg:4140'),
pyproj.Proj(init='epsg:4326')
).transform
# Converts lat/lng to a projected [y, x] (northing, easting) point array
def spherical_mercator_project(lat, lng):
d = math.pi / 180
lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)
sin = math.sin(lat * d)
return [EARTH_RADIUS * math.log((1 + sin) / (1 - sin)) / 2, EARTH_RADIUS * lng * d]
# Converts to lat,lng array from point
def spherical_mercator_unproject(x, y):
d = 180 / math.pi
return [(2 * math.atan(math.exp(y / EARTH_RADIUS)) - (math.pi / 2)) * d, x * d / EARTH_RADIUS]
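# Round-trip sketch (illustrative): projecting a lat/lng and unprojecting the
# resulting point recovers the original coordinates. Note the ordering:
# spherical_mercator_project returns [y, x] (northing, easting) in metres,
# while spherical_mercator_unproject takes (x, y) and returns [lat, lng].
#   y, x = spherical_mercator_project(49.25, -123.1)
#   lat, lng = spherical_mercator_unproject(x, y)
#   assert abs(lat - 49.25) < 1e-9 and abs(lng + 123.1) < 1e-9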
def gwells_api_request(within):
"""
creates an ExternalAPIRequest object with params for accessing data from the
GWELLS API.
"""
url = f"{GWELLS_API_URL}/api/v2/wells"
params = GWELLSAPIParams(
within=json.dumps(mapping(within)),
geojson="false"
)
return ExternalAPIRequest(
url=url,
layer="groundwater_wells",
excluded_fields=["well_guid", "drilling_company"],
id_field="well_tag_number",
q=params
)
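# Usage sketch (illustrative): ``within`` is expected to be a shapely geometry.
# It is serialized to GeoJSON and wrapped in an ExternalAPIRequest that the
# aggregator can dispatch against the GWELLS wells endpoint. The polygon
# coordinates below are arbitrary.
#   from shapely.geometry import Polygon
#   area = Polygon([(-123.1, 49.2), (-123.0, 49.2), (-123.0, 49.3), (-123.1, 49.3)])
#   req = gwells_api_request(area)
#   # req.url ends in /api/v2/wells; req.q.within holds the GeoJSON string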
| 28.138462
| 98
| 0.700929
|
7447bd308a621cd0f12f389b22d15aae4b2137d0
| 1,248
|
py
|
Python
|
tests/test_controllers_opendaylight.py
|
dfarrell07/CBenchF
|
a3a5d9a6d2a703a77499a9ee8a2592d2f90b52e7
|
[
"BSD-2-Clause"
] | 1
|
2020-11-18T03:13:45.000Z
|
2020-11-18T03:13:45.000Z
|
tests/test_controllers_opendaylight.py
|
dfarrell07/CBenchF
|
a3a5d9a6d2a703a77499a9ee8a2592d2f90b52e7
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_controllers_opendaylight.py
|
dfarrell07/CBenchF
|
a3a5d9a6d2a703a77499a9ee8a2592d2f90b52e7
|
[
"BSD-2-Clause"
] | 1
|
2019-02-20T14:34:57.000Z
|
2019-02-20T14:34:57.000Z
|
"""Tests for the module that abstracts the OpenDaylight controller."""
import unittest
import cbenchf.controllers.opendaylight as odl_mod
class TestInit(unittest.TestCase):
"""Test building the ODL abstraction."""
@unittest.skip("Travis doesn't have Docker")
def test_basic(self):
"""Test a basic, all default construction."""
odl_mod.OpenDaylight()
class TestStart(unittest.TestCase):
"""Test starting an OpenDaylight controller instance"""
def setUp(self):
"""Build an ODL abstraction."""
self.odl = odl_mod.OpenDaylight()
@unittest.skip("Travis doesn't have Docker")
def test_present(self):
"""Confirm that the start method is present. Required API method."""
        self.assertTrue(
            hasattr(self.odl, "start") and callable(getattr(self.odl, "start")))
class TestStop(unittest.TestCase):
"""Test stopping an OpenDaylight controller instance"""
def setUp(self):
"""Build an ODL abstraction."""
self.odl = odl_mod.OpenDaylight()
@unittest.skip("Travis doesn't have Docker")
def test_present(self):
"""Confirm that the stop method is present. Required API method."""
        self.assertTrue(
            hasattr(self.odl, "stop") and callable(getattr(self.odl, "stop")))
| 28.363636
| 76
| 0.674679
|
544ca046e83a06b991014cd0c0e16e41fa92d041
| 590
|
py
|
Python
|
chapter05/wsgirequest_demo/front/views.py
|
Tomtao626/django
|
fe945063593b4bfe82d74842f728b854b501a294
|
[
"Apache-2.0"
] | null | null | null |
chapter05/wsgirequest_demo/front/views.py
|
Tomtao626/django
|
fe945063593b4bfe82d74842f728b854b501a294
|
[
"Apache-2.0"
] | null | null | null |
chapter05/wsgirequest_demo/front/views.py
|
Tomtao626/django
|
fe945063593b4bfe82d74842f728b854b501a294
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from django.core.handlers.wsgi import WSGIRequest
# Create your views here.
def index(request):
print(request)
print(type(request))
return HttpResponse("success")
def login(request):
print(request.get_host())
print(request.path)
print(request.get_full_path())
print(request.get_raw_uri())
print(request.META)
for k,v in request.META.items():
print(k,v)
    print(request.is_secure())  # whether the request uses HTTPS
    print(request.is_ajax())  # whether the request was made via AJAX
return HttpResponse("login")
| 24.583333
| 49
| 0.70678
|
af85c2306e0610da00c175bfda90993367783497
| 965
|
py
|
Python
|
sampletab.py
|
alexdevonport/pypimm-gui
|
db156a73934b69927f67952917953302627eea67
|
[
"MIT"
] | null | null | null |
sampletab.py
|
alexdevonport/pypimm-gui
|
db156a73934b69927f67952917953302627eea67
|
[
"MIT"
] | null | null | null |
sampletab.py
|
alexdevonport/pypimm-gui
|
db156a73934b69927f67952917953302627eea67
|
[
"MIT"
] | null | null | null |
import tkinter as tk
class SampleTab(tk.Frame):
"""
"""
    def __init__(self, master, session):
        super().__init__(master)
self.master = master
self.session = session
self.sampleListFrame = tk.Frame(self)
self.sampleList = tk.Listbox(self.sampleListFrame)
self.sampleList.pack()
#self.sampleButtons = tk.Frame(self)
self.addSampleButton = tk.Button(self.sampleListFrame,
text='Add Sample')
self.addSampleButton.pack(fill='x')
self.renameSampleButton = tk.Button(self.sampleListFrame,
text='Rename Sample')
self.renameSampleButton.pack(fill='x')
self.removeSampleButton = tk.Button(self.sampleListFrame,
text='Remove Sample')
self.removeSampleButton.pack(fill='x')
self.sampleListFrame.pack(side='left')
return None
def update(self):
"""
"""
return None
| 29.242424
| 65
| 0.601036
|
ed4cdf74b11c877cacf4116de0cbd018a9430c6b
| 10,524
|
py
|
Python
|
ssseg/inference.py
|
zhizhangxian/sssegmentation
|
90613f6e0abf4cdd729cf382ab2a915e106d8649
|
[
"MIT"
] | 2
|
2021-10-31T21:52:30.000Z
|
2021-12-21T12:35:37.000Z
|
ssseg/inference.py
|
zhizhangxian/sssegmentation
|
90613f6e0abf4cdd729cf382ab2a915e106d8649
|
[
"MIT"
] | null | null | null |
ssseg/inference.py
|
zhizhangxian/sssegmentation
|
90613f6e0abf4cdd729cf382ab2a915e106d8649
|
[
"MIT"
] | null | null | null |
'''
Function:
Visualize the segmentation results by using our segmentors
Author:
Zhenchao Jin
'''
import os
import cv2
import copy
import torch
import warnings
import argparse
import numpy as np
import torch.nn.functional as F
from tqdm import tqdm
from modules import *
from cfgs import BuildConfig
warnings.filterwarnings('ignore')
'''parse arguments in command line'''
def parseArgs():
parser = argparse.ArgumentParser(description='SSSegmentation is an open source strongly supervised semantic segmentation toolbox based on PyTorch')
parser.add_argument('--imagedir', dest='imagedir', help='images dir for testing multi images', type=str)
parser.add_argument('--imagepath', dest='imagepath', help='imagepath for testing single image', type=str)
parser.add_argument('--outputfilename', dest='outputfilename', help='name to save output image(s)', type=str, default='')
parser.add_argument('--cfgfilepath', dest='cfgfilepath', help='config file path you want to use', type=str, required=True)
parser.add_argument('--checkpointspath', dest='checkpointspath', help='checkpoints you want to resume from', type=str, required=True)
args = parser.parse_args()
return args
'''demo for segmentation'''
class Demo():
def __init__(self, **kwargs):
# set attribute
for key, value in kwargs.items(): setattr(self, key, value)
'''start'''
def start(self):
# parse arguments
cmd_args = parseArgs()
cfg, cfg_file_path = BuildConfig(cmd_args.cfgfilepath)
cfg.SEGMENTOR_CFG['distributed']['is_on'] = False
cfg.SEGMENTOR_CFG['is_multi_gpus'] = False
assert cmd_args.imagepath or cmd_args.imagedir, 'imagepath or imagedir should be specified...'
# check backup dir
common_cfg = cfg.COMMON_CFG['test']
checkdir(common_cfg['backupdir'])
# cuda detect
use_cuda = torch.cuda.is_available()
# initialize logger_handle
logger_handle = Logger(common_cfg['logfilepath'])
        # instantiate the segmentor
cfg.SEGMENTOR_CFG['backbone']['pretrained'] = False
segmentor = BuildSegmentor(segmentor_cfg=copy.deepcopy(cfg.SEGMENTOR_CFG), mode='TEST')
if use_cuda: segmentor = segmentor.cuda()
        # instantiate the dataset
dataset = BuildDataset(mode='TEST', logger_handle=logger_handle, dataset_cfg=copy.deepcopy(cfg.DATASET_CFG), get_basedataset=True)
palette = BuildPalette(dataset_type=cfg.DATASET_CFG['type'], num_classes=cfg.SEGMENTOR_CFG['num_classes'], logger_handle=logger_handle)
# load checkpoints
cmd_args.local_rank = 0
checkpoints = loadcheckpoints(cmd_args.checkpointspath, logger_handle=logger_handle, cmd_args=cmd_args)
try:
segmentor.load_state_dict(checkpoints['model'])
except Exception as e:
logger_handle.warning(str(e) + '\n' + 'Try to load checkpoints by using strict=False...')
segmentor.load_state_dict(checkpoints['model'], strict=False)
# set eval
segmentor.eval()
# start to test
inference_cfg = copy.deepcopy(cfg.INFERENCE_CFG)
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
if not cmd_args.imagedir:
imagepaths = [cmd_args.imagepath]
else:
imagenames = os.listdir(cmd_args.imagedir)
imagepaths = [os.path.join(cmd_args.imagedir, name) for name in imagenames]
pbar = tqdm(range(len(imagepaths)))
for idx in pbar:
imagepath = imagepaths[idx]
if imagepath.split('.')[-1] not in ['jpg', 'jpeg', 'png']: continue
pbar.set_description('Processing %s' % imagepath)
infer_tricks = inference_cfg['tricks']
cascade_cfg = infer_tricks.get('cascade', {'key_for_pre_output': 'memory_gather_logits', 'times': 1, 'forward_default_args': None})
sample = dataset.read(imagepath, 'none.png', False)
image = sample['image']
sample = dataset.synctransform(sample, 'all')
image_tensor = sample['image'].unsqueeze(0).type(FloatTensor)
            for cascade_idx in range(cascade_cfg['times']):
                forward_args = None
                if cascade_idx > 0:
output_list = [
F.interpolate(outputs, size=output_list[-1].shape[2:], mode='bilinear', align_corners=segmentor.align_corners) for outputs in output_list
]
forward_args = {cascade_cfg['key_for_pre_output']: sum(output_list) / len(output_list)}
if cascade_cfg['forward_default_args'] is not None: forward_args.update(cascade_cfg['forward_default_args'])
output_list = self.auginference(
segmentor=segmentor,
images=image_tensor,
inference_cfg=inference_cfg,
num_classes=cfg.SEGMENTOR_CFG['num_classes'],
FloatTensor=FloatTensor,
align_corners=segmentor.align_corners,
forward_args=forward_args,
)
output_list = [
F.interpolate(output, size=(sample['height'], sample['width']), mode='bilinear', align_corners=segmentor.align_corners) for output in output_list
]
output = sum(output_list) / len(output_list)
pred = (torch.argmax(output[0], dim=0)).cpu().numpy().astype(np.int32)
mask = np.zeros((pred.shape[0], pred.shape[1], 3), dtype=np.uint8)
for clsid, color in enumerate(palette):
mask[pred == clsid, :] = np.array(color)[::-1]
image = image * 0.5 + mask * 0.5
image = image.astype(np.uint8)
if cmd_args.outputfilename:
cv2.imwrite(os.path.join(common_cfg['backupdir'], cmd_args.outputfilename + '_%d' % idx + '.png'), image)
else:
cv2.imwrite(os.path.join(common_cfg['backupdir'], imagepath.split('/')[-1].split('.')[0] + '.png'), image)
'''inference with augmentations'''
def auginference(self, segmentor, images, inference_cfg, num_classes, FloatTensor, align_corners, forward_args=None):
infer_tricks, output_list = inference_cfg['tricks'], []
for scale_factor in infer_tricks['multiscale']:
images_scale = F.interpolate(images, scale_factor=scale_factor, mode='bilinear', align_corners=align_corners)
outputs = self.inference(
segmentor=segmentor,
images=images_scale.type(FloatTensor),
inference_cfg=inference_cfg,
num_classes=num_classes,
forward_args=forward_args,
).cpu()
output_list.append(outputs)
if infer_tricks['flip']:
images_flip = torch.from_numpy(np.flip(images_scale.cpu().numpy(), axis=3).copy())
outputs_flip = self.inference(
segmentor=segmentor,
images=images_flip.type(FloatTensor),
inference_cfg=inference_cfg,
num_classes=num_classes,
forward_args=forward_args,
)
fix_ann_pairs = inference_cfg.get('fix_ann_pairs', None)
if fix_ann_pairs is None:
for aug_opt in self.cfg.DATASET_CFG['train']['aug_opts']:
if 'RandomFlip' in aug_opt: fix_ann_pairs = aug_opt[-1].get('fix_ann_pairs', None)
if fix_ann_pairs is not None:
outputs_flip_clone = outputs_flip.data.clone()
for (pair_a, pair_b) in fix_ann_pairs:
outputs_flip[:, pair_a, :, :] = outputs_flip_clone[:, pair_b, :, :]
outputs_flip[:, pair_b, :, :] = outputs_flip_clone[:, pair_a, :, :]
outputs_flip = torch.from_numpy(np.flip(outputs_flip.cpu().numpy(), axis=3).copy()).type_as(outputs)
output_list.append(outputs_flip)
return output_list
'''inference'''
def inference(self, segmentor, images, inference_cfg, num_classes, forward_args=None):
assert inference_cfg['mode'] in ['whole', 'slide']
use_probs_before_resize = inference_cfg['tricks']['use_probs_before_resize']
if inference_cfg['mode'] == 'whole':
if forward_args is None:
outputs = segmentor(images)
else:
outputs = segmentor(images, **forward_args)
if use_probs_before_resize: outputs = F.softmax(outputs, dim=1)
else:
align_corners = segmentor.align_corners if hasattr(segmentor, 'align_corners') else segmentor.module.align_corners
opts = inference_cfg['opts']
stride_h, stride_w = opts['stride']
cropsize_h, cropsize_w = opts['cropsize']
batch_size, _, image_h, image_w = images.size()
num_grids_h = max(image_h - cropsize_h + stride_h - 1, 0) // stride_h + 1
num_grids_w = max(image_w - cropsize_w + stride_w - 1, 0) // stride_w + 1
outputs = images.new_zeros((batch_size, num_classes, image_h, image_w))
count_mat = images.new_zeros((batch_size, 1, image_h, image_w))
for h_idx in range(num_grids_h):
for w_idx in range(num_grids_w):
x1, y1 = w_idx * stride_w, h_idx * stride_h
x2, y2 = min(x1 + cropsize_w, image_w), min(y1 + cropsize_h, image_h)
x1, y1 = max(x2 - cropsize_w, 0), max(y2 - cropsize_h, 0)
crop_images = images[:, :, y1:y2, x1:x2]
if forward_args is None:
outputs_crop = segmentor(crop_images)
else:
outputs_crop = segmentor(crop_images, **forward_args)
outputs_crop = F.interpolate(outputs_crop, size=crop_images.size()[2:], mode='bilinear', align_corners=align_corners)
if use_probs_before_resize: outputs_crop = F.softmax(outputs_crop, dim=1)
outputs += F.pad(outputs_crop, (int(x1), int(outputs.shape[3] - x2), int(y1), int(outputs.shape[2] - y2)))
count_mat[:, :, y1:y2, x1:x2] += 1
assert (count_mat == 0).sum() == 0
outputs = outputs / count_mat
return outputs
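# Sliding-window sketch (illustrative): the 'slide' branch above tiles the
# image into overlapping crops and averages the per-pixel logits via count_mat.
# This standalone snippet reproduces the grid arithmetic for one hypothetical
# setting (image 768x1024, cropsize 512x512, stride 341x341):
#   image_h, image_w, crop, stride = 768, 1024, 512, 341
#   num_h = max(image_h - crop + stride - 1, 0) // stride + 1  # -> 2 rows
#   num_w = max(image_w - crop + stride - 1, 0) // stride + 1  # -> 3 cols
#   windows = []
#   for h_idx in range(num_h):
#       for w_idx in range(num_w):
#           x1, y1 = w_idx * stride, h_idx * stride
#           x2, y2 = min(x1 + crop, image_w), min(y1 + crop, image_h)
#           x1, y1 = max(x2 - crop, 0), max(y2 - crop, 0)  # clamp last crop inward
#           windows.append((x1, y1, x2, y2))
#   # every pixel is covered at least once, so count_mat never contains zeros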
'''debug'''
if __name__ == '__main__':
with torch.no_grad():
client = Demo()
client.start()
| 53.42132
| 161
| 0.61241
|
c11635bf0509ab3eb04442d5f8bd453592e0244e
| 5,326
|
py
|
Python
|
dynamic_rest/meta.py
|
aleontiev/dynamic-rest
|
d4635f2163e1ae90c3c4fd680ef59ceb048d7cf7
|
[
"MIT"
] | null | null | null |
dynamic_rest/meta.py
|
aleontiev/dynamic-rest
|
d4635f2163e1ae90c3c4fd680ef59ceb048d7cf7
|
[
"MIT"
] | null | null | null |
dynamic_rest/meta.py
|
aleontiev/dynamic-rest
|
d4635f2163e1ae90c3c4fd680ef59ceb048d7cf7
|
[
"MIT"
] | 3
|
2018-05-08T16:34:39.000Z
|
2019-05-22T04:21:18.000Z
|
"""Module containing Django meta helpers."""
from itertools import chain
from django.db import models
from dynamic_rest.related import RelatedObject
from dynamic_rest.compat import DJANGO110
class Meta(object):
_instances = {}
def __new__(cls, model):
key = model._meta.db_table if hasattr(model, '_meta') else model
if key not in cls._instances:
instance = cls._instances[key] = super(Meta, cls).__new__(cls)
instance.model = model
return cls._instances.get(key)
def __init__(self, model):
self.model = model
self.fields = {} # lazy
@property
def meta(self):
return getattr(self.model, '_meta', None)
@classmethod
def get_related_model(cls, field):
return field.related_model if field else None
def get_name(self):
meta = self.meta
return '%s.%s' % (
meta.app_label, meta.db_table
) if meta else None
@classmethod
def get_query_name(cls, field):
if (
hasattr(field, 'field') and
hasattr(field.field, 'related_query_name')
):
return field.field.related_query_name()
return field.name
def is_field(self, field_name):
"""Check whether a given field exists on a model.
Arguments:
model: a Django model
field_name: the name of a field
Returns:
True if `field_name` exists on `model`, False otherwise.
"""
try:
self.get_field(field_name)
return True
except AttributeError:
return False
def get_fields(self, **kwargs):
return self.meta.get_fields(**kwargs)
def get_pk_field(self):
return self.get_field(self.meta.pk.name)
def get_field(self, field_name):
"""Return a field given a model and field name.
The field name may contain dots (.), indicating
a remote field.
Arguments:
model: a Django model
field_name: the name of a field
Returns:
A Django field if `field_name` is a valid field for `model`,
None otherwise.
"""
if field_name in self.fields:
return self.fields[field_name]
field = None
model = self.model
meta = self.meta
if '.' in field_name:
parts = field_name.split('.')
last = len(parts) - 1
for i, part in enumerate(parts):
if i == last:
field_name = part
break
field = get_model_field(model, part)
model = get_related_model(field)
if not model:
raise AttributeError(
'%s is not a related field on %s' % (
part,
model
)
)
meta = model._meta
try:
if DJANGO110:
field = meta.get_field(field_name)
else:
field = meta.get_field_by_name(field_name)[0]
except:
if DJANGO110:
related_objs = (
f for f in meta.get_fields()
if (f.one_to_many or f.one_to_one)
and f.auto_created and not f.concrete
)
related_m2m_objs = (
f for f in meta.get_fields(include_hidden=True)
if f.many_to_many and f.auto_created
)
else:
related_objs = meta.get_all_related_objects()
related_m2m_objs = meta.get_all_related_many_to_many_objects()
related_objects = {
o.get_accessor_name(): o
for o in chain(related_objs, related_m2m_objs)
}
if field_name in related_objects:
field = related_objects[field_name]
if not field:
raise AttributeError(
'%s is not a valid field for %s' % (field_name, model)
)
self.fields[field_name] = field
return field
def is_field_remote(self, field_name):
"""Check whether a given model field is a remote field.
A remote field is the inverse of a one-to-many or a
many-to-many relationship.
Arguments:
model: a Django model
field_name: the name of a field
Returns:
True if `field_name` is a remote field, False otherwise.
"""
try:
model_field = self.get_field(field_name)
return isinstance(
model_field,
(models.ManyToManyField, RelatedObject)
)
except AttributeError:
return False
def get_table(self):
return self.meta.db_table
def get_model_table(model):
return Meta(model).get_table()
def get_related_model(field):
return Meta.get_related_model(field)
def is_model_field(model, field_name):
return Meta(model).is_field(field_name)
def get_model_field(model, field_name):
return Meta(model).get_field(field_name)
def is_field_remote(model, field_name):
return Meta(model).is_field_remote(field_name)
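# Usage sketch (illustrative, assumes a configured Django project with the
# hypothetical models outlined in the comments): dotted field names traverse
# relations, and reverse relations are reported as "remote" fields.
#   # class Author(models.Model): name = models.CharField(max_length=50)
#   # class Book(models.Model):   author = models.ForeignKey(
#   #     Author, related_name='books', on_delete=models.CASCADE)
#   get_model_field(Book, 'author.name')  # CharField reached via the relation
#   is_model_field(Book, 'publisher')     # False -- no such field on Book
#   is_field_remote(Author, 'books')      # True  -- reverse FK is remote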
| 28.481283
| 78
| 0.553887
|
6a4884565142e681020a00eb43071f551cfb1433
| 1,184
|
py
|
Python
|
toontown/shtiker/ShtikerPage.py
|
AnonymousDeveloper65535/open-toontown
|
3d05c22a7d960ad843dde231140447c46973dba5
|
[
"BSD-3-Clause"
] | 8
|
2017-10-10T11:41:01.000Z
|
2021-02-23T12:55:47.000Z
|
toontown/shtiker/ShtikerPage.py
|
AnonymousDeveloper65535/open-toontown
|
3d05c22a7d960ad843dde231140447c46973dba5
|
[
"BSD-3-Clause"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
toontown/shtiker/ShtikerPage.py
|
AnonymousDeveloper65535/open-toontown
|
3d05c22a7d960ad843dde231140447c46973dba5
|
[
"BSD-3-Clause"
] | 2
|
2019-04-06T16:18:23.000Z
|
2021-02-25T06:25:01.000Z
|
import ShtikerBook
from direct.fsm import StateData
from direct.gui.DirectGui import *
from pandac.PandaModules import *
class ShtikerPage(DirectFrame, StateData.StateData):
def __init__(self):
DirectFrame.__init__(self, relief=None, sortOrder=DGG.BACKGROUND_SORT_INDEX)
self.initialiseoptions(ShtikerPage)
StateData.StateData.__init__(self, 'shtiker-page-done')
self.book = None
self.hide()
return
def load(self):
pass
def unload(self):
self.ignoreAll()
del self.book
def enter(self):
self.show()
def exit(self):
self.hide()
def setBook(self, book):
self.book = book
def setPageName(self, pageName):
self.pageName = pageName
def makePageWhite(self, item):
white = Vec4(1, 1, 1, 1)
self.book['image_color'] = white
self.book.nextArrow['image_color'] = white
self.book.prevArrow['image_color'] = white
def makePageRed(self, item):
red = Vec4(1, 0.5, 0.5, 1)
self.book['image_color'] = red
self.book.nextArrow['image_color'] = red
self.book.prevArrow['image_color'] = red
| 25.73913
| 84
| 0.627534
|
aed025efe3b040d661da60d7f0dac901dbec7856
| 2,692
|
py
|
Python
|
examples/python/geometry/point_cloud_to_rgbd.py
|
jeertmans/Open3D
|
4b9f7dcff14ab5f7d662121ff6cdab60c07118de
|
[
"MIT"
] | 6
|
2019-02-09T13:33:57.000Z
|
2019-11-27T07:13:51.000Z
|
examples/python/geometry/point_cloud_to_rgbd.py
|
jeertmans/Open3D
|
4b9f7dcff14ab5f7d662121ff6cdab60c07118de
|
[
"MIT"
] | null | null | null |
examples/python/geometry/point_cloud_to_rgbd.py
|
jeertmans/Open3D
|
4b9f7dcff14ab5f7d662121ff6cdab60c07118de
|
[
"MIT"
] | 3
|
2019-01-21T13:05:06.000Z
|
2020-01-05T09:41:55.000Z
|
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
import open3d as o3d
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
device = o3d.core.Device('CPU:0')
tum_data = o3d.data.SampleRGBDImageTUM()
depth = o3d.t.io.read_image(tum_data.depth_path).to(device)
color = o3d.t.io.read_image(tum_data.color_path).to(device)
intrinsic = o3d.core.Tensor([[535.4, 0, 320.1], [0, 539.2, 247.6],
[0, 0, 1]])
rgbd = o3d.t.geometry.RGBDImage(color, depth)
pcd = o3d.t.geometry.PointCloud.create_from_rgbd_image(rgbd,
intrinsic,
depth_scale=5000.0,
depth_max=10.0)
o3d.visualization.draw([pcd])
rgbd_reproj = pcd.project_to_rgbd_image(640,
480,
intrinsic,
depth_scale=5000.0,
depth_max=10.0)
fig, axs = plt.subplots(1, 2)
axs[0].imshow(np.asarray(rgbd_reproj.color.to_legacy()))
axs[1].imshow(np.asarray(rgbd_reproj.depth.to_legacy()))
plt.show()
| 48.071429
| 79
| 0.556464
|
017ba80782550323116bf9af8881331eb47dbbe1
| 4,585
|
py
|
Python
|
benchmark/startQiskit_noisy1608.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy1608.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy1608.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=60
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[0]) # number=38
prog.cz(input_qubit[1],input_qubit[0]) # number=39
prog.h(input_qubit[0]) # number=40
prog.h(input_qubit[0]) # number=51
prog.cz(input_qubit[1],input_qubit[0]) # number=52
prog.h(input_qubit[0]) # number=53
prog.cx(input_qubit[1],input_qubit[0]) # number=54
prog.cx(input_qubit[1],input_qubit[0]) # number=57
prog.z(input_qubit[1]) # number=58
prog.cx(input_qubit[1],input_qubit[0]) # number=59
prog.cx(input_qubit[1],input_qubit[0]) # number=56
prog.cx(input_qubit[1],input_qubit[0]) # number=50
prog.h(input_qubit[0]) # number=32
prog.cz(input_qubit[1],input_qubit[0]) # number=33
prog.h(input_qubit[0]) # number=34
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[3],input_qubit[0]) # number=41
prog.z(input_qubit[3]) # number=42
prog.cx(input_qubit[3],input_qubit[0]) # number=43
prog.cx(input_qubit[1],input_qubit[3]) # number=44
prog.cx(input_qubit[3],input_qubit[2]) # number=45
prog.x(input_qubit[0]) # number=9
prog.x(input_qubit[1]) # number=10
prog.x(input_qubit[2]) # number=11
prog.cx(input_qubit[0],input_qubit[3]) # number=35
prog.x(input_qubit[3]) # number=36
prog.cx(input_qubit[0],input_qubit[3]) # number=37
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0]) # number=24
prog.x(input_qubit[0]) # number=25
prog.cx(input_qubit[1],input_qubit[0]) # number=26
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.x(input_qubit[3]) # number=46
prog.y(input_qubit[1]) # number=47
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.x(input_qubit[1]) # number=22
prog.x(input_qubit[1]) # number=23
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = FakeVigo()
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy1608.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 32.51773
| 82
| 0.622246
|
d173ca556a23b2354daae71e205d1f276b2794a8
| 14,264
|
py
|
Python
|
chart/tests/test_webserver.py
|
aditishankar/test
|
dd3c46115ed3d5af7e6a4a6f8745cffb9b762c3a
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
chart/tests/test_webserver.py
|
aditishankar/test
|
dd3c46115ed3d5af7e6a4a6f8745cffb9b762c3a
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
chart/tests/test_webserver.py
|
aditishankar/test
|
dd3c46115ed3d5af7e6a4a6f8745cffb9b762c3a
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import jmespath
from parameterized import parameterized
from tests.helm_template_generator import render_chart
class WebserverDeploymentTest(unittest.TestCase):
def test_should_add_host_header_to_liveness_and_readiness_probes(self):
docs = render_chart(
values={
"config": {
"webserver": {"base_url": "https://example.com:21222/mypath/path"},
}
},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert {"name": "Host", "value": "example.com"} in jmespath.search(
"spec.template.spec.containers[0].livenessProbe.httpGet.httpHeaders", docs[0]
)
assert {"name": "Host", "value": "example.com"} in jmespath.search(
"spec.template.spec.containers[0].readinessProbe.httpGet.httpHeaders", docs[0]
)
def test_should_add_path_to_liveness_and_readiness_probes(self):
docs = render_chart(
values={
"config": {
"webserver": {"base_url": "https://example.com:21222/mypath/path"},
}
},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert (
jmespath.search("spec.template.spec.containers[0].livenessProbe.httpGet.path", docs[0])
== "/mypath/path/health"
)
assert (
jmespath.search("spec.template.spec.containers[0].readinessProbe.httpGet.path", docs[0])
== "/mypath/path/health"
)
def test_should_not_contain_host_header_if_host_empty_string(self):
docs = render_chart(
values={},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert (
jmespath.search("spec.template.spec.containers[0].livenessProbe.httpGet.httpHeaders", docs[0])
is None
)
assert (
jmespath.search("spec.template.spec.containers[0].readinessProbe.httpGet.httpHeaders", docs[0])
is None
)
def test_should_not_contain_host_header_if_base_url_not_set(self):
docs = render_chart(
values={
"config": {
"webserver": {"base_url": ""},
}
},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert (
jmespath.search("spec.template.spec.containers[0].livenessProbe.httpGet.httpHeaders", docs[0])
is None
)
assert (
jmespath.search("spec.template.spec.containers[0].readinessProbe.httpGet.httpHeaders", docs[0])
is None
)
def test_should_not_contain_host_header_by_default(self):
docs = render_chart(
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert (
jmespath.search("spec.template.spec.containers[0].livenessProbe.httpGet.httpHeaders", docs[0])
is None
)
assert (
jmespath.search("spec.template.spec.containers[0].readinessProbe.httpGet.httpHeaders", docs[0])
is None
)
def test_should_add_volume_and_volume_mount_when_exist_webserver_config(self):
docs = render_chart(
values={"webserver": {"webserverConfig": "CSRF_ENABLED = True"}},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert {
"name": "webserver-config",
"configMap": {"name": "RELEASE-NAME-webserver-config"},
} in jmespath.search("spec.template.spec.volumes", docs[0])
assert {
"name": "webserver-config",
"mountPath": "/opt/airflow/webserver_config.py",
"subPath": "webserver_config.py",
"readOnly": True,
} in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
def test_should_add_extra_containers(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"webserver": {
"extraContainers": [
{"name": "test-container", "image": "test-registry/test-repo:test-tag"}
],
},
},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert {
"name": "test-container",
"image": "test-registry/test-repo:test-tag",
} == jmespath.search("spec.template.spec.containers[-1]", docs[0])
def test_should_add_extra_init_containers(self):
docs = render_chart(
values={
"webserver": {
"extraInitContainers": [
{"name": "test-init-container", "image": "test-registry/test-repo:test-tag"}
],
},
},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert {
"name": "test-init-container",
"image": "test-registry/test-repo:test-tag",
} == jmespath.search("spec.template.spec.initContainers[-1]", docs[0])
def test_should_create_valid_affinity_tolerations_and_node_selector(self):
docs = render_chart(
values={
"webserver": {
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
},
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"diskType": "ssd"},
}
},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert "Deployment" == jmespath.search("kind", docs[0])
assert "foo" == jmespath.search(
"spec.template.spec.affinity.nodeAffinity."
"requiredDuringSchedulingIgnoredDuringExecution."
"nodeSelectorTerms[0]."
"matchExpressions[0]."
"key",
docs[0],
)
assert "ssd" == jmespath.search(
"spec.template.spec.nodeSelector.diskType",
docs[0],
)
assert "dynamic-pods" == jmespath.search(
"spec.template.spec.tolerations[0].key",
docs[0],
)
@parameterized.expand(
[
({"enabled": False}, None),
({"enabled": True}, "RELEASE-NAME-logs"),
({"enabled": True, "existingClaim": "test-claim"}, "test-claim"),
]
)
def test_logs_persistence_adds_volume_and_mount(self, log_persistence_values, expected_claim_name):
docs = render_chart(
values={"logs": {"persistence": log_persistence_values}},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
if expected_claim_name:
assert {
"name": "logs",
"persistentVolumeClaim": {"claimName": expected_claim_name},
} == jmespath.search("spec.template.spec.volumes[1]", docs[0])
assert {
"name": "logs",
"mountPath": "/opt/airflow/logs",
} == jmespath.search("spec.template.spec.containers[0].volumeMounts[1]", docs[0])
else:
assert "logs" not in [v["name"] for v in jmespath.search("spec.template.spec.volumes", docs[0])]
assert "logs" not in [
v["name"] for v in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
]
def test_webserver_resources_are_configurable(self):
docs = render_chart(
values={
"webserver": {
"resources": {
"limits": {"cpu": "200m", 'memory': "128Mi"},
"requests": {"cpu": "300m", 'memory': "169Mi"},
}
},
},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert "128Mi" == jmespath.search("spec.template.spec.containers[0].resources.limits.memory", docs[0])
assert "169Mi" == jmespath.search(
"spec.template.spec.containers[0].resources.requests.memory", docs[0]
)
assert "300m" == jmespath.search("spec.template.spec.containers[0].resources.requests.cpu", docs[0])
def test_webserver_resources_are_not_added_by_default(self):
docs = render_chart(
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].resources", docs[0]) == {}
@parameterized.expand(
[
("2.0.2", {"type": "RollingUpdate", "rollingUpdate": {"maxSurge": 1, "maxUnavailable": 0}}),
("1.10.14", {"type": "Recreate"}),
("1.9.0", {"type": "Recreate"}),
("2.1.0", {"type": "RollingUpdate", "rollingUpdate": {"maxSurge": 1, "maxUnavailable": 0}}),
],
)
def test_default_update_strategy(self, airflow_version, expected_strategy):
docs = render_chart(
values={"airflowVersion": airflow_version},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert jmespath.search("spec.strategy", docs[0]) == expected_strategy
def test_update_strategy(self):
expected_strategy = {"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": 1}}
docs = render_chart(
values={"webserver": {"strategy": expected_strategy}},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert jmespath.search("spec.strategy", docs[0]) == expected_strategy
def test_no_airflow_local_settings_by_default(self):
docs = render_chart(show_only=["templates/webserver/webserver-deployment.yaml"])
volume_mounts = jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
assert "airflow_local_settings.py" not in str(volume_mounts)
def test_airflow_local_settings(self):
docs = render_chart(
values={"airflowLocalSettings": "# Well hello!"},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert {
"name": "config",
"mountPath": "/opt/airflow/config/airflow_local_settings.py",
"subPath": "airflow_local_settings.py",
"readOnly": True,
} in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
class WebserverServiceTest(unittest.TestCase):
def test_default_service(self):
docs = render_chart(
show_only=["templates/webserver/webserver-service.yaml"],
)
assert "RELEASE-NAME-webserver" == jmespath.search("metadata.name", docs[0])
assert jmespath.search("metadata.annotations", docs[0]) is None
assert {"tier": "airflow", "component": "webserver", "release": "RELEASE-NAME"} == jmespath.search(
"spec.selector", docs[0]
)
assert "ClusterIP" == jmespath.search("spec.type", docs[0])
assert {"name": "airflow-ui", "protocol": "TCP", "port": 8080} in jmespath.search(
"spec.ports", docs[0]
)
def test_overrides(self):
docs = render_chart(
values={
"ports": {"airflowUI": 9000},
"webserver": {
"service": {
"type": "LoadBalancer",
"loadBalancerIP": "127.0.0.1",
"annotations": {"foo": "bar"},
}
},
},
show_only=["templates/webserver/webserver-service.yaml"],
)
assert {"foo": "bar"} == jmespath.search("metadata.annotations", docs[0])
assert "LoadBalancer" == jmespath.search("spec.type", docs[0])
assert {"name": "airflow-ui", "protocol": "TCP", "port": 9000} in jmespath.search(
"spec.ports", docs[0]
)
assert "127.0.0.1" == jmespath.search("spec.loadBalancerIP", docs[0])
class WebserverConfigmapTest(unittest.TestCase):
def test_no_webserver_config_configmap_by_default(self):
docs = render_chart(show_only=["templates/configmaps/webserver-configmap.yaml"])
assert 0 == len(docs)
def test_webserver_config_configmap(self):
docs = render_chart(
values={"webserver": {"webserverConfig": "CSRF_ENABLED = True # {{ .Release.Name }}"}},
show_only=["templates/configmaps/webserver-configmap.yaml"],
)
assert "ConfigMap" == docs[0]["kind"]
assert "RELEASE-NAME-webserver-config" == jmespath.search("metadata.name", docs[0])
assert (
"CSRF_ENABLED = True # RELEASE-NAME"
== jmespath.search('data."webserver_config.py"', docs[0]).strip()
)
| 39.732591
| 110
| 0.562465
|
ce3a540e59fb3520d7ab6d3c56da5ec56fd1150a
| 331
|
py
|
Python
|
third_party/conan/recipes/llvm_x86_utils/conanfile.py
|
trobol/orbit
|
62a206c34b1308e0d56b91f695f39ba8879b713c
|
[
"BSD-2-Clause"
] | 1
|
2021-04-15T23:59:38.000Z
|
2021-04-15T23:59:38.000Z
|
third_party/conan/recipes/llvm_x86_utils/conanfile.py
|
idfoxdale/orbit
|
c6525a14e65b1de57028eaca0ab633265aedf348
|
[
"BSD-2-Clause"
] | null | null | null |
third_party/conan/recipes/llvm_x86_utils/conanfile.py
|
idfoxdale/orbit
|
c6525a14e65b1de57028eaca0ab633265aedf348
|
[
"BSD-2-Clause"
] | 1
|
2020-07-14T13:16:03.000Z
|
2020-07-14T13:16:03.000Z
|
from conans import python_requires
common = python_requires('llvm-common/0.0.3@orbitdeps/stable')
class LLVMX86Utils(common.LLVMModulePackage):
version = common.LLVMModulePackage.version
name = 'llvm_x86_utils'
llvm_component = 'llvm'
llvm_module = 'X86Utils'
llvm_requires = ['llvm_headers', 'llvm_support']
| 30.090909
| 62
| 0.752266
|
8687d5f9b4615e823cf7f531b786cae1b9483af1
| 3,564
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/arthrobacterarilaitensis.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/arthrobacterarilaitensis.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/arthrobacterarilaitensis.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Arthrobacter arilaitensis.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def ArthrobacterArilaitensis(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Arthrobacter arilaitensis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Arthrobacter arilaitensis graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="ArthrobacterArilaitensis",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
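# A minimal usage sketch for the retrieval helper above. It assumes the
# compiled ensmallen package is importable and that network access to the
# STRING repository is available; printing the graph is purely illustrative.
if __name__ == "__main__":
    graph = ArthrobacterArilaitensis(directed=False, version="links.v11.5")
    print(graph)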
| 33
| 223
| 0.679012
|
ec137a592b8d14e5cc291c592ede979337782ddf
| 17,578
|
py
|
Python
|
sw/3rd_party/VTK-7.1.0/Common/DataModel/Testing/Python/LineIntersectQuadraticCells.py
|
esean/stl_voro_fill
|
c569a4019ff80afbf85482c7193711ea85a7cafb
|
[
"MIT"
] | 4
|
2019-05-30T01:52:12.000Z
|
2021-09-29T21:12:13.000Z
|
graphics/VTK-7.0.0/Common/DataModel/Testing/Python/LineIntersectQuadraticCells.py
|
hlzz/dotfiles
|
0591f71230c919c827ba569099eb3b75897e163e
|
[
"BSD-3-Clause"
] | null | null | null |
graphics/VTK-7.0.0/Common/DataModel/Testing/Python/LineIntersectQuadraticCells.py
|
hlzz/dotfiles
|
0591f71230c919c827ba569099eb3b75897e163e
|
[
"BSD-3-Clause"
] | 2
|
2019-08-30T23:36:13.000Z
|
2019-11-08T16:52:01.000Z
|
#!/usr/bin/env python
import sys
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Prevent .pyc files being created.
# Stops the vtk source being polluted
# by .pyc files.
sys.dont_write_bytecode = True
import backdrop
# Contour every quadratic cell type
# Create a scene with one of each cell type.
# QuadraticEdge
edgePoints = vtk.vtkPoints()
edgePoints.SetNumberOfPoints(3)
edgePoints.InsertPoint(0, 0, 0, 0)
edgePoints.InsertPoint(1, 1.0, 0, 0)
edgePoints.InsertPoint(2, 0.5, 0.25, 0)
edgeScalars = vtk.vtkFloatArray()
edgeScalars.SetNumberOfTuples(3)
edgeScalars.InsertValue(0, 0.0)
edgeScalars.InsertValue(1, 0.0)
edgeScalars.InsertValue(2, 0.9)
aEdge = vtk.vtkQuadraticEdge()
aEdge.GetPointIds().SetId(0, 0)
aEdge.GetPointIds().SetId(1, 1)
aEdge.GetPointIds().SetId(2, 2)
aEdgeGrid = vtk.vtkUnstructuredGrid()
aEdgeGrid.Allocate(1, 1)
aEdgeGrid.InsertNextCell(aEdge.GetCellType(), aEdge.GetPointIds())
aEdgeGrid.SetPoints(edgePoints)
aEdgeGrid.GetPointData().SetScalars(edgeScalars)
aEdgeMapper = vtk.vtkDataSetMapper()
aEdgeMapper.SetInputData(aEdgeGrid)
aEdgeMapper.ScalarVisibilityOff()
aEdgeActor = vtk.vtkActor()
aEdgeActor.SetMapper(aEdgeMapper)
aEdgeActor.GetProperty().SetRepresentationToWireframe()
aEdgeActor.GetProperty().SetAmbient(1.0)
# Quadratic triangle
triPoints = vtk.vtkPoints()
triPoints.SetNumberOfPoints(6)
triPoints.InsertPoint(0, 0.0, 0.0, 0.0)
triPoints.InsertPoint(1, 1.0, 0.0, 0.0)
triPoints.InsertPoint(2, 0.5, 0.8, 0.0)
triPoints.InsertPoint(3, 0.5, 0.0, 0.0)
triPoints.InsertPoint(4, 0.75, 0.4, 0.0)
triPoints.InsertPoint(5, 0.25, 0.4, 0.0)
triScalars = vtk.vtkFloatArray()
triScalars.SetNumberOfTuples(6)
triScalars.InsertValue(0, 0.0)
triScalars.InsertValue(1, 0.0)
triScalars.InsertValue(2, 0.0)
triScalars.InsertValue(3, 1.0)
triScalars.InsertValue(4, 0.0)
triScalars.InsertValue(5, 0.0)
aTri = vtk.vtkQuadraticTriangle()
aTri.GetPointIds().SetId(0, 0)
aTri.GetPointIds().SetId(1, 1)
aTri.GetPointIds().SetId(2, 2)
aTri.GetPointIds().SetId(3, 3)
aTri.GetPointIds().SetId(4, 4)
aTri.GetPointIds().SetId(5, 5)
aTriGrid = vtk.vtkUnstructuredGrid()
aTriGrid.Allocate(1, 1)
aTriGrid.InsertNextCell(aTri.GetCellType(), aTri.GetPointIds())
aTriGrid.SetPoints(triPoints)
aTriGrid.GetPointData().SetScalars(triScalars)
aTriMapper = vtk.vtkDataSetMapper()
aTriMapper.SetInputData(aTriGrid)
aTriMapper.ScalarVisibilityOff()
aTriActor = vtk.vtkActor()
aTriActor.SetMapper(aTriMapper)
aTriActor.GetProperty().SetRepresentationToWireframe()
aTriActor.GetProperty().SetAmbient(1.0)
# Quadratic quadrilateral
quadPoints = vtk.vtkPoints()
quadPoints.SetNumberOfPoints(8)
quadPoints.InsertPoint(0, 0.0, 0.0, 0.0)
quadPoints.InsertPoint(1, 1.0, 0.0, 0.0)
quadPoints.InsertPoint(2, 1.0, 1.0, 0.0)
quadPoints.InsertPoint(3, 0.0, 1.0, 0.0)
quadPoints.InsertPoint(4, 0.5, 0.0, 0.0)
quadPoints.InsertPoint(5, 1.0, 0.5, 0.0)
quadPoints.InsertPoint(6, 0.5, 1.0, 0.0)
quadPoints.InsertPoint(7, 0.0, 0.5, 0.0)
quadScalars = vtk.vtkFloatArray()
quadScalars.SetNumberOfTuples(8)
quadScalars.InsertValue(0, 0.0)
quadScalars.InsertValue(1, 0.0)
quadScalars.InsertValue(2, 1.0)
quadScalars.InsertValue(3, 1.0)
quadScalars.InsertValue(4, 1.0)
quadScalars.InsertValue(5, 0.0)
quadScalars.InsertValue(6, 0.0)
quadScalars.InsertValue(7, 0.0)
aQuad = vtk.vtkQuadraticQuad()
aQuad.GetPointIds().SetId(0, 0)
aQuad.GetPointIds().SetId(1, 1)
aQuad.GetPointIds().SetId(2, 2)
aQuad.GetPointIds().SetId(3, 3)
aQuad.GetPointIds().SetId(4, 4)
aQuad.GetPointIds().SetId(5, 5)
aQuad.GetPointIds().SetId(6, 6)
aQuad.GetPointIds().SetId(7, 7)
aQuadGrid = vtk.vtkUnstructuredGrid()
aQuadGrid.Allocate(1, 1)
aQuadGrid.InsertNextCell(aQuad.GetCellType(), aQuad.GetPointIds())
aQuadGrid.SetPoints(quadPoints)
aQuadGrid.GetPointData().SetScalars(quadScalars)
aQuadMapper = vtk.vtkDataSetMapper()
aQuadMapper.SetInputData(aQuadGrid)
aQuadMapper.ScalarVisibilityOff()
aQuadActor = vtk.vtkActor()
aQuadActor.SetMapper(aQuadMapper)
aQuadActor.GetProperty().SetRepresentationToWireframe()
aQuadActor.GetProperty().SetAmbient(1.0)
# Quadratic tetrahedron
tetPoints = vtk.vtkPoints()
tetPoints.SetNumberOfPoints(10)
tetPoints.InsertPoint(0, 0.0, 0.0, 0.0)
tetPoints.InsertPoint(1, 1.0, 0.0, 0.0)
tetPoints.InsertPoint(2, 0.5, 0.8, 0.0)
tetPoints.InsertPoint(3, 0.5, 0.4, 1.0)
tetPoints.InsertPoint(4, 0.5, 0.0, 0.0)
tetPoints.InsertPoint(5, 0.75, 0.4, 0.0)
tetPoints.InsertPoint(6, 0.25, 0.4, 0.0)
tetPoints.InsertPoint(7, 0.25, 0.2, 0.5)
tetPoints.InsertPoint(8, 0.75, 0.2, 0.5)
tetPoints.InsertPoint(9, 0.50, 0.6, 0.5)
tetScalars = vtk.vtkFloatArray()
tetScalars.SetNumberOfTuples(10)
tetScalars.InsertValue(0, 1.0)
tetScalars.InsertValue(1, 1.0)
tetScalars.InsertValue(2, 1.0)
tetScalars.InsertValue(3, 1.0)
tetScalars.InsertValue(4, 0.0)
tetScalars.InsertValue(5, 0.0)
tetScalars.InsertValue(6, 0.0)
tetScalars.InsertValue(7, 0.0)
tetScalars.InsertValue(8, 0.0)
tetScalars.InsertValue(9, 0.0)
aTet = vtk.vtkQuadraticTetra()
aTet.GetPointIds().SetId(0, 0)
aTet.GetPointIds().SetId(1, 1)
aTet.GetPointIds().SetId(2, 2)
aTet.GetPointIds().SetId(3, 3)
aTet.GetPointIds().SetId(4, 4)
aTet.GetPointIds().SetId(5, 5)
aTet.GetPointIds().SetId(6, 6)
aTet.GetPointIds().SetId(7, 7)
aTet.GetPointIds().SetId(8, 8)
aTet.GetPointIds().SetId(9, 9)
aTetGrid = vtk.vtkUnstructuredGrid()
aTetGrid.Allocate(1, 1)
aTetGrid.InsertNextCell(aTet.GetCellType(), aTet.GetPointIds())
aTetGrid.SetPoints(tetPoints)
aTetGrid.GetPointData().SetScalars(tetScalars)
aTetMapper = vtk.vtkDataSetMapper()
aTetMapper.SetInputData(aTetGrid)
aTetMapper.ScalarVisibilityOff()
aTetActor = vtk.vtkActor()
aTetActor.SetMapper(aTetMapper)
aTetActor.GetProperty().SetRepresentationToWireframe()
aTetActor.GetProperty().SetAmbient(1.0)
# Quadratic hexahedron
hexPoints = vtk.vtkPoints()
hexPoints.SetNumberOfPoints(20)
hexPoints.InsertPoint(0, 0, 0, 0)
hexPoints.InsertPoint(1, 1, 0, 0)
hexPoints.InsertPoint(2, 1, 1, 0)
hexPoints.InsertPoint(3, 0, 1, 0)
hexPoints.InsertPoint(4, 0, 0, 1)
hexPoints.InsertPoint(5, 1, 0, 1)
hexPoints.InsertPoint(6, 1, 1, 1)
hexPoints.InsertPoint(7, 0, 1, 1)
hexPoints.InsertPoint(8, 0.5, 0, 0)
hexPoints.InsertPoint(9, 1, 0.5, 0)
hexPoints.InsertPoint(10, 0.5, 1, 0)
hexPoints.InsertPoint(11, 0, 0.5, 0)
hexPoints.InsertPoint(12, 0.5, 0, 1)
hexPoints.InsertPoint(13, 1, 0.5, 1)
hexPoints.InsertPoint(14, 0.5, 1, 1)
hexPoints.InsertPoint(15, 0, 0.5, 1)
hexPoints.InsertPoint(16, 0, 0, 0.5)
hexPoints.InsertPoint(17, 1, 0, 0.5)
hexPoints.InsertPoint(18, 1, 1, 0.5)
hexPoints.InsertPoint(19, 0, 1, 0.5)
hexScalars = vtk.vtkFloatArray()
hexScalars.SetNumberOfTuples(20)
hexScalars.InsertValue(0, 1.0)
hexScalars.InsertValue(1, 1.0)
hexScalars.InsertValue(2, 1.0)
hexScalars.InsertValue(3, 1.0)
hexScalars.InsertValue(4, 1.0)
hexScalars.InsertValue(5, 1.0)
hexScalars.InsertValue(6, 1.0)
hexScalars.InsertValue(7, 1.0)
hexScalars.InsertValue(8, 0.0)
hexScalars.InsertValue(9, 0.0)
hexScalars.InsertValue(10, 0.0)
hexScalars.InsertValue(11, 0.0)
hexScalars.InsertValue(12, 0.0)
hexScalars.InsertValue(13, 0.0)
hexScalars.InsertValue(14, 0.0)
hexScalars.InsertValue(15, 0.0)
hexScalars.InsertValue(16, 0.0)
hexScalars.InsertValue(17, 0.0)
hexScalars.InsertValue(18, 0.0)
hexScalars.InsertValue(19, 0.0)
aHex = vtk.vtkQuadraticHexahedron()
aHex.GetPointIds().SetId(0, 0)
aHex.GetPointIds().SetId(1, 1)
aHex.GetPointIds().SetId(2, 2)
aHex.GetPointIds().SetId(3, 3)
aHex.GetPointIds().SetId(4, 4)
aHex.GetPointIds().SetId(5, 5)
aHex.GetPointIds().SetId(6, 6)
aHex.GetPointIds().SetId(7, 7)
aHex.GetPointIds().SetId(8, 8)
aHex.GetPointIds().SetId(9, 9)
aHex.GetPointIds().SetId(10, 10)
aHex.GetPointIds().SetId(11, 11)
aHex.GetPointIds().SetId(12, 12)
aHex.GetPointIds().SetId(13, 13)
aHex.GetPointIds().SetId(14, 14)
aHex.GetPointIds().SetId(15, 15)
aHex.GetPointIds().SetId(16, 16)
aHex.GetPointIds().SetId(17, 17)
aHex.GetPointIds().SetId(18, 18)
aHex.GetPointIds().SetId(19, 19)
aHexGrid = vtk.vtkUnstructuredGrid()
aHexGrid.Allocate(1, 1)
aHexGrid.InsertNextCell(aHex.GetCellType(), aHex.GetPointIds())
aHexGrid.SetPoints(hexPoints)
aHexGrid.GetPointData().SetScalars(hexScalars)
aHexMapper = vtk.vtkDataSetMapper()
aHexMapper.SetInputData(aHexGrid)
aHexMapper.ScalarVisibilityOff()
aHexActor = vtk.vtkActor()
aHexActor.SetMapper(aHexMapper)
aHexActor.GetProperty().SetRepresentationToWireframe()
aHexActor.GetProperty().SetAmbient(1.0)
# Quadratic wedge
wedgePoints = vtk.vtkPoints()
wedgePoints.SetNumberOfPoints(15)
wedgePoints.InsertPoint(0, 0, 0, 0)
wedgePoints.InsertPoint(1, 1, 0, 0)
wedgePoints.InsertPoint(2, 0, 1, 0)
wedgePoints.InsertPoint(3, 0, 0, 1)
wedgePoints.InsertPoint(4, 1, 0, 1)
wedgePoints.InsertPoint(5, 0, 1, 1)
wedgePoints.InsertPoint(6, 0.5, 0, 0)
wedgePoints.InsertPoint(7, 0.5, 0.5, 0)
wedgePoints.InsertPoint(8, 0, 0.5, 0)
wedgePoints.InsertPoint(9, 0.5, 0, 1)
wedgePoints.InsertPoint(10, 0.5, 0.5, 1)
wedgePoints.InsertPoint(11, 0, 0.5, 1)
wedgePoints.InsertPoint(12, 0, 0, 0.5)
wedgePoints.InsertPoint(13, 1, 0, 0.5)
wedgePoints.InsertPoint(14, 0, 1, 0.5)
wedgeScalars = vtk.vtkFloatArray()
wedgeScalars.SetNumberOfTuples(15)
wedgeScalars.InsertValue(0, 1.0)
wedgeScalars.InsertValue(1, 1.0)
wedgeScalars.InsertValue(2, 1.0)
wedgeScalars.InsertValue(3, 1.0)
wedgeScalars.InsertValue(4, 1.0)
wedgeScalars.InsertValue(5, 1.0)
wedgeScalars.InsertValue(6, 1.0)
wedgeScalars.InsertValue(7, 1.0)
wedgeScalars.InsertValue(8, 0.0)
wedgeScalars.InsertValue(9, 0.0)
wedgeScalars.InsertValue(10, 0.0)
wedgeScalars.InsertValue(11, 0.0)
wedgeScalars.InsertValue(12, 0.0)
wedgeScalars.InsertValue(13, 0.0)
wedgeScalars.InsertValue(14, 0.0)
aWedge = vtk.vtkQuadraticWedge()
aWedge.GetPointIds().SetId(0, 0)
aWedge.GetPointIds().SetId(1, 1)
aWedge.GetPointIds().SetId(2, 2)
aWedge.GetPointIds().SetId(3, 3)
aWedge.GetPointIds().SetId(4, 4)
aWedge.GetPointIds().SetId(5, 5)
aWedge.GetPointIds().SetId(6, 6)
aWedge.GetPointIds().SetId(7, 7)
aWedge.GetPointIds().SetId(8, 8)
aWedge.GetPointIds().SetId(9, 9)
aWedge.GetPointIds().SetId(10, 10)
aWedge.GetPointIds().SetId(11, 11)
aWedge.GetPointIds().SetId(12, 12)
aWedge.GetPointIds().SetId(13, 13)
aWedge.GetPointIds().SetId(14, 14)
aWedgeGrid = vtk.vtkUnstructuredGrid()
aWedgeGrid.Allocate(1, 1)
aWedgeGrid.InsertNextCell(aWedge.GetCellType(), aWedge.GetPointIds())
aWedgeGrid.SetPoints(wedgePoints)
aWedgeGrid.GetPointData().SetScalars(wedgeScalars)
wedgeContours = vtk.vtkClipDataSet()
wedgeContours.SetInputData(aWedgeGrid)
wedgeContours.SetValue(0.5)
aWedgeContourMapper = vtk.vtkDataSetMapper()
aWedgeContourMapper.SetInputConnection(wedgeContours.GetOutputPort())
aWedgeContourMapper.ScalarVisibilityOff()
aWedgeMapper = vtk.vtkDataSetMapper()
aWedgeMapper.SetInputData(aWedgeGrid)
aWedgeMapper.ScalarVisibilityOff()
aWedgeActor = vtk.vtkActor()
aWedgeActor.SetMapper(aWedgeMapper)
aWedgeActor.GetProperty().SetRepresentationToWireframe()
aWedgeActor.GetProperty().SetAmbient(1.0)
aWedgeContourActor = vtk.vtkActor()
aWedgeContourActor.SetMapper(aWedgeContourMapper)
aWedgeContourActor.GetProperty().SetAmbient(1.0)
# Quadratic pyramid
pyraPoints = vtk.vtkPoints()
pyraPoints.SetNumberOfPoints(13)
pyraPoints.InsertPoint(0, 0, 0, 0)
pyraPoints.InsertPoint(1, 1, 0, 0)
pyraPoints.InsertPoint(2, 1, 1, 0)
pyraPoints.InsertPoint(3, 0, 1, 0)
pyraPoints.InsertPoint(4, 0, 0, 1)
pyraPoints.InsertPoint(5, 0.5, 0, 0)
pyraPoints.InsertPoint(6, 1, 0.5, 0)
pyraPoints.InsertPoint(7, 0.5, 1, 0)
pyraPoints.InsertPoint(8, 0, 0.5, 0)
pyraPoints.InsertPoint(9, 0, 0, 0.5)
pyraPoints.InsertPoint(10, 0.5, 0, 0.5)
pyraPoints.InsertPoint(11, 0.5, 0.5, 0.5)
pyraPoints.InsertPoint(12, 0, 0.5, 0.5)
pyraScalars = vtk.vtkFloatArray()
pyraScalars.SetNumberOfTuples(13)
pyraScalars.InsertValue(0, 1.0)
pyraScalars.InsertValue(1, 1.0)
pyraScalars.InsertValue(2, 1.0)
pyraScalars.InsertValue(3, 1.0)
pyraScalars.InsertValue(4, 1.0)
pyraScalars.InsertValue(5, 1.0)
pyraScalars.InsertValue(6, 1.0)
pyraScalars.InsertValue(7, 1.0)
pyraScalars.InsertValue(8, 0.0)
pyraScalars.InsertValue(9, 0.0)
pyraScalars.InsertValue(10, 0.0)
pyraScalars.InsertValue(11, 0.0)
pyraScalars.InsertValue(12, 0.0)
aPyramid = vtk.vtkQuadraticPyramid()
aPyramid.GetPointIds().SetId(0, 0)
aPyramid.GetPointIds().SetId(1, 1)
aPyramid.GetPointIds().SetId(2, 2)
aPyramid.GetPointIds().SetId(3, 3)
aPyramid.GetPointIds().SetId(4, 4)
aPyramid.GetPointIds().SetId(5, 5)
aPyramid.GetPointIds().SetId(6, 6)
aPyramid.GetPointIds().SetId(7, 7)
aPyramid.GetPointIds().SetId(8, 8)
aPyramid.GetPointIds().SetId(9, 9)
aPyramid.GetPointIds().SetId(10, 10)
aPyramid.GetPointIds().SetId(11, 11)
aPyramid.GetPointIds().SetId(12, 12)
aPyramidGrid = vtk.vtkUnstructuredGrid()
aPyramidGrid.Allocate(1, 1)
aPyramidGrid.InsertNextCell(aPyramid.GetCellType(), aPyramid.GetPointIds())
aPyramidGrid.SetPoints(pyraPoints)
aPyramidGrid.GetPointData().SetScalars(pyraScalars)
pyraContours = vtk.vtkClipDataSet()
pyraContours.SetInputData(aPyramidGrid)
pyraContours.SetValue(0.5)
aPyramidContourMapper = vtk.vtkDataSetMapper()
aPyramidContourMapper.SetInputConnection(pyraContours.GetOutputPort())
aPyramidContourMapper.ScalarVisibilityOff()
aPyramidMapper = vtk.vtkDataSetMapper()
aPyramidMapper.SetInputData(aPyramidGrid)
aPyramidMapper.ScalarVisibilityOff()
aPyramidActor = vtk.vtkActor()
aPyramidActor.SetMapper(aPyramidMapper)
aPyramidActor.GetProperty().SetRepresentationToWireframe()
aPyramidActor.GetProperty().SetAmbient(1.0)
aPyramidContourActor = vtk.vtkActor()
aPyramidContourActor.SetMapper(aPyramidContourMapper)
aPyramidContourActor.GetProperty().SetAmbient(1.0)
# Create the rendering related stuff.
# Since some of our actors are a single vertex, we need to remove all
# cullers so the single vertex actors will render
ren1 = vtk.vtkRenderer()
ren1.GetCullers().RemoveAllItems()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.SetBackground(.1, .2, .3)
renWin.SetSize(400, 200)
# specify properties
ren1.AddActor(aEdgeActor)
ren1.AddActor(aTriActor)
ren1.AddActor(aQuadActor)
ren1.AddActor(aTetActor)
ren1.AddActor(aHexActor)
ren1.AddActor(aWedgeActor)
ren1.AddActor(aPyramidActor)
# places everyone!!
aTriActor.AddPosition(2, 0, 0)
aQuadActor.AddPosition(4, 0, 0)
aTetActor.AddPosition(6, 0, 0)
aHexActor.AddPosition(8, 0, 0)
aWedgeActor.AddPosition(10, 0, 0)
aPyramidActor.AddPosition(12, 0, 0)
[base, back, left] = backdrop.BuildBackdrop(-1, 15, -1, 4, -1, 2, .1)
ren1.AddActor(base)
base.GetProperty().SetDiffuseColor(.2, .2, .2)
ren1.AddActor(left)
left.GetProperty().SetDiffuseColor(.2, .2, .2)
ren1.AddActor(back)
back.GetProperty().SetDiffuseColor(.2, .2, .2)
ren1.ResetCamera()
ren1.GetActiveCamera().Dolly(2.5)
ren1.ResetCameraClippingRange()
renWin.Render()
# create a little scorecard above each of the cells. These are displayed
# if a ray cast hits the cell, otherwise they are not shown.
pm = vtk.vtkPlaneSource()
pm.SetXResolution(1)
pm.SetYResolution(1)
pmapper = vtk.vtkPolyDataMapper()
pmapper.SetInputConnection(pm.GetOutputPort())
# now try intersecting rays with the cell
cellPicker = vtk.vtkCellPicker()
edgeCheck = vtk.vtkActor()
edgeCheck.SetMapper(pmapper)
edgeCheck.AddPosition(0.5, 2.5, 0)
cellPicker.Pick(87, 71, 0, ren1)
cellId = cellPicker.GetCellId()
if (cellId != -1):
if (pmapper.GetInput().GetCellType(cellId)==aEdge.GetCellType()):
ren1.AddActor(edgeCheck)
triCheck = vtk.vtkActor()
triCheck.SetMapper(pmapper)
triCheck.AddPosition(2.5, 2.5, 0)
cellPicker.Pick(139, 72, 0, ren1)
cellId = cellPicker.GetCellId()
if (cellId != -1):
if (pmapper.GetInput().GetCellType(cellId)==aTri.GetCellType()):
ren1.AddActor(triCheck)
quadCheck = vtk.vtkActor()
quadCheck.SetMapper(pmapper)
quadCheck.AddPosition(4.5, 2.5, 0)
cellPicker.Pick(192, 78, 0, ren1)
cellId = cellPicker.GetCellId()
if (cellId != -1):
if (pmapper.GetInput().GetCellType(cellId)==aQuad.GetCellType()):
ren1.AddActor(quadCheck)
tetCheck = vtk.vtkActor()
tetCheck.SetMapper(pmapper)
tetCheck.AddPosition(6.5, 2.5, 0)
cellPicker.Pick(233, 70, 0, ren1)
cellId = cellPicker.GetCellId()
if (cellId != -1):
if (pmapper.GetInput().GetCellType(cellId)==aTet.GetCellType()):
ren1.AddActor(tetCheck)
hexCheck = vtk.vtkActor()
hexCheck.SetMapper(pmapper)
hexCheck.AddPosition(8.5, 2.5, 0)
cellPicker.Pick(287, 80, 0, ren1)
cellId = cellPicker.GetCellId()
if (cellId != -1):
if (pmapper.GetInput().GetCellType(cellId)==aHex.GetCellType()):
ren1.AddActor(hexCheck)
wedgeCheck = vtk.vtkActor()
wedgeCheck.SetMapper(pmapper)
wedgeCheck.AddPosition(10.5, 2.5, 0)
cellPicker.Pick(287, 80, 0, ren1)
cellId = cellPicker.GetCellId()
if (cellId != -1):
if (pmapper.GetInput().GetCellType(cellId)==aWedge.GetCellType()):
ren1.AddActor(wedgeCheck)
pyraCheck = vtk.vtkActor()
pyraCheck.SetMapper(pmapper)
pyraCheck.AddPosition(12.5, 2.5, 0)
cellPicker.Pick(287, 80, 0, ren1)
cellId = cellPicker.GetCellId()
if (cellId != -1):
if (pmapper.GetInput().GetCellType(cellId)==aPyramid.GetCellType()):
ren1.AddActor(pyraCheck)
# render the image
#
iren.Initialize()
#iren.Start()
| 34.399217
| 76
| 0.745477
|
5726c9828590dbcc55b7716c8be02822d8975d0e
| 5,080
|
py
|
Python
|
selfdrive/car/volkswagen/carcontroller.py
|
snedelkoski/openpilot
|
3e945fb1fb8562949f801ea316e66d16ffa5a4ca
|
[
"MIT"
] | 1
|
2022-03-17T07:17:41.000Z
|
2022-03-17T07:17:41.000Z
|
selfdrive/car/volkswagen/carcontroller.py
|
meiling222/openpilot
|
db7b49c71cfc420259d99d3a4a6f0972b4f887e8
|
[
"MIT"
] | 2
|
2022-01-25T22:23:41.000Z
|
2022-01-26T02:57:58.000Z
|
selfdrive/car/volkswagen/carcontroller.py
|
meiling222/openpilot
|
db7b49c71cfc420259d99d3a4a6f0972b4f887e8
|
[
"MIT"
] | null | null | null |
from cereal import car
from selfdrive.car import apply_std_steer_torque_limits
from selfdrive.car.volkswagen import volkswagencan
from selfdrive.car.volkswagen.values import DBC_FILES, CANBUS, MQB_LDW_MESSAGES, BUTTON_STATES, CarControllerParams as P
from opendbc.can.packer import CANPacker
VisualAlert = car.CarControl.HUDControl.VisualAlert
class CarController():
def __init__(self, dbc_name, CP, VM):
self.apply_steer_last = 0
self.packer_pt = CANPacker(DBC_FILES.mqb)
self.hcaSameTorqueCount = 0
self.hcaEnabledFrameCount = 0
self.graButtonStatesToSend = None
self.graMsgSentCount = 0
self.graMsgStartFramePrev = 0
self.graMsgBusCounterPrev = 0
self.steer_rate_limited = False
def update(self, c, CS, frame, ext_bus, actuators, visual_alert, left_lane_visible, right_lane_visible, left_lane_depart, right_lane_depart):
""" Controls thread """
can_sends = []
# **** Steering Controls ************************************************ #
if frame % P.HCA_STEP == 0:
# Logic to avoid HCA state 4 "refused":
# * Don't steer unless HCA is in state 3 "ready" or 5 "active"
# * Don't steer at standstill
# * Don't send > 3.00 Newton-meters torque
# * Don't send the same torque for > 6 seconds
# * Don't send uninterrupted steering for > 360 seconds
# One frame of HCA disabled is enough to reset the timer, without zeroing the
# torque value. Do that anytime we happen to have 0 torque, or failing that,
# when exceeding ~1/3 the 360 second timer.
if c.latActive:
new_steer = int(round(actuators.steer * P.STEER_MAX))
apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, P)
self.steer_rate_limited = new_steer != apply_steer
if apply_steer == 0:
hcaEnabled = False
self.hcaEnabledFrameCount = 0
else:
self.hcaEnabledFrameCount += 1
if self.hcaEnabledFrameCount >= 118 * (100 / P.HCA_STEP): # 118s
hcaEnabled = False
self.hcaEnabledFrameCount = 0
else:
hcaEnabled = True
if self.apply_steer_last == apply_steer:
self.hcaSameTorqueCount += 1
if self.hcaSameTorqueCount > 1.9 * (100 / P.HCA_STEP): # 1.9s
apply_steer -= (1, -1)[apply_steer < 0]
self.hcaSameTorqueCount = 0
else:
self.hcaSameTorqueCount = 0
else:
hcaEnabled = False
apply_steer = 0
self.apply_steer_last = apply_steer
idx = (frame / P.HCA_STEP) % 16
can_sends.append(volkswagencan.create_mqb_steering_control(self.packer_pt, CANBUS.pt, apply_steer,
idx, hcaEnabled))
# **** HUD Controls ***************************************************** #
if frame % P.LDW_STEP == 0:
if visual_alert in (VisualAlert.steerRequired, VisualAlert.ldw):
hud_alert = MQB_LDW_MESSAGES["laneAssistTakeOverSilent"]
else:
hud_alert = MQB_LDW_MESSAGES["none"]
can_sends.append(volkswagencan.create_mqb_hud_control(self.packer_pt, CANBUS.pt, c.enabled,
CS.out.steeringPressed, hud_alert, left_lane_visible,
right_lane_visible, CS.ldw_stock_values,
left_lane_depart, right_lane_depart))
# **** ACC Button Controls ********************************************** #
# FIXME: this entire section is in desperate need of refactoring
if CS.CP.pcmCruise:
if frame > self.graMsgStartFramePrev + P.GRA_VBP_STEP:
if c.cruiseControl.cancel:
# Cancel ACC if it's engaged with OP disengaged.
self.graButtonStatesToSend = BUTTON_STATES.copy()
self.graButtonStatesToSend["cancel"] = True
elif c.enabled and CS.esp_hold_confirmation:
# Blip the Resume button if we're engaged at standstill.
# FIXME: This is a naive implementation, improve with visiond or radar input.
self.graButtonStatesToSend = BUTTON_STATES.copy()
self.graButtonStatesToSend["resumeCruise"] = True
if CS.graMsgBusCounter != self.graMsgBusCounterPrev:
self.graMsgBusCounterPrev = CS.graMsgBusCounter
if self.graButtonStatesToSend is not None:
if self.graMsgSentCount == 0:
self.graMsgStartFramePrev = frame
idx = (CS.graMsgBusCounter + 1) % 16
can_sends.append(volkswagencan.create_mqb_acc_buttons_control(self.packer_pt, ext_bus, self.graButtonStatesToSend, CS, idx))
self.graMsgSentCount += 1
if self.graMsgSentCount >= P.GRA_VBP_COUNT:
self.graButtonStatesToSend = None
self.graMsgSentCount = 0
new_actuators = actuators.copy()
new_actuators.steer = self.apply_steer_last / P.STEER_MAX
return new_actuators, can_sends
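# A standalone sketch of the "don't send the same torque for too long" rule
# described in the comments inside update() above: nudge the torque by one
# unit once it has been static for roughly the documented window. The helper,
# its default step size and the 1.9 s window are assumptions mirroring the
# constants used in this file, not part of the original controller.
def nudge_static_torque(apply_steer, last_steer, same_count, hca_step=2, window_s=1.9):
    if apply_steer != 0 and apply_steer == last_steer:
        same_count += 1
        if same_count > window_s * (100 / hca_step):
            apply_steer -= (1, -1)[apply_steer < 0]
            same_count = 0
    else:
        same_count = 0
    return apply_steer, same_count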
| 43.418803
| 143
| 0.624606
|
be2042f7d08397a1b748eb9c3ec07452c9c6d2bd
| 741
|
py
|
Python
|
app/products/products.py
|
prinzz1208/StockManagementBackend
|
db9f3e3c0ddb41d988be6bc91f20b7af28ff99e0
|
[
"MIT"
] | null | null | null |
app/products/products.py
|
prinzz1208/StockManagementBackend
|
db9f3e3c0ddb41d988be6bc91f20b7af28ff99e0
|
[
"MIT"
] | null | null | null |
app/products/products.py
|
prinzz1208/StockManagementBackend
|
db9f3e3c0ddb41d988be6bc91f20b7af28ff99e0
|
[
"MIT"
] | 1
|
2021-09-30T18:01:08.000Z
|
2021-09-30T18:01:08.000Z
|
from app.categories.schemas import AddproductDTO, productDTO
from sqlalchemy.orm.session import Session
from app.database.dbconfig import get_db
from fastapi import APIRouter
from fastapi.param_functions import Depends
from app.products.schemas import AddProductDTO, ProductDTO
from . import repository
router = APIRouter(tags=['products'])
@router.get('/{category_id}')
def get_products(category_id:int, db: Session = Depends(get_db)):
return repository.get_products(category_id=category_id,db=db)
@router.post('/{category_id}',response_model=ProductDTO)
def add_product(category_id:int, add_product_dto: AddProductDTO, db: Session = Depends(get_db)):
return repository.add_product(category_id, add_product_dto=add_product_dto,db=db)
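# A hedged wiring sketch showing how this router could be mounted in an
# application; the app object and prefix are assumptions, the real project
# may include the router elsewhere with different settings.
from fastapi import FastAPI
example_app = FastAPI()
example_app.include_router(router, prefix="/products")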
| 43.588235
| 96
| 0.820513
|
771956d5fc0348b4bf57c43c19f23950f9f5b3d5
| 6,926
|
py
|
Python
|
meerk40t/device/ch341/ch341.py
|
aniziorodrigues/meerk40t
|
ca1180b690a2f25748fe04cb7fdbf270520deab9
|
[
"MIT"
] | null | null | null |
meerk40t/device/ch341/ch341.py
|
aniziorodrigues/meerk40t
|
ca1180b690a2f25748fe04cb7fdbf270520deab9
|
[
"MIT"
] | null | null | null |
meerk40t/device/ch341/ch341.py
|
aniziorodrigues/meerk40t
|
ca1180b690a2f25748fe04cb7fdbf270520deab9
|
[
"MIT"
] | null | null | null |
from ...kernel import Module
def plugin(kernel, lifecycle=None):
if lifecycle == "register":
kernel.register("module/ch341", CH341)
class Connection:
"""
A single connection to an CH341 device.
"""
def __init__(self, channel, state):
self.channel = channel
self.state = state
self.index = None
self.chipv = None
self.bus = None
self.address = None
def open(self):
"""
Opens the connection.
"""
pass
def close(self):
"""
Closes the driver for the stated device index.
"""
pass
def write(self, packet):
"""
Writes a 32 byte packet to the device. This is typically \x00 + 30 bytes + CRC
        The driver will packetize the 0xA6 writes.
:param packet: 32 bytes of data to be written to the CH341.
:return:
"""
pass
def write_addr(self, packet):
"""
Writes an address byte packet to the device. This is typically 1 byte
        The driver will packetize the 0xA7 writes.
:param packet: 1 byte of data to be written to the CH341.
:return:
"""
pass
def get_status(self):
"""
Gets the status bytes from the CH341. This is usually 255 for the D0-D7 values
        And the state flags for the chip signals. The important flags are WAIT, which
        means do not send data yet; ERR, which means the data sent was faulty; and
        PEMP, which means the buffer is empty.
StateBitERR 0x00000100
StateBitPEMP 0x00000200
StateBitINT 0x00000400
StateBitSLCT 0x00000800
StateBitWAIT 0x00002000
StateBitDATAS 0x00004000
StateBitADDRS 0x00008000
StateBitRESET 0x00010000
StateBitWRITE 0x00020000
StateBitSCL 0x00400000
StateBitSDA 0x00800000
:return:
"""
raise NotImplementedError
def get_chip_version(self):
"""
Gets the version of the CH341 chip being used.
:return: version. Eg. 48.
"""
raise NotImplementedError
class Handler:
"""
Handlers provide an implementation of a particular backend tasked with providing connections.
"""
def __init__(self, channel, status):
self.channel = channel
self.status = status
def connect(self, driver_index=0, chipv=-1, bus=-1, address=-1):
pass
class CH341(Module, Handler):
"""
Generic CH341 Module performs the interactions between the requested operations and several delegated backend ch341
drivers. This permits interfacing with LibUsb, Windll or Mock Ch341 backends. In use-agnostic fashion, this should
be valid and acceptable for any CH341 interactions. CH341 chip interfacing is required for Lhystudios Controllers,
    Moshiboard Controllers, and other interactions such as a plugin that uses additional CH341 devices.
"""
def __init__(self, *args, **kwargs):
Module.__init__(self, *args, **kwargs)
if "log" in kwargs:
channel = kwargs["log"]
if isinstance(channel, str):
channel = self.context.channel(channel, buffer_size=500)
else:
channel = self.context.channel("ch341/usb", buffer_size=500)
Handler.__init__(self, channel, self._state_change)
def connect(self, driver_index=-1, chipv=-1, bus=-1, address=-1, mock=False):
"""
Requests and returns an available connection. The connection object itself has open() and close() functions and
provides any information about the connection if available. If the connection is not opened, no resources are
reserved.
"""
_ = self.channel._
if mock:
return self._connect_mock(driver_index, chipv, bus, address)
handlers = []
try:
from .libusb import Handler as LibUsbHandler
handlers.append(LibUsbHandler(channel=self.channel, status=self.status))
except ImportError:
self.channel(_("PyUsb is not installed. Skipping."))
try:
from .windll import Handler as WinHandler
handlers.append(WinHandler(channel=self.channel, status=self.status))
except ImportError:
self.channel(_("No Windll interfacing. Skipping."))
if driver_index != -1: # Match one specific index.
for driver_handler in handlers:
try:
return self._connect_attempt(
driver_handler, driver_index, chipv, bus, address
)
except ConnectionRefusedError:
pass
else:
for i in range(16):
for driver_handler in handlers:
try:
connection = self._connect_attempt(
driver_handler, i, chipv, bus, address
)
return connection
except ConnectionRefusedError:
pass
except PermissionError:
return # OS denied permissions, no point checking anything else.
def _state_change(self, state_value):
self.context.signal("pipe;state", state_value)
def _connect_attempt(self, handler, driver_index=-1, chipv=-1, bus=-1, address=-1):
_ = self.channel._
connection = handler.connect(
driver_index=driver_index, chipv=chipv, bus=bus, address=address
)
try:
chip_version = connection.get_chip_version()
except AttributeError:
return connection
self.channel(_("CH341 Chip Version: %d") % chip_version)
self.context.signal("pipe;index", connection.index)
self.context.signal("pipe;chipv", chip_version)
self.context.signal("pipe;bus", connection.bus)
self.context.signal("pipe;address", connection.address)
self.channel(_("Driver Detected: %s") % connection.driver_name)
self._state_change("STATE_CONNECTED")
self.channel(_("Device Connected.\n"))
return connection
def _connect_mock(self, driver_index=-1, chipv=-1, bus=-1, address=-1):
from .mock import Handler
driver_handler = Handler(channel=self.channel, status=self.status)
if driver_index != -1:
try:
return self._connect_attempt(
driver_handler, driver_index, chipv, bus, address
)
except ConnectionRefusedError:
pass
else:
for i in range(16):
try:
return self._connect_attempt(driver_handler, i, chipv, bus, address)
except ConnectionRefusedError:
pass
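# A minimal sketch of decoding the raw status word documented in
# Connection.get_status(); the bit masks are copied from that docstring, and
# the helper itself is illustrative rather than part of the module's API.
STATE_BIT_ERR = 0x00000100
STATE_BIT_PEMP = 0x00000200
STATE_BIT_WAIT = 0x00002000
def describe_status(status_word):
    """Return the documented flags that are set in a raw CH341 status word."""
    flags = []
    if status_word & STATE_BIT_ERR:
        flags.append("ERR")  # the data sent was faulty
    if status_word & STATE_BIT_PEMP:
        flags.append("PEMP")  # the buffer is empty
    if status_word & STATE_BIT_WAIT:
        flags.append("WAIT")  # do not send more data yet
    return flags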
| 34.80402
| 119
| 0.594282
|
d85ced9ef535e5311366ca531fe0b8650ece10b5
| 700
|
py
|
Python
|
91. Decode Ways/main.py
|
Competitive-Programmers-Community/LeetCode
|
841fdee805b1a626e9f1cd0e12398d25054638af
|
[
"MIT"
] | 2
|
2019-10-05T09:48:20.000Z
|
2019-10-05T15:40:01.000Z
|
91. Decode Ways/main.py
|
Competitive-Programmers-Community/LeetCode
|
841fdee805b1a626e9f1cd0e12398d25054638af
|
[
"MIT"
] | null | null | null |
91. Decode Ways/main.py
|
Competitive-Programmers-Community/LeetCode
|
841fdee805b1a626e9f1cd0e12398d25054638af
|
[
"MIT"
] | 3
|
2020-09-27T05:48:30.000Z
|
2021-08-13T10:07:08.000Z
|
class Solution:
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
if s[0] == '0':
return 0
dp = [0 for i in range(len(s) + 1)]
dp[0] = 1
dp[1] = 1
for i in range(2, len(s)+1):
if (1 <= int(s[i-1]) <= 9) and ((1 <= int(s[i-2:i]) <= 26) and (1 <= int(s[i-2]) <= 2)):
dp[i] = dp[i-1] + dp[i-2]
elif (1 <= int(s[i-1]) <= 9):
dp[i] = dp[i-1]
elif ((1 <= int(s[i-2:i]) <= 26) and (1 <= int(s[i-2]) <= 2)):
dp[i] = dp[i-2]
print(dp)
return dp[-1]
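# A small self-check sketch for the DP above: "12" decodes as 1-2 or 12 (2
# ways) and "226" as 2-2-6, 22-6 or 2-26 (3 ways). The printed lists come from
# the debug statement inside numDecodings.
if __name__ == "__main__":
    assert Solution().numDecodings("12") == 2
    assert Solution().numDecodings("226") == 3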
| 25.925926
| 100
| 0.31
|
af27be71dcbbd0e064ccf2c0541f336522cdcc3d
| 7,488
|
py
|
Python
|
heatclient/osc/v1/software_config.py
|
enterstudio/python-heatclient
|
954e475a6a0a12432ec325d7579460fabcf3f40a
|
[
"Apache-2.0"
] | 57
|
2015-01-15T05:36:00.000Z
|
2021-10-04T13:28:31.000Z
|
heatclient/osc/v1/software_config.py
|
enterstudio/python-heatclient
|
954e475a6a0a12432ec325d7579460fabcf3f40a
|
[
"Apache-2.0"
] | null | null | null |
heatclient/osc/v1/software_config.py
|
enterstudio/python-heatclient
|
954e475a6a0a12432ec325d7579460fabcf3f40a
|
[
"Apache-2.0"
] | 57
|
2015-01-06T07:00:01.000Z
|
2021-06-15T05:17:52.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Orchestration v1 software config action implementations"""
import logging
from osc_lib.command import command
from osc_lib import exceptions as exc
from osc_lib import utils
import six
from six.moves.urllib import request
import yaml
from heatclient._i18n import _
from heatclient.common import format_utils
from heatclient.common import template_format
from heatclient.common import utils as heat_utils
from heatclient import exc as heat_exc
class DeleteConfig(command.Command):
"""Delete software configs"""
log = logging.getLogger(__name__ + ".DeleteConfig")
def get_parser(self, prog_name):
parser = super(DeleteConfig, self).get_parser(prog_name)
parser.add_argument(
'config',
metavar='<config>',
nargs='+',
help=_('IDs of the software configs to delete')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
heat_client = self.app.client_manager.orchestration
return _delete_config(heat_client, parsed_args)
def _delete_config(heat_client, args):
failure_count = 0
for config_id in args.config:
try:
heat_client.software_configs.delete(
config_id=config_id)
except Exception as e:
if isinstance(e, heat_exc.HTTPNotFound):
print(_('Software config with ID %s not found') % config_id)
failure_count += 1
continue
if failure_count:
raise exc.CommandError(_('Unable to delete %(count)s of the '
'%(total)s software configs.') %
{'count': failure_count,
'total': len(args.config)})
class ListConfig(command.Lister):
"""List software configs"""
log = logging.getLogger(__name__ + ".ListConfig")
def get_parser(self, prog_name):
parser = super(ListConfig, self).get_parser(prog_name)
parser.add_argument(
'--limit',
metavar='<limit>',
help=_('Limit the number of configs returned')
)
parser.add_argument(
'--marker',
metavar='<id>',
help=_('Return configs that appear after the given config ID')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
heat_client = self.app.client_manager.orchestration
return _list_config(heat_client, parsed_args)
def _list_config(heat_client, args):
kwargs = {}
if args.limit:
kwargs['limit'] = args.limit
if args.marker:
kwargs['marker'] = args.marker
scs = heat_client.software_configs.list(**kwargs)
columns = ['id', 'name', 'group', 'creation_time']
return (columns, (utils.get_item_properties(s, columns) for s in scs))
class CreateConfig(format_utils.JsonFormat):
"""Create software config"""
log = logging.getLogger(__name__ + ".CreateConfig")
def get_parser(self, prog_name):
parser = super(CreateConfig, self).get_parser(prog_name)
parser.add_argument(
'name',
metavar='<config-name>',
help=_('Name of the software config to create')
)
parser.add_argument(
'--config-file',
metavar='<config-file>',
help=_('Path to JSON/YAML containing map defining '
'<inputs>, <outputs>, and <options>')
)
parser.add_argument(
'--definition-file',
metavar='<destination-file>',
help=_('Path to software config script/data')
)
parser.add_argument(
'--group',
metavar='<group>',
default='Heat::Ungrouped',
help=_('Group name of tool expected by the software config')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
heat_client = self.app.client_manager.orchestration
return _create_config(heat_client, parsed_args)
def _create_config(heat_client, args):
config = {
'group': args.group,
'config': ''
}
defn = {}
if args.definition_file:
defn_url = heat_utils.normalise_file_path_to_url(
args.definition_file)
defn_raw = request.urlopen(defn_url).read() or '{}'
defn = yaml.load(defn_raw, Loader=template_format.yaml_loader)
config['inputs'] = defn.get('inputs', [])
config['outputs'] = defn.get('outputs', [])
config['options'] = defn.get('options', {})
if args.config_file:
config_url = heat_utils.normalise_file_path_to_url(
args.config_file)
config['config'] = request.urlopen(config_url).read()
# build a mini-template with a config resource and validate it
validate_template = {
'heat_template_version': '2013-05-23',
'resources': {
args.name: {
'type': 'OS::Heat::SoftwareConfig',
'properties': config
}
}
}
heat_client.stacks.validate(template=validate_template)
config['name'] = args.name
sc = heat_client.software_configs.create(**config).to_dict()
rows = list(six.itervalues(sc))
columns = list(six.iterkeys(sc))
return columns, rows
class ShowConfig(format_utils.YamlFormat):
"""Show software config details"""
log = logging.getLogger(__name__ + ".ShowConfig")
def get_parser(self, prog_name):
parser = super(ShowConfig, self).get_parser(prog_name)
parser.add_argument(
'config',
metavar='<config>',
help=_('ID of the config')
)
parser.add_argument(
'--config-only',
default=False,
action="store_true",
help=_('Only display the value of the <config> property.')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
heat_client = self.app.client_manager.orchestration
return _show_config(heat_client, config_id=parsed_args.config,
config_only=parsed_args.config_only)
def _show_config(heat_client, config_id, config_only):
try:
sc = heat_client.software_configs.get(config_id=config_id)
except heat_exc.HTTPNotFound:
raise exc.CommandError(_('Configuration not found: %s') % config_id)
columns = None
rows = None
if config_only:
print(sc.config)
else:
columns = (
'id',
'name',
'group',
'config',
'inputs',
'outputs',
'options',
'creation_time',
)
rows = utils.get_dict_properties(sc.to_dict(), columns)
return columns, rows
| 31.070539
| 77
| 0.614183
|
aa0e38769c7c9ce7f6a6425c97060ea70edbc59a
| 456
|
py
|
Python
|
nocloud/storage-pool/list_storage_pool_by_name.py
|
chong601/NoCloud
|
98fa5fba5220c5f98a7c55974161c2285615ac1f
|
[
"MIT"
] | 14
|
2021-02-24T00:11:01.000Z
|
2021-06-19T09:58:06.000Z
|
nocloud/storage-pool/list_storage_pool_by_name.py
|
chong601/NoCloud
|
98fa5fba5220c5f98a7c55974161c2285615ac1f
|
[
"MIT"
] | 1
|
2021-03-05T00:12:55.000Z
|
2021-03-05T00:17:16.000Z
|
nocloud/storage-pool/list_storage_pool_by_name.py
|
chong601/NoCloud
|
98fa5fba5220c5f98a7c55974161c2285615ac1f
|
[
"MIT"
] | 3
|
2021-02-24T00:23:32.000Z
|
2021-03-02T09:09:33.000Z
|
import libvirt
# Constants
LIBVIRT_URI = 'qemu+ssh://chong601@10.102.0.5/system'
INFO_VOLUME_ACTIVATING = 'Pool "{}" is inactive, temporarily activating pool...'
INFO_VOLUME_DEACTIVATING = 'Deactivating pool "{}"...'
# Parameters
STORAGE_POOL_NAME = 'vm-ubuntu-focal-lxd-1'
# Internal definitions
libvirt_conn = libvirt.open(LIBVIRT_URI)
pool = libvirt_conn.storagePoolLookupByName(STORAGE_POOL_NAME)
print("Pool name found: {}".format(pool.name()))
| 25.333333
| 80
| 0.760965
|
6fd6ed672a3cf7b5b3b8c39d77d87f188c77aae3
| 15,167
|
py
|
Python
|
salt/crypt.py
|
slafs/salt
|
13340355afb15db7fcc1b8393247f7aa10024710
|
[
"Apache-2.0"
] | null | null | null |
salt/crypt.py
|
slafs/salt
|
13340355afb15db7fcc1b8393247f7aa10024710
|
[
"Apache-2.0"
] | null | null | null |
salt/crypt.py
|
slafs/salt
|
13340355afb15db7fcc1b8393247f7aa10024710
|
[
"Apache-2.0"
] | null | null | null |
'''
The crypt module manages all of the cryptography functions for minions and
masters, encrypting and decrypting payloads, preparing messages, and
authenticating peers
'''
# Import python libs
import os
import sys
import hmac
import hashlib
import logging
# Import third party libs
from M2Crypto import RSA
from Crypto.Cipher import AES
try:
import win32api
import win32con
is_windows = True
except ImportError:
is_windows = False
# Import salt utils
import salt.utils
import salt.payload
import salt.utils.verify
from salt.exceptions import AuthenticationError, SaltClientError, SaltReqTimeoutError
log = logging.getLogger(__name__)
def clean_old_key(rsa_path):
'''
Read in an old m2crypto key and save it back in the clear so
pycrypto can handle it
'''
def foo_pass(self, data=''):
return 'foo'
mkey = RSA.load_key(rsa_path, callback=foo_pass)
try:
os.remove(rsa_path)
except (IOError, OSError):
pass
# Set write permission for minion.pem file - reverted after saving the key
if is_windows:
win32api.SetFileAttributes(rsa_path, win32con.FILE_ATTRIBUTE_NORMAL)
try:
mkey.save_key(rsa_path, None)
except IOError:
log.error(
('Failed to update old RSA format for key {0}, future '
'releases may not be able to use this key').format(rsa_path)
)
# Set read-only permission for minion.pem file
if is_windows:
win32api.SetFileAttributes(rsa_path, win32con.FILE_ATTRIBUTE_READONLY)
return mkey
def gen_keys(keydir, keyname, keysize):
'''
Generate a keypair for use with salt
'''
base = os.path.join(keydir, keyname)
priv = '{0}.pem'.format(base)
pub = '{0}.pub'.format(base)
gen = RSA.gen_key(keysize, 1, callback=lambda x,y,z:None)
cumask = os.umask(191)
gen.save_key(priv, None)
os.umask(cumask)
gen.save_pub_key(pub)
os.chmod(priv, 256)
return priv
class MasterKeys(dict):
'''
The Master Keys class is used to manage the public key pair used for
authentication by the master.
'''
def __init__(self, opts):
self.opts = opts
self.pub_path = os.path.join(self.opts['pki_dir'], 'master.pub')
self.rsa_path = os.path.join(self.opts['pki_dir'], 'master.pem')
self.key = self.__get_keys()
self.token = self.__gen_token()
def __get_keys(self):
'''
        Returns a key object for the master
'''
if os.path.exists(self.rsa_path):
try:
key = RSA.load_key(self.rsa_path)
except Exception:
# This is probably an "old key", we need to use m2crypto to
# open it and then save it back without a pass phrase
key = clean_old_key(self.rsa_path)
log.debug('Loaded master key: {0}'.format(self.rsa_path))
else:
log.info('Generating keys: {0}'.format(self.opts['pki_dir']))
gen_keys(self.opts['pki_dir'], 'master', 4096)
key = RSA.load_key(self.rsa_path)
return key
def __gen_token(self):
'''
Generate the authentication token
'''
return self.key.private_encrypt('salty bacon', 5)
def get_pub_str(self):
'''
Return the string representation of the public key
'''
if not os.path.isfile(self.pub_path):
key = self.__get_keys()
key.save_pub_key(self.pub_path)
return salt.utils.fopen(self.pub_path, 'r').read()
class Auth(object):
'''
The Auth class provides the sequence for setting up communication with
the master server from a minion.
'''
def __init__(self, opts):
self.opts = opts
self.token = Crypticle.generate_key_string()
self.serial = salt.payload.Serial(self.opts)
self.pub_path = os.path.join(self.opts['pki_dir'], 'minion.pub')
self.rsa_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
if 'syndic_master' in self.opts:
self.mpub = 'syndic_master.pub'
elif 'alert_master' in self.opts:
self.mpub = 'monitor_master.pub'
else:
self.mpub = 'minion_master.pub'
def get_keys(self):
'''
        Returns a key object for the minion
'''
# Make sure all key parent directories are accessible
user = self.opts.get('user', 'root')
salt.utils.verify.check_path_traversal(self.opts['pki_dir'], user)
if os.path.exists(self.rsa_path):
try:
key = RSA.load_key(self.rsa_path)
except Exception:
# This is probably an "old key", we need to use m2crypto to
# open it and then save it back without a pass phrase
key = clean_old_key(self.rsa_path)
log.debug('Loaded minion key: {0}'.format(self.rsa_path))
else:
log.info('Generating keys: {0}'.format(self.opts['pki_dir']))
gen_keys(self.opts['pki_dir'], 'minion', 4096)
key = RSA.load_key(self.rsa_path)
return key
def minion_sign_in_payload(self):
'''
Generates the payload used to authenticate with the master
server. This payload consists of the passed in id_ and the ssh
        public key to encrypt the AES key sent back from the master.
'''
payload = {}
key = self.get_keys()
tmp_pub = salt.utils.mkstemp()
key.save_pub_key(tmp_pub)
payload['enc'] = 'clear'
payload['load'] = {}
payload['load']['cmd'] = '_auth'
payload['load']['id'] = self.opts['id']
try:
pub = RSA.load_pub_key(os.path.join(self.opts['pki_dir'], self.mpub))
payload['load']['token'] = pub.public_encrypt(self.token, 4)
except Exception:
pass
with salt.utils.fopen(tmp_pub, 'r') as fp_:
payload['load']['pub'] = fp_.read()
os.remove(tmp_pub)
return payload
def decrypt_aes(self, aes):
'''
This function is used to decrypt the aes seed phrase returned from
the master server, the seed phrase is decrypted with the ssh rsa
host key.
Pass in the encrypted aes key.
Returns the decrypted aes seed key, a string
'''
log.debug('Decrypting the current master AES key')
key = self.get_keys()
return key.private_decrypt(aes, 4)
def verify_master(self, master_pub, token):
'''
Takes the master pubkey and compares it to the saved master pubkey,
        the token is signed with the master private key and must be
        verified successfully to confirm that the master has been connected
        to. The token must verify as a signature of the phrase 'salty bacon'
with the public key.
Returns a bool
'''
m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub)
if os.path.isfile(m_pub_fn) and not self.opts['open_mode']:
local_master_pub = salt.utils.fopen(m_pub_fn).read()
if not master_pub == local_master_pub:
# This is not the last master we connected to
log.error('The master key has changed, the salt master could '
'have been subverted, verify salt master\'s public '
'key')
return False
try:
if token and not self.decrypt_aes(token) == self.token:
log.error('The master failed to decrypt the random minion token')
return False
except Exception:
log.error('The master failed to decrypt the random minion token')
return False
return True
else:
salt.utils.fopen(m_pub_fn, 'w+').write(master_pub)
return True
def sign_in(self):
'''
Send a sign in request to the master, sets the key information and
returns a dict containing the master publish interface to bind to
and the decrypted aes key for transport decryption.
'''
auth = {}
m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub)
try:
self.opts['master_ip'] = salt.utils.dns_check(
self.opts['master'],
True
)
except SaltClientError:
return 'retry'
sreq = salt.payload.SREQ(
self.opts['master_uri'],
self.opts.get('id', '')
)
try:
payload = sreq.send_auto(self.minion_sign_in_payload())
except SaltReqTimeoutError:
return 'retry'
if 'load' in payload:
if 'ret' in payload['load']:
if not payload['load']['ret']:
log.critical(
'The Salt Master has rejected this minion\'s public '
'key!\nTo repair this issue, delete the public key '
'for this minion on the Salt Master and restart this '
'minion.\nOr restart the Salt Master in open mode to '
'clean out the keys. The Salt Minion will now exit.'
)
sys.exit(42)
else:
log.error(
'The Salt Master has cached the public key for this '
'node, this salt minion will wait for {0} seconds '
'before attempting to re-authenticate'.format(
self.opts['acceptance_wait_time']
)
)
return 'retry'
if not self.verify_master(payload['pub_key'], payload['token']):
log.critical(
'The Salt Master server\'s public key did not authenticate!\n'
'The master may need to be updated if it is a version of Salt '
'lower than 0.10.4, or\n'
'If you are confident that you are connecting to a valid Salt '
'Master, then remove the master public key and restart the '
'Salt Minion.\nThe master public key can be found '
'at:\n{0}'.format(m_pub_fn)
)
sys.exit(42)
if self.opts.get('master_finger', False):
if salt.utils.pem_finger(m_pub_fn) != self.opts['master_finger']:
log.critical((
'The specified fingerprint in the master configuration '
'file:\n{0}\nDoes not match the authenticating master\'s '
'key:\n{1}\nVerify that the configured fingerprint '
'matches the fingerprint of the correct master and that '
'this minion is not subject to a man in the middle attack'
).format(
self.opts['master_finger'],
salt.utils.pem_finger(m_pub_fn)
)
)
sys.exit(42)
auth['aes'] = self.decrypt_aes(payload['aes'])
auth['publish_port'] = payload['publish_port']
return auth
class Crypticle(object):
'''
Authenticated encryption class
Encryption algorithm: AES-CBC
Signing algorithm: HMAC-SHA256
'''
PICKLE_PAD = 'pickle::'
AES_BLOCK_SIZE = 16
SIG_SIZE = hashlib.sha256().digest_size
def __init__(self, opts, key_string, key_size=192):
self.keys = self.extract_keys(key_string, key_size)
self.key_size = key_size
self.serial = salt.payload.Serial(opts)
@classmethod
def generate_key_string(cls, key_size=192):
key = os.urandom(key_size // 8 + cls.SIG_SIZE)
return key.encode('base64').replace('\n', '')
@classmethod
def extract_keys(cls, key_string, key_size):
key = key_string.decode('base64')
        assert len(key) == key_size // 8 + cls.SIG_SIZE, 'invalid key'
return key[:-cls.SIG_SIZE], key[-cls.SIG_SIZE:]
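    # Wire format produced by encrypt() and consumed by decrypt():
    #   [ AES_BLOCK_SIZE-byte IV | AES-CBC ciphertext | SIG_SIZE-byte HMAC ]
    # The HMAC-SHA256 signature covers the IV plus ciphertext and uses the
    # trailing SIG_SIZE bytes of the key material split by extract_keys().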
def encrypt(self, data):
'''
encrypt data with AES-CBC and sign it with HMAC-SHA256
'''
aes_key, hmac_key = self.keys
pad = self.AES_BLOCK_SIZE - len(data) % self.AES_BLOCK_SIZE
data = data + pad * chr(pad)
iv_bytes = os.urandom(self.AES_BLOCK_SIZE)
cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes)
data = iv_bytes + cypher.encrypt(data)
sig = hmac.new(hmac_key, data, hashlib.sha256).digest()
return data + sig
def decrypt(self, data):
'''
verify HMAC-SHA256 signature and decrypt data with AES-CBC
'''
aes_key, hmac_key = self.keys
sig = data[-self.SIG_SIZE:]
data = data[:-self.SIG_SIZE]
mac_bytes = hmac.new(hmac_key, data, hashlib.sha256).digest()
if len(mac_bytes) != len(sig):
log.warning('Failed to authenticate message')
raise AuthenticationError('message authentication failed')
result = 0
for x, y in zip(mac_bytes, sig):
result |= ord(x) ^ ord(y)
if result != 0:
log.warning('Failed to authenticate message')
raise AuthenticationError('message authentication failed')
iv_bytes = data[:self.AES_BLOCK_SIZE]
data = data[self.AES_BLOCK_SIZE:]
cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes)
data = cypher.decrypt(data)
return data[:-ord(data[-1])]
def dumps(self, obj):
'''
Serialize and encrypt a python object
'''
return self.encrypt(self.PICKLE_PAD + self.serial.dumps(obj))
def loads(self, data):
'''
Decrypt and un-serialize a python object
'''
data = self.decrypt(data)
# simple integrity check to verify that we got meaningful data
if not data.startswith(self.PICKLE_PAD):
return {}
return self.serial.loads(data[len(self.PICKLE_PAD):])
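# --- Illustrative sketch (not part of the original salt module) ------------
# The Crypticle docstring above describes AES-CBC encryption signed with
# HMAC-SHA256.  A minimal round trip, assuming ``opts`` is any options dict
# accepted by salt.payload.Serial, could look like this:
def _example_crypticle_roundtrip(opts):
    key_string = Crypticle.generate_key_string()
    crypticle = Crypticle(opts, key_string)
    blob = crypticle.dumps({'hello': 'world'})
    # loads() verifies the HMAC before decrypting and de-serializing, and
    # returns {} if the payload does not carry the expected pickle pad.
    return crypticle.loads(blob)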
class SAuth(Auth):
'''
Set up an object to maintain the standalone authentication session
with the salt master
'''
def __init__(self, opts):
super(SAuth, self).__init__(opts)
self.crypticle = self.__authenticate()
def __authenticate(self):
'''
        Authenticate with the master. This method breaks the functional
        paradigm: it will update the master information from a fresh
        sign-in. Signing in can occur as often as needed to keep up with
        the revolving master aes key.
'''
creds = self.sign_in()
if creds == 'retry':
            log.error('Failed to authenticate with the master, verify this '
                      'minion\'s public key has been accepted on the salt master')
sys.exit(2)
return Crypticle(self.opts, creds['aes'])
def gen_token(self, clear_tok):
'''
Encrypt a string with the minion private key to verify identity
with the master.
'''
return self.get_keys().private_encrypt(clear_tok, 5)
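# --- Illustrative sketch (not part of the original salt module) ------------
# SAuth signs in once at construction time and keeps a ready-to-use
# Crypticle.  gen_token() lets the minion prove its identity by encrypting
# a clear-text token with its private key.  ``opts`` is assumed to be a
# loaded minion configuration dict.
def _example_sauth_usage(opts):
    sauth = SAuth(opts)
    signed_token = sauth.gen_token('salt')
    # Encrypt an arbitrary payload for transport with the session AES key.
    return sauth.crypticle.dumps({'token': signed_token})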
| 36.459135
| 85
| 0.58291
|
1616437c78665e6d42eb002ea9fbc5edc07ff33d
| 191
|
py
|
Python
|
ch04/filter.regular.py
|
kxen42/Learn-Python-Programming-Third-Edition
|
851ddc5e6094fadd44f31a9ad1d3876456b04372
|
[
"MIT"
] | 19
|
2021-11-05T22:54:09.000Z
|
2022-03-29T15:03:47.000Z
|
ch04/filter.regular.py
|
kxen42/Learn-Python-Programming-Third-Edition
|
851ddc5e6094fadd44f31a9ad1d3876456b04372
|
[
"MIT"
] | null | null | null |
ch04/filter.regular.py
|
kxen42/Learn-Python-Programming-Third-Edition
|
851ddc5e6094fadd44f31a9ad1d3876456b04372
|
[
"MIT"
] | 26
|
2021-11-12T17:04:50.000Z
|
2022-03-29T01:10:35.000Z
|
# filter.regular.py
def is_multiple_of_five(n):
return not n % 5
def get_multiples_of_five(n):
return list(filter(is_multiple_of_five, range(n)))
print(get_multiples_of_five(50))
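# A minimal equivalent sketch (not part of the original example): the same
# multiples can be produced with a list comprehension, which is often the
# preferred idiom when the predicate is used in only one place.
def get_multiples_of_five_listcomp(n):
    return [i for i in range(n) if i % 5 == 0]
print(get_multiples_of_five_listcomp(50))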
| 17.363636
| 54
| 0.748691
|
c4d6343021539078383822b733ff9c84c80b6df7
| 105,279
|
py
|
Python
|
autotest/ogr/ogr_elasticsearch.py
|
jcphill/gdal
|
98c9ecf8513325bb69888d368bd9a0d54b79e72b
|
[
"Apache-2.0"
] | 2
|
2022-03-24T00:53:48.000Z
|
2022-03-26T02:52:52.000Z
|
autotest/ogr/ogr_elasticsearch.py
|
jcphill/gdal
|
98c9ecf8513325bb69888d368bd9a0d54b79e72b
|
[
"Apache-2.0"
] | 1
|
2022-02-26T19:07:05.000Z
|
2022-02-27T00:11:45.000Z
|
autotest/ogr/ogr_elasticsearch.py
|
jcphill/gdal
|
98c9ecf8513325bb69888d368bd9a0d54b79e72b
|
[
"Apache-2.0"
] | 1
|
2022-01-13T19:15:30.000Z
|
2022-01-13T19:15:30.000Z
|
#!/usr/bin/env pytest
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Elasticsearch driver testing (with fake server)
# Author: Even Rouault <even dot rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2012, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import json
import time
import ogrtest
import gdaltest
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
import pytest
pytestmark = pytest.mark.require_driver('Elasticsearch')
###############################################################################
# Cleanup
def ogr_elasticsearch_delete_files():
for subdir in ['_search', '_cat', 'no_srs', 'non_standard_geometries', 'other_srs', 'a_layer']:
lst = gdal.ReadDir('/vsimem/fakeelasticsearch/' + subdir)
if lst:
for f in lst:
gdal.Unlink('/vsimem/fakeelasticsearch/' + subdir + '/' + f)
lst = gdal.ReadDir('/vsimem/fakeelasticsearch/' +
subdir + '/FeatureCollection')
if lst:
for f in lst:
gdal.Unlink('/vsimem/fakeelasticsearch/' +
subdir + '/FeatureCollection/' + f)
lst = gdal.ReadDir('/vsimem/fakeelasticsearch')
if lst:
for f in lst:
gdal.Unlink('/vsimem/fakeelasticsearch/' + f)
gdal.Unlink('/vsimem/fakeelasticsearch')
gdal.Unlink('/vsimem/fakeelasticsearch&USERPWD=user:pwd')
###############################################################################
@pytest.fixture(autouse=True, scope='module')
def startup_and_cleanup():
ogrtest.srs_wgs84 = osr.SpatialReference()
ogrtest.srs_wgs84.SetFromUserInput('WGS84')
ogrtest.elasticsearch_drv = ogr.GetDriverByName('Elasticsearch')
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
yield
ogr_elasticsearch_delete_files()
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', None)
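# --- Illustrative sketch (not part of the original test suite) -------------
# With CPL_CURL_ENABLE_VSIMEM set to YES in the fixture above, the driver's
# HTTP requests are answered from the in-memory filesystem, so a test can
# fake an Elasticsearch endpoint simply by writing the canned JSON response
# to the URL-shaped /vsimem path the driver will fetch.  A hypothetical
# helper for the version handshake used throughout these tests might be:
def _mock_es_version(version_number):
    gdal.FileFromMemBuffer('/vsimem/fakeelasticsearch',
                           '{"version":{"number":"%s"}}' % version_number)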
###############################################################################
# Test writing into a nonexistent Elasticsearch datastore.
def test_ogr_elasticsearch_nonexistent_server():
with gdaltest.error_handler():
ds = ogrtest.elasticsearch_drv.CreateDataSource(
'/vsimem/nonexistent_host')
    assert ds is None, 'managed to open a nonexistent Elasticsearch datastore.'
with gdaltest.error_handler():
ds = ogrtest.elasticsearch_drv.Open('ES:/vsimem/nonexistent_host')
    assert ds is None, 'managed to open a nonexistent Elasticsearch datastore.'
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch", """{}""")
with gdaltest.error_handler():
ds = ogrtest.elasticsearch_drv.Open('ES:/vsimem/fakeelasticsearch')
assert ds is None, 'managed to open invalid Elasticsearch datastore.'
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch", """{"version":null}""")
with gdaltest.error_handler():
ds = ogrtest.elasticsearch_drv.Open('ES:/vsimem/fakeelasticsearch')
assert ds is None, 'managed to open invalid Elasticsearch datastore.'
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch", """{"version":{}}""")
with gdaltest.error_handler():
ds = ogrtest.elasticsearch_drv.Open('ES:/vsimem/fakeelasticsearch')
assert ds is None, 'managed to open invalid Elasticsearch datastore.'
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch",
"""{"version":{"number":null}}""")
with gdaltest.error_handler():
ds = ogrtest.elasticsearch_drv.Open('ES:/vsimem/fakeelasticsearch')
assert ds is None, 'managed to open invalid Elasticsearch datastore.'
###############################################################################
# Simple test
def test_ogr_elasticsearch_1():
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch",
"""{"version":{"number":"2.0.0"}}""")
ds = ogrtest.elasticsearch_drv.CreateDataSource(
"/vsimem/fakeelasticsearch")
    assert ds is not None, 'did not manage to open Elasticsearch datastore'
assert ds.TestCapability(ogr.ODsCCreateLayer) != 0
assert ds.TestCapability(ogr.ODsCDeleteLayer) != 0
assert ds.TestCapability(ogr.ODsCCreateGeomFieldAfterCreateLayer) != 0
# Failed index creation
with gdaltest.error_handler():
lyr = ds.CreateLayer('foo', srs=ogrtest.srs_wgs84, options=['FID='])
assert lyr is None
assert gdal.GetLastErrorType() == gdal.CE_Failure
gdal.ErrorReset()
# Successful index creation
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo&CUSTOMREQUEST=PUT', '{}')
lyr = ds.CreateLayer('foo', srs=ogrtest.srs_wgs84, options=['FID='])
assert lyr is not None
assert gdal.GetLastErrorType() == gdal.CE_None
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo/_mapping/FeatureCollection&POSTFIELDS'
'={ "FeatureCollection": { "properties": { "type": '
'{ "type": "string" }, "properties": { } } } }', '{}')
    # OVERWRITE a nonexistent layer.
lyr = ds.CreateLayer('foo', geom_type=ogr.wkbNone,
options=['OVERWRITE=TRUE', 'FID='])
assert gdal.GetLastErrorType() == gdal.CE_None
# Simulate failed overwrite
gdal.FileFromMemBuffer('/vsimem/fakeelasticsearch/foo',
'{"foo":{"mappings":{"FeatureCollection":{}}}}')
with gdaltest.error_handler():
lyr = ds.CreateLayer('foo', geom_type=ogr.wkbNone,
options=['OVERWRITE=TRUE'])
assert gdal.GetLastErrorType() == gdal.CE_Failure
gdal.ErrorReset()
# Successful overwrite
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo&CUSTOMREQUEST=DELETE', '{}')
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo/FeatureCollection&POSTFIELDS={ }', '{}')
lyr = ds.CreateLayer('foo', geom_type=ogr.wkbNone, options=[
'OVERWRITE=TRUE', 'BULK_INSERT=NO', 'FID='])
assert gdal.GetLastErrorType() == gdal.CE_None
assert lyr.TestCapability(ogr.OLCFastFeatureCount) != 0
assert lyr.TestCapability(ogr.OLCStringsAsUTF8) != 0
assert lyr.TestCapability(ogr.OLCSequentialWrite) != 0
assert lyr.TestCapability(ogr.OLCCreateField) != 0
assert lyr.TestCapability(ogr.OLCCreateGeomField) != 0
feat = ogr.Feature(lyr.GetLayerDefn())
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo/FeatureCollection&POSTFIELDS={ "properties": { } }', '{}')
ret = lyr.CreateFeature(feat)
assert ret == 0
feat = None
gdal.FileFromMemBuffer('/vsimem/fakeelasticsearch/foo&CUSTOMREQUEST=PUT',
'{"error":"IndexAlreadyExistsException[[foo] already exists]","status":400}')
with gdaltest.error_handler():
lyr = ds.CreateLayer('foo', srs=ogrtest.srs_wgs84)
assert gdal.GetLastErrorType() == gdal.CE_Failure
assert lyr is None
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/foo/_mapping/FeatureCollection&POSTFIELDS={ "FeatureCollection": { "properties": { "type": { "type": "string" }, "properties": { "properties": { } }, "geometry": { "type": "geo_shape" } } } }""", "")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_cat/indices?h=i""", '')
ds.DeleteLayer(-1)
ds.DeleteLayer(10)
ret = ds.DeleteLayer(0)
assert ret == 0
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo2&CUSTOMREQUEST=PUT', '{}')
gdal.FileFromMemBuffer('/vsimem/fakeelasticsearch/foo2/_mapping/FeatureCollection&POSTFIELDS={ "FeatureCollection": { "properties": { "type": { "type": "string" }, "properties": { "properties": { "str_field": { "type": "string", "index": "not_analyzed" }, "int_field": { "type": "integer", "store": "yes" }, "int64_field": { "type": "long", "index": "no" }, "real_field": { "type": "double" }, "real_field_unset": { "type": "double" }, "boolean_field": { "type": "boolean" }, "strlist_field": { "type": "string" }, "intlist_field": { "type": "integer" }, "int64list_field": { "type": "long" }, "reallist_field": { "type": "double" }, "date_field": { "type": "date", "format": "yyyy\\/MM\\/dd HH:mm:ss.SSSZZ||yyyy\\/MM\\/dd HH:mm:ss.SSS||yyyy\\/MM\\/dd" }, "datetime_field": { "type": "date", "format": "yyyy\\/MM\\/dd HH:mm:ss.SSSZZ||yyyy\\/MM\\/dd HH:mm:ss.SSS||yyyy\\/MM\\/dd" }, "time_field": { "type": "date", "format": "HH:mm:ss.SSS" }, "binary_field": { "type": "binary" } } }, "geometry": { "properties": { "type": { "type": "string" }, "coordinates": { "type": "geo_point" } } } }, "_meta": { "fields": { "strlist_field": "StringList", "intlist_field": "IntegerList", "int64list_field": "Integer64List", "reallist_field": "RealList" } } } }', '{}')
lyr = ds.CreateLayer('foo2', srs=ogrtest.srs_wgs84, geom_type=ogr.wkbPoint,
options=['BULK_INSERT=NO', 'FID=', 'STORED_FIELDS=int_field', 'NOT_ANALYZED_FIELDS=str_field', 'NOT_INDEXED_FIELDS=int64_field'])
lyr.CreateField(ogr.FieldDefn('str_field', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('int_field', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('int64_field', ogr.OFTInteger64))
lyr.CreateField(ogr.FieldDefn('real_field', ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn('real_field_unset', ogr.OFTReal))
fld_defn = ogr.FieldDefn('boolean_field', ogr.OFTInteger)
fld_defn.SetSubType(ogr.OFSTBoolean)
lyr.CreateField(fld_defn)
lyr.CreateField(ogr.FieldDefn('strlist_field', ogr.OFTStringList))
lyr.CreateField(ogr.FieldDefn('intlist_field', ogr.OFTIntegerList))
lyr.CreateField(ogr.FieldDefn('int64list_field', ogr.OFTInteger64List))
lyr.CreateField(ogr.FieldDefn('reallist_field', ogr.OFTRealList))
lyr.CreateField(ogr.FieldDefn('date_field', ogr.OFTDate))
lyr.CreateField(ogr.FieldDefn('datetime_field', ogr.OFTDateTime))
lyr.CreateField(ogr.FieldDefn('time_field', ogr.OFTTime))
lyr.CreateField(ogr.FieldDefn('binary_field', ogr.OFTBinary))
ret = lyr.SyncToDisk()
assert ret == 0
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField('str_field', 'a')
feat.SetField('int_field', 1)
feat.SetField('int64_field', 123456789012)
feat.SetField('real_field', 2.34)
feat.SetField('boolean_field', 1)
feat['strlist_field'] = ['a', 'b']
feat['intlist_field'] = [1, 2]
feat['int64list_field'] = [123456789012, 2]
feat['reallist_field'] = [1.23, 4.56]
feat['date_field'] = '2015/08/12'
feat['datetime_field'] = '2015/08/12 12:34:56.789'
feat['time_field'] = '12:34:56.789'
feat.SetFieldBinaryFromHexString('binary_field', '0123465789ABCDEF')
feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 1)'))
# Simulate server error
with gdaltest.error_handler():
ret = lyr.CreateFeature(feat)
assert ret != 0
# Success
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo2/FeatureCollection&POSTFIELDS={ "geometry": { "type": "Point", "coordinates": [ 0.0, 1.0 ] }, "type": "Feature", "properties": { "str_field": "a", "int_field": 1, "int64_field": 123456789012, "real_field": 2.34, "boolean_field": true, "strlist_field": [ "a", "b" ], "intlist_field": [ 1, 2 ], "int64list_field": [ 123456789012, 2 ], "reallist_field": [ 1.23, 4.56 ], "date_field": "2015\\/08\\/12", "datetime_field": "2015\\/08\\/12 12:34:56.789", "time_field": "12:34:56.789", "binary_field": "ASNGV4mrze8=" } }', '{ "_id": "my_id" }')
ret = lyr.CreateFeature(feat)
assert ret == 0
assert feat['_id'] == 'my_id'
# DateTime with TZ
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo2/FeatureCollection&POSTFIELDS={ "properties": { "datetime_field": "2015\\/08\\/12 12:34:56.789+03:00" } }', '{}')
feat = ogr.Feature(lyr.GetLayerDefn())
feat['datetime_field'] = '2015/08/12 12:34:56.789+0300'
ret = lyr.CreateFeature(feat)
assert ret == 0
# CreateFeature() with _id set
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo2/FeatureCollection/my_id2&POSTFIELDS={ "properties": { } }', '{}')
feat = ogr.Feature(lyr.GetLayerDefn())
feat['_id'] = 'my_id2'
ret = lyr.CreateFeature(feat)
assert ret == 0
# Failed SetFeature because of missing _id
feat = ogr.Feature(lyr.GetLayerDefn())
with gdaltest.error_handler():
ret = lyr.SetFeature(feat)
assert ret != 0
# Simulate server error
feat['_id'] = 'my_id'
with gdaltest.error_handler():
ret = lyr.SetFeature(feat)
assert ret != 0
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo2/FeatureCollection/my_id&POSTFIELDS={ "properties": { } }', '{}')
ret = lyr.SetFeature(feat)
assert ret == 0
# With explicit GEOM_MAPPING_TYPE=GEO_POINT
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo3&CUSTOMREQUEST=PUT', '{}')
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo3/_mapping/FeatureCollection&POSTFIELDS={ "FeatureCollection": { "properties": { "type": { "type": "string" }, "properties": { "properties": { } }, "geometry": { "properties": { "type": { "type": "string" }, "coordinates": { "type": "geo_point", "fielddata": { "format": "compressed", "precision": "1m" } } } } }, "_meta": { "fid": "ogc_fid" } } }', '{}')
lyr = ds.CreateLayer('foo3', srs=ogrtest.srs_wgs84, options=[
'GEOM_MAPPING_TYPE=GEO_POINT', 'GEOM_PRECISION=1m', 'BULK_INSERT=NO'])
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo3/FeatureCollection&POSTFIELDS={ "ogc_fid": 1, "geometry": { "type": "Point", "coordinates": [ 0.5, 0.5 ] }, "type": "Feature", "properties": { } }', '{}')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(0 0,1 1)'))
ret = lyr.CreateFeature(feat)
assert ret == 0
feat = None
# Test explicit MAPPING first with error case
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo4&CUSTOMREQUEST=PUT', '{}')
with gdaltest.error_handler():
lyr = ds.CreateLayer('foo4', srs=ogrtest.srs_wgs84, options=[
'MAPPING={ "FeatureCollection": { "properties": {} }}'])
assert lyr is None
    # Test successful explicit MAPPING with inline JSON mapping
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo4/_mapping/FeatureCollection&POSTFIELDS={ "FeatureCollection": { "properties": {} }}', '{}')
lyr = ds.CreateLayer('foo4', srs=ogrtest.srs_wgs84, options=[
'MAPPING={ "FeatureCollection": { "properties": {} }}'])
assert lyr is not None
# Test successful explicit MAPPING with reference to file with mapping
gdal.FileFromMemBuffer(
'/vsimem/map.txt', '{ "FeatureCollection": { "properties": { "foo": { "type": "string" } } }}')
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo4/_mapping/FeatureCollection&POSTFIELDS={ "FeatureCollection": { "properties": { "foo": { "type": "string" } } }}', '{}')
lyr = ds.CreateLayer('foo4', srs=ogrtest.srs_wgs84,
options=['MAPPING=/vsimem/map.txt'])
gdal.Unlink('/vsimem/map.txt')
assert lyr is not None
    # Test successful explicit INDEX_DEFINITION with inline JSON mapping
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo4&CUSTOMREQUEST=PUT&POSTFIELDS={}', '{}')
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo4/_mapping/FeatureCollection&POSTFIELDS={}', '{}')
lyr = ds.CreateLayer('foo4', srs=ogrtest.srs_wgs84, options=[
'INDEX_DEFINITION={}', 'MAPPING={}'])
assert lyr is not None
# Test successful explicit INDEX_DEFINITION with reference to file
gdal.FileFromMemBuffer('/vsimem/map.txt', '{"foo":"bar"}')
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo4&CUSTOMREQUEST=PUT&POSTFIELDS={"foo":"bar"}', '{}')
lyr = ds.CreateLayer('foo4', srs=ogrtest.srs_wgs84,
options=['INDEX_DEFINITION=/vsimem/map.txt', 'MAPPING={}'])
gdal.Unlink('/vsimem/map.txt')
assert lyr is not None
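# Note (illustrative, not part of the original test suite): outside of these
# mocked tests the same driver is opened with the "ES:" prefix followed by
# the real endpoint, e.g. something like ogr.Open('ES:http://localhost:9200');
# the host and port here are only assumed examples.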
###############################################################################
# Geo_shape geometries
def test_ogr_elasticsearch_2():
ds = ogrtest.elasticsearch_drv.CreateDataSource(
"/vsimem/fakeelasticsearch")
    assert ds is not None, 'did not manage to open Elasticsearch datastore'
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo&CUSTOMREQUEST=PUT', '{}')
gdal.Unlink('/vsimem/fakeelasticsearch/foo')
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo/_mapping/FeatureCollection&POSTFIELDS={ "FeatureCollection": { "properties": { "type": { "type": "string" }, "properties": { }, "geometry": { "type": "geo_shape" } } } }', '{}')
lyr = ds.CreateLayer('foo', srs=ogrtest.srs_wgs84,
options=['BULK_INSERT=NO', 'FID='])
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(ogr.CreateGeometryFromWkt(
'GEOMETRYCOLLECTION(POINT(0 1),LINESTRING(0 1,2 3),POLYGON((0 0,0 10,10 10,0 0),(1 1,1 9,9 9,1 1)),MULTIPOINT(0 1, 2 3),MULTILINESTRING((0 1,2 3),(4 5,6 7)),MULTIPOLYGON(((0 0,0 10,10 10,0 0),(1 1,1 9,9 9,1 1)),((-1 -1,-1 -9,-9 -9,-1 -1))))'))
gdal.FileFromMemBuffer('/vsimem/fakeelasticsearch/foo/FeatureCollection&POSTFIELDS={ "geometry": { "type": "geometrycollection", "geometries": [ { "type": "point", "coordinates": [ 0.0, 1.0 ] }, { "type": "linestring", "coordinates": [ [ 0.0, 1.0 ], [ 2.0, 3.0 ] ] }, { "type": "polygon", "coordinates": [ [ [ 0.0, 0.0 ], [ 0.0, 10.0 ], [ 10.0, 10.0 ], [ 0.0, 0.0 ] ], [ [ 1.0, 1.0 ], [ 1.0, 9.0 ], [ 9.0, 9.0 ], [ 1.0, 1.0 ] ] ] }, { "type": "multipoint", "coordinates": [ [ 0.0, 1.0 ], [ 2.0, 3.0 ] ] }, { "type": "multilinestring", "coordinates": [ [ [ 0.0, 1.0 ], [ 2.0, 3.0 ] ], [ [ 4.0, 5.0 ], [ 6.0, 7.0 ] ] ] }, { "type": "multipolygon", "coordinates": [ [ [ [ 0.0, 0.0 ], [ 0.0, 10.0 ], [ 10.0, 10.0 ], [ 0.0, 0.0 ] ], [ [ 1.0, 1.0 ], [ 1.0, 9.0 ], [ 9.0, 9.0 ], [ 1.0, 1.0 ] ] ], [ [ [ -1.0, -1.0 ], [ -1.0, -9.0 ], [ -9.0, -9.0 ], [ -1.0, -1.0 ] ] ] ] } ] }, "type": "Feature", "properties": { } }', '{}')
ret = lyr.CreateFeature(feat)
assert ret == 0
feat = None
# Same but with explicit GEOM_MAPPING_TYPE=GEO_SHAPE
lyr = ds.CreateLayer('foo', srs=ogrtest.srs_wgs84, options=[
'GEOM_MAPPING_TYPE=GEO_SHAPE', 'GEOM_PRECISION=1m', 'BULK_INSERT=NO', 'FID='])
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(ogr.CreateGeometryFromWkt(
'GEOMETRYCOLLECTION(POINT(0 1),LINESTRING(0 1,2 3),POLYGON((0 0,0 10,10 10,0 0),(1 1,1 9,9 9,1 1)),MULTIPOINT(0 1, 2 3),MULTILINESTRING((0 1,2 3),(4 5,6 7)),MULTIPOLYGON(((0 0,0 10,10 10,0 0),(1 1,1 9,9 9,1 1)),((-1 -1,-1 -9,-9 -9,-1 -1))))'))
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo/_mapping/FeatureCollection&POSTFIELDS={ "FeatureCollection": { "properties": { "type": { "type": "string" }, "properties": { "properties": { } }, "geometry": { "type": "geo_shape", "precision": "1m" } } } }', '{}')
ret = lyr.CreateFeature(feat)
assert ret == 0
feat = None
###############################################################################
# Test bulk insert and layer name laundering
def test_ogr_elasticsearch_3():
ds = ogrtest.elasticsearch_drv.CreateDataSource(
"/vsimem/fakeelasticsearch")
    assert ds is not None, 'did not manage to open Elasticsearch datastore'
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/name_laundering&CUSTOMREQUEST=PUT', '{}')
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/name_laundering/_mapping/FeatureCollection&POSTFIELDS={ "FeatureCollection": { "properties": { "type": { "type": "string" }, "properties": { "properties": { } }, "geometry": { "type": "geo_shape" } } } }', '{}')
lyr = ds.CreateLayer(
'NAME/laundering', srs=ogrtest.srs_wgs84, options=['FID='])
feat = ogr.Feature(lyr.GetLayerDefn())
ret = lyr.CreateFeature(feat)
assert ret == 0
feat = None
with gdaltest.error_handler():
ret = lyr.SyncToDisk()
assert ret != 0
feat = ogr.Feature(lyr.GetLayerDefn())
ret = lyr.CreateFeature(feat)
assert ret == 0
feat = None
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/_bulk&POSTFIELDS={"index" :{"_index":"name_laundering", "_type":"FeatureCollection"}}
{ "properties": { } }
""", '{}')
ret = lyr.SyncToDisk()
assert ret == 0
ds = None
###############################################################################
# Test basic read functionality
def test_ogr_elasticsearch_4():
with gdaltest.error_handler():
ds = ogr.Open('ES:/vsimem/fakeelasticsearch')
assert ds is not None
# Test case where there's no index
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_cat/indices?h=i""", '\n')
ds = ogr.Open('ES:/vsimem/fakeelasticsearch')
assert ds is not None
assert ds.GetLayerCount() == 0
# Test opening a layer by name
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/_mapping?pretty""", """
{
"a_layer":
{
"mappings":
{
"FeatureCollection":
{
"_meta": {
"fid": "my_fid",
"geomfields": {
"a_geoshape": "LINESTRING"
},
"fields": {
"strlist_field": "StringList",
"intlist_field": "IntegerList",
"int64list_field": "Integer64List",
"doublelist_field": "RealList"
}
},
"properties":
{
"type": { "type": "string" },
"a_geoshape":
{
"type": "geo_shape",
},
"a_geopoint":
{
"properties":
{
"coordinates":
{
"type": "geo_point"
}
}
},
"my_fid": { "type": "long" },
"properties" :
{
"properties":
{
"str_field": { "type": "string"},
"int_field": { "type": "integer"},
"int64_field": { "type": "long"},
"double_field": { "type": "double"},
"float_field": { "type": "float"},
"boolean_field": { "type": "boolean"},
"binary_field": { "type": "binary"},
"dt_field": { "type": "date"},
"date_field": { "type": "date", "format": "yyyy\\/MM\\/dd"},
"time_field": { "type": "date", "format": "HH:mm:ss.SSS"},
"strlist_field": { "type": "string"},
"intlist_field": { "type": "integer"},
"int64list_field": { "type": "long"},
"doublelist_field": { "type": "double"}
}
}
}
}
}
}
}
""")
ds = ogr.Open('ES:/vsimem/fakeelasticsearch')
assert ds is not None
lyr = ds.GetLayerByName('a_layer')
assert lyr is not None
lyr = ds.GetLayerByName('a_layer')
assert lyr is not None
with gdaltest.error_handler():
lyr = ds.GetLayerByName('not_a_layer')
assert lyr is None
ds = None
# Test LAYER open option
ds = gdal.OpenEx('ES:/vsimem/fakeelasticsearch',
open_options=['LAYER=a_layer'])
assert ds.GetLayerCount() == 1
ds = None
with gdaltest.error_handler():
ds = gdal.OpenEx('ES:/vsimem/fakeelasticsearch',
open_options=['LAYER=not_a_layer'])
assert ds is None
ds = None
# Test GetLayerByName() and GetLayerCount()
ds = ogr.Open('ES:/vsimem/fakeelasticsearch')
lyr = ds.GetLayerByName('a_layer')
lyr = ds.GetLayerByName('a_layer')
assert ds.GetLayerCount() == 1
ds = None
# Test GetLayerCount()
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_cat/indices?h=i""", 'a_layer \n')
ds = ogr.Open('ES:/vsimem/fakeelasticsearch')
assert ds is not None
assert ds.GetLayerCount() == 1
lyr = ds.GetLayer(0)
with gdaltest.error_handler():
lyr_defn = lyr.GetLayerDefn()
idx = lyr_defn.GetFieldIndex("strlist_field")
assert lyr_defn.GetFieldDefn(idx).GetType() == ogr.OFTStringList
idx = lyr_defn.GetGeomFieldIndex("a_geoshape")
assert lyr_defn.GetGeomFieldDefn(idx).GetType() == ogr.wkbLineString
assert lyr.GetFIDColumn() == 'my_fid'
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_count?pretty""", """{
}""")
with gdaltest.error_handler():
lyr.GetFeatureCount()
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_count?pretty""", """{
"hits": null
}""")
with gdaltest.error_handler():
lyr.GetFeatureCount()
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_count?pretty""", """{
"hits": { "count": null }
}""")
with gdaltest.error_handler():
lyr.GetFeatureCount()
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_count?pretty""", """{
"hits":
{
"count": 3
}
}""")
fc = lyr.GetFeatureCount()
assert fc == 3
with gdaltest.error_handler():
f = lyr.GetNextFeature()
assert f is None
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100""", """{
}""")
lyr.ResetReading()
f = lyr.GetNextFeature()
assert f is None
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100""", """{
"hits": null
}""")
lyr.ResetReading()
lyr.GetNextFeature()
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100""", """{
"hits": { "hits": null }
}""")
lyr.ResetReading()
lyr.GetNextFeature()
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100""", """{
"hits": { "hits": [ null, {}, { "_source":null } ] }
}""")
lyr.ResetReading()
lyr.GetNextFeature()
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100""", """{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"my_fid": 5,
"a_geopoint" : {
"type": "Point",
"coordinates": [2,49]
},
"a_geoshape": {
"type": "linestring",
"coordinates": [[2,49],[3,50]]
},
"properties": {
"str_field": "foo",
"int_field": 1,
"int64_field": 123456789012,
"double_field": 1.23,
"float_field": 3.45,
"boolean_field": true,
"binary_field": "ASNGV4mrze8=",
"dt_field": "2015\\/08\\/12 12:34:56.789",
"date_field": "2015\\/08\\/12",
"time_field": "12:34:56.789",
"strlist_field": ["foo"],
"intlist_field": [1],
"int64list_field": [123456789012],
"doublelist_field": [1.23]
}
},
}]
}
}""")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_search/scroll?scroll=1m&scroll_id=my_scrollid""", "{}")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_search/scroll?scroll_id=my_scrollid&CUSTOMREQUEST=DELETE""", '{}')
ds = ogr.Open('ES:/vsimem/fakeelasticsearch')
lyr = ds.GetLayer(0)
assert lyr.GetLayerDefn().GetFieldCount() == 15
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100""", """{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"a_geopoint" : {
"type": "Point",
"coordinates": [2,49]
},
"a_geoshape": {
"type": "linestring",
"coordinates": [[2,49],[3,50]]
},
"my_fid": 5,
"properties": {
"str_field": "foo",
"int_field": 1,
"int64_field": 123456789012,
"double_field": 1.23,
"float_field": 3.45,
"boolean_field": true,
"binary_field": "ASNGV4mrze8=",
"dt_field": "2015\\/08\\/12 12:34:56.789",
"date_field": "2015\\/08\\/12",
"time_field": "12:34:56.789",
"strlist_field": ["foo"],
"intlist_field": [1],
"int64list_field": [123456789012],
"doublelist_field": [1.23]
}
},
},
{
"_source": {
"type": "Feature",
"properties": {
"non_existing": "foo"
}
},
}
]
}
}""")
lyr.ResetReading()
f = lyr.GetNextFeature()
assert f is not None
if f.GetFID() != 5 or f['_id'] != 'my_id' or f['str_field'] != 'foo' or f['int_field'] != 1 or f['int64_field'] != 123456789012 or \
f['double_field'] != 1.23 or f['float_field'] != 3.45 or f['boolean_field'] != 1 or \
f['binary_field'] != '0123465789ABCDEF' or f['dt_field'] != '2015/08/12 12:34:56.789' or \
f['date_field'] != '2015/08/12' or f['time_field'] != '12:34:56.789' or \
f['strlist_field'] != ['foo'] or \
f['intlist_field'] != [1] or \
f['int64list_field'] != [123456789012] or \
f['doublelist_field'] != [1.23] or \
f['a_geopoint'].ExportToWkt() != 'POINT (2 49)' or \
f['a_geoshape'].ExportToWkt() != 'LINESTRING (2 49,3 50)':
f.DumpReadable()
pytest.fail()
lyr.ResetReading()
lyr.GetNextFeature()
f = lyr.GetNextFeature()
assert f is not None
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/_search/scroll?scroll=1m&scroll_id=my_scrollid""", """{
"hits":
{
"hits":[
{
"_source": {
"type": "Feature",
"properties": {
"int_field": 2,
}
},
}
]
}
}""")
f = lyr.GetNextFeature()
assert f['int_field'] == 2
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/_search/scroll?scroll=1m&scroll_id=my_scrollid""", """{
"hits":
{
"hits":[]
}
}""")
f = lyr.GetNextFeature()
assert f is None
f = lyr.GetNextFeature()
assert f is None
lyr.SetSpatialFilterRect(1, 48, 3, 50)
lyr.ResetReading()
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "geo_shape": { "a_geoshape": { "shape": { "type": "envelope", "coordinates": [ [ 1.0, 50.0 ], [ 3.0, 48.0 ] ] } } } } } } }""", """{
"hits":
{
"hits":[
{
"_source": {
"type": "Feature",
"a_geoshape" : {
"type": "Point",
"coordinates": [2,49]
},
"properties": {
"int_field": 3,
}
},
}
]
}
}""")
f = lyr.GetNextFeature()
assert f['int_field'] == 3
lyr.SetSpatialFilterRect(1, 1, 48, 3, 50)
lyr.ResetReading()
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "geo_bounding_box": { "a_geopoint.coordinates": { "top_left": { "lat": 50.0, "lon": 1.0 }, "bottom_right": { "lat": 48.0, "lon": 3.0 } } } } } } }""", """{
"hits":
{
"hits":[
{
"_source": {
"type": "Feature",
"a_geopoint" : {
"type": "Point",
"coordinates": [2,49]
},
"properties": {
"int_field": 4,
}
},
}
]
}
}""")
f = lyr.GetNextFeature()
assert f['int_field'] == 4
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?pretty&POSTFIELDS={ "size": 0, "query": { "constant_score" : { "filter": { "geo_bounding_box": { "a_geopoint.coordinates": { "top_left": { "lat": 50.0, "lon": 1.0 }, "bottom_right": { "lat": 48.0, "lon": 3.0 } } } } } } }""", """{
"hits":
{
"total": 10
}
}""")
fc = lyr.GetFeatureCount()
assert fc == 10
lyr.SetSpatialFilter(None)
lyr.SetSpatialFilterRect(-180, -90, 180, 90)
with gdaltest.error_handler():
lyr.SetSpatialFilter(-1, None)
lyr.SetSpatialFilter(2, None)
lyr.SetAttributeFilter("{ 'FOO' : 'BAR' }")
lyr.ResetReading()
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ 'FOO' : 'BAR' }""", """{
"_scroll_id": "invalid",
"hits":
{
"hits":[
{
"_source": {
"type": "Feature",
"a_geoshape" : {
"type": "Point",
"coordinates": [2,49]
},
"properties": {
"int_field": 5,
}
},
}
]
}
}""")
f = lyr.GetNextFeature()
assert f['int_field'] == 5
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?pretty&POSTFIELDS={ "size": 0, 'FOO' : 'BAR' }""", """{
"hits":
{
"total": 1234
}
}""")
assert lyr.GetFeatureCount() == 1234
lyr.SetAttributeFilter(None)
sql_lyr = ds.ExecuteSQL("{ 'FOO' : 'BAR' }", dialect='ES')
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/_search?scroll=1m&size=100&POSTFIELDS={ 'FOO' : 'BAR' }""", """{
"hits":
{
"hits":[
{
"_index": "some_layer",
"_type": "some_type",
"_source": {
"some_field": 5
},
}
]
}
}""")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/some_layer/_mapping/some_type?pretty""", """
{
"some_layer":
{
"mappings":
{
"some_type":
{
"properties":
{
"some_field": { "type": "string"}
}
}
}
}
}
""")
f = sql_lyr.GetNextFeature()
if f['some_field'] != '5':
f.DumpReadable()
pytest.fail()
ds.ReleaseResultSet(sql_lyr)
# Invalid index
with gdaltest.error_handler():
bbox = lyr.GetExtent(geom_field=-1)
# geo_shape
bbox = lyr.GetExtent(geom_field=0)
# Invalid index
with gdaltest.error_handler():
bbox = lyr.GetExtent(geom_field=2)
# No response
with gdaltest.error_handler():
bbox = lyr.GetExtent(geom_field=1)
# Invalid response
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?search_type=count&pretty&POSTFIELDS={ "aggs" : { "bbox" : { "geo_bounds" : { "field" : "a_geopoint.coordinates" } } } }""",
"""{
"aggregations" : {
"bbox" : {
"bounds" : {
"top_left" : {
},
"bottom_right" : {
}
}
}
}
}""")
with gdaltest.error_handler():
bbox = lyr.GetExtent(geom_field=1)
# Valid response
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?pretty&POSTFIELDS={ "size": 0, "aggs" : { "bbox" : { "geo_bounds" : { "field" : "a_geopoint.coordinates" } } } }""",
"""{
"aggregations" : {
"bbox" : {
"bounds" : {
"top_left" : {
"lat" : 10,
"lon" : 1
},
"bottom_right" : {
"lat" : 9,
"lon" : 2
}
}
}
}
}""")
bbox = lyr.GetExtent(geom_field=1)
assert bbox == (1.0, 2.0, 9.0, 10.0)
# Operations not available in read-only mode
with gdaltest.error_handler():
ret = lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
assert ret != 0
with gdaltest.error_handler():
ret = lyr.CreateGeomField(ogr.GeomFieldDefn('shape', ogr.wkbPoint))
assert ret != 0
with gdaltest.error_handler():
ret = lyr.CreateFeature(ogr.Feature(lyr.GetLayerDefn()))
assert ret != 0
lyr.ResetReading()
with gdaltest.error_handler():
ret = lyr.SetFeature(lyr.GetNextFeature())
assert ret != 0
with gdaltest.error_handler():
lyr = ds.CreateLayer('will_not_work')
assert lyr is None
with gdaltest.error_handler():
ret = ds.DeleteLayer(0)
assert ret != 0
###############################################################################
# Write documents with a non-GeoJSON structure
def test_ogr_elasticsearch_5():
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch/_stats",
"""{"_shards":{"total":0,"successful":0,"failed":0},"indices":{}}""")
ds = ogrtest.elasticsearch_drv.CreateDataSource(
"/vsimem/fakeelasticsearch")
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/non_geojson&CUSTOMREQUEST=PUT', '')
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/non_geojson/_mapping/my_mapping&POSTFIELDS={ "my_mapping": { "properties": { "str": { "type": "string", "store": "yes" }, "geometry": { "type": "geo_shape" } }, "_meta": { "fid": "ogc_fid" } } }', '{}')
lyr = ds.CreateLayer('non_geojson', srs=ogrtest.srs_wgs84, options=[
'MAPPING_NAME=my_mapping', 'BULK_INSERT=NO', 'STORE_FIELDS=YES'])
lyr.CreateField(ogr.FieldDefn('str', ogr.OFTString))
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetFID(5)
feat['str'] = 'foo'
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/non_geojson/my_mapping&POSTFIELDS={ "ogc_fid": 5, "str": "foo" }', '{}')
ret = lyr.CreateFeature(feat)
assert ret == 0
feat = None
ds = None
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_cat/indices?h=i""", 'non_geojson\n')
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/non_geojson/_mapping?pretty""", """
{
"non_geojson":
{
"mappings":
{
"my_mapping":
{
"properties":
{
"a_geoshape":
{
"type": "geo_shape",
},
"a_geopoint":
{
"properties":
{
"type": "string",
"coordinates":
{
"type": "geo_point"
}
}
},
"another_geopoint": { "type": "geo_point" },
"str_field": { "type": "string"},
"superobject": {
"properties": {
"subfield": { "type": "string" },
"subobject": {
"properties": {
"subfield": { "type": "string" }
}
},
"another_geoshape": { "type": "geo_shape" }
}
}
}
}
}
}
}
""")
ds = gdal.OpenEx("ES:/vsimem/fakeelasticsearch",
gdal.OF_UPDATE, open_options=['BULK_INSERT=NO'])
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/non_geojson/my_mapping/_search?scroll=1m&size=100""", """{
"hits":
{
"hits":[
{
"_source": {
"a_geopoint" : {
"type": "Point",
"coordinates": [2,49]
},
"a_geoshape": {
"type": "linestring",
"coordinates": [[2,49],[3,50]]
},
"another_geopoint": "49.5,2.5",
"str_field": "foo",
"superobject": {
"subfield": 5,
"subobject":
{
"subfield": "foo",
"another_subfield": 6
},
"another_geoshape": {
"type": "point",
"coordinates": [3,50]
},
"another_geoshape2": {
"type": "point",
"coordinates": [2,50]
}
}
}
},
{
"_source": {
"another_field": "foo",
"another_geopoint": { "lat": 49.1, "lon": 2.1 }
}
},
{
"_source": {
"another_geopoint": "49.2,2.2"
}
},
{
"_source": {""" +
# "this is the geohash format",
""" "another_geopoint": "u09qv80meqh16ve02equ"
}
}]
}
}""")
index = lyr.GetLayerDefn().GetFieldIndex('another_field')
assert index >= 0
f = lyr.GetNextFeature()
if f['str_field'] != 'foo' or \
f['superobject.subfield'] != '5' or \
f['superobject.subobject.subfield'] != 'foo' or \
f['superobject.subobject.another_subfield'] != 6 or \
f['a_geopoint'].ExportToWkt() != 'POINT (2 49)' or \
f['another_geopoint'].ExportToWkt() != 'POINT (2.5 49.5)' or \
f['a_geoshape'].ExportToWkt() != 'LINESTRING (2 49,3 50)' or \
f['superobject.another_geoshape'].ExportToWkt() != 'POINT (3 50)' or \
f['superobject.another_geoshape2'].ExportToWkt() != 'POINT (2 50)':
f.DumpReadable()
pytest.fail()
f['_id'] = 'my_id'
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/non_geojson/my_mapping/my_id&POSTFIELDS={ "a_geoshape": { "type": "linestring", "coordinates": [ [ 2.0, 49.0 ], [ 3.0, 50.0 ] ] }, "a_geopoint": { "type": "Point", "coordinates": [ 2.0, 49.0 ] }, "another_geopoint": [ 2.5, 49.5 ], "superobject": { "another_geoshape": { "type": "point", "coordinates": [ 3.0, 50.0 ] }, "another_geoshape2": { "type": "point", "coordinates": [ 2.0, 50.0 ] }, "subfield": "5", "subobject": { "subfield": "foo", "another_subfield": 6 } }, "str_field": "foo" }""", "{}")
ret = lyr.SetFeature(f)
assert ret == 0
f = lyr.GetNextFeature()
if f['another_geopoint'].ExportToWkt() != 'POINT (2.1 49.1)':
f.DumpReadable()
pytest.fail()
f = lyr.GetNextFeature()
if f['another_geopoint'].ExportToWkt() != 'POINT (2.2 49.2)':
f.DumpReadable()
pytest.fail()
# Test geohash
f = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(f['another_geopoint'], 'POINT (2 49)') != 0:
f.DumpReadable()
pytest.fail()
f = None
lyr.CreateField(ogr.FieldDefn('superobject.subfield2', ogr.OFTString))
with gdaltest.error_handler():
lyr.CreateGeomField(ogr.GeomFieldDefn(
'superobject.another_geoshape3', ogr.wkbPoint))
f = ogr.Feature(lyr.GetLayerDefn())
f['superobject.subfield2'] = 'foo'
f['superobject.another_geoshape3'] = ogr.CreateGeometryFromWkt(
'POINT (3 50)')
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/non_geojson/_mapping/my_mapping&POSTFIELDS={ "my_mapping": { "properties": { "str_field": { "type": "string" }, "superobject": { "properties": { "subfield": { "type": "string" }, "subobject": { "properties": { "subfield": { "type": "string" }, "another_subfield": { "type": "integer" } } }, "subfield2": { "type": "string" }, "another_geoshape": { "type": "geo_shape" }, "another_geoshape2": { "type": "geo_shape" }, "another_geoshape3": { "properties": { "type": { "type": "string" }, "coordinates": { "type": "geo_point" } } } } }, "another_field": { "type": "string" }, "a_geoshape": { "type": "geo_shape" }, "a_geopoint": { "properties": { "type": { "type": "string" }, "coordinates": { "type": "geo_point" } } }, "another_geopoint": { "type": "geo_point" } }, "_meta": { "geomfields": { "superobject.another_geoshape2": "Point" } } } }""", '{}')
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/non_geojson/my_mapping&POSTFIELDS={ "superobject": { "another_geoshape3": { "type": "Point", "coordinates": [ 3.0, 50.0 ] }, "subfield2": "foo" } }""", "{}")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/non_geojson/my_mapping/_count?pretty""", "{}")
lyr.CreateFeature(f)
ds = gdal.OpenEx("ES:/vsimem/fakeelasticsearch",
open_options=['FEATURE_COUNT_TO_ESTABLISH_FEATURE_DEFN=0'])
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f['str_field'] != 'foo' or \
f['superobject.subfield'] != '5' or \
f['a_geopoint'].ExportToWkt() != 'POINT (2 49)' or \
f['a_geoshape'].ExportToWkt() != 'LINESTRING (2 49,3 50)' or \
f['superobject.another_geoshape'].ExportToWkt() != 'POINT (3 50)':
f.DumpReadable()
pytest.fail()
ds = gdal.OpenEx("ES:/vsimem/fakeelasticsearch", open_options=[
'FEATURE_COUNT_TO_ESTABLISH_FEATURE_DEFN=0', 'FLATTEN_NESTED_ATTRIBUTES=FALSE'])
lyr = ds.GetLayer(0)
index = lyr.GetLayerDefn().GetFieldIndex('another_field')
assert index < 0
f = lyr.GetNextFeature()
if f['str_field'] != 'foo' or \
f['superobject'] != '{ "subfield": 5, "subobject": { "subfield": "foo", "another_subfield": 6 }, "another_geoshape": { "type": "point", "coordinates": [ 3, 50 ] }, "another_geoshape2": { "type": "point", "coordinates": [ 2, 50 ] } }' or \
f['a_geopoint'].ExportToWkt() != 'POINT (2 49)' or \
f['a_geoshape'].ExportToWkt() != 'LINESTRING (2 49,3 50)':
f.DumpReadable()
pytest.fail()
ds = gdal.OpenEx("ES:/vsimem/fakeelasticsearch",
gdal.OF_UPDATE, open_options=['JSON_FIELD=YES'])
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f['str_field'] != 'foo' or \
f['superobject.subfield'] != '5' or \
f['_json'].find('{') != 0 or \
f['a_geopoint'].ExportToWkt() != 'POINT (2 49)' or \
f['a_geoshape'].ExportToWkt() != 'LINESTRING (2 49,3 50)' or \
f['superobject.another_geoshape'].ExportToWkt() != 'POINT (3 50)':
f.DumpReadable()
pytest.fail()
f['_id'] = 'my_id'
f['_json'] = '{ "foo": "bar" }'
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/non_geojson/my_mapping/my_id&POSTFIELDS={ "foo": "bar" }""", "{}")
ret = lyr.SetFeature(f)
assert ret == 0
###############################################################################
# Test reading circle and envelope geometries
def test_ogr_elasticsearch_6():
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_cat/indices?h=i""", 'non_standard_geometries\n')
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/non_standard_geometries/_mapping?pretty""", """
{
"non_standard_geometries":
{
"mappings":
{
"my_mapping":
{
"properties":
{
"geometry":
{
"type": "geo_shape",
}
}
}
}
}
}
""")
ds = gdal.OpenEx("ES:/vsimem/fakeelasticsearch")
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/non_standard_geometries/my_mapping/_search?scroll=1m&size=100""", """{
"hits":
{
"hits":[
{
"_source": {
"geometry": {
"type": "envelope",
"coordinates": [[2,49],[3,50]]
}
}
},
{
"_source": {
"geometry": {
"type": "circle",
"coordinates": [2,49],
"radius": 100
}
}
},
{
"_source": {
"geometry": {
"type": "circle",
"coordinates": [2,49],
"radius": "100m"
}
}
},
{
"_source": {
"geometry": {
"type": "circle",
"coordinates": [2,49],
"radius": "0.1km"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
if f['geometry'].ExportToWkt() != 'POLYGON ((2 49,3 49,3 50,2 50,2 49))':
f.DumpReadable()
pytest.fail()
f = lyr.GetNextFeature()
ref_txt = f['geometry'].ExportToWkt()
if not ref_txt.startswith('POLYGON (('):
f.DumpReadable()
pytest.fail()
f = lyr.GetNextFeature()
if f['geometry'].ExportToWkt() != ref_txt:
f.DumpReadable()
pytest.fail()
f = lyr.GetNextFeature()
if f['geometry'].ExportToWkt() != ref_txt:
f.DumpReadable()
pytest.fail()
###############################################################################
# Test WRITE_MAPPING option
def test_ogr_elasticsearch_7():
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch/_stats",
"""{"_shards":{"total":0,"successful":0,"failed":0},"indices":{}}""")
ds = ogrtest.elasticsearch_drv.CreateDataSource(
"/vsimem/fakeelasticsearch")
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/test_write_mapping&CUSTOMREQUEST=PUT', '{}')
lyr = ds.CreateLayer('test_write_mapping', srs=ogrtest.srs_wgs84, options=[
'WRITE_MAPPING=/vsimem/map.txt', 'FID='])
f = ogr.Feature(lyr.GetLayerDefn())
lyr.CreateFeature(f)
ds = None
f = gdal.VSIFOpenL('/vsimem/map.txt', 'rb')
assert f is not None
data = gdal.VSIFReadL(1, 10000, f).decode('ascii')
gdal.VSIFCloseL(f)
gdal.Unlink('/vsimem/map.txt')
assert data == '{ "FeatureCollection": { "properties": { "type": { "type": "string" }, "properties": { "properties": { } }, "geometry": { "type": "geo_shape" } } } }'
###############################################################################
# Test SRS support
def test_ogr_elasticsearch_8():
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch/_stats",
"""{"_shards":{"total":0,"successful":0,"failed":0},"indices":{}}""")
ds = ogrtest.elasticsearch_drv.CreateDataSource(
"/vsimem/fakeelasticsearch")
# No SRS
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/no_srs&CUSTOMREQUEST=PUT', '{}')
# Will emit a warning
gdal.ErrorReset()
with gdaltest.error_handler():
lyr = ds.CreateLayer('no_srs')
assert gdal.GetLastErrorType() == gdal.CE_Warning, 'warning expected'
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT (-100 -200)'))
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/no_srs/_mapping/FeatureCollection&POSTFIELDS={ "FeatureCollection": { "properties": { "type": { "type": "string" }, "properties": { "properties": { } }, "geometry": { "type": "geo_shape" } }, "_meta": { "fid": "ogc_fid" } } }""", '{}')
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/_bulk&POSTFIELDS={"index" :{"_index":"no_srs", "_type":"FeatureCollection"}}
{ "ogc_fid": 1, "geometry": { "type": "point", "coordinates": [ -100.0, -200.0 ] }, "type": "Feature", "properties": { } }
""", "{}")
# Will emit a warning
gdal.ErrorReset()
with gdaltest.error_handler():
ret = lyr.CreateFeature(f)
assert gdal.GetLastErrorType() == gdal.CE_Warning, 'warning expected'
assert ret == 0
# Non EPSG-4326 SRS
other_srs = osr.SpatialReference()
other_srs.ImportFromEPSG(32631)
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/other_srs&CUSTOMREQUEST=PUT', "{}")
lyr = ds.CreateLayer('other_srs', srs=other_srs)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT (500000 0)'))
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/other_srs/_mapping/FeatureCollection&POSTFIELDS={ "FeatureCollection": { "properties": { "type": { "type": "string" }, "properties": { "properties": { } }, "geometry": { "type": "geo_shape" } }, "_meta": { "fid": "ogc_fid" } } }""", '{}')
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/_bulk&POSTFIELDS={"index" :{"_index":"other_srs", "_type":"FeatureCollection"}}
{ "ogc_fid": 1, "geometry": { "type": "point", "coordinates": [ 3.0, 0.0 ] }, "type": "Feature", "properties": { } }
""", "{}")
ret = lyr.CreateFeature(f)
assert ret == 0
###############################################################################
# Test Elasticsearch 5.X
def test_ogr_elasticsearch_9():
ogr_elasticsearch_delete_files()
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch",
"""{"version":{"number":"5.0.0"}}""")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_cat/indices?h=i""", 'a_layer \n')
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/_mapping?pretty""", """
{
"a_layer":
{
"mappings":
{
"FeatureCollection":
{
"properties":
{
"type": { "type": "text" },
"a_geoshape":
{
"type": "geo_shape",
},
"properties" :
{
"properties":
{
"str_field": { "type": "text"}
}
}
}
}
}
}
}
""")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100""", """{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"a_geoshape": {
"type": "point",
"coordinates": [2.5,49.5]
},
"str_field": "foo"
}
}
}]
}
}""")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_search/scroll?scroll=1m&scroll_id=my_scrollid""", """{}""")
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/_search/scroll?scroll_id=my_scrollid&CUSTOMREQUEST=DELETE', '{}')
ds = ogr.Open('ES:/vsimem/fakeelasticsearch')
lyr = ds.GetLayer(0)
lyr.SetSpatialFilterRect(2, 49, 3, 50)
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_count?pretty&POSTFIELDS={ "query": { "constant_score" : { "filter": { "geo_shape": { "a_geoshape": { "shape": { "type": "envelope", "coordinates": [ [ 2.0, 50.0 ], [ 3.0, 49.0 ] ] } } } } } } }""",
"""{
"count" : 2
}""")
count = lyr.GetFeatureCount()
assert count == 2
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "geo_shape": { "a_geoshape": { "shape": { "type": "envelope", "coordinates": [ [ 2.0, 50.0 ], [ 3.0, 49.0 ] ] } } } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"a_geoshape": {
"type": "point",
"coordinates": [2.5,49.5]
},
"properties": {
"str_field": "foo"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
###############################################################################
# Test SQL
def test_ogr_elasticsearch_10():
ogr_elasticsearch_delete_files()
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch",
"""{"version":{"number":"5.0.0"}}""")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_cat/indices?h=i""", 'a_layer \n')
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/_mapping?pretty""", """
{
"a_layer":
{
"mappings":
{
"FeatureCollection":
{
"properties":
{
"type": { "type": "text" },
"a_geoshape":
{
"type": "geo_shape",
},
"properties" :
{
"properties":
{
"text_field": { "type": "text"},
"text_field_with_raw": { "type": "text", "fields" : { "raw" : { "type": "keyword" } } },
"keyword_field": { "type": "keyword"},
"int_field": { "type": "integer"},
"long_field": { "type": "long"},
"double_field": { "type": "double"},
"dt_field": { "type": "date"},
"date_field": { "type": "date", "format": "yyyy\\/MM\\/dd"},
"time_field": { "type": "date", "format": "HH:mm:ss.SSS"},
}
}
}
}
}
}
}
""")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100""", """{}""")
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/_search/scroll?scroll_id=my_scrollid&CUSTOMREQUEST=DELETE', '{}')
ds = ogr.Open('ES:/vsimem/fakeelasticsearch')
lyr = ds.GetLayer(0)
lyr.SetAttributeFilter(
"keyword_field = 'foo' AND keyword_field IS NOT NULL")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "bool": { "must": [ { "term": { "properties.keyword_field": "foo" } }, { "exists": { "field": "properties.keyword_field" } } ] } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"keyword_field": "foo"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("text_field = 'foo'")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "match": { "properties.text_field": "foo" } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"text_field": "foo"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("CAST(text_field AS CHARACTER) = 'foo_cast'")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "match": { "properties.text_field": "foo_cast" } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"text_field": "foo_cast"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("text_field_with_raw = 'foo'")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "term": { "properties.text_field_with_raw.raw": "foo" } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"text_field_with_raw": "foo"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("\"_id\" = 'my_id2'")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "ids": { "values": [ "my_id2" ] } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id2",
"_source": {
"type": "Feature",
"properties": {
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("keyword_field != 'foo'")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "bool": { "must_not": { "term": { "properties.keyword_field": "foo" } } } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"keyword_field": "bar"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("keyword_field IS NULL")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "bool": { "must_not": { "exists": { "field": "properties.keyword_field" } } } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("keyword_field BETWEEN 'bar' AND 'foo'")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "range": { "properties.keyword_field": { "gte": "bar", "lte": "foo" } } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"keyword_field": "baz"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("keyword_field IN ('foo', 'bar')")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "terms": { "properties.keyword_field": [ "foo", "bar" ] } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"keyword_field": "foo"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("text_field IN ('foo', 'bar')")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "bool": { "should": [ { "match": { "properties.text_field": "foo" } }, { "match": { "properties.text_field": "bar" } } ] } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"text_field": "foo"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("text_field_with_raw IN ('foo', 'bar')")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "terms": { "properties.text_field_with_raw.raw": [ "foo", "bar" ] } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"text_field_with_raw": "foo"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("\"_id\" IN ('my_id', 'bar')")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "ids": { "values": [ "my_id", "bar" ] } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"text_field": "foo"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter(
"int_field >= 2 OR long_field >= 9876543210 OR double_field <= 3.123456")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "bool": { "should": [ { "bool": { "should": [ { "range": { "properties.int_field": { "gte": 2 } } }, { "range": { "properties.long_field": { "gte": 9876543210 } } } ] } }, { "range": { "properties.double_field": { "lte": 3.123456 } } } ] } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"double_field": 3,
"int_field": 2,
"long_field": 9876543210
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("dt_field > '2016/01/01 12:34:56.123'")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "range": { "properties.dt_field": { "gt": "2016\\/01\\/01 12:34:56.123" } } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"dt_field": '2016/01/01 12:34:56.124'
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("NOT dt_field < '2016/01/01 12:34:56.123'")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "bool": { "must_not": { "range": { "properties.dt_field": { "lt": "2016\\/01\\/01 12:34:56.123" } } } } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"dt_field": '2016/01/01 12:34:56.123'
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("keyword_field LIKE '_o%'")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "wildcard": { "properties.keyword_field": "?o*" } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"keyword_field": "foo"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
# Evaluated client-side since the pattern uses ? or *
lyr.SetAttributeFilter("text_field LIKE '?*'")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"text_field": "?*"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
# Evaluated client-side since the field is analyzed
lyr.SetAttributeFilter("text_field LIKE '_Z%'")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"text_field": "fZo"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("text_field_with_raw LIKE '_xo%' ESCAPE 'x'")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "wildcard": { "properties.text_field_with_raw.raw": "?o*" } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"text_field_with_raw": "foo"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("keyword_field = 'foo' AND 1 = 1")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "term": { "properties.keyword_field": "foo" } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"keyword_field": "foo"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("1 = 1 AND keyword_field = 'foo'")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("keyword_field = 'bar' OR 1 = 0")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"keyword_field": "bar"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
lyr.SetAttributeFilter("keyword_field = 'foo2'")
lyr.SetSpatialFilterRect(2, 49, 2, 49)
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "bool" : { "must" : [{ "geo_shape": { "a_geoshape": { "shape": { "type": "envelope", "coordinates": [ [ 2.0, 49.0 ], [ 2.0, 49.0 ] ] } } } }, { "term": { "properties.keyword_field": "foo2" } }] } } } } }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"a_geoshape": {
"type": "point",
"coordinates": [2.0,49.0]
},
"properties": {
"keyword_field": "foo2"
}
}
}]
}
}""")
f = lyr.GetNextFeature()
assert f is not None
# SQL with WHERE
sql_lyr = ds.ExecuteSQL(
"SELECT * FROM a_layer WHERE keyword_field = 'foo'")
f = sql_lyr.GetNextFeature()
assert f is not None
ds.ReleaseResultSet(sql_lyr)
# SQL with WHERE and ORDER BY
sql_lyr = ds.ExecuteSQL(
"SELECT * FROM a_layer WHERE keyword_field = 'foo' ORDER BY keyword_field, int_field DESC, \"_id\"")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "query": { "constant_score" : { "filter": { "term": { "properties.keyword_field": "foo" } } } }, "sort" : [ { "properties.keyword_field": { "order": "asc" } }, { "properties.int_field": { "order": "desc" } }, { "_uid": { "order": "asc" } } ] }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"keyword_field": "foo"
}
}
}]
}
}""")
f = sql_lyr.GetNextFeature()
assert f is not None
ds.ReleaseResultSet(sql_lyr)
# SQL with ORDER BY only
sql_lyr = ds.ExecuteSQL("SELECT * FROM a_layer ORDER BY keyword_field")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "sort": [ { "properties.keyword_field": { "order": "asc" } } ] }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"keyword_field": "foo"
}
}
}]
}
}""")
f = sql_lyr.GetNextFeature()
assert f is not None
ds.ReleaseResultSet(sql_lyr)
# SQL with ORDER BY on a text field with a raw sub-field
sql_lyr = ds.ExecuteSQL(
"SELECT * FROM a_layer ORDER BY text_field_with_raw")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100&POSTFIELDS={ "sort": [ { "properties.text_field_with_raw.raw": { "order": "asc" } } ] }""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
}
}
}]
}
}""")
f = sql_lyr.GetNextFeature()
assert f is not None
ds.ReleaseResultSet(sql_lyr)
###############################################################################
# Test isnull and unset
def test_ogr_elasticsearch_11():
ogr_elasticsearch_delete_files()
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch",
"""{"version":{"number":"5.0.0"}}""")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_cat/indices?h=i""", 'a_layer \n')
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/_mapping?pretty""", """
{
"a_layer":
{
"mappings":
{
"FeatureCollection":
{
"properties":
{
"type": { "type": "text" },
"properties" :
{
"properties":
{
"str_field": { "type": "text"}
}
}
}
}
}
}
}
""")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100""", """{}""")
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/_search/scroll?scroll_id=my_scrollid&CUSTOMREQUEST=DELETE', '{}')
ds = ogr.Open('ES:/vsimem/fakeelasticsearch', update=1)
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_search?scroll=1m&size=100""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"str_field": "foo"
}
}
},
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
"str_field": null
}
}
},
{
"_id": "my_id",
"_source": {
"type": "Feature",
"properties": {
}
}
}
]
}
}""")
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/_search/scroll?scroll=1m&scroll_id=my_scrollid', '{}')
f = lyr.GetNextFeature()
if f['str_field'] != 'foo':
f.DumpReadable()
pytest.fail()
f = lyr.GetNextFeature()
if f['str_field'] is not None:
f.DumpReadable()
pytest.fail()
f = lyr.GetNextFeature()
if f.IsFieldSet('str_field'):
f.DumpReadable()
pytest.fail()
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/FeatureCollection/_count?pretty""", """{
"hits":
{
"count": 0
}
}""")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/_bulk&POSTFIELDS={"index" :{"_index":"a_layer", "_type":"FeatureCollection"}}
{ "properties": { "str_field": null } }
{"index" :{"_index":"a_layer", "_type":"FeatureCollection"}}
{ "properties": { } }
""", '{}')
f = ogr.Feature(lyr.GetLayerDefn())
f.SetFieldNull('str_field')
ret = lyr.CreateFeature(f)
assert ret == 0
f = None
f = ogr.Feature(lyr.GetLayerDefn())
ret = lyr.CreateFeature(f)
assert ret == 0
f = None
assert lyr.SyncToDisk() == 0
###############################################################################
# Test Elasticsearch 7.x (ignore MAPPING_NAME)
def test_ogr_elasticsearch_12():
ogr_elasticsearch_delete_files()
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch",
"""{"version":{"number":"7.0.0"}}""")
ds = ogrtest.elasticsearch_drv.CreateDataSource(
"/vsimem/fakeelasticsearch")
assert ds is not None
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/foo&CUSTOMREQUEST=PUT', '{}')
lyr = ds.CreateLayer('foo', srs=ogrtest.srs_wgs84, options=[
'WRITE_MAPPING=/vsimem/map.txt', 'FID='])
assert lyr is not None
f = ogr.Feature(lyr.GetLayerDefn())
lyr.CreateFeature(f)
ds = None
f = gdal.VSIFOpenL('/vsimem/map.txt', 'rb')
assert f is not None
data = gdal.VSIFReadL(1, 10000, f).decode('ascii')
gdal.VSIFCloseL(f)
gdal.Unlink('/vsimem/map.txt')
assert data == '{ "properties": { "geometry": { "type": "geo_shape" } } }'
###############################################################################
# Test authentication
def test_ogr_elasticsearch_authentication():
ogr_elasticsearch_delete_files()
gdal.FileFromMemBuffer(
"/vsimem/fakeelasticsearch&USERPWD=user:pwd", """{"version":{"number":"5.0.0"}}""")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_cat/indices?h=i&USERPWD=user:pwd""", 'a_layer \n')
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/a_layer/_mapping?pretty&USERPWD=user:pwd""", """
{
"a_layer":
{
"mappings":
{
"FeatureCollection":
{
"properties":
{
"type": { "type": "text" },
"properties" :
{
"properties":
{
"str_field": { "type": "text"}
}
}
}
}
}
}
}
""")
ds = gdal.OpenEx('ES:/vsimem/fakeelasticsearch',
open_options=['USERPWD=user:pwd'])
assert ds is not None
###############################################################################
# Test FORWARD_HTTP_HEADERS_FROM_ENV
def test_ogr_elasticsearch_http_headers_from_env():
ogr_elasticsearch_delete_files()
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch&HEADERS=Bar: value_of_bar\nFoo: value_of_foo\n",
"""{"version":{"number":"5.0.0"}}""")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_cat/indices?h=i&HEADERS=Bar: value_of_bar\nFoo: value_of_foo\n""", '')
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/_search?scroll=1m&size=100&POSTFIELDS={ 'FOO' : 'BAR' }&HEADERS=Content-Type: application/json; charset=UTF-8\nBar: value_of_bar\nFoo: value_of_foo\n""", """{
"hits":
{
"hits":[
{
"_index": "some_layer",
"_type": "some_type",
"_source": {
"some_field": 5
},
}
]
}
}""")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/some_layer/_mapping/some_type?pretty&HEADERS=Bar: value_of_bar\nFoo: value_of_foo\n""", """
{
"some_layer":
{
"mappings":
{
"some_type":
{
"properties":
{
"some_field": { "type": "string"}
}
}
}
}
}
""")
with gdaltest.config_options({ 'CPL_CURL_VSIMEM_PRINT_HEADERS': 'YES',
'FOO': 'value_of_foo',
'BAR': 'value_of_bar' }):
ds = gdal.OpenEx('ES:/vsimem/fakeelasticsearch',
open_options=['FORWARD_HTTP_HEADERS_FROM_ENV=Foo=FOO,Bar=BAR,Baz=I_AM_NOT_SET'])
assert ds is not None
sql_lyr = ds.ExecuteSQL("{ 'FOO' : 'BAR' }", dialect='ES')
f = sql_lyr.GetNextFeature()
assert f['some_field'] == '5'
ds.ReleaseResultSet(sql_lyr)
###############################################################################
# Test GeoShape WKT support
def test_ogr_elasticsearch_geo_shape_wkt():
ogr_elasticsearch_delete_files()
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch",
"""{"version":{"number":"7.0.0"}}""")
ds = ogrtest.elasticsearch_drv.CreateDataSource(
"/vsimem/fakeelasticsearch")
assert ds is not None
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/geo_shape_wkt&CUSTOMREQUEST=PUT', "{}")
lyr = ds.CreateLayer('geo_shape_wkt', srs=ogrtest.srs_wgs84, options=['GEO_SHAPE_ENCODING=WKT'])
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49)'))
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/geo_shape_wkt/_mapping&POSTFIELDS={ "properties": { "geometry": { "type": "geo_shape" } }, "_meta": { "fid": "ogc_fid" } }""", '{}')
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/_bulk&POSTFIELDS={"index" :{"_index":"geo_shape_wkt"}}
{ "ogc_fid": 1, "geometry": "POINT (2 49)" }
""", "{}")
ret = lyr.CreateFeature(f)
assert ret == 0
lyr.ResetReading()
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/geo_shape_wkt/_search?scroll=1m&size=100""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_id": "my_id",
"_source": {
"geometry": "POINT (2 49)"
}
}
]
}
}""")
gdal.FileFromMemBuffer(
'/vsimem/fakeelasticsearch/_search/scroll?scroll_id=my_scrollid&CUSTOMREQUEST=DELETE', '{}')
f = lyr.GetNextFeature()
assert f.GetGeometryRef().ExportToWkt() == 'POINT (2 49)'
###############################################################################
# Test _TIMEOUT / _TERMINATE_AFTER
def test_ogr_elasticsearch_timeout_terminate_after():
ogr_elasticsearch_delete_files()
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch",
"""{"version":{"number":"7.0.0"}}""")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_cat/indices?h=i""", 'some_layer\n')
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/_search?scroll=1m&size=100&POSTFIELDS={ 'FOO' : 'BAR' }""", """{
"hits":
{
"hits":[
{
"_index": "some_layer",
"_source": {
"some_field": 5,
"geometry": [2, 49]
},
}
]
}
}""")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/some_layer/_mapping?pretty""", """
{
"some_layer":
{
"mappings":
{
"properties":
{
"some_field": { "type": "string", "index": "not_analyzed" },
"geometry": { "type": "geo_point" },
}
}
}
}
""")
ds = gdal.OpenEx('ES:/vsimem/fakeelasticsearch',
open_options=['SINGLE_QUERY_TERMINATE_AFTER=10', 'SINGLE_QUERY_TIMEOUT=0.5', 'FEATURE_ITERATION_TERMINATE_AFTER=2', 'FEATURE_ITERATION_TIMEOUT=0.1' ])
assert ds is not None
sql_lyr = ds.ExecuteSQL("{ 'FOO' : 'BAR' }", dialect='ES')
f = sql_lyr.GetNextFeature()
assert f['some_field'] == '5'
assert f.GetGeometryRef().ExportToWkt() == 'POINT (2 49)'
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/_search?pretty&timeout=500ms&terminate_after=10&POSTFIELDS={ "size": 0 , 'FOO' : 'BAR' }""", """
{
"took" : 1,
"timed_out" : false,
"terminated_early" : true,
"hits" : {
"total" : {
"value" : 4,
"relation" : "eq"
},
"max_score" : null,
"hits" : [ ]
}
}
""")
assert sql_lyr.GetFeatureCount() == 4
ds.ReleaseResultSet(sql_lyr)
sql_lyr = None
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/some_layer/_search?scroll=1m&size=100""", """{
"hits":
{
"hits":[
{
"_source": {
"some_field": 5,
"geometry": [2, 49]
},
},
{
"_source": {
"some_field": 7,
"geometry": [2, 49]
},
},
{
"_source": {
"some_field": 8,
"geometry": [2, 49]
},
}
]
}
}""")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/some_layer/_search?pretty&timeout=500ms&terminate_after=10&POSTFIELDS={ "size": 0 }""", """
{
"took" : 1,
"timed_out" : false,
"terminated_early" : true,
"hits" : {
"total" : {
"value" : 2,
"relation" : "eq"
},
"max_score" : null,
"hits" : [ ]
}
}
""")
assert lyr.GetFeatureCount() == 2
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/some_layer/_search?pretty&timeout=500ms&terminate_after=10&POSTFIELDS={ "size": 0, "query": { "constant_score" : { "filter": { "term": { "some_field": "6" } } } } }""", """
{
"took" : 1,
"timed_out" : false,
"terminated_early" : true,
"hits" : {
"total" : {
"value" : 3,
"relation" : "eq"
},
"max_score" : null,
"hits" : [ ]
}
}
""")
lyr.SetAttributeFilter( "some_field = '6'" )
assert lyr.GetFeatureCount() == 3
lyr.SetAttributeFilter(None)
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/some_layer/_search?pretty&timeout=500ms&terminate_after=10&POSTFIELDS={ "size": 0, "foo": "bar" }""", """
{
"took" : 1,
"timed_out" : false,
"terminated_early" : true,
"hits" : {
"total" : {
"value" : 4,
"relation" : "eq"
},
"max_score" : null,
"hits" : [ ]
}
}
""")
lyr.SetAttributeFilter( '{ "foo": "bar" }' )
assert lyr.GetFeatureCount() == 4
lyr.SetAttributeFilter(None)
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/some_layer/_search?pretty&timeout=500ms&terminate_after=10&POSTFIELDS={ "size": 0, "aggs" : { "bbox" : { "geo_bounds" : { "field" : "geometry" } } } }""", """
{
"aggregations" : {
"bbox" : {
"bounds" : {
"top_left" : {
"lat" : 10,
"lon" : 1
},
"bottom_right" : {
"lat" : 9,
"lon" : 2
}
}
}
}
}""")
bbox = lyr.GetExtent()
assert bbox == (1.0, 2.0, 9.0, 10.0)
# Check FEATURE_ITERATION_TERMINATE_AFTER
lyr.ResetReading()
assert lyr.GetNextFeature() is not None
assert lyr.GetNextFeature() is not None
assert lyr.GetNextFeature() is None
# Check FEATURE_ITERATION_TIMEOUT
lyr.ResetReading()
assert lyr.GetNextFeature() is not None
time.sleep(0.15)
assert lyr.GetNextFeature() is None
###############################################################################
# Test aggregation
def test_ogr_elasticsearch_aggregation_minimum():
ogr_elasticsearch_delete_files()
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch",
"""{"version":{"number":"6.8.0"}}""")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/test/_mapping?pretty""", """
{
"test":
{
"mappings":
{
"default":
{
"properties":
{
"a_geopoint":
{
"properties":
{
"coordinates":
{
"type": "geo_point"
}
}
}
}
}
}
}
}
""")
ds = gdal.OpenEx('ES:/vsimem/fakeelasticsearch',
open_options=['AGGREGATION={"index":"test"}'])
assert ds is not None
lyr = ds.GetLayer(0)
assert lyr.TestCapability(ogr.OLCStringsAsUTF8) == 1
response = {
"aggregations":
{
"grid":
{
"buckets": [
{
"key": "dummy_key",
"doc_count": 9876543210,
"centroid": {
"location": {
"lat": 60,
"lon": 50
},
"count": 9876543210
}
},
{
"key": "dummy_key2",
"doc_count": 1,
"centroid": {
"location": {
"lat": -60.5,
"lon": -50.5
},
"count": 1
}
},
]
}
}
}
request = """/vsimem/fakeelasticsearch/test/_search&POSTFIELDS={"size":0,"aggs":{"grid":{"geohash_grid":{"field":"a_geopoint.coordinates","precision":2,"size":10000},"aggs":{"centroid":{"geo_centroid":{"field":"a_geopoint.coordinates"}}}}}}"""
gdal.FileFromMemBuffer(request, json.dumps(response))
assert lyr.GetFeatureCount() == 2
gdal.Unlink(request)
ds = gdal.OpenEx('ES:/vsimem/fakeelasticsearch',
open_options=['AGGREGATION={"index":"test"}'])
assert ds is not None
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer(request, json.dumps(response))
f = lyr.GetNextFeature()
gdal.Unlink(request)
assert f is not None
assert f['key'] == 'dummy_key'
assert f['doc_count'] == 9876543210
assert f.GetGeometryRef().ExportToWkt() == 'POINT (50 60)'
f = lyr.GetNextFeature()
assert f is not None
assert f['key'] == 'dummy_key2'
assert f.GetGeometryRef().ExportToWkt() == 'POINT (-50.5 -60.5)'
assert lyr.GetFeatureCount() == 2
# Test spatial filter coordinate clamping
lyr.SetSpatialFilterRect(-200,-200,200,200)
lyr.ResetReading()
gdal.FileFromMemBuffer(request, json.dumps(response))
assert lyr.GetFeatureCount() == 2
gdal.Unlink(request)
# Test normal spatial filter
lyr.SetSpatialFilterRect(1,2,3,4)
lyr.ResetReading()
request = """/vsimem/fakeelasticsearch/test/_search&POSTFIELDS={"size":0,"aggs":{"filtered":{"filter":{"geo_bounding_box":{"a_geopoint.coordinates":{"top_left":{"lat":4.0,"lon":1.0},"bottom_right":{"lat":2.0,"lon":3.0}}}},"aggs":{"grid":{"geohash_grid":{"field":"a_geopoint.coordinates","precision":5,"size":10000},"aggs":{"centroid":{"geo_centroid":{"field":"a_geopoint.coordinates"}}}}}}}}"""
response = {
"aggregations":
{
"filtered":
{
"grid":
{
"buckets": [
{
"key": "dummy_key3",
"doc_count": 1,
"centroid": {
"location": {
"lat": 3.0,
"lon": 2.0
}
}
}
]
}
}
}
}
gdal.FileFromMemBuffer(request, json.dumps(response))
f = lyr.GetNextFeature()
gdal.Unlink(request)
assert f is not None
assert f['key'] == 'dummy_key3'
###############################################################################
# Test aggregation
def test_ogr_elasticsearch_aggregation_all_options():
ogr_elasticsearch_delete_files()
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch",
"""{"version":{"number":"6.8.0"}}""")
ds = gdal.OpenEx('ES:/vsimem/fakeelasticsearch',
open_options=['AGGREGATION={"index":"test","geohash_grid":{"size":100,"precision":4},"fields":{"min":["a", "f"],"max":["b"],"avg":["c"],"sum":["d"],"count":["e"],"stats":["f"]}}'])
assert ds is not None
lyr = ds.GetLayer(0)
assert lyr.GetLayerDefn().GetFieldCount() == 12
response = {
"aggregations":
{
"grid":
{
"buckets": [
{
"key": "dummy_key",
"doc_count": 9876543210,
"centroid": {
"location": {
"lat": 60,
"lon": 50
},
"count": 9876543210
},
"a_min": { "value": 1.5 },
"b_max": { "value": 2.5 },
"c_avg": { "value": 3.5 },
"d_sum": { "value": 4.5 },
"e_count": { "value": 9876543211 },
"f_stats": {
"min": 1,
"max": 2,
"avg": 3,
"sum": 4,
"count": 9876543212
}
},
]
}
}
}
request = """/vsimem/fakeelasticsearch/test/_search&POSTFIELDS={"size":0,"aggs":{"grid":{"geohash_grid":{"field":"a_geopoint.coordinates","precision":4,"size":100},"aggs":{"centroid":{"geo_centroid":{"field":"a_geopoint.coordinates"}},"f_stats":{"stats":{"field":"f"}},"a_min":{"min":{"field":"a"}},"b_max":{"max":{"field":"b"}},"c_avg":{"avg":{"field":"c"}},"d_sum":{"sum":{"field":"d"}},"e_count":{"value_count":{"field":"e"}}}}}}"""
gdal.FileFromMemBuffer(request, json.dumps(response))
f = lyr.GetNextFeature()
gdal.Unlink(request)
assert f['key'] == 'dummy_key'
assert f['doc_count'] == 9876543210
assert f['a_min'] == 1.5
assert f['b_max'] == 2.5
assert f['c_avg'] == 3.5
assert f['d_sum'] == 4.5
assert f['e_count'] == 9876543211
assert f['f_min'] == 1
assert f['f_max'] == 2
assert f['f_avg'] == 3
assert f['f_sum'] == 4
assert f['f_count'] == 9876543212
assert f.GetGeometryRef().ExportToWkt() == 'POINT (50 60)'
###############################################################################
# Test GetLayerByName() with a wildcard name
def test_ogr_elasticsearch_wildcard_layer_name():
ogr_elasticsearch_delete_files()
gdal.FileFromMemBuffer("/vsimem/fakeelasticsearch",
"""{"version":{"number":"6.8.0"}}""")
ds = gdal.OpenEx('ES:/vsimem/fakeelasticsearch')
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_cat/indices/test*?h=i""", 'test1\ntest2\n')
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/test1/_mapping?pretty""", """
{
"test1":
{
"mappings":
{
"default":
{
"properties":
{
"a_geopoint":
{
"properties":
{
"coordinates":
{
"type": "geo_point"
}
}
},
"str_field": { "type": "string"},
"str_field2": { "type": "string"}
}
}
}
}
}
""")
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/test1/default/_search?scroll=1m&size=100""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_index": "test1",
"_id": "my_id",
"_source": {
"a_geopoint": {
"type": "Point",
"coordinates": [2.0,49.0]
},
"str_field": "foo",
"str_field2": "bar"
}
}]
}
}""")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_search/scroll?scroll=1m&scroll_id=my_scrollid""", "{}")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_search/scroll?scroll_id=my_scrollid&CUSTOMREQUEST=DELETE""", '{}')
lyr = ds.GetLayerByName('test*,-test3')
assert lyr.GetLayerDefn().GetFieldCount() == 3
assert lyr.GetLayerDefn().GetGeomFieldCount() == 1
gdal.FileFromMemBuffer("""/vsimem/fakeelasticsearch/test*,-test3/default/_search?scroll=1m&size=100""",
"""{
"_scroll_id": "my_scrollid",
"hits":
{
"hits":[
{
"_index": "test1",
"_id": "my_id",
"_source": {
"a_geopoint": {
"type": "Point",
"coordinates": [2.0,49.0]
},
"str_field": "foo",
"str_field2": "bar"
}
},
{
"_index": "test2",
"_id": "my_id2",
"_source": {
"a_geopoint": {
"type": "Point",
"coordinates": [3.0,50.0]
},
"str_field": "foo2",
"str_field2": "bar2"
}
}
]
}
}""")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_search/scroll?scroll=1m&scroll_id=my_scrollid""", "{}")
gdal.FileFromMemBuffer(
"""/vsimem/fakeelasticsearch/_search/scroll?scroll_id=my_scrollid&CUSTOMREQUEST=DELETE""", '{}')
f = lyr.GetNextFeature()
assert f['_id'] == 'my_id'
assert f['str_field'] == 'foo'
assert f['str_field2'] == 'bar'
assert f.GetGeometryRef().ExportToWkt() == 'POINT (2 49)'
f = lyr.GetNextFeature()
assert f['_id'] == 'my_id2'
assert f['str_field'] == 'foo2'
assert f['str_field2'] == 'bar2'
assert f.GetGeometryRef().ExportToWkt() == 'POINT (3 50)'
# Test with ADD_SOURCE_INDEX_NAME
ds = gdal.OpenEx('ES:/vsimem/fakeelasticsearch', open_options = ['ADD_SOURCE_INDEX_NAME=YES'])
lyr = ds.GetLayerByName('test*,-test3')
assert lyr.GetLayerDefn().GetFieldCount() == 4
assert lyr.GetLayerDefn().GetGeomFieldCount() == 1
f = lyr.GetNextFeature()
assert f['_index'] == 'test1'
assert f['_id'] == 'my_id'
assert f['str_field'] == 'foo'
assert f['str_field2'] == 'bar'
assert f.GetGeometryRef().ExportToWkt() == 'POINT (2 49)'
f = lyr.GetNextFeature()
assert f['_index'] == 'test2'
assert f['_id'] == 'my_id2'
assert f['str_field'] == 'foo2'
assert f['str_field2'] == 'bar2'
assert f.GetGeometryRef().ExportToWkt() == 'POINT (3 50)'
| 35.304829
| 1,260
| 0.508553
|
9e53ff93583876cb8f8c424caaf8360089e866ce
| 16,352
|
py
|
Python
|
fhir/resources/STU3/supplyrequest.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 144
|
2019-05-08T14:24:43.000Z
|
2022-03-30T02:37:11.000Z
|
fhir/resources/STU3/supplyrequest.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 82
|
2019-05-13T17:43:13.000Z
|
2022-03-30T16:45:17.000Z
|
fhir/resources/STU3/supplyrequest.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 48
|
2019-04-04T14:14:53.000Z
|
2022-03-30T06:07:31.000Z
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/SupplyRequest
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import typing
from pydantic import Field, root_validator
from . import backboneelement, domainresource, fhirtypes
class SupplyRequest(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Request for a medication, substance or device.
A record of a request for a medication, substance or device used in the
healthcare setting.
"""
resource_type = Field("SupplyRequest", const=True)
authoredOn: fhirtypes.DateTime = Field(
None,
alias="authoredOn",
title="When the request was made",
description=None,
# if property is element of this resource.
element_property=True,
)
authoredOn__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_authoredOn", title="Extension field for ``authoredOn``."
)
category: fhirtypes.CodeableConceptType = Field(
None,
alias="category",
title="The kind of supply (central, non-stock, etc.)",
description=(
"Category of supply, e.g. central, non-stock, etc. This is used to "
"support work flows associated with the supply process."
),
# if property is element of this resource.
element_property=True,
)
deliverFrom: fhirtypes.ReferenceType = Field(
None,
alias="deliverFrom",
title="The origin of the supply",
description="Where the supply is expected to come from.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Organization", "Location"],
)
deliverTo: fhirtypes.ReferenceType = Field(
None,
alias="deliverTo",
title="The destination of the supply",
description="Where the supply is destined to go.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Organization", "Location", "Patient"],
)
identifier: fhirtypes.IdentifierType = Field(
None,
alias="identifier",
title="Unique identifier",
description="Unique identifier for this supply request.",
# if property is element of this resource.
element_property=True,
)
occurrenceDateTime: fhirtypes.DateTime = Field(
None,
alias="occurrenceDateTime",
title="When the request should be fulfilled",
description=None,
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e occurrence[x]
one_of_many="occurrence",
one_of_many_required=False,
)
occurrenceDateTime__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_occurrenceDateTime",
title="Extension field for ``occurrenceDateTime``.",
)
occurrencePeriod: fhirtypes.PeriodType = Field(
None,
alias="occurrencePeriod",
title="When the request should be fulfilled",
description=None,
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e occurrence[x]
one_of_many="occurrence",
one_of_many_required=False,
)
occurrenceTiming: fhirtypes.TimingType = Field(
None,
alias="occurrenceTiming",
title="When the request should be fulfilled",
description=None,
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e occurrence[x]
one_of_many="occurrence",
one_of_many_required=False,
)
orderedItem: fhirtypes.SupplyRequestOrderedItemType = Field(
None,
alias="orderedItem",
title="The item being requested",
description=None,
# if property is element of this resource.
element_property=True,
)
priority: fhirtypes.Code = Field(
None,
alias="priority",
title="routine | urgent | asap | stat",
description=(
"Indicates how quickly this SupplyRequest should be addressed with "
"respect to other requests."
),
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
# but use them at your own responsibility; read the official FHIR documentation.
enum_values=["routine", "urgent", "asap", "stat"],
)
priority__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_priority", title="Extension field for ``priority``."
)
reasonCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="reasonCodeableConcept",
title="Why the supply item was requested",
description=None,
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e reason[x]
one_of_many="reason",
one_of_many_required=False,
)
reasonReference: fhirtypes.ReferenceType = Field(
None,
alias="reasonReference",
title="Why the supply item was requested",
description=None,
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e reason[x]
one_of_many="reason",
one_of_many_required=False,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Resource"],
)
requester: fhirtypes.SupplyRequestRequesterType = Field(
None,
alias="requester",
title="Who/what is requesting service",
description=(
"The individual who initiated the request and has responsibility for "
"its activation."
),
# if property is element of this resource.
element_property=True,
)
status: fhirtypes.Code = Field(
None,
alias="status",
title="draft | active | suspended +",
description="Status of the supply request.",
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
# but use them at your own responsibility; read the official FHIR documentation.
enum_values=["draft", "active", "suspended", "+"],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
supplier: typing.List[fhirtypes.ReferenceType] = Field(
None,
alias="supplier",
title="Who is intended to fulfill the request",
description=None,
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Organization"],
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``SupplyRequest`` according specification,
with preserving original sequence order.
"""
return [
"id",
"meta",
"implicitRules",
"language",
"text",
"contained",
"extension",
"modifierExtension",
"identifier",
"status",
"category",
"priority",
"orderedItem",
"occurrenceDateTime",
"occurrencePeriod",
"occurrenceTiming",
"authoredOn",
"requester",
"supplier",
"reasonCodeableConcept",
"reasonReference",
"deliverFrom",
"deliverTo",
]
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_1597(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"occurrence": [
"occurrenceDateTime",
"occurrencePeriod",
"occurrenceTiming",
],
"reason": ["reasonCodeableConcept", "reasonReference"],
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
class SupplyRequestOrderedItem(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
The item being requested.
"""
resource_type = Field("SupplyRequestOrderedItem", const=True)
itemCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="itemCodeableConcept",
title="Medication, Substance, or Device requested to be supplied",
description=(
"The item that is requested to be supplied. This is either a link to a "
"resource representing the details of the item or a code that "
"identifies the item from a known list."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e item[x]
one_of_many="item",
one_of_many_required=False,
)
itemReference: fhirtypes.ReferenceType = Field(
None,
alias="itemReference",
title="Medication, Substance, or Device requested to be supplied",
description=(
"The item that is requested to be supplied. This is either a link to a "
"resource representing the details of the item or a code that "
"identifies the item from a known list."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e item[x]
one_of_many="item",
one_of_many_required=False,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Medication", "Substance", "Device"],
)
quantity: fhirtypes.QuantityType = Field(
...,
alias="quantity",
title="The requested amount of the item indicated",
description="The amount that is being ordered of the indicated item.",
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``SupplyRequestOrderedItem`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"quantity",
"itemCodeableConcept",
"itemReference",
"itemReference",
"itemReference",
]
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_2698(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {"item": ["itemCodeableConcept", "itemReference"]}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
class SupplyRequestRequester(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Who/what is requesting service.
The individual who initiated the request and has responsibility for its
activation.
"""
resource_type = Field("SupplyRequestRequester", const=True)
agent: fhirtypes.ReferenceType = Field(
...,
alias="agent",
title="Individual making the request",
description="The device, practitioner, etc. who initiated the request.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=[
"Practitioner",
"Organization",
"Patient",
"RelatedPerson",
"Device",
],
)
onBehalfOf: fhirtypes.ReferenceType = Field(
None,
alias="onBehalfOf",
title="Organization agent is acting for",
description="The organization the device or practitioner was acting on behalf of.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Organization"],
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``SupplyRequestRequester`` according specification,
with preserving original sequence order.
"""
return ["id", "extension", "modifierExtension", "agent", "onBehalfOf"]
| 36.418708
| 91
| 0.614359
|
df9098d8f9504281b24fc5b32d8afa091d76f904
| 997
|
py
|
Python
|
ex24.py
|
Ma-Min-Min/python-exercises
|
1b0d63456d88b4750f89821782812becf4177375
|
[
"MIT"
] | null | null | null |
ex24.py
|
Ma-Min-Min/python-exercises
|
1b0d63456d88b4750f89821782812becf4177375
|
[
"MIT"
] | null | null | null |
ex24.py
|
Ma-Min-Min/python-exercises
|
1b0d63456d88b4750f89821782812becf4177375
|
[
"MIT"
] | null | null | null |
print ("Let's practice everything.")
print ('You\'d need to know \'bout escapes with \\ that do:')
print ('\n newlines and \t tabs.')
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print ("-----------------")
print (poem)
print ("-----------------")
five = 10 - 2 + 3 - 6
print (f"This should be five: {five}")
def secret_formula (started) :
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
print ("With a starting point of: {}".format (start_point))
print(f"We'd have {beans} beans, {jars} jars, and {crates} crates")
start_point = start_point / 10
print ("We can also do that this way:")
formula = secret_formula(start_point)
print ("We'd have {} beans, {} jars, and {} crates.".format(*formula))
| 25.564103
| 70
| 0.665998
|
c41fc9859bccb42d33b981e3745b45516318516a
| 71
|
py
|
Python
|
api/database/__init__.py
|
masaiborg/fastapi-mysql-docker-template
|
af218847b6cb38e4193cda6805405ccfa6dd4f7c
|
[
"MIT"
] | 2
|
2022-02-15T05:40:55.000Z
|
2022-03-22T22:10:41.000Z
|
api/database/__init__.py
|
masaiborg/fastapi-mysql-docker-template
|
af218847b6cb38e4193cda6805405ccfa6dd4f7c
|
[
"MIT"
] | null | null | null |
api/database/__init__.py
|
masaiborg/fastapi-mysql-docker-template
|
af218847b6cb38e4193cda6805405ccfa6dd4f7c
|
[
"MIT"
] | null | null | null |
# database module
from .query import query_get, query_put, query_update
| 35.5
| 53
| 0.830986
|
dee3571efa3bca3468053368883cce33a186d551
| 7,929
|
py
|
Python
|
docs/conf.py
|
csu-hmc/GaitAnalysisToolKit
|
1757032cb9cfe62a747aaa53fa320a8d8600db58
|
[
"Apache-2.0"
] | 75
|
2015-03-07T19:38:13.000Z
|
2022-03-28T13:59:29.000Z
|
docs/conf.py
|
csu-hmc/GaitAnalysisToolKit
|
1757032cb9cfe62a747aaa53fa320a8d8600db58
|
[
"Apache-2.0"
] | 21
|
2015-01-13T00:51:22.000Z
|
2022-01-26T10:58:28.000Z
|
docs/conf.py
|
csu-hmc/GaitAnalysisToolKit
|
1757032cb9cfe62a747aaa53fa320a8d8600db58
|
[
"Apache-2.0"
] | 25
|
2015-05-25T21:28:08.000Z
|
2022-02-24T07:30:37.000Z
|
# -*- coding: utf-8 -*-
#
# GaitAnalysisToolKit documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 11 12:40:21 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
from gaitanalysis import __version__
# This allows the Sphinx docs to build without the required modules.
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['numpy', 'scipy', 'scipy.integrate', 'scipy.interpolate',
'matplotlib', 'matplotlib.pyplot', 'matplotlib.ticker',
'tables', 'pandas', 'pyyaml', 'dtk', 'oct2py']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.todo', 'numpydoc']
# Get rid of warnings:
# http://stackoverflow.com/questions/12206334/sphinx-autosummary-toctree-contains-reference-to-nonexisting-document-warnings
numpydoc_show_class_members = False
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GaitAnalysisToolKit'
copyright = u'2013-2021, Jason K. Moore'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GaitAnalysisToolKitdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'GaitAnalysisToolKit.tex', u'GaitAnalysisToolKit Documentation',
u'Jason K. Moore', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gaitanalysistoolkit', u'GaitAnalysisToolKit Documentation',
[u'Jason K. Moore'], 1)
]
| 33.0375
| 124
| 0.720393
|
db5d04173a045320c576971e2a7ef9f36742fbe2
| 788
|
py
|
Python
|
src/phonebot/core/common/path.py
|
vi-robotics/pyphonebot-core
|
9b95dff2f2b3a7465ce10fb6562be82c6f9b4244
|
[
"MIT"
] | null | null | null |
src/phonebot/core/common/path.py
|
vi-robotics/pyphonebot-core
|
9b95dff2f2b3a7465ce10fb6562be82c6f9b4244
|
[
"MIT"
] | null | null | null |
src/phonebot/core/common/path.py
|
vi-robotics/pyphonebot-core
|
9b95dff2f2b3a7465ce10fb6562be82c6f9b4244
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
class PhonebotPath():
"""Path configuration contains absolute paths to Phonebot directories.
"""
@staticmethod
def root() -> str:
"""The root directory.
Returns:
str: The absolute path to the phonebot source directory.
"""
common = os.path.dirname(__file__)
return os.path.abspath(os.path.dirname(os.path.join(common, '../', '../')))
@staticmethod
def assets() -> str:
"""The assets directory, which might not necessarily exist.
Returns:
str: The absolute path to the phonebot assets directory.
"""
return os.path.join(PhonebotPath.root(), 'assets')
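# Minimal usage sketch (hypothetical addition, not part of the original
# module): running this file directly prints the resolved directories.
if __name__ == "__main__":
    print("root:", PhonebotPath.root())
    print("assets:", PhonebotPath.assets())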
| 28.142857
| 83
| 0.625635
|
807d7284422443396a86e2ea0fdb08749529d3f9
| 2,858
|
py
|
Python
|
examples/adspygoogle/dfa/v1_20/upload_in_stream_asset.py
|
cherry-wb/googleads-python-lib
|
24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04
|
[
"Apache-2.0"
] | null | null | null |
examples/adspygoogle/dfa/v1_20/upload_in_stream_asset.py
|
cherry-wb/googleads-python-lib
|
24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04
|
[
"Apache-2.0"
] | null | null | null |
examples/adspygoogle/dfa/v1_20/upload_in_stream_asset.py
|
cherry-wb/googleads-python-lib
|
24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04
|
[
"Apache-2.0"
] | 2
|
2020-04-02T19:00:31.000Z
|
2020-08-06T03:28:38.000Z
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example uploads an In-Stream video asset into an existing In-Stream
video creative. To create an In-Stream video creative, run
create_in_stream_video_creative.py.
This example creates a media file in the target creative because the
'mediaFile' flag on the InStreamAssetUploadRequest was set to 'true'. You can
use the same workflow to upload companion ads or non-linear ads to your creative
by setting the 'companion' or 'nonLinear' flags instead, respectively. Only one
flag may be set per upload request.
Tags: creative.uploadInStreamAsset
"""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import base64
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfaClient
from adspygoogle.common import Utils
# Set the parameters for the In-Stream video asset.
ASSET_NAME = 'INSERT_ASSET_NAME_HERE'
PATH_TO_FILE = 'INSERT_PATH_TO_FILE_HERE'
IN_STREAM_VIDEO_CREATIVE_ID = 'INSERT_IN_STREAM_VIDEO_CREATIVE_ID_HERE'
def main(client, asset_name, path_to_file, in_stream_video_creative_id):
# Initialize appropriate service.
creative_service = client.GetCreativeService(
'https://advertisersapitest.doubleclick.net', 'v1.20')
# Convert file into format that can be sent in SOAP messages.
content = Utils.ReadFile(path_to_file)
content = base64.encodestring(content)
# Create the In-Stream video creative asset.
in_stream_video_asset = {
'name': asset_name,
'content': content,
}
# Create an upload request to make this asset a media file for an existing
# In-Stream creative.
in_stream_asset_upload_request = {
'mediaFile': 'true',
'inStreamAsset': in_stream_video_asset,
'creativeId': in_stream_video_creative_id
}
# Save the media file.
result = creative_service.UploadInStreamAsset(
in_stream_asset_upload_request)[0]
# Display a success message.
print ('Added a media file to In-Stream video creative with ID \'%s\'.'
% result['Id'])
if __name__ == '__main__':
# Initialize client object.
client = DfaClient(path=os.path.join('..', '..', '..', '..'))
main(client, ASSET_NAME, PATH_TO_FILE, IN_STREAM_VIDEO_CREATIVE_ID)
| 34.02381 | 80 | 0.749125 |
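The module docstring above notes that the same workflow uploads companion ads or non-linear ads by switching the request flag, with only one flag set per upload request. A hedged sketch of the alternative request dict follows; it reuses the asset and creative ID exactly as built in main() above, and the values remain placeholders.
# Sketch: upload the same asset as a companion ad instead of a media file.
# Only one of 'mediaFile', 'companion' or 'nonLinear' may be set per request.
companion_upload_request = {
    'companion': 'true',
    'inStreamAsset': in_stream_video_asset,      # built exactly as in main() above
    'creativeId': IN_STREAM_VIDEO_CREATIVE_ID,   # placeholder creative ID
}
result = creative_service.UploadInStreamAsset(companion_upload_request)[0]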
65fec9814e339ba1288ec6b5542d1191f8f6b57a | 408 | py | Python
| mayan/apps/mimetype/apps.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | ["Apache-2.0"] | 1 | 2021-06-17T18:24:25.000Z | 2021-06-17T18:24:25.000Z
| mayan/apps/mimetype/apps.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | ["Apache-2.0"] | 7 | 2020-06-06T00:01:04.000Z | 2022-01-13T01:47:17.000Z
| mayan/apps/mimetype/apps.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | ["Apache-2.0"] | null | null | null |
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.apps import MayanAppConfig
from .dependencies import * # NOQA
class MIMETypesApp(MayanAppConfig):
name = 'mayan.apps.mimetype'
has_tests = True
verbose_name = _('MIME types')
def ready(self, *args, **kwargs):
super(MIMETypesApp, self).ready(*args, **kwargs)
| 24 | 56 | 0.730392 |
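As a hedged note on the app config above: a MayanAppConfig subclass is a standard Django AppConfig, so its ready() hook runs once the app's dotted path is listed in the project settings. The sketch below is a generic Django registration, not Mayan's own settings file, and the surrounding entries are assumed.
# settings.py sketch (hypothetical): register the app so ready() runs at startup.
INSTALLED_APPS = [
    # ... other Mayan apps (assumed) ...
    'mayan.apps.mimetype',
]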
5d89f8a0635ce7bc07a26ed63a3c7d300222f57a | 32,292 | py | Python
| sdk/python/pulumi_azure_native/network/v20150615/security_rule.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null
| sdk/python/pulumi_azure_native/network/v20150615/security_rule.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null
| sdk/python/pulumi_azure_native/network/v20150615/security_rule.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = ['SecurityRuleInitArgs', 'SecurityRule']
@pulumi.input_type
class SecurityRuleInitArgs:
def __init__(__self__, *,
access: pulumi.Input[Union[str, 'SecurityRuleAccess']],
destination_address_prefix: pulumi.Input[str],
direction: pulumi.Input[Union[str, 'SecurityRuleDirection']],
network_security_group_name: pulumi.Input[str],
protocol: pulumi.Input[Union[str, 'SecurityRuleProtocol']],
resource_group_name: pulumi.Input[str],
source_address_prefix: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
destination_port_range: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
security_rule_name: Optional[pulumi.Input[str]] = None,
source_port_range: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SecurityRule resource.
:param pulumi.Input[Union[str, 'SecurityRuleAccess']] access: The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'.
:param pulumi.Input[str] destination_address_prefix: The destination address prefix. CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
:param pulumi.Input[Union[str, 'SecurityRuleDirection']] direction: The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'.
:param pulumi.Input[str] network_security_group_name: The name of the network security group.
:param pulumi.Input[Union[str, 'SecurityRuleProtocol']] protocol: Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] source_address_prefix: The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
:param pulumi.Input[str] description: A description for this rule. Restricted to 140 chars.
:param pulumi.Input[str] destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
:param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
:param pulumi.Input[str] id: Resource Identifier.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[int] priority: The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
:param pulumi.Input[str] provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] security_rule_name: The name of the security rule.
:param pulumi.Input[str] source_port_range: The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
pulumi.set(__self__, "access", access)
pulumi.set(__self__, "destination_address_prefix", destination_address_prefix)
pulumi.set(__self__, "direction", direction)
pulumi.set(__self__, "network_security_group_name", network_security_group_name)
pulumi.set(__self__, "protocol", protocol)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "source_address_prefix", source_address_prefix)
if description is not None:
pulumi.set(__self__, "description", description)
if destination_port_range is not None:
pulumi.set(__self__, "destination_port_range", destination_port_range)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if security_rule_name is not None:
pulumi.set(__self__, "security_rule_name", security_rule_name)
if source_port_range is not None:
pulumi.set(__self__, "source_port_range", source_port_range)
@property
@pulumi.getter
def access(self) -> pulumi.Input[Union[str, 'SecurityRuleAccess']]:
"""
The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'.
"""
return pulumi.get(self, "access")
@access.setter
def access(self, value: pulumi.Input[Union[str, 'SecurityRuleAccess']]):
pulumi.set(self, "access", value)
@property
@pulumi.getter(name="destinationAddressPrefix")
def destination_address_prefix(self) -> pulumi.Input[str]:
"""
The destination address prefix. CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
"""
return pulumi.get(self, "destination_address_prefix")
@destination_address_prefix.setter
def destination_address_prefix(self, value: pulumi.Input[str]):
pulumi.set(self, "destination_address_prefix", value)
@property
@pulumi.getter
def direction(self) -> pulumi.Input[Union[str, 'SecurityRuleDirection']]:
"""
The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'.
"""
return pulumi.get(self, "direction")
@direction.setter
def direction(self, value: pulumi.Input[Union[str, 'SecurityRuleDirection']]):
pulumi.set(self, "direction", value)
@property
@pulumi.getter(name="networkSecurityGroupName")
def network_security_group_name(self) -> pulumi.Input[str]:
"""
The name of the network security group.
"""
return pulumi.get(self, "network_security_group_name")
@network_security_group_name.setter
def network_security_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "network_security_group_name", value)
@property
@pulumi.getter
def protocol(self) -> pulumi.Input[Union[str, 'SecurityRuleProtocol']]:
"""
Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: pulumi.Input[Union[str, 'SecurityRuleProtocol']]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="sourceAddressPrefix")
def source_address_prefix(self) -> pulumi.Input[str]:
"""
The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
"""
return pulumi.get(self, "source_address_prefix")
@source_address_prefix.setter
def source_address_prefix(self, value: pulumi.Input[str]):
pulumi.set(self, "source_address_prefix", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="destinationPortRange")
def destination_port_range(self) -> Optional[pulumi.Input[str]]:
"""
The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "destination_port_range")
@destination_port_range.setter
def destination_port_range(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "destination_port_range", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource Identifier.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[int]]:
"""
The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter(name="securityRuleName")
def security_rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the security rule.
"""
return pulumi.get(self, "security_rule_name")
@security_rule_name.setter
def security_rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_rule_name", value)
@property
@pulumi.getter(name="sourcePortRange")
def source_port_range(self) -> Optional[pulumi.Input[str]]:
"""
The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "source_port_range")
@source_port_range.setter
def source_port_range(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_port_range", value)
class SecurityRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access: Optional[pulumi.Input[Union[str, 'SecurityRuleAccess']]] = None,
description: Optional[pulumi.Input[str]] = None,
destination_address_prefix: Optional[pulumi.Input[str]] = None,
destination_port_range: Optional[pulumi.Input[str]] = None,
direction: Optional[pulumi.Input[Union[str, 'SecurityRuleDirection']]] = None,
etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_security_group_name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[Union[str, 'SecurityRuleProtocol']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
security_rule_name: Optional[pulumi.Input[str]] = None,
source_address_prefix: Optional[pulumi.Input[str]] = None,
source_port_range: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Network security rule.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'SecurityRuleAccess']] access: The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'.
:param pulumi.Input[str] description: A description for this rule. Restricted to 140 chars.
:param pulumi.Input[str] destination_address_prefix: The destination address prefix. CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
:param pulumi.Input[str] destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
:param pulumi.Input[Union[str, 'SecurityRuleDirection']] direction: The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'.
:param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
:param pulumi.Input[str] id: Resource Identifier.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] network_security_group_name: The name of the network security group.
:param pulumi.Input[int] priority: The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
:param pulumi.Input[Union[str, 'SecurityRuleProtocol']] protocol: Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'.
:param pulumi.Input[str] provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] security_rule_name: The name of the security rule.
:param pulumi.Input[str] source_address_prefix: The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
:param pulumi.Input[str] source_port_range: The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SecurityRuleInitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Network security rule.
:param str resource_name: The name of the resource.
:param SecurityRuleInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecurityRuleInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access: Optional[pulumi.Input[Union[str, 'SecurityRuleAccess']]] = None,
description: Optional[pulumi.Input[str]] = None,
destination_address_prefix: Optional[pulumi.Input[str]] = None,
destination_port_range: Optional[pulumi.Input[str]] = None,
direction: Optional[pulumi.Input[Union[str, 'SecurityRuleDirection']]] = None,
etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_security_group_name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[Union[str, 'SecurityRuleProtocol']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
security_rule_name: Optional[pulumi.Input[str]] = None,
source_address_prefix: Optional[pulumi.Input[str]] = None,
source_port_range: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecurityRuleInitArgs.__new__(SecurityRuleInitArgs)
if access is None and not opts.urn:
raise TypeError("Missing required property 'access'")
__props__.__dict__["access"] = access
__props__.__dict__["description"] = description
if destination_address_prefix is None and not opts.urn:
raise TypeError("Missing required property 'destination_address_prefix'")
__props__.__dict__["destination_address_prefix"] = destination_address_prefix
__props__.__dict__["destination_port_range"] = destination_port_range
if direction is None and not opts.urn:
raise TypeError("Missing required property 'direction'")
__props__.__dict__["direction"] = direction
__props__.__dict__["etag"] = etag
__props__.__dict__["id"] = id
__props__.__dict__["name"] = name
if network_security_group_name is None and not opts.urn:
raise TypeError("Missing required property 'network_security_group_name'")
__props__.__dict__["network_security_group_name"] = network_security_group_name
__props__.__dict__["priority"] = priority
if protocol is None and not opts.urn:
raise TypeError("Missing required property 'protocol'")
__props__.__dict__["protocol"] = protocol
__props__.__dict__["provisioning_state"] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["security_rule_name"] = security_rule_name
if source_address_prefix is None and not opts.urn:
raise TypeError("Missing required property 'source_address_prefix'")
__props__.__dict__["source_address_prefix"] = source_address_prefix
__props__.__dict__["source_port_range"] = source_port_range
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20150615:SecurityRule"), pulumi.Alias(type_="azure-native:network:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20150501preview:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20160330:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160330:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20160601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160601:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20160901:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160901:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20161201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20161201:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20170301:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170301:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20170601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170601:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20170801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170801:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20170901:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170901:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20171001:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20171001:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20171101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20171101:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20180101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180101:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20180201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180201:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20180401:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180401:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20180601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180601:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20180701:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180701:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20180801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180801:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20181001:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20181001:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20181101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20181101:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20181201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20181201:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20190201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190201:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20190401:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190401:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20190601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190601:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20190701:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190701:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20190801:SecurityRule"), 
pulumi.Alias(type_="azure-nextgen:network/v20190801:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20190901:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190901:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20191101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20191101:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20191201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20191201:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20200301:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200301:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20200401:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200401:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20200501:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200501:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20200601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200601:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20200701:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200701:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20200801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200801:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20201101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20201101:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20210201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20210201:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20210301:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20210301:SecurityRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SecurityRule, __self__).__init__(
'azure-native:network/v20150615:SecurityRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SecurityRule':
"""
Get an existing SecurityRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SecurityRuleInitArgs.__new__(SecurityRuleInitArgs)
__props__.__dict__["access"] = None
__props__.__dict__["description"] = None
__props__.__dict__["destination_address_prefix"] = None
__props__.__dict__["destination_port_range"] = None
__props__.__dict__["direction"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["priority"] = None
__props__.__dict__["protocol"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["source_address_prefix"] = None
__props__.__dict__["source_port_range"] = None
return SecurityRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def access(self) -> pulumi.Output[str]:
"""
The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'.
"""
return pulumi.get(self, "access")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="destinationAddressPrefix")
def destination_address_prefix(self) -> pulumi.Output[str]:
"""
The destination address prefix. CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
"""
return pulumi.get(self, "destination_address_prefix")
@property
@pulumi.getter(name="destinationPortRange")
def destination_port_range(self) -> pulumi.Output[Optional[str]]:
"""
The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "destination_port_range")
@property
@pulumi.getter
def direction(self) -> pulumi.Output[str]:
"""
The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'.
"""
return pulumi.get(self, "direction")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def priority(self) -> pulumi.Output[Optional[int]]:
"""
The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="sourceAddressPrefix")
def source_address_prefix(self) -> pulumi.Output[str]:
"""
The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
"""
return pulumi.get(self, "source_address_prefix")
@property
@pulumi.getter(name="sourcePortRange")
def source_port_range(self) -> pulumi.Output[Optional[str]]:
"""
The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "source_port_range")
| 60.813559 | 5,245 | 0.689985 |
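A minimal Pulumi program sketch using only constructor arguments documented in the generated resource above. The import path is inferred from the file's location in the SDK, and the Pulumi resource name, resource group and network security group names are placeholders.
# __main__.py sketch: allow inbound SSH on an existing network security group.
import pulumi
from pulumi_azure_native.network.v20150615 import SecurityRule  # path inferred from the SDK layout

ssh_rule = SecurityRule(
    'allow-ssh',                                 # Pulumi resource name (placeholder)
    resource_group_name='example-rg',            # placeholder
    network_security_group_name='example-nsg',   # placeholder
    security_rule_name='allow-ssh',
    access='Allow',
    direction='Inbound',
    protocol='Tcp',
    priority=100,
    source_address_prefix='*',
    source_port_range='*',
    destination_address_prefix='*',
    destination_port_range='22',
)
pulumi.export('rule_name', ssh_rule.name)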
30aacfc1f1939accbdee0e28ec46d9abf2b01918 | 6,304 | py | Python
| pybelt.py | hermanj13/Pybelt | 050fe8dce8107d9b80dda4bb15dc466104f9ba47 | ["MIT"] | null | null | null
| pybelt.py | hermanj13/Pybelt | 050fe8dce8107d9b80dda4bb15dc466104f9ba47 | ["MIT"] | null | null | null
| pybelt.py | hermanj13/Pybelt | 050fe8dce8107d9b80dda4bb15dc466104f9ba47 | ["MIT"] | null | null | null |
import argparse
import random
import sys
import getpass
# Pointers
from lib.pointers import run_proxy_finder
from lib.pointers import run_xss_scan
from lib.pointers import run_sqli_scan
from lib.pointers import run_dork_checker
from lib.pointers import run_hash_cracker
from lib.pointers import run_hash_verification
from lib.pointers import run_port_scan
# Shell
from lib.shell import pybelt_shell
# Settings
from lib.core.settings import LOGGER
from lib.core.settings import VERSION_STRING
from lib.core.settings import WORDLIST_LINKS
from lib.core.settings import create_wordlist
from lib.core.settings import hide_banner
from lib.core.settings import integrity_check
from lib.core.settings import update_pybelt
if __name__ == '__main__':
opts = argparse.ArgumentParser()
opts.add_argument('-d', '--dork-check', metavar='DORK', dest="dorkcheck",
help="Provide a Google dork to check for possible injectable sites")
opts.add_argument('-c', '--hash-crack', metavar="HASH", dest="hash", nargs=1,
help="Specify a hash to crack and a hash type, IE: -c <HASH>:md5 (default all)")
opts.add_argument('-p', '--port-scan', metavar="HOST", dest="portscan",
help="Provide a host to scan for open ports")
opts.add_argument('-s', '--sqli-scanner', metavar="URL", dest="sqliscan",
help="Provide a URL to scan for SQL injection flaws")
opts.add_argument("-v", '--verify-hash', metavar="HASH", dest="hashcheck",
help="Verify a given hash type. (MD5, WHIRLPOOL, SHA256, etc..)")
opts.add_argument("-f", "--find-proxies", action="store_true", dest="proxysearch",
help="Attempt to find some proxies automatically")
opts.add_argument('-x', '--xss', metavar="URL", dest="xssScan",
help="Check if a URL is vulnerable to XSS")
opts.add_argument('--sql-list', metavar="FILE", dest="sqliList",
help="Pass a file path with URLS to scan for SQLi vulnerabilities")
opts.add_argument('--xss-list', metavar="FILE", dest="xssList",
help="Pass a file path with URLS to scan for XSS vulnerabilities")
opts.add_argument("--proxy", metavar="PROXY", dest="configProxy",
help="Configure the program to use a proxy when connecting")
opts.add_argument('--banner', action="store_true", dest="banner",
help="Hide the banner")
opts.add_argument('-l', '--legal', action="store_true", dest="legal",
help="Display the legal information")
opts.add_argument('--version', action="store_true", dest="version",
help="Show the version number and exit")
opts.add_argument('--update', action="store_true", dest="update",
help="Update the program to the latest version")
opts.add_argument('--rand-wordlist', action="store_true", dest="random_wordlist",
help="Create a random wordlist to use for dictionary attacks"),
opts.add_argument('--rand-agent', action="store_true", dest="randomUserAgent",
help="Use a random user agent from a file list")
opts.add_argument('--anon', metavar="ANON", dest="anonLvl",
help=argparse.SUPPRESS)
opts.add_argument('--hash-list', metavar="FILE", dest="hashList",
help=argparse.SUPPRESS)
opts.add_argument('--dork-list', metavar="FILE", dest="dorkList",
help=argparse.SUPPRESS)
opts.add_argument('--tamper', metavar="SCRIPT", dest="tamper",
help=argparse.SUPPRESS)
args = opts.parse_args()
hide_banner(hide=True if args.banner else False,
legal=True if args.legal else False) if args.version is False else hide_banner(hide=True)
LOGGER.info("Checking program integrity..")
integrity_check()
try:
if len(sys.argv) == 1: # If you failed to provide an argument
prompt = pybelt_shell.PybeltConsole() # Launch the shell
prompt.prompt = "{}@pybelt > ".format(getpass.getuser())
info_message = "You have failed to provide a flag so you have been "
info_message += "redirected to the Pybelt Console. For available "
info_message += "flags type: 'run -hh', to see help type: 'help' "
info_message += "to exit the console type: 'quit'"
try:
prompt.cmdloop(LOGGER.info(info_message))
except TypeError as e:
LOGGER.info("Terminating session...")
exit(0)
if args.update is True: # Update the program
update_pybelt()
if args.version is True: # Show the version number and exit
hide_banner(hide=True)
LOGGER.info(VERSION_STRING)
sys.exit(0)
if args.random_wordlist is True: # Create a random wordlist
LOGGER.info("Creating a random wordlist..")
create_wordlist(random.choice(WORDLIST_LINKS))
LOGGER.info("Wordlist created, resuming process..")
if args.proxysearch is True: # Find some proxies
run_proxy_finder()
if args.hashcheck is not None: # Check what hash type you have
run_hash_verification(args.hashcheck)
if args.sqliscan is not None: # SQLi scanning
run_sqli_scan(args.sqliscan)
if args.sqliList is not None: # SQLi file scanning
run_sqli_scan(None, url_file=args.sqliList)
if args.dorkcheck is not None: # Dork checker, check if your dork isn't shit
run_dork_checker(args.dorkcheck)
if args.hash is not None: # Try and crack a hash
run_hash_cracker(args.hash)
if args.portscan is not None: # Scan a given host for open ports
run_port_scan(args.portscan)
if args.xssScan is not None: # Scan a URL for XSS vulnerabilities
run_xss_scan(args.xssScan, args.configProxy, args.randomUserAgent)
if args.xssList is not None: # Run a through a file list for XSS vulns
run_xss_scan(None, url_file=args.xssList)
except KeyboardInterrupt: # Why you abort me?! :c
LOGGER.error("User aborted.")
| 46.352941 | 105 | 0.637214 |
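A small sketch of what the argparse parser defined above produces for a typical invocation; it assumes access to the same `opts` parser object and uses a placeholder target host.
# Sketch: equivalent to running `python pybelt.py -p 127.0.0.1` (placeholder host).
args = opts.parse_args(['-p', '127.0.0.1'])
print(args.portscan)   # '127.0.0.1' -> the port-scan branch runs
print(args.sqliscan)   # None -> the SQLi branch is skipped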
5e7b5f60b9cd22a6ce1fdbd6d5081462be1a2408 | 1,886 | py | Python
| src/serenity/data/batch/load_sharadar_institutional_holdings.py | dunetz/serenity | b53adb8cb76b176665a517972982cb90367b810f | ["Apache-2.0"] | null | null | null
| src/serenity/data/batch/load_sharadar_institutional_holdings.py | dunetz/serenity | b53adb8cb76b176665a517972982cb90367b810f | ["Apache-2.0"] | null | null | null
| src/serenity/data/batch/load_sharadar_institutional_holdings.py | dunetz/serenity | b53adb8cb76b176665a517972982cb90367b810f | ["Apache-2.0"] | 1 | 2021-09-23T10:47:35.000Z | 2021-09-23T10:47:35.000Z |
from serenity.data.batch.load_sharadar_tickers import LoadSharadarTickersTask
from serenity.data.batch.utils import LoadSharadarTableTask, ExportQuandlTableTask
from serenity.data.sharadar_api import clean_nulls
from serenity.data.sharadar_holdings import InstitutionalInvestor, SecurityType, InstitutionalHoldings
from serenity.data.sharadar_refdata import Ticker
class LoadInstitutionalHoldingsTask(LoadSharadarTableTask):
def requires(self):
return [
LoadSharadarTickersTask(start_date=self.start_date, end_date=self.end_date),
ExportQuandlTableTask(table_name=self.get_workflow_name(), date_column='calendardate',
start_date=self.start_date, end_date=self.end_date)
]
def process_row(self, index, row):
ticker_code = row['ticker']
ticker = Ticker.find_by_ticker(self.session, ticker_code)
investor_name = row['investorname']
investor = InstitutionalInvestor.get_or_create(self.session, investor_name)
security_type_code = row['securitytype']
security_type = SecurityType.get_or_create(self.session, security_type_code)
calendar_date = row['calendardate']
value = row['value']
units = row['units']
price = clean_nulls(row['price'])
holdings = InstitutionalHoldings.find(self.session, ticker_code, investor, security_type, calendar_date)
if holdings is None:
holdings = InstitutionalHoldings(ticker=ticker, investor=investor, security_type=security_type,
calendar_date=calendar_date, value=value, units=units, price=price)
else:
holdings.value = value
holdings.units = units
holdings.price = price
self.session.add(holdings)
def get_workflow_name(self):
return 'SHARADAR/SF3'
| 42.863636 | 112 | 0.699894 |
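A hedged scheduling sketch for the loader above, assuming the Sharadar batch base classes wrap Luigi tasks (suggested by the requires()/start_date/end_date pattern, not confirmed by this file alone); the date range is a placeholder.
# Sketch: run the holdings load for a quarter with Luigi's local scheduler (assumed setup).
import datetime
import luigi
from serenity.data.batch.load_sharadar_institutional_holdings import LoadInstitutionalHoldingsTask

luigi.build(
    [LoadInstitutionalHoldingsTask(start_date=datetime.date(2021, 1, 1),   # placeholder dates
                                   end_date=datetime.date(2021, 3, 31))],
    local_scheduler=True,
)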
d9b7f2326ebee76f1dee39307a828d211df9aab7 | 5,851 | py | Python
| test/test_phonology.py | defseg/PyLaut | e0d84d49189aceffdfaf8ccd94f16f4797dbbd80 | ["MIT"] | 4 | 2015-01-05T19:28:16.000Z | 2019-09-01T05:02:15.000Z
| test/test_phonology.py | defseg/PyLaut | e0d84d49189aceffdfaf8ccd94f16f4797dbbd80 | ["MIT"] | 153 | 2016-07-20T18:38:37.000Z | 2021-03-25T22:29:47.000Z
| test/test_phonology.py | defseg/PyLaut | e0d84d49189aceffdfaf8ccd94f16f4797dbbd80 | ["MIT"] | 5 | 2015-01-05T19:28:21.000Z | 2019-04-22T16:40:19.000Z |
"""
Test module for phonology.py
"""
import pytest
from pylaut.language.phonology import phonology
@pytest.fixture
def vowels():
return {"aː", "iː", "uː", "a", "i", "u", "ə"}
@pytest.fixture
def consonants():
return {"p", "t", "k", "s", "x", "r", "l", "w"}
@pytest.fixture
def sample_phonology(vowels, consonants):
return phonology.Phonology(vowels | consonants)
@pytest.fixture
def sample_phonology_with_subsystems(sample_phonology):
sample_phonology.define_vowel_subsystem('long', autoadd=True)
return sample_phonology
@pytest.fixture
def long_vowels():
return {"aː", "iː", "uː"}
@pytest.fixture
def phoneme():
return phonology.Phoneme("a")
def test_vowel_subsystems_keys(sample_phonology):
sample_phonology.define_vowel_subsystem("long", autoadd=True)
assert '+long' in sample_phonology.vowel_subsystems
assert '-long' in sample_phonology.vowel_subsystems
def test_vowel_subsystems_contents(sample_phonology, long_vowels):
sample_phonology.define_vowel_subsystem("long", autoadd=True)
assert {ph.symbol
for ph in sample_phonology.vowel_subsystems['+long']
} == long_vowels
def test_json(sample_phonology):
json = sample_phonology.to_json()
new_phonology = phonology.Phonology()
new_phonology.from_json(json)
assert ({ph.symbol
for ph in new_phonology.phonemes} == {
ph.symbol
for ph in sample_phonology.phonemes
})
def test_add_phoneme(sample_phonology, phoneme):
sample_phonology.add_phoneme(phoneme)
assert phoneme in sample_phonology.phonemes
def test_add_phoneme_errors(sample_phonology):
with pytest.raises(TypeError):
sample_phonology.add_phoneme("a")
def test_get_vowels(sample_phonology):
vwls = {ph.symbol for ph in sample_phonology.get_vowels()}
assert vwls == {"aː", "iː", "uː", "a", "i", "u", "ə"}
def test_get_consonants(sample_phonology):
vwls = {ph.symbol for ph in sample_phonology.get_consonants()}
assert vwls == {"p", "t", "k", "s", "x", "r", "l", "w"}
def test_get_phoneme(sample_phonology):
ph = sample_phonology.get_phoneme("a")
assert ph
def test_get_phoneme_errors(sample_phonology):
with pytest.raises(Exception):
sample_phonology.get_phoneme("y")
def test_get_phonemes_with_feature(sample_phonology, consonants):
cps = sample_phonology.get_phonemes_with_feature('consonantal', '+')
cs = {ph.symbol for ph in cps}
assert cs == consonants
def test_get_phonemes_with_features(sample_phonology, long_vowels):
lvp = sample_phonology.get_phonemes_with_features({
'consonantal': '-',
'long': '+'
})
lv = {ph.symbol for ph in lvp}
assert lv == long_vowels
def test_get_phoneme_dictionary_keys(sample_phonology, vowels, consonants):
pdict = sample_phonology.get_phoneme_dictionary()
assert set(pdict.keys()) == vowels | consonants
def test_get_phoneme_dictionary_types(sample_phonology, vowels, consonants):
pdict = sample_phonology.get_phoneme_dictionary()
assert all(isinstance(k, str) for k in pdict.keys())
assert all(isinstance(v, phonology.Phoneme) for v in pdict.values())
def test_set_phoneme_frequency_from_list_errors(sample_phonology):
with pytest.raises(Exception):
sample_phonology.set_phoneme_frequency_from_list('nuculus', [])
def test_set_phoneme_frequency_from_list(sample_phonology, vowels):
plist = [phonology.Phoneme(p) for p in vowels]
sample_phonology.set_phoneme_frequency_from_list('nucleus', plist)
pf = sample_phonology.nucleus_frequencies
for phon, freq in pf.items():
assert freq == 1 / len(vowels)
def test_get_total_phoneme_frequency(sample_phonology, vowels):
plist = [phonology.Phoneme(p) for p in vowels]
sample_phonology.set_phoneme_frequency_from_list('onset', plist)
sample_phonology.set_phoneme_frequency_from_list('nucleus', plist)
sample_phonology.set_phoneme_frequency_from_list('coda', plist)
tf = sample_phonology.get_phoneme_frequency_total(
sample_phonology.get_phoneme('a'))
assert tf == 1 / len(vowels)
def test_get_total_phoneme_frequency_error(sample_phonology):
with pytest.raises(TypeError):
sample_phonology.get_phoneme_frequency_total("a")
def test_assign_vowel_to_subsystem_not_phoneme(
sample_phonology_with_subsystems):
with pytest.raises(Exception):
sample_phonology_with_subsystems.assign_vowel_to_subsystem(
'a', 'long', '+')
def test_assign_vowel_to_subsystem_not_vowel(sample_phonology_with_subsystems):
with pytest.raises(Exception):
sample_phonology_with_subsystems.assign_vowel_to_subsystem(
sample_phonology_with_subsystems.get_phoneme('p'), 'long', '+')
def test_assign_vowel_to_subsystem_invalid_subsystem(
sample_phonology_with_subsystems):
with pytest.raises(Exception):
sample_phonology_with_subsystems.assign_vowel_to_subsystem(
sample_phonology_with_subsystems.get_phoneme('a'), 'fruity', '+')
def test_assign_vowel_to_subsystem(sample_phonology_with_subsystems):
sample_phonology_with_subsystems.assign_vowel_to_subsystem(
sample_phonology_with_subsystems.get_phoneme('a'), 'long', '-')
def test_get_vowels_in_subsystem(sample_phonology_with_subsystems):
longs = sample_phonology_with_subsystems.get_vowels_in_subsystem(
'long', '+')
assert isinstance(longs, set)
def test_get_vowel_subsystems(sample_phonology_with_subsystems):
subs = sample_phonology_with_subsystems.get_vowel_subsystems()
assert subs == ['long']
def test_count_vowels(sample_phonology_with_subsystems):
cmp_dict = {'total': 7, 'long': {'+': 3, '-': 4}}
count_dict = sample_phonology_with_subsystems.count_vowels()
assert count_dict == cmp_dict
| 31.12234 | 79 | 0.735772 |
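A short usage sketch mirroring the fixtures and assertions in the test module above: it builds the same inventory, defines the 'long' vowel subsystem, and inspects it. The expected outputs in the comments come from the tests themselves.
# Sketch: build a phonology and inspect its vowel subsystems (mirrors the fixtures above).
from pylaut.language.phonology import phonology

inventory = {"aː", "iː", "uː", "a", "i", "u", "ə", "p", "t", "k", "s", "x", "r", "l", "w"}
phono = phonology.Phonology(inventory)
phono.define_vowel_subsystem('long', autoadd=True)
print({ph.symbol for ph in phono.get_vowels_in_subsystem('long', '+')})  # expected {"aː", "iː", "uː"}
print(phono.count_vowels())  # expected {'total': 7, 'long': {'+': 3, '-': 4}}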
414072e3fc9aa9894f5b0a73cb0be246a1b7e96e | 586 | py | Python
| my_env/Lib/site-packages/sklearn/linear_model/bayes.py | obulrdy6881/Drowsinss | 61cb9281d7dd22aee282b517e2fbf500f0ff9935 | ["MIT"] | 2 | 2021-05-02T07:59:56.000Z | 2021-12-14T19:53:13.000Z
| Web application/env/Lib/site-packages/sklearn/linear_model/bayes.py | arpit0891/Covid-19-and-Pneumonia-detection-from-X-Ray | 6b2756e4672ab25083a0a50f44f36bec1833e789 | ["MIT"] | 7 | 2021-06-08T21:46:24.000Z | 2022-03-12T00:35:31.000Z
| my_env/Lib/site-packages/sklearn/linear_model/bayes.py | obulrdy6881/Drowsinss | 61cb9281d7dd22aee282b517e2fbf500f0ff9935 | ["MIT"] | 1 | 2021-05-02T07:59:59.000Z | 2021-05-02T07:59:59.000Z |
# THIS FILE WAS AUTOMATICALLY GENERATED BY deprecated_modules.py
import sys
# mypy error: Module X has no attribute y (typically for C extensions)
from . import _bayes # type: ignore
from ..externals._pep562 import Pep562
from ..utils.deprecation import _raise_dep_warning_if_not_pytest
deprecated_path = 'sklearn.linear_model.bayes'
correct_import_path = 'sklearn.linear_model'
_raise_dep_warning_if_not_pytest(deprecated_path, correct_import_path)
def __getattr__(name):
return getattr(_bayes, name)
if not sys.version_info >= (3, 7):
Pep562(__name__)
| 30.842105 | 71 | 0.774744 |
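The shim above forwards attribute access from the deprecated module path to the private implementation and warns on import. A minimal sketch of the supported usage: import the estimators from the public package path instead of the deprecated one.
# Sketch: prefer the public import path over the deprecated 'sklearn.linear_model.bayes'.
from sklearn.linear_model import BayesianRidge, ARDRegression

model = BayesianRidge()  # importing via the deprecated path would still work but emits a warning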
4adcdd0112b264ba0d8cd22d3b1e20956399f7fb | 960 | py | Python
| pandas/tests/indexes/multi/test_astype.py | LauraCollard/pandas | b1c3a9031569334cafc4e8d45d35408421f7dea4 | ["BSD-3-Clause"] | 5 | 2019-07-26T15:22:41.000Z | 2021-09-28T09:22:17.000Z
| pandas/tests/indexes/multi/test_astype.py | LauraCollard/pandas | b1c3a9031569334cafc4e8d45d35408421f7dea4 | ["BSD-3-Clause"] | 16 | 2021-03-19T09:44:52.000Z | 2022-03-12T00:22:14.000Z
| pandas/tests/indexes/multi/test_astype.py | LauraCollard/pandas | b1c3a9031569334cafc4e8d45d35408421f7dea4 | ["BSD-3-Clause"] | 9 | 2020-02-05T10:24:12.000Z | 2020-02-10T13:08:50.000Z |
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.util.testing import assert_copy
def test_astype(idx):
expected = idx.copy()
actual = idx.astype("O")
assert_copy(actual.levels, expected.levels)
assert_copy(actual.codes, expected.codes)
assert [level.name for level in actual.levels] == list(expected.names)
with pytest.raises(TypeError, match="^Setting.*dtype.*object"):
idx.astype(np.dtype(int))
@pytest.mark.parametrize("ordered", [True, False])
def test_astype_category(idx, ordered):
# GH 18630
msg = "> 1 ndim Categorical are not supported at this time"
with pytest.raises(NotImplementedError, match=msg):
idx.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with pytest.raises(NotImplementedError, match=msg):
idx.astype("category")
| 30.967742 | 74 | 0.710417 |
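A hedged sketch of the behaviour the tests above exercise, using a hand-made MultiIndex (the `idx` fixture itself lives in the package's conftest and is not shown here).
# Sketch: MultiIndex.astype only supports casting to object dtype.
import pandas as pd

mi = pd.MultiIndex.from_arrays([[1, 1, 2], ['a', 'b', 'a']], names=['num', 'let'])
obj_mi = mi.astype('O')      # allowed: cast to object dtype
# mi.astype(int)             # would raise TypeError, as asserted in test_astype
# mi.astype('category')      # would raise NotImplementedError, as asserted above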
873cb87fd2245d18113c3ddd753c6e4a13104b7d | 22,701 | py | Python
| indicators/models.py | AkshJain99/Activity-CE | cbd1aa94a115a5bdba5e69fa3030335fa58c695b | ["Apache-2.0"] | 1 | 2021-07-07T07:31:53.000Z | 2021-07-07T07:31:53.000Z
| indicators/models.py | AkshJain99/Activity-CE | cbd1aa94a115a5bdba5e69fa3030335fa58c695b | ["Apache-2.0"] | null | null | null
| indicators/models.py | AkshJain99/Activity-CE | cbd1aa94a115a5bdba5e69fa3030335fa58c695b | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib import admin
from django.utils import timezone
import uuid
from simple_history.models import HistoricalRecords
from decimal import Decimal
from datetime import datetime, timedelta
from workflow.models import (
Program, Sector, SiteProfile, ProjectAgreement, ProjectComplete,
Country, Documentation, ActivityUser)
class ActivityTable(models.Model):
name = models.CharField(max_length=255, blank=True)
table_id = models.IntegerField(blank=True, null=True)
owner = models.ForeignKey(ActivityUser, on_delete=models.CASCADE)
remote_owner = models.CharField(max_length=255, blank=True)
country = models.ManyToManyField(Country, blank=True)
url = models.CharField(max_length=255, blank=True)
unique_count = models.IntegerField(blank=True, null=True)
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.name
class ActivityTableAdmin(admin.ModelAdmin):
list_display = ('name', 'country', 'owner',
'url', 'create_date', 'edit_date')
search_fields = ('country', 'name')
list_filter = ('country__country',)
display = 'Activity Table'
class IndicatorType(models.Model):
indicator_type = models.CharField(max_length=135, blank=True)
description = models.TextField(max_length=765, blank=True)
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.indicator_type
class IndicatorTypeAdmin(admin.ModelAdmin):
list_display = ('indicator_type', 'description',
'create_date', 'edit_date')
display = 'Indicator Type'
class StrategicObjective(models.Model):
name = models.CharField(max_length=135, blank=True)
country = models.ForeignKey(
Country, null=True, blank=True, on_delete=models.SET_NULL)
description = models.TextField(max_length=765, blank=True)
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
class Meta:
ordering = ('country', 'name')
def __str__(self):
return self.name
def save(self):
if self.create_date is None:
self.create_date = datetime.now()
super(StrategicObjective, self).save()
class StrategicObjectiveAdmin(admin.ModelAdmin):
list_display = ('country', 'name')
search_fields = ('country__country', 'name')
list_filter = ('country__country',)
display = 'Strategic Objectives'
class Objective(models.Model):
name = models.CharField(max_length=135, blank=True)
program = models.ForeignKey(
Program, null=True, blank=True, on_delete=models.SET_NULL)
description = models.TextField(max_length=765, blank=True)
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
class Meta:
ordering = ('program', 'name')
def __str__(self):
return self.name
def save(self):
if self.create_date is None:
self.create_date = datetime.now()
super(Objective, self).save()
class ObjectiveAdmin(admin.ModelAdmin):
list_display = ('program', 'name')
search_fields = ('name', 'program__name')
list_filter = ('program__country__country',)
display = 'Objectives'
class Level(models.Model):
name = models.CharField(max_length=135, blank=True)
description = models.TextField(max_length=765, blank=True)
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.name
def save(self):
if self.create_date is None:
self.create_date = datetime.now()
super(Level, self).save()
class LevelAdmin(admin.ModelAdmin):
list_display = ('name',)
display = 'Levels'
class DisaggregationType(models.Model):
disaggregation_type = models.CharField(max_length=135, blank=True)
description = models.CharField(max_length=765, blank=True)
country = models.ForeignKey(
Country, null=True, blank=True, on_delete=models.SET_NULL)
standard = models.BooleanField(
default=False, verbose_name="Standard (Activity Admins Only)")
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.disaggregation_type
class DisaggregationTypeAdmin(admin.ModelAdmin):
list_display = ('disaggregation_type', 'country',
'standard', 'description')
list_filter = ('country', 'standard', 'disaggregation_type')
display = 'Disaggregation Type'
class DisaggregationLabel(models.Model):
disaggregation_type = models.ForeignKey(
DisaggregationType, on_delete=models.CASCADE)
label = models.CharField(max_length=765, blank=True)
customsort = models.IntegerField(blank=True, null=True)
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.label
class DisaggregationLabelAdmin(admin.ModelAdmin):
list_display = ('disaggregation_type', 'customsort', 'label',)
display = 'Disaggregation Label'
list_filter = ('disaggregation_type__disaggregation_type',)
class DisaggregationValue(models.Model):
disaggregation_label = models.ForeignKey(
DisaggregationLabel, on_delete=models.CASCADE)
value = models.CharField(max_length=765, blank=True)
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.value
class DisaggregationValueAdmin(admin.ModelAdmin):
list_display = ('disaggregation_label', 'value',
'create_date', 'edit_date')
list_filter = (
'disaggregation_label__disaggregation_type__disaggregation_type', 'disaggregation_label')
display = 'Disaggregation Value'
class ReportingFrequency(models.Model):
frequency = models.CharField(max_length=135, blank=True)
description = models.CharField(max_length=765, blank=True)
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.frequency
class DataCollectionFrequency(models.Model):
frequency = models.CharField(max_length=135, blank=True, null=True)
description = models.CharField(max_length=255, blank=True, null=True)
numdays = models.PositiveIntegerField(
default=0, verbose_name="Frequency in number of days")
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.frequency
class DataCollectionFrequencyAdmin(admin.ModelAdmin):
list_display = ('frequency', 'description', 'create_date', 'edit_date')
display = 'Data Collection Frequency'
class ReportingPeriod(models.Model):
frequency = models.ForeignKey(
ReportingFrequency, null=True, on_delete=models.SET_NULL)
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.frequency
class ReportingPeriodAdmin(admin.ModelAdmin):
list_display = ('frequency', 'create_date', 'edit_date')
display = 'Reporting Frequency'
class ExternalService(models.Model):
name = models.CharField(max_length=255, blank=True)
url = models.CharField(max_length=765, blank=True)
feed_url = models.CharField(max_length=765, blank=True)
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.name
class ExternalServiceAdmin(admin.ModelAdmin):
list_display = ('name', 'url', 'feed_url', 'create_date', 'edit_date')
display = 'External Indicator Data Service'
class ExternalServiceRecord(models.Model):
external_service = models.ForeignKey(
ExternalService, blank=True, null=True, on_delete=models.SET_NULL)
full_url = models.CharField(max_length=765, blank=True)
record_id = models.CharField("Unique ID", max_length=765, blank=True)
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
def __str__(self):
return self.full_url
class ExternalServiceRecordAdmin(admin.ModelAdmin):
list_display = ('external_service', 'full_url',
'record_id', 'create_date', 'edit_date')
display = 'External Indicator Data Service'
class IndicatorManager(models.Manager):
def get_queryset(self):
return super(IndicatorManager, self).get_queryset().prefetch_related('program').select_related('sector')
class Indicator(models.Model):
LOP = 1
MID_END = 2
ANNUAL = 3
SEMI_ANNUAL = 4
TRI_ANNUAL = 5
QUARTERLY = 6
MONTHLY = 7
EVENT = 8
TARGET_FREQUENCIES = (
(LOP, 'Life of Program (LoP) only'),
(MID_END, 'Midline and endline'),
(ANNUAL, 'Annual'),
(SEMI_ANNUAL, 'Semi-annual'),
(TRI_ANNUAL, 'Tri-annual'),
(QUARTERLY, 'Quarterly'),
(MONTHLY, 'Monthly'),
(EVENT, 'Event')
)
indicator_key = models.UUIDField(
default=uuid.uuid4, unique=True, help_text=" ")
indicator_type = models.ManyToManyField(
IndicatorType, blank=True, help_text=" ")
level = models.ManyToManyField(Level, blank=True, help_text=" ")
objectives = models.ManyToManyField(
Objective, blank=True, verbose_name="Program Objective", related_name="obj_indicator", help_text=" ")
strategic_objectives = models.ManyToManyField(
StrategicObjective, verbose_name="Country Strategic Objective", blank=True,
related_name="strat_indicator", help_text=" ")
name = models.CharField(verbose_name="Name",
max_length=255, null=False, help_text=" ")
number = models.CharField(
max_length=255, null=True, blank=True, help_text=" ")
source = models.CharField(
max_length=255, null=True, blank=True, help_text=" ")
definition = models.TextField(null=True, blank=True, help_text=" ")
justification = models.TextField(max_length=500, null=True, blank=True,
verbose_name="Rationale or Justification for Indicator", help_text=" ")
unit_of_measure = models.CharField(
max_length=135, null=True, blank=True, verbose_name="Unit of measure*", help_text=" ")
disaggregation = models.ManyToManyField(
DisaggregationType, blank=True, help_text=" ")
baseline = models.CharField(
verbose_name="Baseline*", max_length=255, null=True, blank=True, help_text=" ")
baseline_na = models.BooleanField(
verbose_name="Not applicable", default=False, help_text=" ")
lop_target = models.CharField(verbose_name="Life of Program (LoP) target*",
max_length=255, null=True, blank=True, help_text=" ")
rationale_for_target = models.TextField(
max_length=255, null=True, blank=True, help_text=" ")
target_frequency = models.IntegerField(
blank=False, null=True, choices=TARGET_FREQUENCIES, verbose_name="Target frequency", help_text=" ")
target_frequency_custom = models.CharField(
null=True, blank=True, max_length=100, verbose_name="First event name*", help_text=" ")
target_frequency_start = models.DateField(
blank=True, null=True, auto_now=False, auto_now_add=False,
verbose_name="First target period begins*", help_text=" ")
target_frequency_num_periods = models.IntegerField(
blank=True, null=True, verbose_name="Number of target periods*", help_text=" ")
means_of_verification = models.CharField(
max_length=255, null=True, blank=True, verbose_name="Means of Verification / Data Source", help_text=" ")
data_collection_method = models.CharField(
max_length=255, null=True, blank=True, verbose_name="Data Collection Method", help_text=" ")
data_collection_frequency = models.ForeignKey(
DataCollectionFrequency, null=True, blank=True, verbose_name="Frequency of Data Collection",
help_text=" ", on_delete=models.SET_NULL)
data_points = models.TextField(
max_length=500, null=True, blank=True, verbose_name="Data Points", help_text=" ")
responsible_person = models.CharField(
max_length=255, null=True, blank=True, verbose_name="Responsible Person(s) and Team", help_text=" ")
method_of_analysis = models.CharField(
max_length=255, null=True, blank=True, verbose_name="Method of Analysis", help_text=" ")
information_use = models.CharField(
max_length=255, null=True, blank=True, verbose_name="Information Use", help_text=" ")
reporting_frequency = models.ForeignKey(ReportingFrequency, null=True, blank=True,
verbose_name="Frequency of Reporting",
help_text=" ", on_delete=models.SET_NULL)
quality_assurance = models.TextField(
max_length=500, null=True, blank=True, verbose_name="Quality Assurance Measures", help_text=" ")
data_issues = models.TextField(
max_length=500, null=True, blank=True, verbose_name="Data Issues", help_text=" ")
indicator_changes = models.TextField(
max_length=500, null=True, blank=True, verbose_name="Changes to Indicator", help_text=" ")
comments = models.TextField(
max_length=255, null=True, blank=True, help_text=" ")
program = models.ManyToManyField(Program, help_text=" ")
sector = models.ForeignKey(
Sector, null=True, blank=True, help_text=" ", on_delete=models.SET_NULL)
key_performance_indicator = models.BooleanField(
"Key Performance Indicator for this program?", default=False, help_text=" ")
approved_by = models.ForeignKey(ActivityUser, blank=True, null=True,
related_name="approving_indicator", help_text=" ", on_delete=models.SET_NULL)
approval_submitted_by = models.ForeignKey(
ActivityUser, blank=True, null=True, related_name="indicator_submitted_by",
help_text=" ", on_delete=models.SET_NULL)
external_service_record = models.ForeignKey(
ExternalServiceRecord, verbose_name="External Service ID", blank=True, null=True,
help_text=" ", on_delete=models.SET_NULL)
create_date = models.DateTimeField(null=True, blank=True, help_text=" ")
edit_date = models.DateTimeField(null=True, blank=True, help_text=" ")
history = HistoricalRecords()
notes = models.TextField(max_length=500, null=True, blank=True)
# optimize query for class based views etc.
objects = IndicatorManager()
class Meta:
ordering = ('create_date',)
def save(self, *args, **kwargs):
# onsave add create date or update edit date
if self.create_date is None:
self.create_date = datetime.now()
self.edit_date = datetime.now()
super(Indicator, self).save(*args, **kwargs)
@property
def is_target_frequency_time_aware(self):
return self.target_frequency in (self.ANNUAL, self.SEMI_ANNUAL, self.TRI_ANNUAL, self.QUARTERLY, self.MONTHLY)
@property
def just_created(self):
if self.create_date >= timezone.now() - timedelta(minutes=5):
return True
return False
@property
def name_clean(self):
return self.name.encode('ascii', 'ignore')
@property
def objectives_list(self):
return ', '.join([x.name for x in self.objectives.all()])
@property
def strategicobjectives_list(self):
return ', '.join([x.name for x in self.strategic_objectives.all()])
@property
def programs(self):
return ', '.join([x.name for x in self.program.all()])
@property
def indicator_types(self):
return ', '.join([x.indicator_type for x in self.indicator_type.all()])
@property
def levels(self):
return ', '.join([x.name for x in self.level.all()])
@property
def disaggregations(self):
return ', '.join([x.disaggregation_type for x in self.disaggregation.all()])
@property
def get_target_frequency_label(self):
if self.target_frequency:
return Indicator.TARGET_FREQUENCIES[self.target_frequency-1][1]
return None
def __str__(self):
return self.name
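# --- Hedged usage sketch (illustration only, not part of the original file) ---
# Reading the display label and time-awareness of an indicator's target
# frequency; `indicator` is assumed to be a saved Indicator instance.
#
#     indicator.target_frequency = Indicator.QUARTERLY
#     indicator.get_target_frequency_label        # -> 'Quarterly'
#     indicator.is_target_frequency_time_aware    # -> True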
class PeriodicTarget(models.Model):
indicator = models.ForeignKey(
Indicator, null=False, blank=False, on_delete=models.CASCADE)
period = models.CharField(max_length=255, null=True, blank=True)
target = models.DecimalField(
max_digits=20, decimal_places=2, default=Decimal('0.00'))
start_date = models.DateField(
auto_now=False, auto_now_add=False, null=True, blank=True)
end_date = models.DateField(
auto_now=False, auto_now_add=False, null=True, blank=True)
customsort = models.IntegerField(blank=True, null=True)
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)
def __str__(self):
if self.indicator.target_frequency == Indicator.LOP \
or self.indicator.target_frequency == Indicator.EVENT \
or self.indicator.target_frequency == Indicator.MID_END:
return self.period
if self.start_date and self.end_date:
return "%s (%s - %s)" % (self.period, self.start_date.strftime('%b %d, %Y'),
self.end_date.strftime('%b %d, %Y'))
return self.period
class Meta:
ordering = ('customsort', '-create_date')
@property
def start_date_formatted(self):
if self.start_date:
return self.start_date.strftime('%b %d, %Y').replace(" 0", " ")
return self.start_date
@property
def end_date_formatted(self):
if self.end_date:
return self.end_date.strftime('%b %d, %Y').replace(" 0", " ")
return self.end_date
class PeriodicTargetAdmin(admin.ModelAdmin):
list_display = ('period', 'target', 'customsort',)
display = 'Indicator Periodic Target'
list_filter = ('period',)
class CollectedDataManager(models.Manager):
def get_queryset(self):
return super(CollectedDataManager, self).get_queryset().prefetch_related('site', 'disaggregation_value')\
.select_related('program', 'indicator', 'agreement', 'complete', 'evidence', 'activity_table')
class CollectedData(models.Model):
    data_key = models.UUIDField(
        default=uuid.uuid4, unique=True, help_text=" ")
periodic_target = models.ForeignKey(
PeriodicTarget, null=True, blank=True, help_text=" ", on_delete=models.SET_NULL)
# targeted = models.DecimalField("Targeted", max_digits=20, decimal_places=2, default=Decimal('0.00'))
achieved = models.DecimalField(
"Achieved", max_digits=20, decimal_places=2, help_text=" ")
disaggregation_value = models.ManyToManyField(
DisaggregationValue, blank=True, help_text=" ")
description = models.TextField(
"Remarks/comments", blank=True, null=True, help_text=" ")
indicator = models.ForeignKey(
Indicator, help_text=" ", null=True, on_delete=models.SET_NULL)
agreement = models.ForeignKey(ProjectAgreement, blank=True, null=True, related_name="q_agreement2",
verbose_name="Project Initiation", help_text=" ", on_delete=models.SET_NULL)
complete = models.ForeignKey(ProjectComplete, blank=True, null=True,
related_name="q_complete2", on_delete=models.SET_NULL, help_text=" ")
program = models.ForeignKey(Program, blank=True, null=True,
related_name="i_program", help_text=" ", on_delete=models.SET_NULL)
date_collected = models.DateTimeField(null=True, blank=True, help_text=" ")
comment = models.TextField(
"Comment/Explanation", max_length=255, blank=True, null=True, help_text=" ")
evidence = models.ForeignKey(Documentation, null=True, blank=True,
verbose_name="Evidence Document or Link", help_text=" ", on_delete=models.SET_NULL)
approved_by = models.ForeignKey(ActivityUser, blank=True, null=True, verbose_name="Originated By",
related_name="approving_data", help_text=" ", on_delete=models.SET_NULL)
activity_table = models.ForeignKey(
ActivityTable, blank=True, null=True, help_text=" ", on_delete=models.SET_NULL)
update_count_activity_table = models.BooleanField(
"Would you like to update the achieved total with the row count from activitytables?", default=False, help_text=" ")
create_date = models.DateTimeField(null=True, blank=True, help_text=" ")
edit_date = models.DateTimeField(null=True, blank=True, help_text=" ")
site = models.ManyToManyField(SiteProfile, blank=True, help_text=" ")
history = HistoricalRecords()
objects = CollectedDataManager()
class Meta:
ordering = ('agreement', 'indicator', 'date_collected', 'create_date')
verbose_name_plural = "Indicator Output/Outcome Collected Data"
# onsave add create date or update edit date
def save(self, *args, **kwargs):
if self.create_date is None:
self.create_date = datetime.now()
self.edit_date = datetime.utcnow()
        super(CollectedData, self).save(*args, **kwargs)
# displayed in admin templates
def __str__(self):
return self.description
    def achieved_sum(self):
        # Aggregate the achieved values of all collected data rows recorded
        # against this record's indicator (the original expression was not a
        # valid queryset call).
        from django.db.models import Sum
        achieved = CollectedData.objects.filter(
            indicator=self.indicator).aggregate(total=Sum('achieved'))['total']
        return achieved
@property
def date_collected_formatted(self):
if self.date_collected:
return self.date_collected.strftime('%b %d, %Y').replace(" 0", " ")
return self.date_collected
@property
def disaggregations(self):
return ', '.join([y.disaggregation_label.label + ': ' + y.value for y in self.disaggregation_value.all()])
class CollectedDataAdmin(admin.ModelAdmin):
list_display = ('indicator', 'date_collected', 'create_date', 'edit_date')
list_filter = ['indicator__program__country__country']
display = 'Indicator Output/Outcome Collected Data'
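# --- Hedged usage sketch (illustration only, not part of the original file) ---
# Aggregating achieved totals per indicator for a single program with the
# standard Django ORM; `program` is assumed to be a Program instance loaded
# elsewhere by the caller.
def achieved_totals_for_program(program):
    """Return a {indicator name: summed achieved value} mapping for a program."""
    from django.db.models import Sum
    rows = (CollectedData.objects.filter(program=program)
            .values('indicator__name')
            .annotate(total=Sum('achieved')))
    return {row['indicator__name']: row['total'] for row in rows}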
| 40.610018
| 124
| 0.687238
|
abf92b8969204a5aa3d4a13bfb23b5c6157cf7ae
| 6,465
|
py
|
Python
|
docs/tutorials/project_allocation/data.py
|
samf1986/matching
|
c1ed91127ef73e22702c66d88a4c53464625b7a8
|
[
"MIT"
] | 94
|
2018-09-11T17:46:41.000Z
|
2022-03-23T09:35:22.000Z
|
docs/tutorials/project_allocation/data.py
|
samf1986/matching
|
c1ed91127ef73e22702c66d88a4c53464625b7a8
|
[
"MIT"
] | 54
|
2018-08-31T21:05:22.000Z
|
2021-09-26T10:26:13.000Z
|
docs/tutorials/project_allocation/data.py
|
samf1986/matching
|
c1ed91127ef73e22702c66d88a4c53464625b7a8
|
[
"MIT"
] | 36
|
2018-11-09T22:49:31.000Z
|
2022-01-31T10:09:27.000Z
|
""" A script to generate the dummy datasets used in `main.ipynb`. """
import string
import sys
import numpy as np
import pandas as pd
MAX_STUDENTS = 100
MAX_STUDENT_CHOICES = 25
MAX_SUPERVISOR_PROJECTS = 4
MAX_CAPACITY = 8
SEED = 2019
if len(sys.argv) == 6:
MAX_STUDENTS = int(sys.argv[1])
MAX_STUDENT_CHOICES = int(sys.argv[2])
MAX_SUPERVISOR_PROJECTS = int(sys.argv[3])
MAX_CAPACITY = int(sys.argv[4])
SEED = int(sys.argv[5])
student_names = [f"19{i:04d}" for i in range(MAX_STUDENTS)]
supervisor_names = list(string.ascii_uppercase)
def create_supervisor_to_projects_map():
"""Create a dictionary mapping supervisor names to their projects.
To do this, first sample the number of projects that each supervisor will
have from the discretised triangular distribution with mode
``.75 * MAX_SUPERVISOR_PROJECTS``."""
mode = MAX_SUPERVISOR_PROJECTS * 0.75
supervisor_project_numbers = (
np.random.triangular(
left=1,
mode=mode,
right=MAX_SUPERVISOR_PROJECTS,
size=len(supervisor_names),
)
.round()
.astype(int)
)
supervisor_to_projects = {}
for name, number_of_projects in zip(
supervisor_names, supervisor_project_numbers
):
supervisor_to_projects[name] = [
name + str(i) for i in range(number_of_projects)
]
return supervisor_to_projects
def create_player_to_capacity_maps(supervisor_to_projects):
"""Create dictionaries mapping supervisor names and project codes to their
respective capacities."""
supervisor_to_capacity, project_to_capacity = {}, {}
for supervisor, projects in supervisor_to_projects.items():
supervisor_capacity = np.random.randint(1, MAX_CAPACITY + 1)
supervisor_to_capacity[supervisor] = supervisor_capacity
for project in projects:
project_to_capacity[project] = np.random.randint(
1, supervisor_capacity + 2
)
return supervisor_to_capacity, project_to_capacity
def get_all_projects(supervisor_to_projects):
"""Get all of the project codes available using the supervisor to projects
map."""
return (
project
for supervisor_projects in supervisor_to_projects.values()
for project in supervisor_projects
)
def create_student_to_choices_map(projects):
"""Create a dictionary mapping student names to their choices of the
available projects. To do so, first sample the number of choices each
student makes from the discretised right-triangular distribution with
a maximum of ``MAX_STUDENT_CHOICES``."""
students_number_of_choices = (
np.random.triangular(
left=0,
mode=MAX_STUDENT_CHOICES,
right=MAX_STUDENT_CHOICES,
size=len(student_names),
)
.round()
.astype(int)
)
student_to_choices = {}
for name, number_of_choices in zip(
student_names, students_number_of_choices
):
student_choices = np.random.choice(projects, number_of_choices).tolist()
student_to_choices[name] = student_choices
return student_to_choices
def create_student_dataframe(student_to_choices):
"""Create a dataframe detailing the students' choices and assign them each
a rank."""
choice_columns = list(range(MAX_STUDENT_CHOICES))
df_students = pd.DataFrame(columns=["name"] + choice_columns)
df_students["name"] = student_to_choices.keys()
for i, student_choices in enumerate(student_to_choices.values()):
df_students.iloc[i, 1 : len(student_choices) + 1] = student_choices
student_ranks = list(df_students.index)
np.random.shuffle(student_ranks)
df_students["rank"] = student_ranks
df_students = df_students[["name", "rank"] + choice_columns]
idxs = df_students[df_students["rank"] > 50].sample(3).index
df_students.iloc[idxs, 2:] = np.nan
return df_students
def create_supervisor_dataframe(supervisor_to_capacity):
""" Create a dataframe detailing the supervisors' capacities. """
df_supervisors = pd.DataFrame.from_dict(
supervisor_to_capacity, orient="index", columns=["capacity"]
)
df_supervisors = df_supervisors.reset_index()
df_supervisors.columns = ["name", "capacity"]
return df_supervisors
def create_project_dataframe(project_to_capacity, supervisor_to_projects):
"""Create a dataframe detailing the projects' capacities and supervisor."""
df_project_capacities = pd.DataFrame.from_dict(
project_to_capacity, orient="index", columns=["capacity"]
)
project_to_supervisor = {
p: s for s, projects in supervisor_to_projects.items() for p in projects
}
df_project_supervisors = pd.DataFrame.from_dict(
project_to_supervisor, orient="index", columns=["supervisor"]
)
df_projects = pd.concat(
(df_project_capacities, df_project_supervisors), axis=1, sort=True
).reset_index()
df_projects.columns = ["code", "capacity", "supervisor"]
return df_projects
def save_dataframes(student_dataframe, supervisor_dataframe, project_dataframe):
""" Save the player dataframes. """
for df, name in zip(
(student_dataframe, supervisor_dataframe, project_dataframe),
("students", "supervisors", "projects"),
):
df.to_csv(f"{name}.csv", index=False)
def main():
"""Create the required maps to form the player dataframes, and then save
them."""
np.random.seed(SEED)
print("Seed set:", SEED)
supervisor_to_projects = create_supervisor_to_projects_map()
(
supervisor_to_capacity,
project_to_capacity,
) = create_player_to_capacity_maps(supervisor_to_projects)
print("Supervisor and project dictionaries created...")
all_projects = list(get_all_projects(supervisor_to_projects))
redacted_projects = [p for p in all_projects if p != "L1"]
student_to_choices = create_student_to_choices_map(redacted_projects)
print("Student choices assigned...")
df_students = create_student_dataframe(student_to_choices)
df_supervisors = create_supervisor_dataframe(supervisor_to_capacity)
df_projects = create_project_dataframe(
project_to_capacity, supervisor_to_projects
)
save_dataframes(df_students, df_supervisors, df_projects)
print("Dataframes saved.")
if __name__ == "__main__":
main()
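# --- Hedged usage sketch (illustration only, not part of the original script) --
# After running this module (e.g. `python data.py 100 25 4 8 2019`), the three
# CSV files written by save_dataframes() above can be reloaded for inspection.
def load_generated_data():
    """Reload the generated player files as dataframes."""
    students = pd.read_csv("students.csv")
    supervisors = pd.read_csv("supervisors.csv")
    projects = pd.read_csv("projects.csv")
    return students, supervisors, projects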
| 30.21028
| 80
| 0.702243
|
e06a8a1d82bd6d1168e6d681ccd80483453dce69
| 2,946
|
py
|
Python
|
Grid/GridProcessing.py
|
kensukenk/optimized_dp
|
4771787366ca04139c168c8988dad378ad404ab6
|
[
"MIT"
] | 41
|
2020-06-23T01:58:03.000Z
|
2022-03-28T01:45:12.000Z
|
Grid/GridProcessing.py
|
kensukenk/optimized_dp
|
4771787366ca04139c168c8988dad378ad404ab6
|
[
"MIT"
] | 1
|
2021-08-01T06:58:57.000Z
|
2021-08-01T06:58:57.000Z
|
Grid/GridProcessing.py
|
kensukenk/optimized_dp
|
4771787366ca04139c168c8988dad378ad404ab6
|
[
"MIT"
] | 20
|
2020-06-05T20:52:02.000Z
|
2022-03-01T03:17:39.000Z
|
import numpy as np
import math
class Grid:
def __init__(self, minBounds, maxBounds, dims, pts_each_dim, periodicDims=[]):
"""
Args:
minBounds (list): The lower bounds of each dimension in the grid
maxBounds (list): The upper bounds of each dimension in the grid
            dims (int): The dimension of the grid
            pts_each_dim (list): The number of points for each dimension in the grid
            periodicDims (list, optional): A list of periodic dimensions (0-indexed). Defaults to [].
"""
self.max = maxBounds
self.min = minBounds
self.dims = len(pts_each_dim)
self.pts_each_dim = pts_each_dim
self.pDim = periodicDims
        # The upper bound of each periodic dimension is not included,
# e.g. [-pi, pi)
for dim in self.pDim:
self.max[dim] = self.min[dim] + \
(self.max[dim] - self.min[dim]) * \
(1 - 1/self.pts_each_dim[dim])
self.dx = (self.max - self.min) / (self.pts_each_dim - 1.0)
"""
Below is re-shaping the self.vs so that we can make use of broadcasting
self.vs[i] is reshape into (1,1, ... , pts_each_dim[i], ..., 1) such that pts_each_dim[i] is used in ith position
"""
self.vs = []
self.grid_points = []
for i in range(dims):
tmp = np.linspace(self.min[i], self.max[i],
num=self.pts_each_dim[i])
broadcast_map = np.ones(self.dims, dtype=int)
broadcast_map[i] = self.pts_each_dim[i]
self.grid_points.append(tmp)
# in order to add our range of points to our grid
# we need to modify the shape of tmp in order to match
# the size of the grid for one of the axis
tmp = np.reshape(tmp, tuple(broadcast_map))
self.vs.append(tmp)
def get_index(self, state):
""" Returns a tuple of the closest index of each state in the grid
Args:
state (tuple): state of dynamic object
"""
index = []
for i, s in enumerate(state):
idx = np.searchsorted(self.grid_points[i], s)
if idx > 0 and (
idx == len(self.grid_points[i])
or math.fabs(s - self.grid_points[i][idx - 1])
< math.fabs(s - self.grid_points[i][idx])
):
index.append(idx - 1)
else:
index.append(idx)
return tuple(index)
def get_value(self, V, state):
"""Obtain the approximate value of a state
Assumes that the state is within the bounds of the grid
Args:
V (np.array): value function of solved HJ PDE
state (tuple): state of dynamic object
Returns:
[float]: V(state)
"""
index = self.get_index(state)
return V[index]
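# --- Hedged usage sketch (illustration only, not part of the original module) --
# Building a small 2-D grid over [-1, 1] x [-pi, pi) with the second dimension
# periodic, then snapping a state to its nearest grid index.  `V` is a dummy
# zero array standing in for a solved value function.
if __name__ == "__main__":
    g = Grid(np.array([-1.0, -math.pi]), np.array([1.0, math.pi]),
             2, np.array([41, 40]), periodicDims=[1])
    V = np.zeros((41, 40))            # placeholder value function
    state = (0.13, 2.5)
    print(g.get_index(state), g.get_value(V, state))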
| 35.071429
| 121
| 0.549559
|
6a01019bb0185d0d7524389fa49677afc7b7e5ff
| 516
|
py
|
Python
|
data/train/python/6a01019bb0185d0d7524389fa49677afc7b7e5ffserver.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/6a01019bb0185d0d7524389fa49677afc7b7e5ffserver.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/6a01019bb0185d0d7524389fa49677afc7b7e5ffserver.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
from flask import Flask
from flask_restful import Api
from api import IndexApi, RoadsApi, CitiesApi, CitiesIdApi, CountriesApi
app = Flask(__name__)
app.config['DEBUG'] = True
api = Api(app)
api.add_resource(IndexApi, '/', '/index')
api.add_resource(RoadsApi, '/', '/api/roads')
api.add_resource(CitiesApi, '/', '/api/cities')
api.add_resource(CitiesIdApi, '/', '/api/cities/<id>')
api.add_resource(CountriesApi, '/', '/api/countries')
if __name__ == "__main__":
print "App is running!"
app.run()
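# --- Hedged sketch (illustration only, hypothetical resource) -----------------
# The real IndexApi/RoadsApi/... classes live in the separate `api` module that
# is not shown here; a minimal flask_restful resource of the same shape could
# look like this (an assumption, not the project's actual code):
#
#     from flask_restful import Resource
#
#     class ExampleIndexApi(Resource):
#         def get(self):
#             # Small JSON payload listing the registered endpoints.
#             return {"status": "ok",
#                     "endpoints": ["/api/roads", "/api/cities", "/api/countries"]}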
| 23.454545
| 72
| 0.699612
|
4a8fbf09b2b638c9036ddad104d018ccd18568d8
| 17,071
|
py
|
Python
|
src/core/toga/app.py
|
simonw/toga
|
8b52479c5d9960c5f3af960b5837ecc467c0bc95
|
[
"BSD-3-Clause"
] | 3
|
2020-12-09T02:13:55.000Z
|
2021-02-18T00:41:36.000Z
|
src/core/toga/app.py
|
simonw/toga
|
8b52479c5d9960c5f3af960b5837ecc467c0bc95
|
[
"BSD-3-Clause"
] | 1
|
2021-05-23T04:04:58.000Z
|
2021-05-25T22:08:14.000Z
|
src/core/toga/app.py
|
simonw/toga
|
8b52479c5d9960c5f3af960b5837ecc467c0bc95
|
[
"BSD-3-Clause"
] | null | null | null |
import signal
import sys
import warnings
import webbrowser
from builtins import id as identifier
from email.message import Message
from toga.command import CommandSet
from toga.handlers import wrapped_handler
from toga.icons import Icon
from toga.platform import get_platform_factory
from toga.window import Window
try:
from importlib import metadata as importlib_metadata
except ImportError:
    # Backwards compatibility - importlib.metadata was added in Python 3.8
import importlib_metadata
# Make sure deprecation warnings are shown by default
warnings.filterwarnings("default", category=DeprecationWarning)
class MainWindow(Window):
_WINDOW_CLASS = 'MainWindow'
def __init__(self, id=None, title=None, position=(100, 100), size=(640, 480),
toolbar=None, resizeable=True, minimizable=True,
factory=None):
super().__init__(
id=id, title=title, position=position, size=size, toolbar=toolbar,
resizeable=resizeable, closeable=True, minimizable=minimizable,
factory=factory
)
class App:
"""
The App is the top level of any GUI program. It is the manager of all the
other bits of the GUI app: the main window and events that window generates
like user input.
When you create an App you need to provide it a name, an id for uniqueness
    (by convention, the identifier is a reversed domain name) and an
optional startup function which should run once the App has initialised.
The startup function typically constructs some initial user interface.
If the name and app_id are *not* provided, the application will attempt
to find application metadata. This process will determine the module in
which the App class is defined, and look for a ``.dist-info`` file
matching that name.
Once the app is created you should invoke the main_loop() method, which
will hand over execution of your program to Toga to make the App interface
do its thing.
The absolute minimum App would be::
>>> app = toga.App(formal_name='Empty App', app_id='org.beeware.empty')
>>> app.main_loop()
:param formal_name: The formal name of the application. Will be derived from
packaging metadata if not provided.
:param app_id: The unique application identifier. This will usually be a
reversed domain name, e.g. 'org.beeware.myapp'. Will be derived from
packaging metadata if not provided.
:param app_name: The name of the Python module containing the app.
Will be derived from the module defining the instance of the App class
if not provided.
:param id: The DOM identifier for the app (optional)
:param icon: Identifier for the application's icon.
:param author: The person or organization to be credited as the author
of the application. Will be derived from application metadata if not
provided.
:param version: The version number of the app. Will be derived from
packaging metadata if not provided.
:param home_page: A URL for a home page for the app. Used in autogenerated
help menu items. Will be derived from packaging metadata if not
provided.
:param description: A brief (one line) description of the app. Will be
derived from packaging metadata if not provided.
:param startup: The callback method before starting the app, typically to
add the components. Must be a ``callable`` that expects a single
argument of :class:`toga.App`.
    :param factory: A python module that is capable of returning an implementation
of this class with the same name. (optional & normally not needed)
"""
app = None
def __init__(
self,
formal_name=None,
app_id=None,
app_name=None,
id=None,
icon=None,
author=None,
version=None,
home_page=None,
description=None,
startup=None,
on_exit=None,
factory=None,
):
# Keep an accessible copy of the app instance
App.app = self
# We need a module name to load app metadata. If an app_name has been
# provided, we can set the app name now, and derive the module name
# from there.
if app_name:
self._app_name = app_name
else:
# If the code is contained in appname.py, and you start the app
# using `python -m appname`, the main module package will report
# as ''. Set the initial app name as None.
# If the code is contained in appname.py, and you start the app
# using `python appname.py`, the main module will report as None.
# If the code is contained in a folder, and you start the app
# using `python -m appname`, the main module will report as the
# name of the folder.
main_module_pkg = sys.modules['__main__'].__package__
if main_module_pkg == '':
self._app_name = None
else:
self._app_name = main_module_pkg
# During tests, and when running from a prompt, there won't be
# a __main__ module.
# Try deconstructing the app name from the app ID
if self._app_name is None and app_id:
self._app_name = app_id.split('.')[-1]
        # Load the app metadata (if it is available)
# Apps packaged with Briefcase will have this metadata.
try:
self.metadata = importlib_metadata.metadata(self.module_name)
except importlib_metadata.PackageNotFoundError:
self.metadata = Message()
# Now that we have metadata, we can fix the app name (in the case
# where the app name and the module name differ - e.g., an app name
# of ``hello-world`` will have a module name of ``hello_world``).
        # We use the PEP566-compliant key ``Name``, rather than the internally
        # consistent key ``App-Name``.
if self.metadata['Name'] is not None:
self._app_name = self.metadata['Name']
# Whatever app name has been given, speculatively attempt to import
# the app module. Single-file apps won't have an app folder; apps with
# misleading or misconfigured app names haven't given us enough
# metadata to determine the app folder. In those cases, fall back to
        # an app name that *will* exist (``toga``)
try:
sys.modules[self.module_name]
except KeyError:
# Well that didn't work...
self._app_name = 'toga'
# If a name has been provided, use it; otherwise, look to
# the module metadata. However, a name *must* be provided.
if formal_name:
self._formal_name = formal_name
else:
self._formal_name = self.metadata['Formal-Name']
if self._formal_name is None:
raise RuntimeError('Toga application must have a formal name')
# If an app_id has been provided, use it; otherwise, look to
        # the module metadata. However, an app_id *must* be provided
if app_id:
self._app_id = app_id
else:
self._app_id = self.metadata.get('App-ID', None)
if self._app_id is None:
raise RuntimeError('Toga application must have an App ID')
# If an author has been provided, use it; otherwise, look to
# the module metadata.
if author:
self._author = author
else:
self._author = self.metadata.get('Author', None)
# If a version has been provided, use it; otherwise, look to
# the module metadata.
if version:
self._version = version
else:
self._version = self.metadata.get('Version', None)
# If a home_page has been provided, use it; otherwise, look to
# the module metadata.
if home_page:
self._home_page = home_page
else:
self._home_page = self.metadata.get('Home-page', None)
# If a description has been provided, use it; otherwise, look to
# the module metadata.
if description:
self._description = description
else:
self._description = self.metadata.get('Summary', None)
# Set the application DOM ID; create an ID if one hasn't been provided.
self._id = id if id else identifier(self)
# Get a platform factory, and a paths instance from the factory.
self.factory = get_platform_factory(factory)
self.paths = self.factory.paths
# If an icon (or icon name) has been explicitly provided, use it;
# otherwise, the icon will be based on the app name.
if icon:
self.icon = icon
else:
self.icon = 'resources/{app_name}'.format(app_name=self.app_name)
self.commands = CommandSet(factory=self.factory)
self._startup_method = startup
self._main_window = None
self._on_exit = None
self._full_screen_windows = None
self._impl = self._create_impl()
self.on_exit = on_exit
def _create_impl(self):
return self.factory.App(interface=self)
@property
def name(self):
"""
The formal name of the app.
:returns: The formal name of the app, as a ``str``.
"""
return self._formal_name
@property
def formal_name(self):
"""
The formal name of the app.
:returns: The formal name of the app, as a ``str``.
"""
return self._formal_name
@property
def app_name(self):
"""
The machine-readable, PEP508-compliant name of the app.
:returns: The machine-readable app name, as a ``str``.
"""
return self._app_name
@property
def module_name(self):
"""
The module name for the app
:returns: The module name for the app, as a ``str``.
"""
try:
return self._app_name.replace('-', '_')
except AttributeError:
# If the app was created from an interactive prompt,
# there won't be a module name.
return None
@property
def app_id(self):
"""
The identifier for the app.
        This is a reversed domain name, often used for targeting resources,
etc.
:returns: The identifier as a ``str``.
"""
return self._app_id
@property
def author(self):
"""
The author of the app. This may be an organization name
:returns: The author of the app, as a ``str``.
"""
return self._author
@property
def version(self):
"""
The version number of the app.
        :returns: The version number of the app, as a ``str``.
"""
return self._version
@property
def home_page(self):
"""
The URL of a web page for the app.
:returns: The URL of the app's home page, as a ``str``.
"""
return self._home_page
@property
def description(self):
"""
A brief description of the app.
:returns: A brief description of the app, as a ``str``.
"""
return self._description
@property
def id(self):
"""
The DOM identifier for the app.
This id can be used to target CSS directives.
:returns: A DOM identifier for the app.
"""
return self._id
@property
def icon(self):
"""
The Icon for the app.
:returns: A ``toga.Icon`` instance for the app's icon.
"""
return self._icon
@icon.setter
def icon(self, icon_or_name):
if isinstance(icon_or_name, Icon):
self._icon = icon_or_name
else:
self._icon = Icon(icon_or_name)
@property
def main_window(self):
"""
        The main window for the app.
:returns: The main Window of the app.
"""
return self._main_window
@main_window.setter
def main_window(self, window):
self._main_window = window
window.app = self
self._impl.set_main_window(window)
@property
def current_window(self):
"""Return the currently active content window"""
return self._impl.current_window().interface
@property
def is_full_screen(self):
"""Is the app currently in full screen mode?"""
return self._full_screen_windows is not None
def set_full_screen(self, *windows):
"""Make one or more windows full screen.
Full screen is not the same as "maximized"; full screen mode
is when all window borders and other chrome is no longer
visible.
Args:
windows: The list of windows to go full screen,
in order of allocation to screens. If the number of
windows exceeds the number of available displays,
those windows will not be visible. If no windows
are specified, the app will exit full screen mode.
"""
if not windows:
self.exit_full_screen()
else:
self._impl.enter_full_screen(windows)
self._full_screen_windows = windows
def exit_full_screen(self):
"""Exit full screen mode."""
if self.is_full_screen:
self._impl.exit_full_screen(self._full_screen_windows)
self._full_screen_windows = None
def show_cursor(self):
"""Show cursor."""
self._impl.show_cursor()
def hide_cursor(self):
"""Hide cursor from view."""
self._impl.hide_cursor()
def startup(self):
"""Create and show the main window for the application
"""
self.main_window = MainWindow(title=self.formal_name, factory=self.factory)
if self._startup_method:
self.main_window.content = self._startup_method(self)
self.main_window.show()
def about(self):
"""Display the About dialog for the app.
Default implementation shows a platform-appropriate about dialog
using app metadata. Override if you want to display a custom About
dialog.
"""
self._impl.show_about_dialog()
def visit_homepage(self):
"""Open the application's homepage in the default browser.
If the application metadata doesn't define a homepage, this is a no-op.
"""
if self.home_page is not None:
webbrowser.open(self.home_page)
def main_loop(self):
""" Invoke the application to handle user input.
This method typically only returns once the application is exiting.
"""
# Modify signal handlers to make sure Ctrl-C is caught and handled.
signal.signal(signal.SIGINT, signal.SIG_DFL)
self._impl.main_loop()
def exit(self):
""" Quit the application gracefully.
"""
self._impl.exit()
@property
def on_exit(self):
"""The handler to invoke before the application exits.
Returns:
The function ``callable`` that is called on application exit.
"""
return self._on_exit
@on_exit.setter
def on_exit(self, handler):
"""Set the handler to invoke before the app exits.
Args:
handler (:obj:`callable`): The handler to invoke before the app exits.
"""
self._on_exit = wrapped_handler(self, handler)
self._impl.set_on_exit(self._on_exit)
def add_background_task(self, handler):
self._impl.add_background_task(handler)
class DocumentApp(App):
"""
A document-based application.
Definition and arguments are the same as a base App, plus the following:
Args:
document_types (:obj:`list` of :obj:`str`): Document types.
"""
def __init__(
self,
formal_name=None,
app_id=None,
app_name=None,
id=None,
icon=None,
author=None,
version=None,
home_page=None,
description=None,
startup=None,
document_types=None,
on_exit=None,
factory=None,
):
self.document_types = document_types
self._documents = []
super().__init__(
formal_name=formal_name,
app_id=app_id,
app_name=app_name,
id=id,
icon=icon,
author=author,
version=version,
home_page=home_page,
description=description,
startup=startup,
on_exit=on_exit,
factory=factory,
)
def _create_impl(self):
return self.factory.DocumentApp(interface=self)
@property
def documents(self):
"""
Return the list of documents associated with this app.
Returns:
A ``list`` of ``str``.
"""
return self._documents
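# --- Hedged usage sketch (illustration only, not part of toga itself) ---------
# The smallest useful App: pass a startup callable that builds and returns the
# main window content.  `toga.Box` is assumed to be available from the public
# package, mirroring the docstring example above.
def _example_app():
    import toga  # local import to avoid a circular import inside toga.app
    def build(app):
        return toga.Box()  # empty container as placeholder content
    return toga.App(formal_name="Example App",
                    app_id="org.example.exampleapp",
                    startup=build)
# _example_app().main_loop()  # would hand control to the native event loop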
| 31.848881
| 83
| 0.613731
|
689660c370a8344787ca88afe9cd9f98b7020317
| 6,706
|
py
|
Python
|
src/antispoof_processor.py
|
MiVaVo/Antispoof-3d
|
03ee614f4daf85069ce22c80cb6ed4642bdf762e
|
[
"CC0-1.0"
] | 1
|
2020-06-01T13:24:06.000Z
|
2020-06-01T13:24:06.000Z
|
src/antispoof_processor.py
|
MiVaVo/Antispoof-3d
|
03ee614f4daf85069ce22c80cb6ed4642bdf762e
|
[
"CC0-1.0"
] | null | null | null |
src/antispoof_processor.py
|
MiVaVo/Antispoof-3d
|
03ee614f4daf85069ce22c80cb6ed4642bdf762e
|
[
"CC0-1.0"
] | null | null | null |
from __future__ import absolute_import
import os
import re
from datetime import datetime as dt
from pickle import UnpicklingError
import numpy as np
from src import icp
from src.icp import draw_registration_result
from src.process_data import Landmarks3DFinder
from src.process_data import LandmarksFinderDlib
from src.streaming import RSStreaming
from src.utils import save_ds, absoluteFilePaths, get_image_depth, classify, timing
class ProcessAntiSpoof():
    def __init__(self, mode, source_coords, path_to_folder=None, temporal_smoothing=1):
self.path_to_folder = path_to_folder
self.dlib_lands = LandmarksFinderDlib()
self.source_coords = source_coords
self.mode = mode
self.temporal_smoothing=temporal_smoothing
if self.mode in ['data_processing_to_features', 'prediction_from_folder', 'prediction_from_image_and_depth']:
self.landmarks_3d = Landmarks3DFinder(rs_streaming=None)
if path_to_folder is None and self.mode != 'prediction_from_image_and_depth':
raise ValueError("path_to_folder")
else:
self.path_gen = absoluteFilePaths(self.path_to_folder)
if self.mode in ['data_processing_to_features']:
self.features = []
elif self.mode in ['prediction_from_camera', 'visualize', 'data_collection_from_camera']:
self.rs_streaming = RSStreaming(temporal_smoothing=temporal_smoothing)
self.landmarks_3d = Landmarks3DFinder(rs_streaming=self.rs_streaming)
else:
raise NotImplementedError(f"Mode {self.mode} not implemented")
@timing
def get_frameset(self):
if self.mode in ['data_processing_to_features', 'prediction_from_folder']:
try:
image, depth_frame = get_image_depth(next(self.path_gen))
except StopIteration:
return None, None
elif self.mode in ['prediction_from_camera', 'data_collection_from_camera', 'visualize']:
depth_frame, image_frame = self.rs_streaming.get_filtered_frameset()
image = np.asanyarray(image_frame.get_data())
depth_frame = np.asanyarray(depth_frame.get_data())
elif self.mode in ['prediction_from_image_and_depth']:
raise ImportError(f"Mode {self.mode} does not require gettinf frameset as it already should exist")
return image, depth_frame
@timing
def get_dets_shapes(self, image):
dets, shapes = self.dlib_lands.get_landmarks(image)
return dets, shapes
@timing
def get_distances_from_icp(self, shapes, depth_frame, source_raw, draw_icp=False, landmarks_fildering_type='all'):
# print(len(dets))
target_raw = self.landmarks_3d.get_3d_landmarks(depth_frame, shapes)
source_raw, target_raw = self.landmarks_3d.normalize_landmars(source_raw, target_raw)
# print(target_3d_coords)
source, current = Landmarks3DFinder.filter_landmarks(target_raw, source_raw, type=landmarks_fildering_type)
# draw_registration_result(current, source, None)
T, distances, iterations = icp.icp(np.asanyarray(current).T, np.asanyarray(source).T, max_iterations=100,
tolerance=0.00001)
if draw_icp:
draw_registration_result(current, source, T)
return T, distances, iterations
def do_the_work(self, draw_icp=False, **kwargs):
if self.mode in ['prediction_from_image_and_depth']:
image = kwargs['image']
depth = kwargs['depth']
dets, shapes = self.get_dets_shapes(image)
if not shapes:
print("No face found")
return 0, None
T, distances, iterations = self.get_distances_from_icp(shapes, depth, self.source_coords, draw_icp=draw_icp)
classifier = kwargs['classifier']
prob_of_fake = classify(classifier, distances)
return 0, prob_of_fake
if self.mode in ['data_collection_from_camera']:
image, depth = self.get_frameset()
RSStreaming.visualize_img(image)
RSStreaming.visualize_depth(depth)
data_collection_type = kwargs['ds_type']
prob_of_save = kwargs['prob_of_save'] if "prob_of_save" in kwargs.keys() else 0.5
if data_collection_type == 'train':
path_to_save_data = '/old/ds/fake_train'
elif data_collection_type == 'test':
path_to_save_data = '/old/ds/fake_test'
else:
raise NotImplementedError
save_ds(path_to_save_data, image, depth) if np.random.rand() > prob_of_save else None
return 0, None
elif self.mode in ['visualize']:
image, depth = self.get_frameset()
RSStreaming.visualize_img(image)
RSStreaming.visualize_depth(depth)
elif self.mode in ['prediction_from_camera', 'prediction_from_folder']:
image, depth = self.get_frameset()
if image is None:
return 1, None
RSStreaming.visualize_img(image)
RSStreaming.visualize_depth(depth)
dets, shapes = self.get_dets_shapes(image)
if not shapes:
# print("No face found")
return 0, None,dets, shapes,image
T, distances, iterations = self.get_distances_from_icp(shapes, depth, self.source_coords, draw_icp=draw_icp)
classifier = kwargs['classifier']
prob_of_fake = classify(classifier, distances)
return 0, prob_of_fake,dets, shapes,image
elif self.mode in ['data_processing_to_features']:
try:
image, depth = self.get_frameset()
except EOFError:
return 0, None
except UnpicklingError:
return 0, None
csv_ds_folder = "/home/maksym/Documents/proj_3d/ds/csv_ds"
if image is None:
np.savetxt(
f'{os.path.join(csv_ds_folder, self.path_to_folder.split("/")[-1])}_{re.sub("[^0-9]", "", str(dt.now()))}.csv',
np.asanyarray(self.features), delimiter=';')
return 1, None
RSStreaming.visualize_img(image)
RSStreaming.visualize_depth(depth)
dets, shapes = self.get_dets_shapes(image)
if not shapes:
print("No face found")
return 0, None
T, distances, iterations = self.get_distances_from_icp(shapes, depth, self.source_coords, draw_icp=draw_icp)
self.features.append(distances)
return 0, None
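# --- Hedged usage sketch (illustration only, not part of the original module) --
# Running a single prediction from an already captured RGB image and aligned
# depth map.  `source_coords`, `image`, `depth` and `clf` are assumed to be
# provided by the caller (reference 3-D landmarks, camera frames and a fitted
# classifier object understood by classify()).
def predict_single_frame(source_coords, image, depth, clf):
    processor = ProcessAntiSpoof(mode='prediction_from_image_and_depth',
                                 source_coords=source_coords)
    status, prob_of_fake = processor.do_the_work(image=image, depth=depth,
                                                 classifier=clf)
    return prob_of_fake  # None when no face was found in the image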
| 42.987179
| 131
| 0.642708
|
1468fcd35ed13914e4a7818e7928786fe8ee06d6
| 9,450
|
py
|
Python
|
readthedocs/doc_builder/backends/mkdocs.py
|
ktdreyer/readthedocs.org
|
d5e0631e4c1f5da061bdf851a1efe48394fd6b5c
|
[
"MIT"
] | null | null | null |
readthedocs/doc_builder/backends/mkdocs.py
|
ktdreyer/readthedocs.org
|
d5e0631e4c1f5da061bdf851a1efe48394fd6b5c
|
[
"MIT"
] | 4
|
2021-02-08T21:06:18.000Z
|
2021-06-10T23:24:55.000Z
|
readthedocs/doc_builder/backends/mkdocs.py
|
ktdreyer/readthedocs.org
|
d5e0631e4c1f5da061bdf851a1efe48394fd6b5c
|
[
"MIT"
] | 1
|
2018-07-02T19:17:55.000Z
|
2018-07-02T19:17:55.000Z
|
"""
MkDocs_ backend for building docs.
.. _MkDocs: http://www.mkdocs.org/
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import json
import logging
import os
import yaml
from django.conf import settings
from django.template import loader as template_loader
from readthedocs.doc_builder.base import BaseBuilder
from readthedocs.doc_builder.exceptions import BuildEnvironmentError
log = logging.getLogger(__name__)
def get_absolute_media_url():
"""
Get the fully qualified media URL from settings.
Mkdocs needs a full domain because it tries to link to local media files.
"""
media_url = settings.MEDIA_URL
if not media_url.startswith('http'):
domain = getattr(settings, 'PRODUCTION_DOMAIN')
media_url = 'http://{}{}'.format(domain, media_url)
return media_url
class BaseMkdocs(BaseBuilder):
"""Mkdocs builder."""
use_theme = True
# The default theme for mkdocs (outside of RTD) is the 'mkdocs' theme
# For RTD, our default is the 'readthedocs' theme
READTHEDOCS_THEME_NAME = 'readthedocs'
# Overrides for the 'readthedocs' theme that include
# search utilities and version selector
READTHEDOCS_TEMPLATE_OVERRIDE_DIR = (
'%s/readthedocs/templates/mkdocs/readthedocs' % settings.SITE_ROOT
)
def __init__(self, *args, **kwargs):
super(BaseMkdocs, self).__init__(*args, **kwargs)
self.old_artifact_path = os.path.join(
self.version.project.checkout_path(self.version.slug),
self.build_dir)
self.root_path = self.version.project.checkout_path(self.version.slug)
self.yaml_file = self.get_yaml_config()
def get_yaml_config(self):
"""Find the ``mkdocs.yml`` file in the project root."""
# TODO: try to load from the configuration file first.
test_path = os.path.join(
self.project.checkout_path(self.version.slug),
'mkdocs.yml'
)
if os.path.exists(test_path):
return test_path
return None
def load_yaml_config(self):
"""
Load a YAML config.
Raise BuildEnvironmentError if failed due to syntax errors.
"""
try:
return yaml.safe_load(
open(self.yaml_file, 'r')
)
except IOError:
return {
'site_name': self.version.project.name,
}
except yaml.YAMLError as exc:
note = ''
if hasattr(exc, 'problem_mark'):
mark = exc.problem_mark
note = ' (line %d, column %d)' % (mark.line + 1, mark.column + 1)
raise BuildEnvironmentError(
'Your mkdocs.yml could not be loaded, '
'possibly due to a syntax error{note}'.format(note=note)
)
def append_conf(self, **__):
"""Set mkdocs config values."""
if not self.yaml_file:
self.yaml_file = os.path.join(self.root_path, 'mkdocs.yml')
user_config = self.load_yaml_config()
# Handle custom docs dirs
user_docs_dir = user_config.get('docs_dir')
docs_dir = self.docs_dir(docs_dir=user_docs_dir)
self.create_index(extension='md')
user_config['docs_dir'] = docs_dir
# Set mkdocs config values
media_url = get_absolute_media_url()
user_config.setdefault('extra_javascript', []).extend([
'readthedocs-data.js',
'%sstatic/core/js/readthedocs-doc-embed.js' % media_url,
'%sjavascript/readthedocs-analytics.js' % media_url,
])
user_config.setdefault('extra_css', []).extend([
'%scss/badge_only.css' % media_url,
'%scss/readthedocs-doc-embed.css' % media_url,
])
docs_path = os.path.join(self.root_path, docs_dir)
# RTD javascript writing
rtd_data = self.generate_rtd_data(
docs_dir=docs_dir,
mkdocs_config=user_config
)
with open(os.path.join(docs_path, 'readthedocs-data.js'), 'w') as f:
f.write(rtd_data)
# Use Read the Docs' analytics setup rather than mkdocs'
# This supports using RTD's privacy improvements around analytics
user_config['google_analytics'] = None
# If using the readthedocs theme, apply the readthedocs.org overrides
# These use a global readthedocs search
# and customize the version selector.
self.apply_theme_override(user_config)
# Write the modified mkdocs configuration
yaml.safe_dump(
user_config,
open(self.yaml_file, 'w')
)
# Write the mkdocs.yml to the build logs
self.run(
'cat',
os.path.relpath(self.yaml_file, self.root_path),
cwd=self.root_path,
)
def generate_rtd_data(self, docs_dir, mkdocs_config):
"""Generate template properties and render readthedocs-data.js."""
# Use the analytics code from mkdocs.yml
# if it isn't set already by Read the Docs,
analytics_code = self.version.project.analytics_code
if not analytics_code and mkdocs_config.get('google_analytics'):
# http://www.mkdocs.org/user-guide/configuration/#google_analytics
analytics_code = mkdocs_config['google_analytics'][0]
# Will be available in the JavaScript as READTHEDOCS_DATA.
readthedocs_data = {
'project': self.version.project.slug,
'version': self.version.slug,
'language': self.version.project.language,
'programming_language': self.version.project.programming_language,
'page': None,
'theme': self.get_theme_name(mkdocs_config),
'builder': "mkdocs",
'docroot': docs_dir,
'source_suffix': ".md",
'api_host': getattr(settings, 'PUBLIC_API_URL', 'https://readthedocs.org'),
'commit': self.version.project.vcs_repo(self.version.slug).commit,
'global_analytics_code': getattr(settings, 'GLOBAL_ANALYTICS_CODE', 'UA-17997319-1'),
'user_analytics_code': analytics_code,
}
data_json = json.dumps(readthedocs_data, indent=4)
data_ctx = {
'data_json': data_json,
'current_version': readthedocs_data['version'],
'slug': readthedocs_data['project'],
'html_theme': readthedocs_data['theme'],
'pagename': None,
}
tmpl = template_loader.get_template('doc_builder/data.js.tmpl')
return tmpl.render(data_ctx)
def build(self):
checkout_path = self.project.checkout_path(self.version.slug)
build_command = [
'python',
self.python_env.venv_bin(filename='mkdocs'),
self.builder,
'--clean',
'--site-dir', self.build_dir,
'--config-file', self.yaml_file,
]
if self.use_theme:
build_command.extend(['--theme', 'readthedocs'])
cmd_ret = self.run(
*build_command,
cwd=checkout_path,
bin_path=self.python_env.venv_bin()
)
return cmd_ret.successful
def get_theme_name(self, mkdocs_config):
"""
Get the theme configuration in the mkdocs_config
In v0.17.0, the theme configuration switched
from two separate configs (both optional) to a nested directive.
:see: http://www.mkdocs.org/about/release-notes/#theme-customization-1164
:returns: the name of the theme RTD will use
"""
theme_setting = mkdocs_config.get('theme')
if isinstance(theme_setting, dict):
# Full nested theme config (the new configuration)
return theme_setting.get('name') or self.READTHEDOCS_THEME_NAME
if theme_setting:
# A string which is the name of the theme
return theme_setting
theme_dir = mkdocs_config.get('theme_dir')
if theme_dir:
# Use the name of the directory in this project's custom theme directory
return theme_dir.rstrip('/').split('/')[-1]
return self.READTHEDOCS_THEME_NAME
def apply_theme_override(self, mkdocs_config):
"""
Apply theme overrides for the RTD theme (modifies the ``mkdocs_config`` parameter)
In v0.17.0, the theme configuration switched
from two separate configs (both optional) to a nested directive.
How to override the theme depends on whether the new or old configuration
is used.
:see: http://www.mkdocs.org/about/release-notes/#theme-customization-1164
"""
if self.get_theme_name(mkdocs_config) == self.READTHEDOCS_THEME_NAME:
# Overriding the theme is only necessary
# if the 'readthedocs' theme is used.
theme_setting = mkdocs_config.get('theme')
if isinstance(theme_setting, dict):
theme_setting['custom_dir'] = self.READTHEDOCS_TEMPLATE_OVERRIDE_DIR
else:
mkdocs_config['theme_dir'] = self.READTHEDOCS_TEMPLATE_OVERRIDE_DIR
class MkdocsHTML(BaseMkdocs):
type = 'mkdocs'
builder = 'build'
build_dir = '_build/html'
class MkdocsJSON(BaseMkdocs):
type = 'mkdocs_json'
builder = 'json'
build_dir = '_build/json'
use_theme = False
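# --- Hedged illustration (not part of the builders above) ---------------------
# The two mkdocs theme configurations that BaseMkdocs.get_theme_name() accepts;
# the helper below merely restates its resolution order (nested 'theme' name,
# then bare string, then 'theme_dir', then the Read the Docs default) outside
# of the builder class so it can be tried on plain dicts.
def _example_theme_name(mkdocs_config, default='readthedocs'):
    theme_setting = mkdocs_config.get('theme')
    if isinstance(theme_setting, dict):           # mkdocs >= 0.17 nested form
        return theme_setting.get('name') or default
    if theme_setting:                             # old-style plain string
        return theme_setting
    theme_dir = mkdocs_config.get('theme_dir')    # old-style custom directory
    if theme_dir:
        return theme_dir.rstrip('/').split('/')[-1]
    return default

# _example_theme_name({'theme': {'name': 'material'}})    -> 'material'
# _example_theme_name({'theme_dir': 'docs/custom_theme'}) -> 'custom_theme'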
| 35.393258
| 97
| 0.624339
|
97decb861385f63bf14d50f2bff6f2e0bb784520
| 758
|
py
|
Python
|
test/util/bombz/mockserver/mockserver/wsgiapp.py
|
bischjer/auxiliary
|
e42d8a4af43c9bd4d816c03edc2465640635b46b
|
[
"BSD-3-Clause"
] | null | null | null |
test/util/bombz/mockserver/mockserver/wsgiapp.py
|
bischjer/auxiliary
|
e42d8a4af43c9bd4d816c03edc2465640635b46b
|
[
"BSD-3-Clause"
] | null | null | null |
test/util/bombz/mockserver/mockserver/wsgiapp.py
|
bischjer/auxiliary
|
e42d8a4af43c9bd4d816c03edc2465640635b46b
|
[
"BSD-3-Clause"
] | null | null | null |
from paste.deploy.config import ConfigMiddleware
import sampleapp
def make_app(
global_conf,
# Optional and required configuration parameters
# can go here, or just **kw; greeting is required:
greeting,
**kw):
# This is a WSGI application:
app = sampleapp.application
# Here we merge all the keys into one configuration
# dictionary; you don't have to do this, but this
# can be convenient later to add ad hoc configuration:
conf = global_conf.copy()
conf.update(kw)
conf['greeting'] = greeting
# ConfigMiddleware means that paste.deploy.CONFIG will,
# during this request (threadsafe) represent the
# configuration dictionary we set up:
app = ConfigMiddleware(app, conf)
return app
| 30.32
| 59
| 0.703166
|
561b3904465075d3c46e7d335a1ab2bd05641341
| 10,593
|
py
|
Python
|
benchmarks/bm_scimark_small.py
|
usalko/nogil
|
7a480f849f1f12a159a10bb0aa0f3431f24ce5f6
|
[
"0BSD"
] | 1
|
2021-11-19T02:20:24.000Z
|
2021-11-19T02:20:24.000Z
|
benchmarks/bm_scimark_small.py
|
usalko/nogil
|
7a480f849f1f12a159a10bb0aa0f3431f24ce5f6
|
[
"0BSD"
] | null | null | null |
benchmarks/bm_scimark_small.py
|
usalko/nogil
|
7a480f849f1f12a159a10bb0aa0f3431f24ce5f6
|
[
"0BSD"
] | null | null | null |
from array import array
import math
import time
class Array2D(object):
def __init__(self, w, h, data=None):
self.width = w
self.height = h
self.data = array('d', [0]) * (w * h)
if data is not None:
self.setup(data)
def _idx(self, x, y):
if 0 <= x < self.width and 0 <= y < self.height:
return y * self.width + x
raise IndexError
def __getitem__(self, x_y):
(x, y) = x_y
return self.data[self._idx(x, y)]
def __setitem__(self, x_y, val):
(x, y) = x_y
self.data[self._idx(x, y)] = val
def setup(self, data):
for y in range(self.height):
for x in range(self.width):
self[x, y] = data[y][x]
return self
def indexes(self):
for y in range(self.height):
for x in range(self.width):
yield x, y
def copy_data_from(self, other):
self.data[:] = other.data[:]
class Random(object):
MDIG = 32
ONE = 1
m1 = (ONE << (MDIG - 2)) + ((ONE << (MDIG - 2)) - ONE)
m2 = ONE << MDIG // 2
dm1 = 1.0 / float(m1)
def __init__(self, seed):
self.initialize(seed)
self.left = 0.0
self.right = 1.0
self.width = 1.0
self.haveRange = False
def initialize(self, seed):
self.seed = seed
seed = abs(seed)
jseed = min(seed, self.m1)
if (jseed % 2 == 0):
jseed -= 1
k0 = 9069 % self.m2
k1 = 9069 / self.m2
j0 = jseed % self.m2
j1 = jseed / self.m2
self.m = array('d', [0]) * 17
for iloop in range(17):
jseed = j0 * k0
j1 = (jseed / self.m2 + j0 * k1 + j1 * k0) % (self.m2 / 2)
j0 = jseed % self.m2
self.m[iloop] = j0 + self.m2 * j1
self.i = 4
self.j = 16
def nextDouble(self):
I, J, m = self.i, self.j, self.m
k = m[I] - m[J]
if (k < 0):
k += self.m1
self.m[J] = k
if (I == 0):
I = 16
else:
I -= 1
self.i = I
if (J == 0):
J = 16
else:
J -= 1
self.j = J
if (self.haveRange):
return self.left + self.dm1 * float(k) * self.width
else:
return self.dm1 * float(k)
def RandomMatrix(self, a):
for x, y in a.indexes():
a[x, y] = self.nextDouble()
return a
def RandomVector(self, n):
return array('d', [self.nextDouble() for i in range(n)])
def copy_vector(vec):
# Copy a vector created by Random.RandomVector()
vec2 = array('d')
vec2[:] = vec[:]
return vec2
class ArrayList(Array2D):
def __init__(self, w, h, data=None):
self.width = w
self.height = h
self.data = [array('d', [0]) * w for y in range(h)]
if data is not None:
self.setup(data)
def __getitem__(self, idx):
if isinstance(idx, tuple):
return self.data[idx[1]][idx[0]]
else:
return self.data[idx]
def __setitem__(self, idx, val):
if isinstance(idx, tuple):
self.data[idx[1]][idx[0]] = val
else:
self.data[idx] = val
def copy_data_from(self, other):
for l1, l2 in zip(self.data, other.data):
l1[:] = l2
def SOR_execute(omega, G, cycles, Array):
for p in range(cycles):
for y in range(1, G.height - 1):
for x in range(1, G.width - 1):
G[x, y] = (omega * 0.25 * (G[x, y - 1] + G[x, y + 1] + G[x - 1, y]
+ G[x + 1, y])
+ (1.0 - omega) * G[x, y])
def bench_SOR(loops, n, cycles, Array):
range_it = range(loops)
t0 = time.perf_counter()
for _ in range_it:
G = Array(n, n)
SOR_execute(1.25, G, cycles, Array)
return time.perf_counter() - t0
def SparseCompRow_matmult(M, y, val, row, col, x, num_iterations):
range_it = range(num_iterations)
t0 = time.perf_counter()
for _ in range_it:
for r in range(M):
sa = 0.0
for i in range(row[r], row[r + 1]):
sa += x[col[i]] * val[i]
y[r] = sa
return time.perf_counter() - t0
def bench_SparseMatMult(cycles, N, nz):
x = array('d', [0]) * N
y = array('d', [0]) * N
nr = nz // N
anz = nr * N
val = array('d', [0]) * anz
col = array('i', [0]) * nz
row = array('i', [0]) * (N + 1)
row[0] = 0
for r in range(N):
rowr = row[r]
step = r // nr
row[r + 1] = rowr + nr
if step < 1:
step = 1
for i in range(nr):
col[rowr + i] = i * step
return SparseCompRow_matmult(N, y, val, row, col, x, cycles)
def MonteCarlo(Num_samples):
rnd = Random(113)
under_curve = 0
for count in range(Num_samples):
x = rnd.nextDouble()
y = rnd.nextDouble()
if x * x + y * y <= 1.0:
under_curve += 1
return float(under_curve) / Num_samples * 4.0
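# --- Hedged note (illustration only) ------------------------------------------
# MonteCarlo() estimates pi: it samples Num_samples points in the unit square,
# counts those inside the quarter circle (x * x + y * y <= 1.0) and scales the
# hit ratio by 4.  A quick sanity check, using this module's own generator:
#
#     estimate = MonteCarlo(100 * 1000)
#     assert abs(estimate - math.pi) < 0.05  # loose tolerance for illustration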
def bench_MonteCarlo(loops, Num_samples):
range_it = range(loops)
t0 = time.perf_counter()
for _ in range_it:
MonteCarlo(Num_samples)
return time.perf_counter() - t0
def LU_factor(A, pivot):
M, N = A.height, A.width
minMN = min(M, N)
for j in range(minMN):
jp = j
t = abs(A[j][j])
for i in range(j + 1, M):
ab = abs(A[i][j])
if ab > t:
jp = i
t = ab
pivot[j] = jp
if A[jp][j] == 0:
raise Exception("factorization failed because of zero pivot")
if jp != j:
A[j], A[jp] = A[jp], A[j]
if j < M - 1:
recp = 1.0 / A[j][j]
for k in range(j + 1, M):
A[k][j] *= recp
if j < minMN - 1:
for ii in range(j + 1, M):
for jj in range(j + 1, N):
A[ii][jj] -= A[ii][j] * A[j][jj]
def LU(lu, A, pivot):
lu.copy_data_from(A)
LU_factor(lu, pivot)
def bench_LU(cycles, N):
rnd = Random(7)
A = rnd.RandomMatrix(ArrayList(N, N))
lu = ArrayList(N, N)
pivot = array('i', [0]) * N
range_it = range(cycles)
t0 = time.perf_counter()
for _ in range_it:
LU(lu, A, pivot)
return time.perf_counter() - t0
def int_log2(n):
k = 1
log = 0
while k < n:
k *= 2
log += 1
if n != 1 << log:
raise Exception("FFT: Data length is not a power of 2: %s" % n)
return log
def FFT_num_flops(N):
return (5.0 * N - 2) * int_log2(N) + 2 * (N + 1)
def FFT_transform_internal(N, data, direction):
n = N // 2
bit = 0
dual = 1
if n == 1:
return
logn = int_log2(n)
if N == 0:
return
FFT_bitreverse(N, data)
# apply fft recursion
# this loop executed int_log2(N) times
bit = 0
while bit < logn:
w_real = 1.0
w_imag = 0.0
theta = 2.0 * direction * math.pi / (2.0 * float(dual))
s = math.sin(theta)
t = math.sin(theta / 2.0)
s2 = 2.0 * t * t
for b in range(0, n, 2 * dual):
i = 2 * b
j = 2 * (b + dual)
wd_real = data[j]
wd_imag = data[j + 1]
data[j] = data[i] - wd_real
data[j + 1] = data[i + 1] - wd_imag
data[i] += wd_real
data[i + 1] += wd_imag
for a in range(1, dual):
tmp_real = w_real - s * w_imag - s2 * w_real
tmp_imag = w_imag + s * w_real - s2 * w_imag
w_real = tmp_real
w_imag = tmp_imag
for b in range(0, n, 2 * dual):
i = 2 * (b + a)
j = 2 * (b + a + dual)
z1_real = data[j]
z1_imag = data[j + 1]
wd_real = w_real * z1_real - w_imag * z1_imag
wd_imag = w_real * z1_imag + w_imag * z1_real
data[j] = data[i] - wd_real
data[j + 1] = data[i + 1] - wd_imag
data[i] += wd_real
data[i + 1] += wd_imag
bit += 1
dual *= 2
def FFT_bitreverse(N, data):
n = N // 2
nm1 = n - 1
j = 0
for i in range(nm1):
ii = i << 1
jj = j << 1
k = n >> 1
if i < j:
tmp_real = data[ii]
tmp_imag = data[ii + 1]
data[ii] = data[jj]
data[ii + 1] = data[jj + 1]
data[jj] = tmp_real
data[jj + 1] = tmp_imag
while k <= j:
j -= k
k >>= 1
j += k
def FFT_transform(N, data):
FFT_transform_internal(N, data, -1)
def FFT_inverse(N, data):
n = N / 2
norm = 0.0
FFT_transform_internal(N, data, +1)
norm = 1 / float(n)
for i in range(N):
data[i] *= norm
def bench_FFT(loops, N, cycles):
twoN = 2 * N
init_vec = Random(7).RandomVector(twoN)
range_it = range(loops)
t0 = time.perf_counter()
for _ in range_it:
x = copy_vector(init_vec)
for i in range(cycles):
FFT_transform(twoN, x)
FFT_inverse(twoN, x)
return time.perf_counter() - t0
def add_cmdline_args(cmd, args):
if args.benchmark:
cmd.append(args.benchmark)
def run_benchmarks():
loops = 1
start = time.perf_counter()
print('sor', bench_SOR(5, 100, 10, Array2D))
print('sparse_mat_mult', bench_SparseMatMult(100, 1000, 50 * 1000))
print('monte_carlo', bench_MonteCarlo(10, 100 * 1000))
print('lu', bench_LU(5, 100))
print('fft', bench_FFT(2, 1024, 50))
end = time.perf_counter()
print('total', end - start)
run_benchmarks()
# BENCHMARKS = {
# # function name => arguments
# 'sor': (bench_SOR, 100, 10, Array2D),
# 'sparse_mat_mult': (bench_SparseMatMult, 1000, 50 * 1000),
# 'monte_carlo': (bench_MonteCarlo, 100 * 1000,),
# 'lu': (bench_LU, 100,),
# 'fft': (bench_FFT, 1024, 50),
# }
# if __name__ == "__main__":
# runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
# runner.argparser.add_argument("benchmark", nargs='?',
# choices=sorted(BENCHMARKS))
# args = runner.parse_args()
# if args.benchmark:
# benchmarks = (args.benchmark,)
# else:
# benchmarks = sorted(BENCHMARKS)
# for bench in benchmarks:
# name = 'scimark_%s' % bench
# args = BENCHMARKS[bench]
# runner.bench_time_func(name, *args)
| 24.924706
| 82
| 0.484943
|
64eb50b5b67b7d75e65ec6120aaebed11223ea73
| 13,590
|
py
|
Python
|
gs_quant/risk/core.py
|
shawlu95/gs-quant
|
8ba89986b3126c3f552555fb1c565a46a508da93
|
[
"Apache-2.0"
] | 1
|
2019-12-02T03:01:04.000Z
|
2019-12-02T03:01:04.000Z
|
gs_quant/risk/core.py
|
chrisdroukas/gs-quant
|
f80302d23c0d2e1195d4751ad2db8ab06c6299fa
|
[
"Apache-2.0"
] | null | null | null |
gs_quant/risk/core.py
|
chrisdroukas/gs-quant
|
f80302d23c0d2e1195d4751ad2db8ab06c6299fa
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from concurrent.futures import Future
from copy import copy
from typing import Iterable, List, Optional, Tuple, Union
import dateutil
import pandas as pd
from gs_quant.common import AssetClass
from gs_quant.datetime import point_sort_order
from gs_quant.markets.core import PricingContext
from gs_quant.markets.historical import HistoricalPricingContext
from gs_quant.target.risk import RiskMeasure, RiskMeasureType, RiskMeasureUnit
__column_sort_fns = {
'label1': point_sort_order,
'mkt_point': point_sort_order,
'point': point_sort_order
}
__risk_columns = ('date', 'time', 'marketDataType', 'assetId', 'pointClass', 'point')
__crif_columns = ('date', 'time', 'riskType', 'amountCurrency', 'qualifier', 'bucket', 'label1', 'label2')
def sum_formatter(result: List) -> float:
return sum(r.get('value', r.get('Val')) for r in result)
def __flatten_result(item: Union[List, Tuple]):
rows = []
for elem in item:
if isinstance(elem, (list, tuple)):
rows.extend(__flatten_result(elem))
else:
excluded_fields = ['calculationTime', 'queueingTime']
if not issubclass(PricingContext.current.__class__, HistoricalPricingContext):
excluded_fields.append('date')
else:
date = elem.get('date')
if date is not None:
elem['date'] = dateutil.parser.isoparse(date).date()
for field in excluded_fields:
if field in elem:
elem.pop(field)
rows.append(elem)
return rows
def scalar_formatter(result: List) -> Optional[Union[float, pd.Series]]:
if not result:
return None
result = __flatten_result(result)
if len(result) > 1 and 'date' in result[0]:
series = pd.Series(
data=[r.get('value', r.get('Val')) for r in result],
index=[r['date'] for r in result]
)
return series.sort_index()
else:
return result[0].get('value', result[0].get('Val'))
def structured_formatter(result: List) -> Optional[pd.DataFrame]:
if not result:
return None
return sort_risk(pd.DataFrame.from_records(__flatten_result(result)))
def crif_formatter(result: List) -> Optional[pd.DataFrame]:
if not result:
return None
return sort_risk(pd.DataFrame.from_records(__flatten_result(result)), __crif_columns)
def aggregate_risk(results: Iterable[Union[pd.DataFrame, Future]], threshold: Optional[float] = None) -> pd.DataFrame:
"""
Combine the results of multiple Instrument.calc() calls into a single result
:param results: An iterable of Dataframes and/or Futures (returned by Instrument.calc())
:param threshold: exclude values whose absolute value falls below this threshold
:return: A Dataframe with the aggregated results
**Examples**
>>> with PricingContext():
>>> delta_f = [inst.calc(risk.IRDelta) for inst in instruments]
>>> vega_f = [inst.calc(risk.IRVega) for inst in instruments]
>>>
>>> delta = aggregate_risk(delta_f, threshold=0.1)
>>> vega = aggregate_risk(vega_f)
delta_f and vega_f are lists of Futures, each of which will resolve to a Dataframe
delta and vega are Dataframes representing the merged risk of the individual instruments
"""
dfs = [r.result() if isinstance(r, Future) else r for r in results]
result = pd.concat(dfs)
result = result.groupby([c for c in result.columns if c != 'value']).sum()
result = pd.DataFrame.from_records(result.to_records())
if threshold is not None:
result = result[result.value.abs() > threshold]
return sort_risk(result)
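# aggregate_results merges heterogeneous per-measure results: strings are passed through,
# dicts are aggregated key by key, floats and pandas Series are summed, and DataFrames are
# combined via aggregate_risk. Mixing non-string result types raises a RuntimeError.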
def aggregate_results(results: Iterable[Union[dict, float, str, pd.DataFrame, pd.Series]])\
-> Union[dict, float, str, pd.DataFrame, pd.Series]:
types = set(type(r) for r in results)
if str in types:
return next(r for r in results if isinstance(r, str))
elif len(types) > 1:
raise RuntimeError('Cannot aggregate heterogeneous types: {}'.format(tuple(types)))
inst = next(iter(results))
if isinstance(inst, dict):
return dict((k, aggregate_results([r[k] for r in results])) for k in inst.keys())
elif isinstance(inst, (float, pd.Series)):
return sum(results)
elif isinstance(inst, pd.DataFrame):
return aggregate_risk(results)
def subtract_risk(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame:
"""Subtract bucketed risk. Dimensions must be identical
:param left: Results to subtract from
:param right: Results to subtract
**Examples**
>>> ir_swap = IRSwap('Pay', '10y', 'USD')
>>> delta_today = ir_swap.calc(risk.IRDelta)
>>>
>>> with PricingContext(pricing_date=business_day_offset(datetime.date.today(), -1, roll='preceding')):
>>> delta_yday_f = ir_swap.calc(risk.IRDelta)
>>>
>>> delta_diff = subtract_risk(delta_today, delta_yday_f.result())
"""
assert(left.columns.names == right.columns.names)
assert('value' in left.columns.names)
right_negated = copy(right)
right_negated.value *= -1
return aggregate_risk((left, right_negated))
def sort_risk(df: pd.DataFrame, by: Tuple[str, ...] = __risk_columns) -> pd.DataFrame:
"""
Sort bucketed risk
:param df: Input Dataframe
:param by: Columns to sort by
:return: A sorted Dataframe
"""
columns = tuple(df.columns)
indices = [columns.index(c) if c in columns else -1 for c in by]
fns = [__column_sort_fns.get(c) for c in columns]
def cmp(row) -> tuple:
return tuple(fns[i](row[i]) if fns[i] else row[i] for i in indices if i != -1)
data = sorted((tuple(r)[1:] for r in df.to_records()), key=cmp)
fields = [f for f in by if f in columns]
fields.extend(f for f in columns if f not in fields)
result = pd.DataFrame.from_records(data, columns=columns)[fields]
if 'date' in result:
result = result.set_index('date')
return result
def __risk_measure_with_doc_string(
name: str,
doc: str,
measure_type: RiskMeasureType,
asset_class: Optional[AssetClass] = None,
unit: Optional[RiskMeasureUnit] = None
) -> RiskMeasure:
measure = RiskMeasure(measure_type=measure_type, asset_class=asset_class, unit=unit, name=name)
measure.__doc__ = doc
return measure
DollarPrice = __risk_measure_with_doc_string('DollarPrice', 'Present value in USD', RiskMeasureType.Dollar_Price)
Price = __risk_measure_with_doc_string('Price', 'Present value in local currency', RiskMeasureType.PV)
ForwardPrice = __risk_measure_with_doc_string(
'ForwardPrice',
'Forward price',
RiskMeasureType.Forward_Price,
unit=RiskMeasureUnit.BPS)
Theta = __risk_measure_with_doc_string('Theta', '1 day Theta', RiskMeasureType.Theta)
EqDelta = __risk_measure_with_doc_string(
'EqDelta',
'Equity Delta',
RiskMeasureType.Delta,
asset_class=AssetClass.Equity)
EqGamma = __risk_measure_with_doc_string(
'EqGamma',
'Equity Gamma',
RiskMeasureType.Gamma,
asset_class=AssetClass.Equity)
EqVega = __risk_measure_with_doc_string('EqVega', 'Equity Vega', RiskMeasureType.Vega, asset_class=AssetClass.Equity)
EqSpot = __risk_measure_with_doc_string(
'EqSpot',
'Equity Spot Level',
RiskMeasureType.Spot, asset_class=AssetClass.Equity)
EqAnnualImpliedVol = __risk_measure_with_doc_string(
'EqAnnualImpliedVol',
'Equity Annual Implied Volatility (%)',
RiskMeasureType.Annual_Implied_Volatility,
asset_class=AssetClass.Equity,
unit=RiskMeasureUnit.Percent)
CommodDelta = __risk_measure_with_doc_string(
'CommodDelta',
'Commodity Delta',
RiskMeasureType.Delta,
asset_class=AssetClass.Commod)
CommodTheta = __risk_measure_with_doc_string(
'CommodTheta',
'Commodity Theta',
RiskMeasureType.Theta,
asset_class=AssetClass.Commod)
CommodVega = __risk_measure_with_doc_string(
'CommodVega',
'Commodity Vega',
RiskMeasureType.Vega,
asset_class=AssetClass.Commod)
FairVolStrike = __risk_measure_with_doc_string(
'FairVolStrike',
'Fair Volatility Strike Value of a Variance Swap',
RiskMeasureType.FairVolStrike)
FairVarStrike = __risk_measure_with_doc_string(
'FairVarStrike',
'Fair Variance Strike Value of a Variance Swap',
RiskMeasureType.FairVarStrike)
FXDelta = __risk_measure_with_doc_string('FXDelta', 'FX Delta', RiskMeasureType.Delta, asset_class=AssetClass.FX)
FXGamma = __risk_measure_with_doc_string('FXGamma', 'FX Gamma', RiskMeasureType.Gamma, asset_class=AssetClass.FX)
FXVega = __risk_measure_with_doc_string('FXVega', 'FX Vega', RiskMeasureType.Vega, asset_class=AssetClass.FX)
FXSpot = __risk_measure_with_doc_string('FXSpot', 'FX Spot Rate', RiskMeasureType.Spot, asset_class=AssetClass.FX)
IRBasis = __risk_measure_with_doc_string(
'IRBasis',
'Interest Rate Basis',
RiskMeasureType.Basis,
asset_class=AssetClass.Rates)
IRDelta = __risk_measure_with_doc_string(
'IRDelta',
'Interest Rate Delta',
RiskMeasureType.Delta,
asset_class=AssetClass.Rates)
IRDeltaParallel = __risk_measure_with_doc_string(
'IRDeltaParallel',
'Interest Rate Parallel Delta',
RiskMeasureType.ParallelDelta,
asset_class=AssetClass.Rates)
IRDeltaLocalCcy = __risk_measure_with_doc_string(
'IRDeltaLocalCcy',
'Interest Rate Delta (Local Ccy)',
RiskMeasureType.DeltaLocalCcy,
asset_class=AssetClass.Rates)
IRDeltaParallelLocalCcy = __risk_measure_with_doc_string(
'IRDeltaParallelLocalCcy',
'Interest Rate Parallel Delta (Local Ccy)',
RiskMeasureType.ParallelDeltaLocalCcy,
asset_class=AssetClass.Rates)
IRGamma = __risk_measure_with_doc_string(
'IRGamma',
'Interest Rate Gamma',
RiskMeasureType.Gamma,
asset_class=AssetClass.Rates)
IRVega = __risk_measure_with_doc_string(
'IRVega',
'Interest Rate Vega',
RiskMeasureType.Vega,
asset_class=AssetClass.Rates)
IRVegaParallel = __risk_measure_with_doc_string(
'IRVegaParallel',
'Interest Rate Parallel Vega',
RiskMeasureType.ParallelVega,
asset_class=AssetClass.Rates)
IRVegaLocalCcy = __risk_measure_with_doc_string(
'IRVegaLocalCcy',
'Interest Rate Vega (Local Ccy)',
RiskMeasureType.VegaLocalCcy,
asset_class=AssetClass.Rates)
IRVegaParallelLocalCcy = __risk_measure_with_doc_string(
'IRVegaParallelLocalCcy',
'Interest Rate Parallel Vega (Local Ccy)',
RiskMeasureType.ParallelVegaLocalCcy,
asset_class=AssetClass.Rates)
IRAnnualImpliedVol = __risk_measure_with_doc_string(
'IRAnnualImpliedVol',
'Interest Rate Annual Implied Volatility (%)',
RiskMeasureType.Annual_Implied_Volatility,
asset_class=AssetClass.Rates,
unit=RiskMeasureUnit.Percent)
IRAnnualATMImpliedVol = __risk_measure_with_doc_string(
'IRAnnualATMImpliedVol',
'Interest Rate Annual Implied At-The-Money Volatility (%)',
RiskMeasureType.Annual_ATMF_Implied_Volatility,
asset_class=AssetClass.Rates,
unit=RiskMeasureUnit.Percent)
IRDailyImpliedVol = __risk_measure_with_doc_string(
'IRDailyImpliedVol',
'Interest Rate Daily Implied Volatility (bps)',
RiskMeasureType.Annual_ATMF_Implied_Volatility,
asset_class=AssetClass.Rates,
unit=RiskMeasureUnit.BPS)
IRSpotRate = __risk_measure_with_doc_string(
'IRSpotRate',
'At-The-Money Spot Rate (%)',
RiskMeasureType.Spot_Rate,
asset_class=AssetClass.Rates,
unit=RiskMeasureUnit.Percent)
IRFwdRate = __risk_measure_with_doc_string(
'IRFwdRate',
'Par Rate (%)',
RiskMeasureType.Forward_Rate,
asset_class=AssetClass.Rates,
unit=RiskMeasureUnit.Percent)
CRIFIRCurve = __risk_measure_with_doc_string(
'CRIFIRCurve',
'CRIF IR Curve',
RiskMeasureType.CRIF_IRCurve)
Formatters = {
DollarPrice: scalar_formatter,
Price: scalar_formatter,
ForwardPrice: scalar_formatter,
Theta: scalar_formatter,
EqDelta: scalar_formatter,
EqGamma: scalar_formatter,
EqVega: sum_formatter,
EqSpot: scalar_formatter,
EqAnnualImpliedVol: scalar_formatter,
CommodDelta: scalar_formatter,
CommodVega: scalar_formatter,
CommodTheta: scalar_formatter,
FairVarStrike: scalar_formatter,
FairVolStrike: scalar_formatter,
FXDelta: structured_formatter,
FXGamma: structured_formatter,
FXVega: structured_formatter,
FXSpot: scalar_formatter,
IRBasis: structured_formatter,
IRDelta: structured_formatter,
IRDeltaParallel: scalar_formatter,
IRDeltaLocalCcy: structured_formatter,
IRDeltaParallelLocalCcy: scalar_formatter,
IRGamma: scalar_formatter,
IRVega: structured_formatter,
IRVegaParallel: scalar_formatter,
IRVegaLocalCcy: structured_formatter,
IRVegaParallelLocalCcy: scalar_formatter,
IRAnnualImpliedVol: scalar_formatter,
IRDailyImpliedVol: scalar_formatter,
IRAnnualATMImpliedVol: scalar_formatter,
IRSpotRate: scalar_formatter,
IRFwdRate: scalar_formatter,
CRIFIRCurve: crif_formatter
}
| 35.483029
| 118
| 0.724135
|
66278be713a2341dc7d97681d87e654c4e95a4e0
| 5,943
|
py
|
Python
|
test/redshift/test_target_table.py
|
Dwolla/arbalest
|
5516aa11a24012a6222acc3b583261ff90ee450f
|
[
"MIT"
] | 46
|
2015-11-01T19:37:46.000Z
|
2021-04-14T02:41:10.000Z
|
test/redshift/test_target_table.py
|
Dwolla/arbalest
|
5516aa11a24012a6222acc3b583261ff90ee450f
|
[
"MIT"
] | 1
|
2016-04-20T16:56:44.000Z
|
2016-04-20T16:56:44.000Z
|
test/redshift/test_target_table.py
|
Dwolla/arbalest
|
5516aa11a24012a6222acc3b583261ff90ee450f
|
[
"MIT"
] | 9
|
2015-10-31T23:01:50.000Z
|
2021-08-02T21:15:25.000Z
|
import unittest
from mock import patch, Mock
from arbalest.redshift import TargetTable
from arbalest.redshift.schema import JsonObject, Property
from arbalest.sql import Database
from test import TABLE_NAME
class TargetTableShould(unittest.TestCase):
def setUp(self):
self.schema = JsonObject(TABLE_NAME, Property('id', 'VARCHAR(36)'))
def test_create_when_column_name_not_defined(self):
with patch.object(Database, 'execute') as execute:
schema = JsonObject(TABLE_NAME,
Property('property1', 'VARCHAR(10)'),
Property('property2', 'TIMESTAMP'))
table = TargetTable(schema, Database(Mock()))
table.create()
expected_sql = 'CREATE TABLE {0} (property1 VARCHAR(10), ' \
'property2 TIMESTAMP)'.format(TABLE_NAME)
execute.assert_called_once_with(expected_sql)
def test_create_when_column_name_defined(self):
with patch.object(Database, 'execute') as execute:
schema = JsonObject(TABLE_NAME,
Property('property1', 'VARCHAR(10)',
'someColumn'),
Property('property2', 'TIMESTAMP',
'anotherColumn'))
table = TargetTable(schema, Database(Mock()))
table.create()
expected_sql = 'CREATE TABLE {0} (someColumn VARCHAR(10), ' \
'anotherColumn TIMESTAMP)'.format(TABLE_NAME)
execute.assert_called_once_with(expected_sql)
def test_create_when_column_name_not_defined_for_nested_property(
self):
with patch.object(Database, 'execute') as execute:
schema = JsonObject(TABLE_NAME,
Property('property1', 'VARCHAR(10)'),
Property('property2',
Property('timestamp', 'TIMESTAMP')))
table = TargetTable(schema, Database(Mock()))
table.create()
expected_sql = 'CREATE TABLE {0} (property1 VARCHAR(10), ' \
'property2_timestamp TIMESTAMP)'.format(TABLE_NAME)
execute.assert_called_once_with(
expected_sql)
def test_create_when_column_name_defined_for_nested_property(self):
with patch.object(Database, 'execute') as execute:
schema = JsonObject(TABLE_NAME,
Property('property1', 'VARCHAR(10)'),
Property('property2',
Property('timestamp', 'TIMESTAMP',
'anotherColumn')))
table = TargetTable(schema, Database(Mock()))
table.create()
expected_sql = 'CREATE TABLE {0} (property1 VARCHAR(10), ' \
'anotherColumn TIMESTAMP)'.format(TABLE_NAME)
execute.assert_called_once_with(
expected_sql)
def test_stage_update_when_column_name_not_defined(self):
with patch.object(Database, 'execute') as execute:
schema = JsonObject(TABLE_NAME,
Property('property1', 'VARCHAR(10)'),
Property('property2', 'TIMESTAMP'))
table = TargetTable(schema, Database(Mock()))
table.stage_update()
expected_sql = 'CREATE TABLE {0}_update (property1 VARCHAR(10), ' \
'property2 TIMESTAMP)'.format(TABLE_NAME)
execute.assert_called_once_with(
expected_sql)
def test_stage_update_when_column_name_defined(self):
with patch.object(Database, 'execute') as execute:
schema = JsonObject(TABLE_NAME,
Property('property1', 'VARCHAR(10)',
'someColumn'),
Property('property2', 'TIMESTAMP',
'anotherColumn'))
table = TargetTable(schema, Database(Mock()))
table.stage_update()
expected_sql = 'CREATE TABLE {0}_update (someColumn VARCHAR(10), ' \
'anotherColumn TIMESTAMP)'.format(TABLE_NAME)
execute.assert_called_once_with(expected_sql)
def test_stage_update_when_column_name_not_defined_for_nested_property(
self):
with patch.object(Database, 'execute') as execute:
schema = JsonObject(TABLE_NAME,
Property('property1', 'VARCHAR(10)'),
Property('property2',
Property('timestamp', 'TIMESTAMP')))
table = TargetTable(schema, Database(Mock()))
table.stage_update()
expected_sql = 'CREATE TABLE {0}_update (property1 VARCHAR(10), ' \
'property2_timestamp TIMESTAMP)'.format(TABLE_NAME)
execute.assert_called_once_with(
expected_sql)
def test_stage_update_when_column_name_defined_for_nested_property(self):
with patch.object(Database, 'execute') as execute:
schema = JsonObject(TABLE_NAME,
Property('property1', 'VARCHAR(10)'),
Property('property2',
Property('timestamp', 'TIMESTAMP',
'anotherColumn')))
table = TargetTable(schema, Database(Mock()))
table.stage_update()
expected_sql = 'CREATE TABLE {0}_update (property1 VARCHAR(10), ' \
'anotherColumn TIMESTAMP)'.format(TABLE_NAME)
execute.assert_called_once_with(
expected_sql)
| 42.148936
| 80
| 0.540131
|
b08d4116fa6263dec021d4efd16144d8878dfdd9
| 3,739
|
py
|
Python
|
python/cmsaa_180.py
|
jaelle/cmsaa
|
23f6715385dd065648d800bd94a166e9bbd85d8e
|
[
"MIT"
] | null | null | null |
python/cmsaa_180.py
|
jaelle/cmsaa
|
23f6715385dd065648d800bd94a166e9bbd85d8e
|
[
"MIT"
] | null | null | null |
python/cmsaa_180.py
|
jaelle/cmsaa
|
23f6715385dd065648d800bd94a166e9bbd85d8e
|
[
"MIT"
] | null | null | null |
from cmsaa import attentional_bias, optimize_prioritymap, rmse,plot_results,plot_results_w_test
from cmsaa import GoalMap, SaliencyMap, PriorityMap
import matplotlib.pyplot as plt
import csv
from scipy.integrate import simps
import numpy as np
data_180 = {}
test_180 = {}
stimuli_locations_180 = [-90,-45,0,45,90]
np_data = np.loadtxt('data/180degree.csv',delimiter=',',skiprows=1)
partitions = np.loadtxt('data/bootstrap_partitions.csv',dtype=int,delimiter=',')
x = np.array(stimuli_locations_180)
# Standard model
#init_vals = [0.75, 125, 0.75, 125]
#min_bounds = [0.70, 0, 0.70, 0]
#max_bounds = [0.8,200,0.78,200]
# GM Only model:
#init_vals = [0.7662, 125]
#min_bounds = [0.5, 50]
#max_bounds = [0.8,200]
# Constant SM
#init_vals = [0.4, 125, 0.4]
#min_bounds = [0.3, 0, 0.3]
#max_bounds = [0.5, 200, 0.5]
# Inhibited GM
init_vals = [0.4, 0.4, 125, 125, 0.4]
min_bounds = [0.3, 0.3, 0, 0, 0.3]
max_bounds = [0.5,0.5, 200, 200, 0.5]
save_rows = []
for i in range(len(partitions)):
training_set = []
for col in partitions[i]:
training_set += [np_data[col]]
training_set = np.array(training_set)
bootstrap_means = np.mean(training_set,axis=0)
alldata_means = np.mean(np_data,axis=0)
data_180['-90'] = bootstrap_means[0:5]
data_180['0'] = bootstrap_means[5:10]
data_180['90'] = bootstrap_means[10:15]
test_180['-90'] = alldata_means[0:5]
test_180['0'] = alldata_means[5:10]
test_180['90'] = alldata_means[10:15]
for attended_location in [-90,0,90]:
# attentional bias derived from the mean reaction times at the attended location
y = np.array(attentional_bias(data_180[str(attended_location)]))
# print(y)
best_vals = optimize_prioritymap(attended_location, x, y, init_vals, min_bounds, max_bounds)
degrees = np.arange(x[0],x[4],1)
pm = PriorityMap(attended_location)
#pm.standard(degrees,*best_vals)
# pm.gmonly(degrees,*best_vals)
pm.inhibitedgm(degrees,*best_vals)
#pm.constantsm(degrees,*best_vals)
train_error = rmse(pm.prioritymap,y)
auc = simps(np.append(pm.prioritymap,[0.65,0.65,0.65]))
test_y = np.array(attentional_bias(test_180[str(attended_location)]))
test_error = rmse(pm.prioritymap,test_y)
plot_results_w_test(x, y, test_y, pm, 'results_inhibitedgm/images/' + str(attended_location) + '/180_' + str(attended_location) + '_bootstrap_' + str(i) + '_b.png')
save_cols = [180,attended_location,i]
save_cols = np.append(save_cols,best_vals)
save_cols = np.append(save_cols,[train_error,test_error,auc])
save_cols = np.array(save_cols, dtype=str)
save_rows += [save_cols]
save_rows = np.array(save_rows).tolist()
# Standard Model:
# save_rows = [['standard location','stimuli location','bootstrap row','gm mag','gm stdev','sm mag','sm stdev','train error','test error','auc']] + save_rows
# GM Only Model:
# save_rows = [['standard location','stimuli location','bootstrap row','gm mag','gm stdev','train error','test error','auc']] + save_rows
# Constant SM
#save_rows = [['standard location','stimuli location','bootstrap row','gm mag','gm stdev','sm mag','train error','test error','auc']] + save_rows
# Inhibited GM
save_rows = [['standard location','stimuli location','bootstrap row','gm mag', 'gm mag2', 'gm stdev', 'gm stdev2', 'sm mag','train error','test error','auc']] + save_rows
with open('results_inhibitedgm/180_params_inhibitedgm.csv','w') as fp:
writer = csv.writer(fp,lineterminator='\n')
writer.writerows(save_rows)
print('Done!')
| 34.943925
| 173
| 0.654453
|
99c74bf7b2588740e6937c3a529e30916c4060f0
| 679
|
py
|
Python
|
products/migrations/0008_auto_20151124_1903.py
|
n2o/guhema
|
eb390cbb5213a5ae16539ea46d473a5dc1866415
|
[
"MIT"
] | null | null | null |
products/migrations/0008_auto_20151124_1903.py
|
n2o/guhema
|
eb390cbb5213a5ae16539ea46d473a5dc1866415
|
[
"MIT"
] | 2
|
2016-01-20T22:21:33.000Z
|
2016-01-29T08:50:21.000Z
|
products/migrations/0008_auto_20151124_1903.py
|
n2o/guhema
|
eb390cbb5213a5ae16539ea46d473a5dc1866415
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9c1 on 2015-11-24 19:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0007_auto_20151124_1902'),
]
operations = [
migrations.AlterField(
model_name='indicator',
name='diameter',
field=models.FloatField(blank=True, default=0.0, verbose_name='Durchmesser'),
),
migrations.AlterField(
model_name='indicator',
name='value',
field=models.CharField(max_length=1024, verbose_name='Kennziffer'),
),
]
| 26.115385
| 89
| 0.615611
|
9eed6895e29bf7e7d93de367ceed1c8dbecd3fe2
| 8,473
|
py
|
Python
|
t/qry.py
|
APNIC-Labs/rtmfp-probe
|
c15255ea242fe5c60ac33e7f9aaf0bff7884b7ad
|
[
"BSD-3-Clause"
] | 1
|
2015-04-29T04:40:58.000Z
|
2015-04-29T04:40:58.000Z
|
t/qry.py
|
APNIC-Labs/rtmfp-probe
|
c15255ea242fe5c60ac33e7f9aaf0bff7884b7ad
|
[
"BSD-3-Clause"
] | null | null | null |
t/qry.py
|
APNIC-Labs/rtmfp-probe
|
c15255ea242fe5c60ac33e7f9aaf0bff7884b7ad
|
[
"BSD-3-Clause"
] | 2
|
2015-05-08T03:39:51.000Z
|
2019-10-17T09:20:26.000Z
|
#!/usr/bin/env python
from dh import DiffieHellman
import crypto
import socket
import struct
from binascii import hexlify, unhexlify
UDP_IP='50.57.70.211'
UDP_IP='127.0.0.1'
UDP_PORT=1935 #macromedia-fcs
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', 0))
print 'Listening on port ' + str(sock.getsockname()[1])
def hex_print(buf):
chx = ""
out = " "
for i in range(len(buf)):
if ord(buf[i]) > 31 and ord(buf[i]) < 127:
chx = chx + buf[i]
else:
chx = chx + '.'
out = out + ("%02X " % ord(buf[i]))
if (i % 16) == 15:
out = out + " " + chx
if i < len(buf) - 1: out = out + "\n "
chx = ""
if len(buf) % 16 != 15:
out = out + (" " * (len(buf) % 16)) + chx
print out
def carry_around_add(a, b):
c = a + b
return (c & 0xffff) + (c >> 16)
def checksum(msg):
s = 0
for i in range(0, len(msg), 2):
w = ord(msg[i]) + (ord(msg[i+1]) << 8)
s = carry_around_add(s, w)
return ~s & 0xffff
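# prep/unwrap below implement the RTMFP packet framing used by this probe: prep prepends
# the mode byte, pads with 0xff so the checksum-prefixed message fills whole 16-byte
# blocks, prefixes the 16-bit end-around-carry checksum computed above, encrypts, and
# scrambles the session id into the first 4 bytes by XORing it with the first two 32-bit
# words of ciphertext. unwrap reverses each step, verifies the checksum, and splits the
# decrypted payload into (type, length, body) chunks.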
def prep(msg, ssid, mode, key):
msg = struct.pack("!B", mode) + msg
if (len(msg)+2) % 16 != 0:
msg = msg + '\xff' * (16 - (len(msg)+2) % 16)
chksum = checksum(msg)
msg = struct.pack("=H", chksum) + msg
msg = crypto.encrypt(msg, key)
words = struct.unpack("!LL", msg[:8])
ssid = ssid ^ words[0] ^ words[1]
msg = struct.pack("!L", ssid) + msg
return msg
def unwrap(msg, key):
data = { }
words = struct.unpack("!LLL", msg[:12])
data['ssid'] = words[0] ^ words[1] ^ words[2]
orig = msg[4:]
msg = crypto.decrypt(msg[4:], key)
print 'decrypted:', repr(msg)
chksum = checksum(msg[2:])
if chksum != struct.unpack('=H', msg[:2])[0]:
msg = crypto.decrypt(orig, crypto.DEFAULT_KEY)
print "invalid checksum", data['ssid']
chksum = checksum(msg[2:])
if chksum == struct.unpack('=H', msg[:2])[0]:
print "default crypto key message:"
hex_print(msg)
return None
flags = ord(msg[2])
print 'Flags:', hex(flags)
msg = msg[3:]
if flags & 4: msg = msg[2:]
if flags & 8: msg = msg[2:]
data['flags'] = flags
chunks = []
while len(msg) > 3:
if msg[0] == '\xff':
msg = msg[1:]
continue
(chtype, chlength) = struct.unpack("!BH", msg[:3])
chunks.append((chtype, chlength, msg[3:chlength+3]))
msg = msg[chlength+3:]
data['chunks'] = chunks
return data
def packl(lnum, padmultiple=1):
"""Packs the lnum (which must be convertable to a long) into a
byte string 0 padded to a multiple of padmultiple bytes in size. 0
means no padding whatsoever, so that packing 0 result in an empty
string. The resulting byte string is the big-endian two's
complement representation of the passed in long."""
if lnum == 0:
return b'\0' * padmultiple
elif lnum < 0:
raise ValueError("Can only convert non-negative numbers.")
s = hex(lnum)[2:]
s = s.rstrip('L')
if len(s) & 1:
s = '0' + s
s = unhexlify(s)
if (padmultiple != 1) and (padmultiple != 0):
filled_so_far = len(s) % padmultiple
if filled_so_far != 0:
s = b'\0' * (padmultiple - filled_so_far) + s
return s
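# vread/vwrite handle the 7-bits-per-byte variable-length integers used by RTMFP: the
# high bit of each byte marks a continuation, and the low 7 bits carry the value,
# most-significant group first. For example, vwrite(300) == '\x82\x2c' and
# vread('\x82\x2c') == (300, ''), while packl above gives the plain big-endian bytes of
# a long, e.g. packl(0x1234) == '\x12\x34' and packl(0x1234, 4) == '\x00\x00\x12\x34'.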
def vread(msg):
value = 0
while len(msg) > 0 and ord(msg[0]) & 0x80 != 0:
value = value << 7
value = value + (ord(msg[0]) & 0x7f)
msg = msg[1:]
if len(msg) > 0:
value = value << 7
value = value + (ord(msg[0]) & 0x7f)
msg = msg[1:]
return (value, msg)
def vwrite(value):
if value <= 0: return '\0'
msg = ''
flag = 0
while value > 0:
msg = chr((value & 0x7f) | flag) + msg
flag = 0x80
value = value >> 7
return msg
epd = "\x16\x15\x0artmpf://cc.rtmfp.net"
IHello = prep("\x30" + struct.pack('!H', len(epd) + 16) + epd + "0123456789ABCDEF", 0, 3, crypto.DEFAULT_KEY)
sock.sendto(IHello, (UDP_IP, UDP_PORT))
msg, addr = sock.recvfrom(1024)
data = unwrap(msg, crypto.DEFAULT_KEY)
assert(data is not None)
assert(len(data['chunks']) == 1)
RHello = data['chunks'][0]
if RHello[0] != 0x70: print hexlify(msg), data
assert(RHello[0] == 0x70)
assert(RHello[1] == len(RHello[2]))
(taglen, msg) = vread(RHello[2])
assert(taglen == 16)
assert(msg[:16] == '0123456789ABCDEF')
(cookielen, msg) = vread(msg[16:])
cookie = msg[:cookielen]
# ignore RHello options, the server will be using an ephemeral key
dh = DiffieHellman()
pcert = '\x1d\x02' + packl(dh.publicKey)
pcert = vwrite(len(pcert)) + pcert
sknc = '\x02\x1d\x02\x03\x1a\x00\x00\x02\x1e\x00'
msg = '1234' + vwrite(cookielen) + cookie + vwrite(len(pcert)) + pcert + vwrite(len(sknc)) + sknc + 'X'
IIKeying = prep("\x38" + struct.pack('!H', len(msg)) + msg, 0, 3, crypto.DEFAULT_KEY)
sock.sendto(IIKeying, (UDP_IP, UDP_PORT))
msg, addr = sock.recvfrom(1024)
data = unwrap(msg, crypto.DEFAULT_KEY)
assert(data is not None)
assert(len(data['chunks']) == 1)
RIKeying = data['chunks'][0]
assert(RIKeying[0] == 0x78)
assert(RIKeying[1] == len(RIKeying[2]))
remote_ssid = struct.unpack('!L', RIKeying[2][:4])[0]
(skfcLength, msg) = vread(RIKeying[2][4:])
skfc = msg[:skfcLength]
kdata = skfc
shared = None
while len(kdata) > 3:
(optlen, kdata) = vread(kdata)
(opttype, odata) = vread(kdata[:optlen])
if opttype == 0x0d:
(_, odata) = vread(odata) # group ID
dh.genKey(long(hexlify(odata), 16))
shared = packl(dh.sharedSecret)
kdata = kdata[optlen:]
assert(shared is not None)
#print 'sknc:', hexlify(sknc)
#print 'skfc:', hexlify(skfc)
#print 'shared:', hexlify(shared)
(enc,dec) = crypto.makeKeys(sknc, skfc, shared)
#print hexlify(enc), hexlify(dec)
# we're up and running, send an RTMP message
invokeConnect = ('\x80' + # flags
'\x02\x01\x01' + # flow ID, seq#, fnsOffset
'\x05\x00TC\x04\x00\x00' + # metadata option
'\x14\x00\x00\x00\x00' + # RTMP.Invoke(AMF0)
'\x02\x00\x07connect' + # connect
'\x00\x3f\xf0\x00\x00\x00\x00\x00\x00' + # 1.0
'\x03' + # {
'\x00\x03app\x02\x00\x00' + # app: ""
'\x00\x0eobjectEncoding' + # objectEncoding:
'\x00\x40\x08\x00\x00\x00\x00\x00\x00' + # 3.0
'\x00\x00\x09') # }
Invoke = prep('\x10' + struct.pack('!H', len(invokeConnect)) + invokeConnect, remote_ssid, 1, enc)
invokePeerInfo = ('\x00' + # flags
'\x02\x02\x01' + # flow ID, seq#, fnsOffset
'\x11\x00\x00\x00\x00' + # RTMP.Invoke(AMF3)
'\x00\x02\x00\x0bsetPeerInfo' + # setPeerInfo
'\x40\x08\x00\x00\x00\x00\x00\x00\x00' + # 3.0
'\x05' + # NULL
'\x02\x00\x13203.119.42.20:55694' +
'\x02\x00\x2b[2001:dc0:a000:4:3e07:54ff:fe1b:5fad]:55695' +
'\x02\x00\x2b[2001:dc0:a000:4:5466:32d8:c3c6:19f7]:55695')
PeerInfo = prep('\x10' + struct.pack('!H', len(invokePeerInfo)) + invokePeerInfo, remote_ssid, 1, enc)
sock.sendto(Invoke, (UDP_IP, UDP_PORT))
messages = [PeerInfo]
ongoing = True
while ongoing:
msg, addr = sock.recvfrom(1024)
data = unwrap(msg, dec)
print 'Incoming message:', data
if data:
for ch in data['chunks']:
print 'Chunk type %02x:' % ch[0]
hex_print(ch[2])
if ch[0] == 0x10: # UserData, acknowledge
bobs = ch[2][1:]
(fid, bobs) = vread(bobs)
(seq, bobs) = vread(bobs)
echo = vwrite(fid) + '\x7f' + vwrite(seq)
ack = ('\x51\x00' + vwrite(len(echo)) + echo)
Ack = prep(ack, remote_ssid, 1, enc)
sock.sendto(Ack, (UDP_IP, UDP_PORT))
if ch[0] == 0x0c: # Terminate request, bomb out
ongoing = False
if len(messages) > 0:
print "Sending next message..."
sock.sendto(messages.pop(), (UDP_IP, UDP_PORT))
if __name__=="__fred__":
a = DiffieHellman()
b = DiffieHellman()
a.genKey(b.publicKey)
b.genKey(a.publicKey)
if(a.getKey() == b.getKey()):
print "Shared keys match."
print "Key:", hexlify(a.key)
else:
print "Shared secrets didn't match!"
print "Shared secret: ", a.genSecret(b.publicKey)
print "Shared secret: ", b.genSecret(a.publicKey)
| 32.968872
| 109
| 0.558716
|
bf13b713f9e08521fec8b5d666113313149700c3
| 2,172
|
py
|
Python
|
old/script/RegulatorPlot.py
|
schregardusc/dishtiny
|
b0b1841a457a955fa4c22f36a050d91f12484f9e
|
[
"MIT"
] | 1
|
2021-02-12T23:53:55.000Z
|
2021-02-12T23:53:55.000Z
|
old/script/RegulatorPlot.py
|
schregardusc/dishtiny
|
b0b1841a457a955fa4c22f36a050d91f12484f9e
|
[
"MIT"
] | null | null | null |
old/script/RegulatorPlot.py
|
schregardusc/dishtiny
|
b0b1841a457a955fa4c22f36a050d91f12484f9e
|
[
"MIT"
] | null | null | null |
# usage:
# dataframe_filename
import sys
import os
import seaborn as sns
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from keyname import keyname as kn
from fileshash import fileshash as fsh
matplotlib.rcParams['pdf.fonttype'] = 42
sns.set(style='whitegrid')
dataframe_filename = sys.argv[1]
df = pd.read_csv(dataframe_filename)
df['Treatment'] = df['Treatment'].apply(lambda raw : {
'resource-even__channelsense-no__nlev-two__mute' : 'Blind',
'resource-even__channelsense-no__nlev-two__mute__mixed' : 'Mixed',
'resource-even__channelsense-yes__nlev-two' : 'Even',
'resource-wave__channelsense-yes__nlev-onebig' : 'Flat',
'resource-wave__channelsense-yes__nlev-two' : 'Nested'
}[raw]
)
ax = sns.barplot(
x="Treatment",
y="Mean Set Regulators",
data=df
)
plt.xticks(rotation=30)
outfile = kn.pack({
'_data_hathash_hash' : fsh.FilesHash().hash_files([dataframe_filename]),
'_script_fullcat_hash' : fsh.FilesHash(
file_parcel="full_parcel",
files_join="cat_join"
).hash_files([sys.argv[0]]),
'_source_hash' :kn.unpack(dataframe_filename)['_source_hash'],
'title' : 'regulator',
'ext' : '.pdf'
})
ax.get_figure().savefig(
outfile,
transparent=True,
bbox_inches='tight',
pad_inches=0
)
print('Output saved to', outfile)
plt.clf()
ax = sns.barplot(
x="Treatment",
y="Mean Unique Regulators",
data=df
)
plt.xticks(rotation=30)
outfile = kn.pack({
'_data_hathash_hash' : fsh.FilesHash().hash_files([dataframe_filename]),
'_script_fullcat_hash' : fsh.FilesHash(
file_parcel="full_parcel",
files_join="cat_join"
).hash_files([sys.argv[0]]),
'_source_hash' :kn.unpack(dataframe_filename)['_source_hash'],
'title' : 'uniq_regulator',
'ext' : '.pdf'
})
ax.get_figure().savefig(
outfile,
transparent=True,
bbox_inches='tight',
pad_inches=0
)
print('Output saved to', outfile)
| 24.681818
| 76
| 0.63582
|
ca5099608d281b9cdd987c21a8a7e1d4449f91a4
| 2,598
|
py
|
Python
|
text_finder.py
|
Ry-Co/text-pair-parser
|
4fe7f84fefccde3b4a632a6a4689e6fb11612d1b
|
[
"MIT"
] | null | null | null |
text_finder.py
|
Ry-Co/text-pair-parser
|
4fe7f84fefccde3b4a632a6a4689e6fb11612d1b
|
[
"MIT"
] | null | null | null |
text_finder.py
|
Ry-Co/text-pair-parser
|
4fe7f84fefccde3b4a632a6a4689e6fb11612d1b
|
[
"MIT"
] | null | null | null |
import requests
import os
from bs4 import BeautifulSoup
from pprint import pprint
def main():
url_cnn = 'http://lite.cnn.com/en'
url_npr = 'https://text.npr.org/'
low_level_urls_npr = parse_base_urls(url_npr)
validURLs_npr = parse_urls_NPR(url_npr, low_level_urls_npr)
write_to_txt_NPR(validURLs_npr)
low_level_urls_cnn = parse_base_urls(url_cnn)
validURLs_cnn = parse_urls_CNN(url_cnn, low_level_urls_cnn)
write_to_txt_CNN(validURLs_cnn)
def parse_base_urls(url):
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
links = soup.findAll('a', href=True)
base = []
for link in links:
base.append(link.get('href'))
return base
def parse_urls_NPR(urlRoot, urls):
validURLs = []
for url in urls:
if "/s." in url:
parsedURL = url.replace("/", "")
validURLs.append(urlRoot+parsedURL)
return validURLs
def write_to_txt_NPR(validURLs):
for article in validURLs:
print(article)
text = parse_page_NPR(article)
title = article.rsplit('=',1)[-1]
writePath = os.getcwd()+"\\txtfiles\\"+title+".txt"
with open(writePath,"w", encoding='utf-8') as text_file:
text_file.write(text)
def parse_page_NPR(page_url):
response = requests.get(page_url)
soup = BeautifulSoup(response.content, 'html.parser')
tags = soup.find_all('p')
text = ""
for tag in tags:
if 'NPR' not in tag.getText() and 'Home' not in tag.getText() and 'By' not in tag.getText():
text = text + tag.getText()
return text
def parse_urls_CNN(urlRoot,urls):
validURLs = []
for url in urls:
if "article" in url:
parsedURL = url.replace("/en", "")
validURLs.append(urlRoot+parsedURL)
return validURLs
def write_to_txt_CNN(validURLs):
for article in validURLs:
print(article)
text = parse_page_CNN(article)
title = article.rsplit('/',1)[-1]
writePath = os.getcwd()+"\\txtfiles\\"+title+".txt"
with open(writePath,"w", encoding='utf-8') as text_file:
text_file.write(text)
def parse_page_CNN(page_url):
response = requests.get(page_url)
soup = BeautifulSoup(response.content, 'html.parser')
tags = soup.find_all('p')
text = ""
for tag in tags:
if 'CNN' not in tag.getText() and 'Inc' not in tag.getText():
text = text + tag.getText()
return text
main()
| 28.549451
| 101
| 0.611239
|
299ccdd6e1a0972a6c8b00c58c9c178e605d97f8
| 1,268
|
py
|
Python
|
config_example.py
|
exunious/Noella-Bot
|
ef119cf1ffc102188954962d54f07d895f4a7a94
|
[
"MIT"
] | null | null | null |
config_example.py
|
exunious/Noella-Bot
|
ef119cf1ffc102188954962d54f07d895f4a7a94
|
[
"MIT"
] | 1
|
2019-01-08T21:53:38.000Z
|
2019-01-08T21:53:38.000Z
|
config_example.py
|
exunious/Noella-Bot
|
ef119cf1ffc102188954962d54f07d895f4a7a94
|
[
"MIT"
] | 3
|
2018-01-05T02:58:01.000Z
|
2018-06-14T20:56:51.000Z
|
#######################################
############# Config File #############
#######################################
from discord.ext import commands
from collections import Counter
from collections import OrderedDict, deque, Counter
from datetime import datetime as dt
import openweathermapy.core as owm
import cogs.utils.checks
import cogs.utils.db
import cogs.utils.formats
import time
import logging
import aiohttp
import discord
import sys
import asyncio
import datetime
import traceback
import copy
import unicodedata
import inspect
import os
import json
bot_version = "3.63" #Bot Version
dev_discord = "https://discord.gg/EVfHKKn"
client_id = 'BOTID' # your bot's client ID
token = 'TOKEN' # your bot's token
postgresql = 'postgresql://NAME:PASSWORD@LOCALHOST/DB' # your postgresql info from above
carbon_key = '' # your bot's key on carbon's site
bots_key = '' # your key on bots.discord.pw
challonge_api_key = '...' # for tournament cog
openweathermap_api = ''
youtube_api = ''
embed_color = 13454262 #Default Embed Color
embed_color_succes = 65280 #Default SuccesEmbed Color
embed_color_error = 13434880 #Default ErrorEmbed Color
embed_color_attention = 16776960 #Default AttentionEmbed Color
message_delete_time = 15 #Default Message Delete Time
| 27.565217
| 88
| 0.727129
|
09ac99b1a0535db29c6858515ce09e5dbb989af9
| 453
|
py
|
Python
|
Python/Samples/Template/DkoFixing.py
|
plasroom46/DesignPattern.Sample
|
86c05c5ae356cb01f3d075f248c45da3e6534d07
|
[
"MIT"
] | 9
|
2019-03-14T01:54:31.000Z
|
2021-11-26T13:00:32.000Z
|
Python/Samples/Template/DkoFixing.py
|
plasroom46/DesignPattern.Sample
|
86c05c5ae356cb01f3d075f248c45da3e6534d07
|
[
"MIT"
] | null | null | null |
Python/Samples/Template/DkoFixing.py
|
plasroom46/DesignPattern.Sample
|
86c05c5ae356cb01f3d075f248c45da3e6534d07
|
[
"MIT"
] | 2
|
2019-08-19T06:00:04.000Z
|
2021-07-15T01:23:52.000Z
|
from ProductFixingTemplate import ProductFixingTemplate
class DkoFixing(ProductFixingTemplate):
def findWorkOptionLeg(self):
print("DKO: Find Working Option Leg!")
def checkBarriers(self):
print("DKO: Check barries!")
def rebateBarriers(self):
print("DKO: Rebate barries!")
def fixingOptionLeg(self):
print("DKO: Fixing Option leg!")
def checkTriggers(self):
print("DKO: No trigger...")
| 23.842105
| 55
| 0.668874
|
1063ef7a1b27813a8463cdfe86ec411ed6cd64d8
| 2,066
|
py
|
Python
|
gateway/can/controllers/base.py
|
aceofwings/Cantactular
|
a6eb8d7128fd1388d3e75c1a8415123d1d5930e1
|
[
"MIT"
] | 3
|
2017-01-26T01:37:42.000Z
|
2018-07-22T02:42:52.000Z
|
gateway/can/controllers/base.py
|
aceofwings/Cantactular
|
a6eb8d7128fd1388d3e75c1a8415123d1d5930e1
|
[
"MIT"
] | 1
|
2017-07-07T18:02:20.000Z
|
2017-07-07T18:02:20.000Z
|
gateway/can/controllers/base.py
|
aceofwings/Evt-Gateway
|
a6eb8d7128fd1388d3e75c1a8415123d1d5930e1
|
[
"MIT"
] | null | null | null |
EVTCAN = "EVTCAN"
OPENCAN = "OPENCAN"
class HandlerAssociator(type):
def __new__(cls, clsname, bases, dct):
class_frame = type.__new__(cls, clsname, bases, dct)
handlers = []
for attribute in dct.values():
if(hasattr(attribute,'match')):
handlers.append(attribute)
class_frame.handlers = handlers
return class_frame
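# The metaclass above scans a controller's attributes at class-creation time and collects
# every function that carries a `match` attribute (set by the handler/handleEvt/handleOpen
# decorators below) into the class-level `handlers` list, which build_controller later
# binds to the instance.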
class Controller(object,metaclass=HandlerAssociator):
MSG_SEND_SIZE = 16
def __init__(self):
super().__init__()
def build_controller(self):
"""
Associate the defined handlers with the instance and return a list of
associated handlers for further match definitions
"""
ctrl_funcs = []
for handler in self.handlers:
ctrl_funcs.append(handler.__get__(self))
return ctrl_funcs
class BaseController(Controller):
msg_type="CAN"
def __init__(self):
super().__init__()
def send_to_bus(self,message):
pass
def handle_message(self,message):
self.CC.handle(message)
# TODO: figure out a design to wrap this as a debugger
def handler(p):
def _handle(function):
function.match = p
return function
return _handle
def handleEvt(messageid = None):
def _handle(function):
function.match = (messageid)
function.type = EVTCAN
return function
return _handle
def handleOpen(index = None, sub = None):
def _handle(function):
function.match = (index,sub)
function.type = OPENCAN
return function
return _handle
class EvtCanController(BaseController):
msg_type="EVTCAN"
@BaseController.handleEvt(13)
def handle_me(self,message):
pass
class OpenCanController(BaseController):
msg_type="OPENCAN"
@BaseController.handleOpen(0x1800,0x00)
def handle_you(self,message):
pass
class MiscController(BaseController):
msg_type="MISC"
| 24.891566
| 71
| 0.614714
|
b365d604c0a74f0fec6c4b0ddb14d5240098baa3
| 1,452
|
py
|
Python
|
removeUnmatchimgorxml.py
|
kevincao91/Tools
|
545901c682c20cd06256156dadc75b8e4e7df88c
|
[
"MIT"
] | null | null | null |
removeUnmatchimgorxml.py
|
kevincao91/Tools
|
545901c682c20cd06256156dadc75b8e4e7df88c
|
[
"MIT"
] | null | null | null |
removeUnmatchimgorxml.py
|
kevincao91/Tools
|
545901c682c20cd06256156dadc75b8e4e7df88c
|
[
"MIT"
] | null | null | null |
# encoding=utf8
import os
from tqdm import tqdm
from basicFun import FILES
imgDir=r"/media/kevin/娱乐/xizang_database/label_data/JPEGImages"
xmlDir=r"/media/kevin/娱乐/xizang_database/label_data/Annotations"
imgType="jpg"
xmlType="xml"
def remove_xml():
imgs = [x for x in FILES.get_sorted_files(imgDir) ]
xmls = [x for x in FILES.get_sorted_files(xmlDir) ]
# print(imgs)
# remove xml
count = 0
for xml in tqdm(xmls):
find_img_name=xml.replace(xmlType, imgType)
# print(find_img_name)
if find_img_name in imgs:
pass
# print('save %s'%xml)
else:
print('del %s'%xml)
os.remove(os.path.join(xmlDir, xml))
count+=1
print("remove {} xmls.\n".format(count))
def remove_img():
imgs = [x for x in FILES.get_sorted_files(imgDir) ]
xmls = [x for x in FILES.get_sorted_files(xmlDir) ]
# print(imgs)
# remove img
count = 0
for img in tqdm(imgs):
find_xml_name=img.replace(imgType, xmlType)
# print(find_img_name)
if find_xml_name in xmls:
pass
# print('save %s'%img)
else:
print('del %s'%img)
os.remove(os.path.join(imgDir, img))
count+=1
print("remove {} imgs.\n".format(count))
if __name__ == "__main__":
remove_img()
remove_xml()
| 23.047619
| 65
| 0.567493
|
a4616fdb7d1b25fd8794d930b6d048076e16b540
| 10,071
|
py
|
Python
|
oscarapi/urls.py
|
ramanaditya/django-oscar-api
|
da9a658c3f9e4faacda81a19eddc94527c81e46a
|
[
"BSD-3-Clause"
] | null | null | null |
oscarapi/urls.py
|
ramanaditya/django-oscar-api
|
da9a658c3f9e4faacda81a19eddc94527c81e46a
|
[
"BSD-3-Clause"
] | null | null | null |
oscarapi/urls.py
|
ramanaditya/django-oscar-api
|
da9a658c3f9e4faacda81a19eddc94527c81e46a
|
[
"BSD-3-Clause"
] | null | null | null |
# pylint: disable=unbalanced-tuple-unpacking
from django.conf import settings
from django.urls import include, path, re_path
from rest_framework.urlpatterns import format_suffix_patterns
from oscarapi.utils.loading import get_api_classes, get_api_class
api_root = get_api_class("views.root", "api_root")
LoginView = get_api_class("views.login", "LoginView")
(
BasketView,
AddProductView,
AddVoucherView,
ShippingMethodView,
LineList,
BasketLineDetail,
) = get_api_classes(
"views.basket",
[
"BasketView",
"AddProductView",
"AddVoucherView",
"ShippingMethodView",
"LineList",
"BasketLineDetail",
],
)
(UserList, UserDetail) = get_api_classes("views.admin.user", ["UserList", "UserDetail"])
(StockRecordDetail, PartnerList, PartnerDetail) = get_api_classes(
"views.admin.partner", ["StockRecordDetail", "PartnerList", "PartnerDetail"]
)
(
BasketList,
BasketDetail,
LineAttributeDetail,
OptionList,
OptionDetail,
CountryList,
CountryDetail,
RangeList,
RangeDetail,
) = get_api_classes(
"views.basic",
[
"BasketList",
"BasketDetail",
"LineAttributeDetail",
"OptionList",
"OptionDetail",
"CountryList",
"CountryDetail",
"RangeList",
"RangeDetail",
],
)
(
ProductList,
ProductDetail,
ProductStockRecords,
ProductStockRecordDetail,
ProductPrice,
ProductAvailability,
CategoryList,
CategoryDetail,
ProductAttributeList,
ProductAttributeDetail,
ProductAttributeValueList,
ProductAttributeValueDetail,
ProductImageList,
ProductImageDetail,
) = get_api_classes(
"views.product",
[
"ProductList",
"ProductDetail",
"ProductStockRecords",
"ProductStockRecordDetail",
"ProductPrice",
"ProductAvailability",
"CategoryList",
"CategoryDetail",
"ProductAttributeList",
"ProductAttributeDetail",
"ProductAttributeValueList",
"ProductAttributeValueDetail",
"ProductImageList",
"ProductImageDetail",
],
)
(
CheckoutView,
OrderList,
OrderDetail,
OrderLineList,
OrderLineDetail,
OrderLineAttributeDetail,
UserAddressList,
UserAddressDetail,
) = get_api_classes(
"views.checkout",
[
"CheckoutView",
"OrderList",
"OrderDetail",
"OrderLineList",
"OrderLineDetail",
"OrderLineAttributeDetail",
"UserAddressList",
"UserAddressDetail",
],
)
(
ProductAdminList,
ProductAdminDetail,
ProductClassAdminList,
ProductClassAdminDetail,
ProductAttributeAdminList,
ProductAttributeAdminDetail,
AttributeOptionGroupAdminList,
AttributeOptionGroupAdminDetail,
CategoryAdminList,
CategoryAdminDetail,
) = get_api_classes(
"views.admin.product",
[
"ProductAdminList",
"ProductAdminDetail",
"ProductClassAdminList",
"ProductClassAdminDetail",
"ProductAttributeAdminList",
"ProductAttributeAdminDetail",
"AttributeOptionGroupAdminList",
"AttributeOptionGroupAdminDetail",
"CategoryAdminList",
"CategoryAdminDetail",
],
)
(
OrderAdminList,
OrderAdminDetail,
OrderLineAdminList,
OrderLineAdminDetail,
OrderLineAttributeAdminDetail,
) = get_api_classes(
"views.admin.order",
[
"OrderAdminList",
"OrderAdminDetail",
"OrderLineAdminList",
"OrderLineAdminDetail",
"OrderLineAttributeAdminDetail",
],
)
urlpatterns = [
path("", api_root, name="api-root"),
path("login/", LoginView.as_view(), name="api-login"),
path("basket/", BasketView.as_view(), name="api-basket"),
path(
"basket/add-product/", AddProductView.as_view(), name="api-basket-add-product"
),
path(
"basket/add-voucher/", AddVoucherView.as_view(), name="api-basket-add-voucher"
),
path(
"basket/shipping-methods/",
ShippingMethodView.as_view(),
name="api-basket-shipping-methods",
),
path("baskets/", BasketList.as_view(), name="basket-list"),
path("baskets/<int:pk>/", BasketDetail.as_view(), name="basket-detail"),
path("baskets/<int:pk>/lines/", LineList.as_view(), name="basket-lines-list"),
path(
"baskets/<int:basket_pk>/lines/<int:pk>/",
BasketLineDetail.as_view(),
name="basket-line-detail",
),
path(
"baskets/<int:basket_pk>/lines/<int:line_pk>/lineattributes/<int:pk>/",
LineAttributeDetail.as_view(),
name="lineattribute-detail",
),
path("products/", ProductList.as_view(), name="product-list"),
path("products/<int:pk>/", ProductDetail.as_view(), name="product-detail"),
path("products/<int:pk>/price/", ProductPrice.as_view(), name="product-price"),
path(
"products/<int:pk>/availability/",
ProductAvailability.as_view(),
name="product-availability",
),
path(
"products/<int:pk>/stockrecords/",
ProductStockRecords.as_view(),
name="product-stockrecords",
),
path(
"products/<int:product_pk>/stockrecords/<int:pk>/",
ProductStockRecordDetail.as_view(),
name="product-stockrecord-detail",
),
path("options/", OptionList.as_view(), name="option-list"),
path("options/<int:pk>/", OptionDetail.as_view(), name="option-detail"),
path("ranges/", RangeList.as_view(), name="range-list"),
path("ranges/<int:pk>/", RangeDetail.as_view(), name="range-detail"),
path("categories/", CategoryList.as_view(), name="category-list"),
path("categories/<int:pk>/", CategoryDetail.as_view(), name="category-detail"),
re_path(
"^categories/(?P<breadcrumbs>.*)/$",
CategoryList.as_view(),
name="category-child-list",
),
path("productattributes/", ProductAttributeList.as_view(), name="product-attribute"),
path("productattributes/<int:pk>/", ProductAttributeDetail.as_view(), name="product-attribute-detail"),
path("productattributevalues/", ProductAttributeValueList.as_view(), name="product-attribute-value"),
path("productattributevalues/<int:pk>/", ProductAttributeValueDetail.as_view(), name="product-attribute-value-detail"),
path("productimages/", ProductImageList.as_view(), name="product-images"),
path("productimages/<int:pk>/", ProductImageDetail.as_view(), name="product-image-detail"),
path("users/<int:pk>/", UserDetail.as_view(), name="user-detail"),
path("checkout/", CheckoutView.as_view(), name="api-checkout"),
path("orders/", OrderList.as_view(), name="order-list"),
path("orders/<int:pk>/", OrderDetail.as_view(), name="order-detail"),
path("orders/<int:pk>/lines/", OrderLineList.as_view(), name="order-lines-list"),
path("orderlines/<int:pk>/", OrderLineDetail.as_view(), name="order-lines-detail"),
path(
"orderlineattributes/<int:pk>/",
OrderLineAttributeDetail.as_view(),
name="order-lineattributes-detail",
),
path("countries/", CountryList.as_view(), name="country-list"),
re_path(
r"^countries/(?P<pk>[A-z]{2})/$", CountryDetail.as_view(), name="country-detail"
),
path("useraddresses/", UserAddressList.as_view(), name="useraddress-list"),
path(
"useraddresses/<int:pk>/",
UserAddressDetail.as_view(),
name="useraddress-detail",
),
]
admin_urlpatterns = [
path("products/", ProductAdminList.as_view(), name="admin-product-list"),
path(
"products/<int:pk>/",
ProductAdminDetail.as_view(),
name="admin-product-detail",
),
path(
"productclasses/",
ProductClassAdminList.as_view(),
name="admin-productclass-list",
),
path(
"productclasses/<slug:slug>/",
ProductClassAdminDetail.as_view(),
name="admin-productclass-detail",
),
path("categories/", CategoryAdminList.as_view(), name="admin-category-list"),
path(
"categories/<int:pk>/",
CategoryAdminDetail.as_view(),
name="admin-category-detail",
),
re_path(
r"^categories/(?P<breadcrumbs>.*)/$",
CategoryAdminList.as_view(),
name="admin-category-child-list",
),
path(
"productattributes/",
ProductAttributeAdminList.as_view(),
name="admin-productattribute-list",
),
path(
"stockrecords/<int:pk>/",
StockRecordDetail.as_view(),
name="admin-stockrecord-detail",
),
path("partners/", PartnerList.as_view(), name="admin-partner-list"),
path("partners/<int:pk>/", PartnerDetail.as_view(), name="partner-detail"),
path(
"productattributes/<int:pk>/",
ProductAttributeAdminDetail.as_view(),
name="admin-productattribute-detail",
),
path(
"attributeoptiongroups/",
AttributeOptionGroupAdminList.as_view(),
name="admin-attributeoptiongroup-list",
),
path(
"attributeoptiongroups/<int:pk>/",
AttributeOptionGroupAdminDetail.as_view(),
name="admin-attributeoptiongroup-detail",
),
path("orders/", OrderAdminList.as_view(), name="admin-order-list"),
path("orders/<int:pk>/", OrderAdminDetail.as_view(), name="admin-order-detail"),
path(
"orders/<int:pk>/lines/",
OrderLineAdminList.as_view(),
name="admin-order-lines-list",
),
path(
"orderlines/<int:pk>/",
OrderLineAdminDetail.as_view(),
name="admin-order-lines-detail",
),
path(
"orderlineattributes/<int:pk>/",
OrderLineAttributeAdminDetail.as_view(),
name="admin-order-lineattributes-detail",
),
path("users/", UserList.as_view(), name="admin-user-list"),
]
if not getattr(settings, "OSCARAPI_BLOCK_ADMIN_API_ACCESS", True):
urlpatterns.append(path("admin/", include(admin_urlpatterns)))
urlpatterns = format_suffix_patterns(urlpatterns)
| 30.798165
| 123
| 0.641644
|
c1175ec2430dedd1bcc2ff464ed1272a49418932
| 26,957
|
py
|
Python
|
arc/parserTest.py
|
shihchengli/ARC
|
9c5b03c27dac41f213dfc4b26888c01c8c6f4e83
|
[
"MIT"
] | null | null | null |
arc/parserTest.py
|
shihchengli/ARC
|
9c5b03c27dac41f213dfc4b26888c01c8c6f4e83
|
[
"MIT"
] | null | null | null |
arc/parserTest.py
|
shihchengli/ARC
|
9c5b03c27dac41f213dfc4b26888c01c8c6f4e83
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
"""
This module contains unit tests for the parser functions
"""
import numpy as np
import os
import unittest
import arc.parser as parser
from arc.settings import arc_path
from arc.species import ARCSpecies
from arc.species.converter import xyz_to_str
class TestParser(unittest.TestCase):
"""
Contains unit tests for the parser functions
"""
@classmethod
def setUpClass(cls):
"""
A method that is run before all unit tests in this class.
"""
cls.maxDiff = None
def test_parse_frequencies(self):
"""Test frequency parsing"""
no3_path = os.path.join(arc_path, 'arc', 'testing', 'freq', 'NO3_freq_QChem_fails_on_cclib.out')
c2h6_path = os.path.join(arc_path, 'arc', 'testing', 'freq', 'C2H6_freq_QChem.out')
so2oo_path = os.path.join(arc_path, 'arc', 'testing', 'composite', 'SO2OO_CBS-QB3.log')
ch2o_path_molpro = os.path.join(arc_path, 'arc', 'testing', 'freq', 'CH2O_freq_molpro.out')
ch2o_path_terachem = os.path.join(arc_path, 'arc', 'testing', 'freq', 'CH2O_freq_terachem.dat')
ch2o_path_terachem_output = os.path.join(arc_path, 'arc', 'testing', 'freq', 'formaldehyde_freq_terachem_output.out')
ncc_path_terachem_output = os.path.join(arc_path, 'arc', 'testing', 'freq', 'ethylamine_freq_terachem_output.out')
orca_path = os.path.join(arc_path, 'arc', 'testing', 'orca_example_freq.log')
no3_freqs = parser.parse_frequencies(path=no3_path, software='QChem')
c2h6_freqs = parser.parse_frequencies(path=c2h6_path, software='QChem')
so2oo_freqs = parser.parse_frequencies(path=so2oo_path, software='Gaussian')
ch2o_molpro_freqs = parser.parse_frequencies(path=ch2o_path_molpro, software='Molpro')
ch2o_terachem_freqs = parser.parse_frequencies(path=ch2o_path_terachem, software='TeraChem')
ch2o_terachem_output_freqs = parser.parse_frequencies(path=ch2o_path_terachem_output, software='TeraChem')
ncc_terachem_output_freqs = parser.parse_frequencies(path=ncc_path_terachem_output, software='TeraChem')
orca_freqs = parser.parse_frequencies(path=orca_path, software='Orca')
np.testing.assert_almost_equal(no3_freqs,
np.array([-390.08, -389.96, 822.75, 1113.23, 1115.24, 1195.35], np.float64))
np.testing.assert_almost_equal(c2h6_freqs,
np.array([352.37, 847.01, 861.68, 1023.23, 1232.66, 1235.04, 1425.48, 1455.31,
1513.67, 1518.02, 1526.18, 1526.56, 3049.78, 3053.32, 3111.61, 3114.2,
3134.14, 3136.8], np.float64))
np.testing.assert_almost_equal(so2oo_freqs,
np.array([302.51, 468.1488, 469.024, 484.198, 641.0067, 658.6316,
902.2888, 1236.9268, 1419.0826], np.float64))
np.testing.assert_almost_equal(ch2o_molpro_freqs,
np.array([1181.01, 1261.34, 1529.25, 1764.47, 2932.15, 3000.10], np.float64))
np.testing.assert_almost_equal(ch2o_terachem_freqs,
np.array([1198.228, 1271.913, 1562.435, 1900.334, 2918.771, 2966.569],
np.float64))
np.testing.assert_almost_equal(ch2o_terachem_output_freqs,
np.array([1198.63520807, 1276.19910582, 1563.62759321, 1893.24407646,
2916.39175334, 2965.86839559], np.float64))
np.testing.assert_almost_equal(ncc_terachem_output_freqs,
np.array([170.56668709, 278.52007409, 406.49102131, 765.91960508, 861.6118189,
910.16404036, 1010.63529045, 1052.86795614, 1160.15911873,
1275.00946008, 1386.75755192, 1406.08828477, 1425.90872097,
1506.47789418, 1522.65901736, 1527.41841768, 1710.89393731,
3020.79869151, 3035.66348773, 3061.21808688, 3085.3062489,
3087.60678739, 3447.41720077, 3529.23879182], np.float64))
np.testing.assert_almost_equal(orca_freqs,
np.array([1151.03, 1250.19, 1526.12, 1846.4, 3010.49, 3070.82], np.float64))
def test_parse_normal_displacement_modes(self):
"""Test parsing frequencies and normal displacement modes"""
freq_path = os.path.join(arc_path, 'arc', 'testing', 'Gaussian_neg_freq.out')
freqs, normal_disp_modes = parser.parse_normal_displacement_modes(path=freq_path)
expected_freqs = np.array([-18.0696, 127.6948, 174.9499, 207.585, 228.8421, 281.2939, 292.4101,
308.0345, 375.4493, 486.8396, 498.6986, 537.6196, 564.0223, 615.3762,
741.8843, 749.3428, 777.1524, 855.3031, 871.055, 962.7075, 977.6181,
1050.3147, 1051.8134, 1071.7234, 1082.0731, 1146.8729, 1170.0212, 1179.6722,
1189.1581, 1206.0905, 1269.8371, 1313.4043, 1355.1081, 1380.155, 1429.7095,
1464.5357, 1475.5996, 1493.6501, 1494.3533, 1500.9964, 1507.5851, 1532.7927,
1587.7095, 1643.0702, 2992.7203, 3045.3662, 3068.6577, 3123.9646, 3158.2579,
3159.3532, 3199.1684, 3211.4927, 3223.942, 3233.9201], np.float64)
np.testing.assert_almost_equal(freqs, expected_freqs)
expected_normal_disp_modes_0 = np.array(
[[-0.0, 0.0, -0.09], [-0.0, 0.0, -0.1], [0.0, 0.0, -0.01], [0.0, 0.0, -0.07], [0.0, 0.0, -0.2],
[-0.0, -0.0, 0.28], [0.0, -0.0, -0.08], [0.0, -0.0, 0.01], [0.0, -0.0, 0.12], [0.0, -0.0, 0.12],
[0.08, -0.02, -0.04], [-0.08, 0.02, -0.04], [-0.0, 0.0, -0.18], [-0.3, -0.03, 0.41], [-0.0, -0.0, 0.4],
[0.3, 0.03, 0.41], [0.0, 0.0, -0.15], [0.0, -0.0, 0.01], [0.0, -0.0, 0.21], [0.0, -0.0, 0.19]], np.float64)
np.testing.assert_almost_equal(normal_disp_modes[0], expected_normal_disp_modes_0)
freq_path = os.path.join(arc_path, 'arc', 'testing', 'CHO_neg_freq.out')
freqs, normal_disp_modes = parser.parse_normal_displacement_modes(path=freq_path)
expected_freqs = np.array([-1612.8294, 840.8655, 1883.4822, 3498.091], np.float64)
np.testing.assert_almost_equal(freqs, expected_freqs)
expected_normal_disp_modes = np.array(
[[[0.05, 0.03, 0.], [-0.13, -0.09, 0.], [0.8, 0.57, 0.]],
[[-0.03, 0.05, 0.], [0.09, -0.13, 0.], [-0.57, 0.8, 0.]],
[[0., 0., -0.41], [-0., 0., 0.49], [0., 0., 0.77]],
[[0.05, 0.03, 0.], [-0.13, -0.09, 0.], [0.8, 0.57, 0.]],
[[-0.03, 0.05, 0.], [0.09, -0.13, 0.], [-0.57, 0.8, 0.]],
[[0., 0., -0.41], [0., 0., 0.49], [0., 0., 0.77]],
[[0.05, 0.03, 0.], [-0.13, -0.09, 0.], [0.8, 0.57, 0.]],
[[-0.03, 0.05, 0.], [0.09, -0.13, 0.], [-0.57, 0.8, 0.]],
[[0., 0., -0.41], [-0., 0., 0.49], [0., 0., 0.77]]], np.float64)
np.testing.assert_almost_equal(normal_disp_modes, expected_normal_disp_modes)
def test_parse_xyz_from_file(self):
"""Test parsing xyz from a file"""
path1 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'CH3C(O)O.gjf')
path2 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'CH3C(O)O.xyz')
path3 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'AIBN.gjf')
path4 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'molpro.in')
path5 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'qchem.in')
path6 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'qchem_output.out')
path7 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'TS.gjf')
path8 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'formaldehyde_coords.xyz')
path9 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'optim_traj_terachem.xyz') # test trajectories
path10 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'ethane_minimize_terachem_output.out')
path11 = os.path.join(arc_path, 'arc', 'testing', 'orca_example_opt.log')
xyz1 = parser.parse_xyz_from_file(path1)
xyz2 = parser.parse_xyz_from_file(path2)
xyz3 = parser.parse_xyz_from_file(path3)
xyz4 = parser.parse_xyz_from_file(path4)
xyz5 = parser.parse_xyz_from_file(path5)
xyz6 = parser.parse_xyz_from_file(path6)
xyz7 = parser.parse_xyz_from_file(path7)
xyz8 = parser.parse_xyz_from_file(path8)
xyz9 = parser.parse_xyz_from_file(path9)
xyz10 = parser.parse_xyz_from_file(path10)
xyz11 = parser.parse_xyz_from_file(path11)
self.assertEqual(xyz1, xyz2)
xyz1_str = xyz_to_str(xyz1)
xyz2_str = xyz_to_str(xyz2)
xyz3_str = xyz_to_str(xyz3)
xyz4_str = xyz_to_str(xyz4)
xyz5_str = xyz_to_str(xyz5)
xyz6_str = xyz_to_str(xyz6)
xyz9_str = xyz_to_str(xyz9)
xyz11_str = xyz_to_str(xyz11)
self.assertTrue('C 1.40511900 0.21728200 0.07675200' in xyz1_str)
self.assertTrue('O -0.79314200 1.04818800 0.18134200' in xyz1_str)
self.assertTrue('H -0.43701200 -1.34990600 0.92900600' in xyz2_str)
self.assertTrue('C 2.12217963 -0.66843078 1.04808732' in xyz3_str)
self.assertTrue('N 2.41731872 -1.07916417 2.08039935' in xyz3_str)
spc3 = ARCSpecies(label='AIBN', xyz=xyz3)
self.assertEqual(len(spc3.mol.atoms), 24)
self.assertTrue('S -0.42046822 -0.39099498 0.02453521' in xyz4_str)
self.assertTrue('N -1.99742564 0.38106573 0.09139807' in xyz5_str)
self.assertTrue('N -1.17538406 0.34366165 0.03265021' in xyz6_str)
self.assertEqual(len(xyz7['symbols']), 34)
self.assertEqual(len(xyz8['symbols']), 4)
expected_xyz_9 = """N -0.67665958 0.74524340 -0.41319355
H -1.26179357 1.52577220 -0.13687665
H 0.28392722 1.06723640 -0.44163375
N -0.75345799 -0.33268278 0.51180786
H -0.97153041 -0.02416219 1.45398654
H -1.48669570 -0.95874053 0.20627423
N 2.28178508 -0.42455356 0.14404399
H 1.32677989 -0.80557411 0.33156013"""
self.assertEqual(xyz9_str, expected_xyz_9)
self.assertIsNone(xyz10)
expected_xyz_11 = """C 0.00917900 -0.00000000 -0.00000000
O 1.20814900 -0.00000000 0.00000000
H -0.59436200 0.94730400 0.00000000
H -0.59436200 -0.94730400 0.00000000"""
self.assertEqual(xyz11_str, expected_xyz_11)
def test_parse_geometry(self):
"""Test parse_geometry()"""
        # Test parsing xyz from a Gaussian file with more than 50 atoms where the iop(2/9=2000) keyword is not specified
path1 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'Gaussian_large.log')
xyz = parser.parse_geometry(path=path1)
self.assertIsInstance(xyz, dict)
self.assertEqual(len(xyz['symbols']), 53)
def test_parse_trajectory(self):
"""Test parsing trajectories"""
path = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'scan_optim.xyz')
trajectory = parser.parse_trajectory(path)
self.assertEqual(len(trajectory), 46)
self.assertIsInstance(trajectory[0], dict)
self.assertEqual(len(trajectory[0]['symbols']), 9)
path = os.path.join(arc_path, 'arc', 'testing', 'irc', 'cyano_irc_1.out')
trajectory = parser.parse_trajectory(path)
self.assertEqual(len(trajectory), 58)
self.assertIsInstance(trajectory[0], dict)
self.assertEqual(len(trajectory[0]['symbols']), 16)
path = os.path.join(arc_path, 'arc', 'testing', 'irc', 'irc_failed.out')
trajectory = parser.parse_trajectory(path)
self.assertEqual(len(trajectory), 21)
self.assertIsInstance(trajectory[0], dict)
self.assertEqual(len(trajectory[0]['symbols']), 17)
def test_parse_1d_scan_coords(self):
"""Test parsing the optimized coordinates of a torsion scan at each optimization point"""
path = os.path.join(arc_path, 'arc', 'testing', 'rotor_scans', 'H2O2.out')
traj = parser.parse_1d_scan_coords(path)
self.assertEqual(len(traj), 37)
self.assertEqual(traj[10], {'coords': ((-0.715582, -0.140909, 0.383809),
(0.715582, 0.140909, 0.383809),
(-1.043959, 0.678384, -0.010288),
(1.043959, -0.678384, -0.010288)),
'isotopes': (16, 16, 1, 1),
'symbols': ('O', 'O', 'H', 'H')})
def test_parse_t1(self):
"""Test T1 diagnostic parsing"""
path = os.path.join(arc_path, 'arc', 'testing', 'sp', 'mehylamine_CCSD(T).out')
t1 = parser.parse_t1(path)
self.assertEqual(t1, 0.0086766)
def test_parse_e_elect(self):
"""Test parsing E0 from an sp job output file"""
path = os.path.join(arc_path, 'arc', 'testing', 'sp', 'mehylamine_CCSD(T).out')
e_elect = parser.parse_e_elect(path)
self.assertAlmostEqual(e_elect, -251377.49160993524)
path = os.path.join(arc_path, 'arc', 'testing', 'composite', 'SO2OO_CBS-QB3.log')
e_elect = parser.parse_e_elect(path, zpe_scale_factor=0.99)
self.assertAlmostEqual(e_elect, -1833127.0939478774)
path = os.path.join(arc_path, 'arc', 'testing', 'sp', 'formaldehyde_sp_terachem_output.out')
e_elect = parser.parse_e_elect(path)
self.assertAlmostEqual(e_elect, -300621.95378630824)
path = os.path.join(arc_path, 'arc', 'testing', 'sp', 'formaldehyde_sp_terachem_results.dat')
e_elect = parser.parse_e_elect(path)
self.assertAlmostEqual(e_elect, -300621.95378630824)
def test_parse_zpe(self):
"""Test the parse_zpe() function for parsing zero point energies"""
path1 = os.path.join(arc_path, 'arc', 'testing', 'freq', 'C2H6_freq_QChem.out')
path2 = os.path.join(arc_path, 'arc', 'testing', 'freq', 'CH2O_freq_molpro.out')
path3 = os.path.join(arc_path, 'arc', 'testing', 'freq', 'NO3_freq_QChem_fails_on_cclib.out')
path4 = os.path.join(arc_path, 'arc', 'testing', 'composite', 'SO2OO_CBS-QB3.log')
zpe1, zpe2, zpe3, zpe4 = parser.parse_zpe(path1), parser.parse_zpe(path2), parser.parse_zpe(path3), \
parser.parse_zpe(path4)
self.assertAlmostEqual(zpe1, 198.08311200000, 5)
self.assertAlmostEqual(zpe2, 69.793662734869, 5)
self.assertAlmostEqual(zpe3, 25.401064000000, 5)
self.assertAlmostEqual(zpe4, 39.368057626223, 5)
def test_parse_1d_scan_energies(self):
"""Test parsing a 1D scan output file"""
path1 = os.path.join(arc_path, 'arc', 'testing', 'rotor_scans', 'sBuOH.out')
energies, angles = parser.parse_1d_scan_energies(path=path1)
expected_energies = np.array([1.57530564e-05, 3.98826556e-01, 1.60839959e+00, 3.49030801e+00,
5.74358812e+00, 8.01124810e+00, 9.87649510e+00, 1.10079306e+01,
1.11473788e+01, 1.02373175e+01, 8.49330826e+00, 6.23697731e+00,
3.89294941e+00, 1.87096796e+00, 5.13009545e-01, 1.86410533e-04,
4.16146979e-01, 1.66269755e+00, 3.59565619e+00, 5.90306099e+00,
8.19668453e+00, 1.00329329e+01, 1.10759678e+01, 1.10923247e+01,
1.00763770e+01, 8.28078980e+00, 6.04456755e+00, 3.77500671e+00,
1.83344694e+00, 5.20014378e-01, 2.21067093e-03, 3.70723206e-01,
1.56091218e+00, 3.44323279e+00, 5.73505787e+00, 8.04497265e+00,
9.93330041e+00, 1.10426686e+01, 1.11168469e+01, 1.01271857e+01,
8.32729265e+00, 6.06336876e+00, 3.76108631e+00, 1.80461632e+00,
4.94715062e-01, 0.00000000e+00], np.float64)
expected_angles = np.array([0., 8., 16., 24., 32., 40., 48., 56., 64., 72., 80., 88., 96., 104.,
112., 120., 128., 136., 144., 152., 160., 168., 176., 184., 192., 200., 208., 216.,
224., 232., 240., 248., 256., 264., 272., 280., 288., 296., 304., 312., 320., 328.,
336., 344., 352., 360.], np.float64)
np.testing.assert_almost_equal(energies, expected_energies)
np.testing.assert_almost_equal(angles, expected_angles)
def test_parse_nd_scan_energies(self):
"""Test parsing an ND scan output file"""
path1 = os.path.join(arc_path, 'arc', 'testing', 'rotor_scans', 'scan_2D_relaxed_OCdOO.log')
results = parser.parse_nd_scan_energies(path=path1, software='gaussian')
self.assertEqual(results['directed_scan_type'], 'ess_gaussian')
self.assertEqual(results['scans'], [(4, 1, 2, 5), (4, 1, 3, 6)])
self.assertEqual(len(list(results.keys())), 3)
self.assertEqual(len(list(results['directed_scan'].keys())), 36 * 36 + 1) # 1297
self.assertAlmostEqual(results['directed_scan']['170.00', '40.00']['energy'], 26.09747088)
def test_parse_dipole_moment(self):
"""Test parsing the dipole moment from an opt job output file"""
path1 = os.path.join(arc_path, 'arc', 'testing', 'composite', 'SO2OO_CBS-QB3.log')
dm1 = parser.parse_dipole_moment(path1)
self.assertEqual(dm1, 0.63)
path2 = os.path.join(arc_path, 'arc', 'testing', 'N2H4_opt_QChem.out')
dm2 = parser.parse_dipole_moment(path2)
self.assertEqual(dm2, 2.0664)
path3 = os.path.join(arc_path, 'arc', 'testing', 'freq', 'CH2O_freq_molpro.out')
dm3 = parser.parse_dipole_moment(path3)
self.assertAlmostEqual(dm3, 2.8840, 4)
path4 = os.path.join(arc_path, 'arc', 'testing', 'orca_example_opt.log')
dm4 = parser.parse_dipole_moment(path4)
self.assertEqual(dm4, 2.11328)
path5 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'ethane_minimize_terachem_output.out')
dm5 = parser.parse_dipole_moment(path5)
self.assertAlmostEqual(dm5, 0.000179036, 4)
def test_parse_polarizability(self):
"""Test parsing the polarizability moment from a freq job output file"""
path1 = os.path.join(arc_path, 'arc', 'testing', 'composite', 'SO2OO_CBS-QB3.log')
polar1 = parser.parse_polarizability(path1)
self.assertAlmostEqual(polar1, 3.99506, 4)
def test_process_conformers_file(self):
"""Test processing ARC conformer files"""
path1 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'conformers_before_optimization.txt')
path2 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'conformers_after_optimization.txt')
path3 = os.path.join(arc_path, 'arc', 'testing', 'xyz', 'conformers_file.txt')
xyzs, energies = parser.process_conformers_file(path1)
self.assertEqual(len(xyzs), 3)
self.assertEqual(len(energies), 3)
self.assertTrue(all([e is None for e in energies]))
spc1 = ARCSpecies(label='tst1', xyz=xyzs[0])
self.assertEqual(len(spc1.conformers), 1)
xyzs, energies = parser.process_conformers_file(path2)
self.assertEqual(len(xyzs), 3)
self.assertEqual(len(energies), 3)
self.assertEqual(energies, [0.0, 10.271, 10.288])
spc2 = ARCSpecies(label='tst2', xyz=xyzs[:2])
self.assertEqual(len(spc2.conformers), 2)
self.assertEqual(len(spc2.conformer_energies), 2)
xyzs, energies = parser.process_conformers_file(path3)
self.assertEqual(len(xyzs), 4)
self.assertEqual(len(energies), 4)
self.assertEqual(energies, [0.0, 0.005, None, 0.005])
spc3 = ARCSpecies(label='tst3', xyz=xyzs)
self.assertEqual(len(spc3.conformers), 4)
self.assertEqual(len(spc3.conformer_energies), 4)
spc4 = ARCSpecies(label='tst4', xyz=path1)
self.assertEqual(len(spc4.conformers), 3)
self.assertTrue(all([e is None for e in spc4.conformer_energies]))
spc5 = ARCSpecies(label='tst5', xyz=path2)
self.assertEqual(len(spc5.conformers), 3)
self.assertTrue(all([e is not None for e in spc5.conformer_energies]))
spc6 = ARCSpecies(label='tst6', xyz=path3)
self.assertEqual(len(spc6.conformers), 4)
def test_parse_str_blocks(self):
"""Test parsing str blocks"""
path = os.path.join(arc_path, 'arc', 'testing', 'rotor_scans', 'H2O2.out')
str_blks = parser.parse_str_blocks(
path, 'Initial Parameters', '--------', regex=False, tail_count=3)
desire_str_lists = [
' ! Initial Parameters !\n',
' ! (Angstroms and Degrees) !\n',
' -------------------------- --------------------------\n',
' ! Name Definition Value Derivative Info. !\n',
' --------------------------------------------------------------------------------\n',
' ! R1 R(1,2) 1.4252 calculate D2E/DX2 analytically !\n',
' ! R2 R(1,3) 0.9628 calculate D2E/DX2 analytically !\n',
' ! R3 R(2,4) 0.9628 calculate D2E/DX2 analytically !\n',
' ! A1 A(2,1,3) 101.2687 calculate D2E/DX2 analytically !\n',
' ! A2 A(1,2,4) 101.2687 calculate D2E/DX2 analytically !\n',
' ! D1 D(3,1,2,4) 118.8736 Scan !\n',
' --------------------------------------------------------------------------------\n']
self.assertEqual(len(str_blks), 1)
self.assertEqual(str_blks[0], desire_str_lists)
def test_parse_scan_args(self):
"""Test parsing scan arguments"""
path = os.path.join(arc_path, 'arc', 'testing', 'rotor_scans', 'CH2OOH.out')
scan_args = parser.parse_scan_args(path)
self.assertEqual(scan_args['scan'], [4, 1, 2, 3])
self.assertEqual(scan_args['freeze'], [[1, 2, 3, 6], [2, 3]])
self.assertEqual(scan_args['step'], 90)
self.assertEqual(scan_args['step_size'], 4.0)
self.assertEqual(scan_args['n_atom'], 6)
def test_parse_ic_info(self):
"""Test parsing internal coordinates information"""
path = os.path.join(arc_path, 'arc', 'testing', 'rotor_scans', 'CH2OOH.out')
ic_info = parser.parse_ic_info(path)
expected_labels = ['R1', 'R2', 'R3', 'R4', 'R5', 'A1', 'A2',
'A3', 'A4', 'A5', 'D1', 'D2', 'D3', 'D4']
expected_types = ['R', 'R', 'R', 'R', 'R', 'A',
'A', 'A', 'A', 'A', 'D', 'D', 'D', 'D']
expected_atoms = [[1, 2], [1, 4], [1, 5], [2, 3], [3, 6], [2, 1, 4],
[2, 1, 5], [4, 1, 5], [1, 2, 3], [2, 3, 6], [4, 1, 2, 3],
[5, 1, 2, 3], [2, 1, 4, 5], [1, 2, 3, 6]]
expected_redundant = [False] * 14
expected_scan = [False, False, False, False, False, False, False,
False, False, False, True, True, False, False]
self.assertEqual(expected_labels, ic_info.index.to_list())
self.assertEqual(expected_types, ic_info.type.to_list())
self.assertEqual(expected_atoms, ic_info.atoms.to_list())
self.assertEqual(expected_redundant, ic_info.redundant.to_list())
self.assertEqual(expected_scan, ic_info.scan.to_list())
def test_parse_ic_values(self):
"""Test parsing internal coordinate values"""
ic_blk = [
' ! R1 R(1,2) 1.4535 -DE/DX = 0.0 !\n',
' ! R2 R(1,3) 0.9674 -DE/DX = 0.0 !\n',
' ! R3 R(2,4) 0.9674 -DE/DX = 0.0 !\n',
' ! A1 A(2,1,3) 100.563 -DE/DX = 0.0 !\n',
' ! A2 A(1,2,4) 100.563 -DE/DX = 0.0 !\n',
' ! D1 D(3,1,2,4) 118.8736 -DE/DX = 0.0003 !\n']
software = 'gaussian'
ic_values = parser.parse_ic_values(ic_blk, software)
expected_labels = ['R1', 'R2', 'R3', 'A1', 'A2', 'D1']
expected_values = [1.4535, 0.9674, 0.9674, 100.563, 100.563, 118.8736]
self.assertEqual(expected_labels, ic_values.index.to_list())
self.assertEqual(expected_values, ic_values.value.to_list())
def test_parse_conformers(self):
"""Test parsing internal coordinates of all intermediate conformers in a scan job"""
path = os.path.join(arc_path, 'arc', 'testing', 'rotor_scans', 'H2O2.out')
scan_conformers = parser.parse_scan_conformers(path)
expected_labels = ['R1', 'R2', 'R3', 'A1', 'A2', 'D1']
expected_types = ['R', 'R', 'R', 'A', 'A', 'D']
expected_atoms = [[1, 2], [1, 3], [2, 4], [2, 1, 3], [1, 2, 4], [3, 1, 2, 4]]
expected_redundant = [False] * 6
expected_scan = [False] * 5 + [True]
expected_conf_0 = [1.4535, 0.9674, 0.9674, 100.563, 100.563, 118.8736]
expected_conf_18 = [1.4512, 0.9688, 0.9688, 103.2599, 103.2599, -61.1264]
expected_conf_36 = [1.4536, 0.9673, 0.9673, 100.5586, 100.5586, 118.8736]
self.assertEqual(expected_labels, scan_conformers.index.to_list())
self.assertEqual(expected_types, scan_conformers.type.to_list())
self.assertEqual(expected_atoms, scan_conformers.atoms.to_list())
self.assertEqual(expected_redundant, scan_conformers.redundant.to_list())
self.assertEqual(expected_scan, scan_conformers.scan.to_list())
self.assertEqual((6, 41), scan_conformers.shape)
self.assertEqual(expected_conf_0, scan_conformers[0].to_list())
self.assertEqual(expected_conf_18, scan_conformers[18].to_list())
self.assertEqual(expected_conf_36, scan_conformers[36].to_list())
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| 60.306488
| 125
| 0.572208
|
82aad7f6c1ec15305d68fed31f2f616448cf4fab
| 2,609
|
py
|
Python
|
CNN.py
|
Giorgiobientinesi/CT-and-X-ray-COVID-image-classification
|
7570ab28fa2b4e035e23d53abb5c9764bd9bb68f
|
[
"MIT"
] | null | null | null |
CNN.py
|
Giorgiobientinesi/CT-and-X-ray-COVID-image-classification
|
7570ab28fa2b4e035e23d53abb5c9764bd9bb68f
|
[
"MIT"
] | null | null | null |
CNN.py
|
Giorgiobientinesi/CT-and-X-ray-COVID-image-classification
|
7570ab28fa2b4e035e23d53abb5c9764bd9bb68f
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import random
import pickle
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten,Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard
import time
Data_directory = "COVID-19 Dataset/CT"
Categories = ["COVID", "Non-COVID"]
Size_of_images = 150
training_data = []
def training_creation():
for category in Categories:
path = os.path.join(Data_directory, category)
class_num = Categories.index(category)
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
            new_array = cv2.resize(img_array, (Size_of_images, Size_of_images), interpolation=cv2.INTER_AREA)
training_data.append([new_array,class_num])
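# Hedged note (not part of the original script): training_creation() assumes one folder
# per class under Data_directory, inferred from the Categories list above; the file names
# shown here are placeholders only.
#
#   COVID-19 Dataset/CT/COVID/<ct_image_001.png, ...>
#   COVID-19 Dataset/CT/Non-COVID/<ct_image_101.png, ...>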
training_creation()
print(len(training_data))
random.shuffle(training_data)
X = []
y = []
for features, label in training_data:
X.append(features)
y.append(label)
X = np.array(X).reshape(-1, Size_of_images, Size_of_images, 1)
y = np.array(y)
X = X/255.0
dense_layers = [0,1]
layer_sizes = [32,64,128] #64, 128
conv_layers = [1,2,3] #3
for dense_layer in dense_layers:
for layer_size in layer_sizes:
for conv_layer in conv_layers:
NAME = "{}-conv-{}-nodes-{}-dense-{}".format(conv_layer, layer_size, dense_layer, int(time.time()))
print(NAME)
model = Sequential()
model.add(Conv2D(layer_size, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
for l in range(conv_layer - 1):
model.add(Conv2D(layer_size, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
for _ in range(dense_layer):
model.add(Dense(layer_size))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(1))
model.add(Activation('sigmoid'))
tensorboard = TensorBoard(log_dir="logsCT/{}".format(NAME))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'],
)
model.fit(X, y,
batch_size=64,
epochs=15,
validation_split=0.3,
callbacks=[tensorboard])
| 26.09
| 111
| 0.601763
|
97e413bb24da78fb5e5b36ab2b0c0fb148eaaf26
| 3,148
|
py
|
Python
|
YR2_batch_processing_multiple_images_in_python.py
|
zrpllvv/Image-processing
|
a4d4a0875175a5be56494d765168e6cb9df06da8
|
[
"MIT"
] | null | null | null |
YR2_batch_processing_multiple_images_in_python.py
|
zrpllvv/Image-processing
|
a4d4a0875175a5be56494d765168e6cb9df06da8
|
[
"MIT"
] | null | null | null |
YR2_batch_processing_multiple_images_in_python.py
|
zrpllvv/Image-processing
|
a4d4a0875175a5be56494d765168e6cb9df06da8
|
[
"MIT"
] | null | null | null |
import cv2
import glob
from skimage.filters import gaussian
from skimage import img_as_ubyte
#select the path
path = "test_images/imgs/*.*"
img_number = 1 #Start an iterator for image number.
#This number can be later added to output image file names.
for file in glob.glob(path):
print(file) #just stop here to see all file names printed
img= cv2.imread(file, 0) #now, we can read each file since we have the full path
    #process each image - apply Gaussian smoothing to the grayscale image.
smoothed_image = img_as_ubyte(gaussian(img, sigma=5, mode='constant', cval=0.0))
cv2.imwrite("test_images/smoothed/smoothed_image"+str(img_number)+".jpg", smoothed_image)
img_number +=1
#Using os library to walk through folders
import os
import cv2
from skimage.filters import gaussian
from skimage import img_as_ubyte
img_number = 1
for root, dirs, files in os.walk("test_images/imgs"):
#for path,subdir,files in os.walk("."):
# for name in dirs:
# print (os.path.join(root, name)) # will print path of directories
for name in files:
print (os.path.join(root, name)) # will print path of files
path = os.path.join(root, name)
img= cv2.imread(path, 0) #now, we can read each file since we have the full path
        #process each image - apply Gaussian smoothing to the grayscale image.
smoothed_image = img_as_ubyte(gaussian(img, sigma=5, mode='constant', cval=0.0))
cv2.imwrite("test_images/smoothed/smoothed_image"+str(img_number)+".jpg", smoothed_image)
img_number +=1
import numpy as np
import cv2
import os
import glob
from skimage.filters import gaussian
from skimage import img_as_ubyte
images_list = []
SIZE = 512
path = "test_images/imgs/*.*"
#First create a stack array of all images
for file in glob.glob(path):
print(file) #just stop here to see all file names printed
img= cv2.imread(file, 0) #now, we can read each file since we have the full path
img = cv2.resize(img, (SIZE, SIZE))
images_list.append(img)
images_list = np.array(images_list)
#Process each slice in the stack
img_number = 1
for image in range(images_list.shape[0]):
input_img = images_list[image,:,:] #Grey images. For color add another dim.
smoothed_image = img_as_ubyte(gaussian(input_img, sigma=5, mode='constant', cval=0.0))
cv2.imwrite("test_images/smoothed/smoothed_image"+str(img_number)+".jpg", smoothed_image)
img_number +=1
import numpy as np
import cv2
import os
import glob
from skimage.filters import gaussian
from skimage import img_as_ubyte
#Read all slices from a multi-page (time series) tiff stack
file = 'test_images/scratch_time_series.tif'
# img= cv2.imread(file, 0)
import tifffile
img = tifffile.imread(file)
img_number = 1
for image in range(img.shape[0]):
input_img = img[image,:,:] #Grey images. For color add another dim.
smoothed_image = img_as_ubyte(gaussian(input_img, sigma=5, mode='constant', cval=0.0))
cv2.imwrite("test_images/smoothed/smoothed_image"+str(img_number)+".jpg", smoothed_image)
img_number +=1
| 33.136842
| 97
| 0.683609
|
48158c9237fc6d00d0fab3d724c21086f6cf252a
| 372
|
py
|
Python
|
spider/opener.py
|
fuandenghuo/100-days-of-python
|
50f3263b0984bb6690e565d58604c1882aaf465e
|
[
"MIT"
] | null | null | null |
spider/opener.py
|
fuandenghuo/100-days-of-python
|
50f3263b0984bb6690e565d58604c1882aaf465e
|
[
"MIT"
] | null | null | null |
spider/opener.py
|
fuandenghuo/100-days-of-python
|
50f3263b0984bb6690e565d58604c1882aaf465e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = 'abbot'
import urllib2
# Build an HTTPHandler object that supports handling HTTP requests
http_handler = urllib2.HTTPHandler(debuglevel=1)
# Create an opener object that can handle HTTP requests
opener = urllib2.build_opener(http_handler)
# Build the Request object
request = urllib2.Request("http://www.baidu.com")
# Call the custom opener's open() method to send the request
response = opener.open(request)
print response.read()
| 18.6
| 49
| 0.768817
|
0af1a7735a304bf26207a9ff3b7b6c1ee84653a2
| 1,340
|
py
|
Python
|
src/vigorish/util/exceptions.py
|
a-luna/vigorish
|
6cede5ced76c7d2c9ad0aacdbd2b18c2f1ee4ee6
|
[
"MIT"
] | 2
|
2021-07-15T13:53:33.000Z
|
2021-07-25T17:03:29.000Z
|
src/vigorish/util/exceptions.py
|
a-luna/vigorish
|
6cede5ced76c7d2c9ad0aacdbd2b18c2f1ee4ee6
|
[
"MIT"
] | 650
|
2019-05-18T07:00:12.000Z
|
2022-01-21T19:38:55.000Z
|
src/vigorish/util/exceptions.py
|
a-luna/vigorish
|
6cede5ced76c7d2c9ad0aacdbd2b18c2f1ee4ee6
|
[
"MIT"
] | 2
|
2020-03-28T21:01:31.000Z
|
2022-01-06T05:16:11.000Z
|
"""Custom exceptions for issues specific to vigorish actions/processes."""
class S3BucketException(Exception):
"""Exception raised when an operation requiring S3 bucket access cannot be performed."""
def __init__(self, message=None):
if not message:
message = "Unable to access S3 bucket, please verify AWS credentials are propertly configured."
super().__init__(message)
class ScrapedDataException(Exception):
"""Exception raised when data identified by file_type, data_set and url_id cannot be found."""
def __init__(self, file_type, data_set, url_id):
message = f"Failed to locate scraped data: URL ID: {url_id} (File Type: {file_type}, Data Set: {data_set})"
super().__init__(message)
class InvalidSeasonException(Exception):
"""Exception raised when the database does not contain data for the requested season."""
def __init__(self, year):
message = f"The database does not contain any data for the MLB {year} Season"
super().__init__(message)
class UnknownPlayerException(Exception):
"""Exception raised when the database does not contain data for the requested player."""
def __init__(self, mlb_id):
message = f"The database does not contain any data for a player with MLB ID: {mlb_id}"
super().__init__(message)
| 38.285714
| 115
| 0.709701
|
138ca0898a98ec6bd3e1adafe2a5cef9bd159ff3
| 7,453
|
py
|
Python
|
train/train_english_v1_multi_gpu.py
|
xiaoyubing/chinese_ocr
|
0e9f3dd4672dd3e3aaff607c691b94a4cfa4006c
|
[
"Apache-2.0"
] | null | null | null |
train/train_english_v1_multi_gpu.py
|
xiaoyubing/chinese_ocr
|
0e9f3dd4672dd3e3aaff607c691b94a4cfa4006c
|
[
"Apache-2.0"
] | null | null | null |
train/train_english_v1_multi_gpu.py
|
xiaoyubing/chinese_ocr
|
0e9f3dd4672dd3e3aaff607c691b94a4cfa4006c
|
[
"Apache-2.0"
] | 1
|
2019-06-26T10:06:43.000Z
|
2019-06-26T10:06:43.000Z
|
#-*- coding:utf-8 -*-
import os
import json
import threading
import numpy as np
from PIL import Image
import tensorflow as tf
from keras import losses
from keras import backend as K
from keras.utils import plot_model
from keras.preprocessing import image
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Input, Dense, Flatten
from keras.layers.core import Reshape, Masking, Lambda, Permute
from keras.layers.recurrent import GRU, LSTM
from keras.layers.wrappers import Bidirectional, TimeDistributed
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, Adam
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler, TensorBoard
from imp import reload
import densenet
from keras.utils import multi_gpu_model
GPU_NUM = 2
img_h = 32
img_w = 280*2
batch_size = 64
maxlabellength = 10*2
def get_session(gpu_fraction=1.0):
num_threads = os.environ.get('OMP_NUM_THREADS')
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
if num_threads:
if GPU_NUM > 1:
return tf.Session(config=tf.ConfigProto(
gpu_options=gpu_options, intra_op_parallelism_threads=num_threads, allow_soft_placement=True))
else:
return tf.Session(config=tf.ConfigProto(
gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
else:
if GPU_NUM > 1:
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
else:
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))
def readfile(filename):
res = []
with open(filename, 'r') as f:
lines = f.readlines()
for i in lines:
res.append(i.strip())
dic = {}
for i in res:
p = i.split(' ')
dic[p[0]] = p[1:]
return dic
class random_uniform_num():
"""
    Uniform random sampling: guarantees that every index appears exactly once per epoch.
"""
def __init__(self, total):
self.total = total
self.range = [i for i in range(total)]
np.random.shuffle(self.range)
self.index = 0
def get(self, batchsize):
r_n=[]
if(self.index + batchsize > self.total):
r_n_1 = self.range[self.index:self.total]
np.random.shuffle(self.range)
self.index = (self.index + batchsize) - self.total
r_n_2 = self.range[0:self.index]
r_n.extend(r_n_1)
r_n.extend(r_n_2)
else:
r_n = self.range[self.index : self.index + batchsize]
self.index = self.index + batchsize
return r_n
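# Hedged usage sketch (not part of the original training script): the sampler hands out
# shuffled index batches and re-shuffles once a full epoch of indices has been consumed,
# so no index repeats within an epoch. The values shown are illustrative only.
#
#   sampler = random_uniform_num(10)
#   batch_a = sampler.get(4)   # e.g. [7, 2, 9, 0] - four distinct indices
#   batch_b = sampler.get(4)   # four more indices, still without repeats in this epoch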
def gen(data_file, image_path, batchsize=128, maxlabellength=10, imagesize=(32, 280)):
image_label = readfile(data_file)
_imagefile = [i for i, j in image_label.items()]
x = np.zeros((batchsize, imagesize[0], imagesize[1], 1), dtype=np.float)
labels = np.ones([batchsize, maxlabellength]) * 10000
input_length = np.zeros([batchsize, 1])
label_length = np.zeros([batchsize, 1])
r_n = random_uniform_num(len(_imagefile))
_imagefile = np.array(_imagefile)
while 1:
shufimagefile = _imagefile[r_n.get(batchsize)]
for i, j in enumerate(shufimagefile):
img1 = Image.open(os.path.join(image_path, j)).convert('L')
img = np.array(img1, 'f') / 255.0 - 0.5
x[i] = np.expand_dims(img, axis=2)
# print('imag:shape', img.shape)
str = image_label[j]
label_length[i] = len(str)
if(len(str) <= 0):
print("len < 0", j)
input_length[i] = imagesize[1] // 8
labels[i, :len(str)] = [int(k) - 1 for k in str]
inputs = {'the_input': x,
'the_labels': labels,
'input_length': input_length,
'label_length': label_length,
}
outputs = {'ctc': np.zeros([batchsize])}
yield (inputs, outputs)
def ctc_lambda_func(args):
y_pred, labels, input_length, label_length = args
return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
def get_model(img_h, nclass):
input = Input(shape=(img_h, None, 1), name='the_input')
y_pred = densenet.dense_cnn(input, nclass)
# y_pred = densenet.dense_blstm(input, nclass)
basemodel = Model(inputs=input, outputs=y_pred)
basemodel.summary()
labels = Input(name='the_labels', shape=[None], dtype='float32')
input_length = Input(name='input_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])
model = Model(inputs=[input, labels, input_length, label_length], outputs=loss_out)
if GPU_NUM > 1:
model = multi_gpu_model(model, gpus=GPU_NUM)
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer='adam', metrics=['accuracy'])
return basemodel, model
if __name__ == '__main__':
# char_set = open('char_std_5990.txt', 'r', encoding='utf-8').readlines()
char_set = open('char_std_96_english_v1.txt', 'r', encoding='utf-8').readlines()
char_set = ''.join([ch.strip('\n') if ch.strip('\n')!='' else ' ' for ch in char_set][1:] + ['卍'])
print('char_set:',char_set)
nclass = len(char_set)
print('~~~~~~~~~~~~nclass:',nclass)
K.set_session(get_session())
reload(densenet)
basemodel, model = get_model(img_h, nclass)
modelPath = './models/weights_densenet_v2-08-2.77--.h5'
if os.path.exists(modelPath):
print("Loading model weights...")
model.load_weights(modelPath, by_name=True, skip_mismatch=True)
print('done!')
basemodelPath = './models/pretrain_model/weights_densenet.h5'
if os.path.exists(basemodelPath) and not os.path.exists(modelPath):
print("Loading basemodelPath weights...")
basemodel.load_weights(basemodelPath, by_name=True, skip_mismatch=True)
print('done!')
base_train_path = '/dockershare/hmbdata2/'
base_test_path = '/dockershare/hmbdata3/'
train_loader = gen(base_train_path + 'total_label_0-100.txt', base_train_path, batchsize=batch_size, maxlabellength=maxlabellength, imagesize=(img_h, img_w))
test_loader = gen(base_test_path + 'total_label_0-100.txt', base_test_path, batchsize=batch_size, maxlabellength=maxlabellength, imagesize=(img_h, img_w))
checkpoint = ModelCheckpoint(filepath='./models/weights_densenet_v3-{epoch:02d}-{val_loss:.2f}.h5', monitor='val_loss', save_best_only=False, save_weights_only=True)
lr_schedule = lambda epoch: 0.0005 * 0.4**epoch
learning_rate = np.array([lr_schedule(i) for i in range(10)])
changelr = LearningRateScheduler(lambda epoch: float(learning_rate[epoch]))
earlystop = EarlyStopping(monitor='val_loss', patience=2, verbose=1)
tensorboard = TensorBoard(log_dir='./models/logs', write_graph=True)
print('-----------Start training-----------')
model.fit_generator(train_loader,
steps_per_epoch = 181999// batch_size,
epochs = 150,
initial_epoch = 8,
validation_data = test_loader,
validation_steps = 181999 // batch_size,
# callbacks = [checkpoint, changelr, tensorboard])
callbacks = [checkpoint, earlystop, changelr, tensorboard])
| 37.265
| 169
| 0.666443
|
4e4275e46e00c5461b9f1aebe0673e90ff44c7d5
| 1,043
|
py
|
Python
|
app/core/migrations/0004_recipe.py
|
eycho97/recipe-app-api
|
5ccbc4458e9be43c4c668e053f569679a5755982
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
eycho97/recipe-app-api
|
5ccbc4458e9be43c4c668e053f569679a5755982
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
eycho97/recipe-app-api
|
5ccbc4458e9be43c4c668e053f569679a5755982
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2020-02-20 19:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 35.965517
| 118
| 0.604027
|
363c3c30db14046000fdf690f300f06f2c065eec
| 777
|
py
|
Python
|
python/mul_tbls_printer/tests/consts.py
|
AravinthSelvaraj/problems-and-solutions
|
5c31ad3797cc49d2f7d779a10f04d81fc3b559f2
|
[
"MIT"
] | null | null | null |
python/mul_tbls_printer/tests/consts.py
|
AravinthSelvaraj/problems-and-solutions
|
5c31ad3797cc49d2f7d779a10f04d81fc3b559f2
|
[
"MIT"
] | null | null | null |
python/mul_tbls_printer/tests/consts.py
|
AravinthSelvaraj/problems-and-solutions
|
5c31ad3797cc49d2f7d779a10f04d81fc3b559f2
|
[
"MIT"
] | null | null | null |
TC1_EXP_OUTPUT = """1 x 5 = 5
2 x 5 = 10
3 x 5 = 15
4 x 5 = 20
5 x 5 = 25
6 x 5 = 30
7 x 5 = 35
8 x 5 = 40
9 x 5 = 45
10 x 5 = 50"""
TC2_EXP_OUTPUT = """1 x 7 = 7
2 x 7 = 14
3 x 7 = 21
4 x 7 = 28
5 x 7 = 35
6 x 7 = 42
7 x 7 = 49
8 x 7 = 56
9 x 7 = 63
10 x 7 = 70
11 x 7 = 77
12 x 7 = 84
13 x 7 = 91
14 x 7 = 98
15 x 7 = 105"""
TC3_EXP_OUTPUT = """1 x 13 = 13
2 x 13 = 26
3 x 13 = 39
4 x 13 = 52
5 x 13 = 65
6 x 13 = 78
7 x 13 = 91
8 x 13 = 104
9 x 13 = 117
10 x 13 = 130
11 x 13 = 143
12 x 13 = 156
13 x 13 = 169
14 x 13 = 182
15 x 13 = 195
16 x 13 = 208
17 x 13 = 221
18 x 13 = 234
19 x 13 = 247
20 x 13 = 260"""
| 16.531915
| 35
| 0.407979
|
e9eac0db3fb83d7d634196a98c59e94535027dc9
| 3,064
|
py
|
Python
|
cohesity_management_sdk/models/outlook_mailbox.py
|
sachinthakare-cohesity/management-sdk-python
|
c95f67b7d387d5bab8392be43190e598280ae7b5
|
[
"MIT"
] | null | null | null |
cohesity_management_sdk/models/outlook_mailbox.py
|
sachinthakare-cohesity/management-sdk-python
|
c95f67b7d387d5bab8392be43190e598280ae7b5
|
[
"MIT"
] | null | null | null |
cohesity_management_sdk/models/outlook_mailbox.py
|
sachinthakare-cohesity/management-sdk-python
|
c95f67b7d387d5bab8392be43190e598280ae7b5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.restore_object
import cohesity_management_sdk.models.outlook_folder
class OutlookMailbox(object):
"""Implementation of the 'Outlook Mailbox.' model.
Specifies the Outlook mailbox with restore details to support full or
partial recovery.
Attributes:
mailbox_object (RestoreObject): Specifies an object to recover or
clone or an object to restore files and folders from. A VM object
can be recovered or cloned. A View object can be cloned. To
specify a particular snapshot, you must specify a jobRunId and a
startTimeUsecs. If jobRunId and startTimeUsecs are not specified,
the last Job Run of the specified Job is used.
outlook_folder_list (list of OutlookFolder): Specifies the list of
            folders to be restored in case the user wishes not to restore the entire
mailbox.
        restore_entire_mailbox (bool): Specifies whether the entire mailbox is
to be restored.
"""
# Create a mapping from Model property names to API property names
_names = {
"mailbox_object":'mailboxObject',
"outlook_folder_list":'outlookFolderList',
"restore_entire_mailbox":'restoreEntireMailbox'
}
def __init__(self,
mailbox_object=None,
outlook_folder_list=None,
restore_entire_mailbox=None):
"""Constructor for the OutlookMailbox class"""
# Initialize members of the class
self.mailbox_object = mailbox_object
self.outlook_folder_list = outlook_folder_list
self.restore_entire_mailbox = restore_entire_mailbox
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
mailbox_object = cohesity_management_sdk.models.restore_object.RestoreObject.from_dictionary(dictionary.get('mailboxObject')) if dictionary.get('mailboxObject') else None
outlook_folder_list = None
if dictionary.get('outlookFolderList') != None:
outlook_folder_list = list()
for structure in dictionary.get('outlookFolderList'):
outlook_folder_list.append(cohesity_management_sdk.models.outlook_folder.OutlookFolder.from_dictionary(structure))
restore_entire_mailbox = dictionary.get('restoreEntireMailbox')
# Return an object of this model
return cls(mailbox_object,
outlook_folder_list,
restore_entire_mailbox)
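# Hedged usage sketch (not part of the original SDK module): the dictionary below is a
# made-up minimal API payload that uses only the wire keys declared in _names above; a
# real response would typically also carry 'mailboxObject' and 'outlookFolderList'.
#
#   mailbox = OutlookMailbox.from_dictionary({'restoreEntireMailbox': True})
#   assert mailbox.restore_entire_mailbox is True
#   assert mailbox.mailbox_object is None and mailbox.outlook_folder_list is None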
| 38.3
| 178
| 0.674935
|
2be07185460ccb74e015e4c2e257169233d3e18d
| 811
|
py
|
Python
|
juju_suspend/providers/openstack.py
|
niedbalski/juju-suspend
|
a3fa076e1cac48e0fd6a73dc3aef473c78150251
|
[
"MIT"
] | 3
|
2015-02-13T22:13:38.000Z
|
2015-02-17T02:42:28.000Z
|
juju_suspend/providers/openstack.py
|
niedbalski/juju-suspend
|
a3fa076e1cac48e0fd6a73dc3aef473c78150251
|
[
"MIT"
] | null | null | null |
juju_suspend/providers/openstack.py
|
niedbalski/juju-suspend
|
a3fa076e1cac48e0fd6a73dc3aef473c78150251
|
[
"MIT"
] | null | null | null |
from juju_suspend.providers.base import Provider
class OpenstackProvider(Provider):
suspend_cmd = ". {1}; nova stop {0}"
resume_cmd = ". {1}; nova start {0}"
def __init__(self, environment):
Provider.__init__(self, environment)
if not self.environment.options.novarc:
raise Exception("Please specify your novarc file")
def filter_machines(self):
for i, v in self.environment.machines:
instance_id = v['InstanceId']
if instance_id not in ('localhost',) and v['DNSName'] + ':17070'\
not in self.environment.state_servers:
yield instance_id
def suspend(self):
self.do_suspend(self.environment.options.novarc)
def resume(self):
self.do_resume(self.environment.options.novarc)
| 30.037037
| 77
| 0.64365
|
374191fba27ee79db3614e20c69ab12a9b0a7134
| 2,749
|
py
|
Python
|
src/groupingDishes/groupingDishes.py
|
kasimte/coding-challenges
|
975506a7257ad0d2d234a7856006472b79ebfdc3
|
[
"MIT"
] | null | null | null |
src/groupingDishes/groupingDishes.py
|
kasimte/coding-challenges
|
975506a7257ad0d2d234a7856006472b79ebfdc3
|
[
"MIT"
] | null | null | null |
src/groupingDishes/groupingDishes.py
|
kasimte/coding-challenges
|
975506a7257ad0d2d234a7856006472b79ebfdc3
|
[
"MIT"
] | null | null | null |
'''You are given a list dishes, where each element consists of a list of
strings beginning with the name of the dish, followed by all the
ingredients used in preparing it. You want to group the dishes by
ingredients, so that for each ingredient you'll be able to find all
the dishes that contain it (if there are at least 2 such dishes).
Return an array where each element is a list beginning with the
ingredient name, followed by the names of all the dishes that contain
this ingredient. The dishes inside each list should be sorted
lexicographically, and the result array should be sorted
lexicographically by the names of the ingredients.
Example
For
dishes = [["Salad", "Tomato", "Cucumber", "Salad", "Sauce"],
["Pizza", "Tomato", "Sausage", "Sauce", "Dough"],
["Quesadilla", "Chicken", "Cheese", "Sauce"],
["Sandwich", "Salad", "Bread", "Tomato", "Cheese"]]
the output should be
groupingDishes(dishes) = [["Cheese", "Quesadilla", "Sandwich"],
["Salad", "Salad", "Sandwich"],
["Sauce", "Pizza", "Quesadilla", "Salad"],
["Tomato", "Pizza", "Salad", "Sandwich"]]
For
dishes = [["Pasta", "Tomato Sauce", "Onions", "Garlic"],
["Chicken Curry", "Chicken", "Curry Sauce"],
["Fried Rice", "Rice", "Onions", "Nuts"],
["Salad", "Spinach", "Nuts"],
["Sandwich", "Cheese", "Bread"],
["Quesadilla", "Chicken", "Cheese"]]
the output should be
groupingDishes(dishes) = [["Cheese", "Quesadilla", "Sandwich"],
["Chicken", "Chicken Curry", "Quesadilla"],
["Nuts", "Fried Rice", "Salad"],
["Onions", "Fried Rice", "Pasta"]]
'''
def groupingDishes(dishes):
'''
Approach:
- establish hash
- iterate over each dish
- for each dish:
- grab the name of the dish
- for each ingredient, store in a hash: ingredient -> [array of dishes]
    - sort the keys of the hash
- iterate over the sorted keys and add to an output results array
- [ingredient, sorted list of dishes]
- return
'''
hash = {}
for dish in dishes:
dish_name = dish[0] # assumes at least dish name and ingredient
for i in dish[1:]:
if i in hash:
hash[i].append(dish_name)
else:
hash[i] = [dish_name]
ingredients = list(hash)
ingredients.sort()
result = []
for i in ingredients:
dishes = hash[i]
if len(dishes) > 1:
dishes.sort()
result.append([i] + dishes)
return result
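# Hedged usage sketch (not part of the original solution): exercises groupingDishes on
# the first example from the problem statement above, so the expected output is taken
# directly from that docstring.
if __name__ == '__main__':
    example = [["Salad", "Tomato", "Cucumber", "Salad", "Sauce"],
               ["Pizza", "Tomato", "Sausage", "Sauce", "Dough"],
               ["Quesadilla", "Chicken", "Cheese", "Sauce"],
               ["Sandwich", "Salad", "Bread", "Tomato", "Cheese"]]
    expected = [["Cheese", "Quesadilla", "Sandwich"],
                ["Salad", "Salad", "Sandwich"],
                ["Sauce", "Pizza", "Quesadilla", "Salad"],
                ["Tomato", "Pizza", "Salad", "Sandwich"]]
    assert groupingDishes(example) == expected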
'''
Log: Solved in 19 minutes on first try.
'''
| 35.24359
| 77
| 0.574391
|
1e0c4a3388b7da0b0a98091b82b2705f9dac294b
| 14,312
|
py
|
Python
|
windmill/server/convergence.py
|
mikeal/windmill
|
0f2b640d107bfaf6615184471f8e14ff2ecc8319
|
[
"Apache-2.0"
] | 2
|
2015-06-23T17:34:58.000Z
|
2015-11-06T00:04:14.000Z
|
windmill/server/convergence.py
|
mikeal/windmill
|
0f2b640d107bfaf6615184471f8e14ff2ecc8319
|
[
"Apache-2.0"
] | null | null | null |
windmill/server/convergence.py
|
mikeal/windmill
|
0f2b640d107bfaf6615184471f8e14ff2ecc8319
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2006-2007 Open Source Applications Foundation
# Copyright (c) 2008-2009 Mikeal Rogers <mikeal.rogers@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides the communication and management between the various
server interfaces and the browser's js interface"""
import copy, os, sys
from windmill.dep import json
import logging
from windmill.dep import uuid
import windmill
from time import sleep
test_results_logger = logging.getLogger('test_results')
class ControllerQueue(object):
def __init__(self, command_resolution_suite, test_resolution_suite):
self.resolution_suites = {'test': test_resolution_suite,
'command': command_resolution_suite}
self.queue = []
self.current_suite = None
def add_command(self, command, suite_name=None):
"""Add Command to the controller queue"""
if suite_name is None and not command.get('suite_name'):
suite_name = self.current_suite
command['suite_name'] = suite_name
if command['params'].get('priority', None):
priority = command['params'].pop('priority')
else:
priority = None
command['type'] = 'command'
if type(priority) is int:
self.queue.insert(priority, command)
else:
self.queue.append(command)
def add_test(self, test, suite_name=None):
if suite_name is None and not test.get('suite_name'):
suite_name = self.current_suite
test['suite_name'] = suite_name
test['type'] = 'test'
self.queue.append(test)
def start_suite(self, suite_name):
self.current_suite = suite_name
def stop_suite(self):
self.current_suite = None
# def command(self, command):
# self.command_queue.insert(0, command)
def next_action(self):
        if len(self.queue) != 0:
controller_action = self.queue.pop(0)
self.resolution_suites[controller_action.pop('type')].add(controller_action)
return controller_action
else:
return None
callback = {'version':'0.1'}
class TestResolutionSuite(object):
"""Collection of tests run and results"""
result_processor = None
def __init__(self):
self.unresolved = {}
self.resolved = {}
self.current_suite = None
def resolve(self, result, uuid, starttime, endtime, debug=None, output=None):
"""Resolve test by uuid"""
test = self.unresolved.pop(uuid)
if debug:
test['debug'] = debug
test['result'] = result
test['starttime'] = starttime
test['endtime'] = endtime
test['output'] = output
self.resolved[uuid] = test
if result is False:
test_results_logger.error('Test Failure in test %s' % repr(test))
elif result is True:
test_results_logger.debug('Test Success in test %s' % repr(test))
if self.result_processor is not None:
if result is False:
self.result_processor.failure(test, debug=debug)
elif result is True:
self.result_processor.success(test, debug=debug)
def report_without_resolve(self, result, uuid, starttime, endtime, suite_name, debug=None, output=None):
test = {'result':result, 'uuid':uuid, 'starttime':starttime, 'endtime':endtime,
'suite_name':suite_name, 'debug':debug, 'output':output}
# if result is False:
# test_results_logger.error('Test Failure in test %s' % repr(test))
# elif result is True:
# test_results_logger.debug('Test Success in test %s' % repr(test))
if self.result_processor is not None:
if result is False:
self.result_processor.failure(test, debug=debug)
windmill.test_has_failed = True
elif result is True:
self.result_processor.success(test, debug=debug)
def start_suite(self, suite_name):
self.current_suite = suite_name
def stop_suite(self):
self.current_suite = None
def add(self, test, suite_name=None):
self.unresolved[test['params']['uuid']] = test
class CommandResolutionSuite(object):
def __init__(self):
self.unresolved = {}
self.resolved ={}
def resolve(self, status, uuid, result):
"""Resolve command by uuid"""
command = self.unresolved.pop(uuid, None)
command['status'] = status
command['result'] = result
self.resolved[uuid] = command
if status is False:
test_results_logger.error('Command Failure in command %s' % command)
elif status is True:
            test_results_logger.debug('Command Success in command %s' % command)
def add(self, command, suite_name=None):
self.unresolved[command['params']['uuid']] = command
class RecursiveRPC(object):
def __init__(self, execution_method):
self.execution_method = execution_method
def __getattr__(self, key):
"""Call a method on the controller as if it was a local method"""
        class ExecuteRecursiveAttribute(object):
def __init__(self, name, execution_method):
self.name = name
self.execution_method = execution_method
def __call__(self, **kwargs):
rpc = {'method':self.name, 'params':kwargs}
return self.execution_method(rpc)
def __getattr__(self, key):
return ExecuteRecursiveAttribute(self.name+'.'+key, self.execution_method)
return ExecuteRecursiveAttribute(key, self.execution_method)
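# Hedged usage sketch (not part of the original module): shows how chained attribute
# access on RecursiveRPC is folded into a single RPC dict. The collecting lambda and the
# 'commands.click' method name are made up purely for illustration.
#
#   calls = []
#   rpc = RecursiveRPC(lambda request: calls.append(request))
#   rpc.commands.click(id='submit-button')
#   # calls[0] == {'method': 'commands.click', 'params': {'id': 'submit-button'}}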
class RPCMethods(object):
def __init__(self, queue, test_resolution_suite, command_resolution_suite, proxy):
self._queue = queue
self._logger = logging.getLogger('jsonrpc_methods_instance')
self._test_resolution_suite = test_resolution_suite
self._command_resolution_suite = command_resolution_suite
self.proxy = proxy
def start_suite(self, suite_name):
self._test_resolution_suite.start_suite(suite_name)
self._queue.start_suite(suite_name)
return 200
def stop_suite(self):
self._test_resolution_suite.stop_suite()
return 200
def add_object(self, queue_method, action_object, suite_name=None):
"""Procedue neutral addition method"""
callback_object = copy.copy(callback)
callback_object.update(action_object)
if not callback_object['params'].get('uuid'):
callback_object['params']['uuid'] = str(uuid.uuid1())
self._logger.debug('Adding object %s' % str(callback_object))
queue_method(callback_object, suite_name=suite_name)
return callback_object['params']['uuid']
    def add_json_test(self, json_str, suite_name=None):
        """Add test from a json string with 'method' and 'params' defined"""
        action_object = json.loads(json_str)
self.add_object(self._queue.add_test, action_object, suite_name)
return 200
def add_test(self, test_object, suite_name=None):
self.add_object(self._queue.add_test, test_object, suite_name)
return 200
    def add_json_command(self, json_str):
        """Add command from a json string with 'method' and 'params' defined"""
        action_object = json.loads(json_str)
self.add_object(self._queue.add_command, action_object)
return 200
def add_command(self, command_object):
"""Add command from object"""
self.add_object(self._queue.add_command, command_object)
return 200
def execute_object(self, queue_method, resolution_suite, action_object):
"""Procedure neutral blocking exeution of a given object."""
uuid = self.add_object(queue_method, action_object)
while not resolution_suite.resolved.get(uuid):
sleep(.25)
result = resolution_suite.resolved[uuid]
result.pop('totaltime', None)
return result
    def execute_json_command(self, json_str):
        """Add command from a json string with 'method' and 'params' defined, block until it returns, return the result"""
        action_object = json.loads(json_str)
return self.execute_object(self._queue.add_command, self._command_resolution_suite, action_object)
    def execute_json_test(self, json_str):
        """Add test from a json string with 'method' and 'params' defined, block until it returns, return the result"""
        action_object = json.loads(json_str)
return self.execute_object(self._queue.add_test, self._test_resolution_suite, action_object)
def execute_command(self, action_object):
"""Add command from dict object with 'method' and 'params' defined, block until it returns, return the result"""
return self.execute_object(self._queue.add_command, self._command_resolution_suite, action_object)
def execute_test(self, action_object):
"""Add test from dict object with 'method' and 'params' defined, block until it returns, return the result"""
return self.execute_object(self._queue.add_test, self._test_resolution_suite, action_object)
def run_json_tests(self, tests):
"""Run list of json tests"""
return self.run_tests([json.loads(test) for test in tests])
def run_tests(self, tests):
"""Run list of tests"""
for test in tests:
            if test['method'].find('command') == -1:
self.add_test(test)
else:
self.add_command(test)
return 200
def clear_queue(self):
"""Clear the server queue"""
self._queue.queue = []
return 200
class JSONRPCMethods(RPCMethods):
def next_action(self):
"""The next action for the browser to execute"""
windmill.ide_is_awake = True
from windmill.bin import admin_lib
        if len(admin_lib.on_ide_awake) != 0:
for func in copy.copy(admin_lib.on_ide_awake):
func()
admin_lib.on_ide_awake.remove(func)
action = self._queue.next_action()
if action is not None:
self._logger.debug('queue has next_action %s' % str(action))
return action
else:
self._logger.debug('queue has no next_action, returning "pause" method')
action = copy.copy(callback)
action.update({'method':'defer'})
return action
def report(self, uuid, result, starttime, endtime, debug=None, output=None):
"""Report fass/fail for a test"""
self._test_resolution_suite.resolve(result, uuid, starttime, endtime, debug, output)
return 200
def report_without_resolve(self, uuid, result, starttime, endtime, suite_name, debug=None, output=None):
self._test_resolution_suite.report_without_resolve(result, uuid, starttime, endtime, suite_name, debug, output)
if result is True:
sys.stdout.write('.')
else:
sys.stdout.write('F')
sys.stdout.flush()
return 200
count = 0
def command_result(self, status, uuid, result):
self._command_resolution_suite.resolve(status, uuid, result)
def status_change(self, status):
pass
def set_test_url(self, url):
self.proxy.fm.set_test_url(url)
return 200
def restart_test_run(self, tests):
self.clear_queue()
self._test_resolution_suite.unresolved = {}
self._command_resolution_suite.unresolved ={}
for test in tests:
self.add_test(test, suite_name=test.get('suite_name'))
def create_save_file(self, transformer, suite_name, tests):
from windmill.authoring import transforms
if not windmill.settings['SAVES_PATH']:
transforms.create_saves_path()
for test in tests:
if test.get('suite_name'):
test.pop('suite_name')
if test['params'].get('uuid'):
test['params'].pop('uuid')
return transforms.registry[transformer](suite_name, tests)
def teardown(self, tests):
"""teardown_module function for functest based python tests"""
import windmill
if getattr(windmill, "js_framework_active", False) is True:
sys.stdout.write('\n'); sys.stdout.flush();
if tests["testFailureCount"] is not 0:
for k, v in tests.items():
if type(v) is dict and v.get(u'result') is False:
print "Failed: "+k
windmill.test_has_failed = True
total_string = "Total Tests Run: "+str(tests["testCount"])+" "
total_string += "Total Passed: "+str(tests["testCount"] - tests["testFailureCount"])+" "
total_string += "Total Failed: "+str(tests["testFailureCount"])
print total_string
if windmill.settings['EXIT_ON_DONE']:
from windmill.bin import admin_lib
admin_lib.teardown(admin_lib.shell_objects_dict)
windmill.runserver_running = False
sleep(.25)
class XMLRPCMethods(RPCMethods):
def stop_runserver(self):
import windmill
windmill.runserver_running = False
| 38.785908
| 120
| 0.621646
|
f3c615d0393bf35a7e33f5d4f1d07170e02ca9dc
| 450
|
py
|
Python
|
isolate/__init__.py
|
meteran/isolate
|
7d46279a364a701ba70a7d3fbf3c391e7dc05f1d
|
[
"BSD-3-Clause"
] | 2
|
2017-05-10T23:50:19.000Z
|
2021-12-28T22:23:43.000Z
|
isolate/__init__.py
|
meteran/isolate
|
7d46279a364a701ba70a7d3fbf3c391e7dc05f1d
|
[
"BSD-3-Clause"
] | 1
|
2016-10-19T20:08:26.000Z
|
2016-10-19T20:08:26.000Z
|
isolate/__init__.py
|
meteran/isolate
|
7d46279a364a701ba70a7d3fbf3c391e7dc05f1d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
from cgroupspy.nodes import Node
from isolate.namespaces import NewNamespaces, JoinNamespaces, in_namespaces as run_in_namespaces, NAMESPACES
from cgroups import Cgroup
from syscalls import *
from seccomp import SecureComputing, Arg
BYTES = Cgroup.BYTES
KILOBYTES = Cgroup.KILOBYTES
MEGABYTES = Cgroup.MEGABYTES
GIGABYTES = Cgroup.GIGABYTES
SUBSYSTEMS = Node.CONTROLLERS.keys()
NAMESPACES = NAMESPACES.keys()
| 26.470588 | 108 | 0.813333 |
e329c69d7dcb64ff9e9e4dab30f4c65f9938d461 | 91,992 | py | Python | scalyr_agent/builtin_monitors/kubernetes_monitor.py | echee2/scalyr-agent-2 | da7d168260b94dc95aedb5ae0dca03165e55cb02 | ["Apache-2.0"] | null | null | null | scalyr_agent/builtin_monitors/kubernetes_monitor.py | echee2/scalyr-agent-2 | da7d168260b94dc95aedb5ae0dca03165e55cb02 | ["Apache-2.0"] | null | null | null | scalyr_agent/builtin_monitors/kubernetes_monitor.py | echee2/scalyr-agent-2 | da7d168260b94dc95aedb5ae0dca03165e55cb02 | ["Apache-2.0"] | null | null | null |
# Copyright 2018 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# author: Imron Alston <imron@scalyr.com>
__author__ = 'imron@scalyr.com'
import datetime
import docker
import fnmatch
import traceback
import logging
import os
import re
import random
import socket
import stat
from string import Template
import struct
import sys
import time
import threading
from scalyr_agent import ScalyrMonitor, define_config_option, define_metric
import scalyr_agent.util as scalyr_util
import scalyr_agent.scalyr_logging as scalyr_logging
from scalyr_agent.json_lib import JsonObject
from scalyr_agent.json_lib import JsonConversionException, JsonMissingFieldException
from scalyr_agent.log_watcher import LogWatcher
from scalyr_agent.monitor_utils.server_processors import LineRequestParser
from scalyr_agent.monitor_utils.server_processors import RequestSizeExceeded
from scalyr_agent.monitor_utils.server_processors import RequestStream
from scalyr_agent.monitor_utils.k8s import KubernetesApi, KubeletApi, KubeletApiException, KubernetesCache, PodInfo, DockerMetricFetcher
from scalyr_agent.util import StoppableThread
from scalyr_agent.util import RunState
from requests.packages.urllib3.exceptions import ProtocolError
global_log = scalyr_logging.getLogger(__name__)
__monitor__ = __name__
define_config_option(__monitor__, 'module',
'Always ``scalyr_agent.builtin_monitors.kubernetes_monitor``',
convert_to=str, required_option=True)
define_config_option( __monitor__, 'container_name',
'Optional (defaults to None). Defines a regular expression that matches the name given to the '
'container running the scalyr-agent.\n'
'If this is None, the scalyr agent will look for a container running /usr/sbin/scalyr-agent-2 as the main process.\n',
convert_to=str, default=None)
define_config_option( __monitor__, 'container_check_interval',
'Optional (defaults to 5). How often (in seconds) to check if containers have been started or stopped.',
convert_to=int, default=5)
define_config_option( __monitor__, 'api_socket',
'Optional (defaults to /var/scalyr/docker.sock). Defines the unix socket used to communicate with '
'the docker API. WARNING, if you have `mode` set to `syslog`, you must also set the '
'`docker_api_socket` configuration option in the syslog monitor to this same value\n'
'Note: You need to map the host\'s /run/docker.sock to the same value as specified here, using the -v parameter, e.g.\n'
'\tdocker run -v /run/docker.sock:/var/scalyr/docker.sock ...',
convert_to=str, default='/var/scalyr/docker.sock')
define_config_option( __monitor__, 'docker_api_version',
'Optional (defaults to \'auto\'). The version of the Docker API to use. WARNING, if you have '
'`mode` set to `syslog`, you must also set the `docker_api_version` configuration option in the '
'syslog monitor to this same value\n',
convert_to=str, default='auto')
define_config_option( __monitor__, 'docker_log_prefix',
'Optional (defaults to docker). Prefix added to the start of all docker logs. ',
convert_to=str, default='docker')
define_config_option( __monitor__, 'docker_max_parallel_stats',
'Optional (defaults to 20). Maximum stats requests to issue in parallel when retrieving container '
'metrics using the Docker API.', convert_to=int, default=20)
define_config_option( __monitor__, 'max_previous_lines',
'Optional (defaults to 5000). The maximum number of lines to read backwards from the end of the stdout/stderr logs\n'
'when starting to log a containers stdout/stderr to find the last line that was sent to Scalyr.',
convert_to=int, default=5000)
define_config_option( __monitor__, 'readback_buffer_size',
'Optional (defaults to 5k). The maximum number of bytes to read backwards from the end of any log files on disk\n'
'when starting to log a containers stdout/stderr. This is used to find the most recent timestamp logged to file that '
'was sent to Scalyr.',
convert_to=int, default=5*1024)
define_config_option( __monitor__, 'log_mode',
'Optional (defaults to "docker_api"). Determine which method is used to gather logs from the '
'local containers. If "docker_api", then this agent will use the docker API to contact the local '
'containers and pull logs from them. If "syslog", then this agent expects the other containers '
'to push logs to this one using the syslog Docker log plugin. Currently, "syslog" is the '
'preferred method due to bugs/issues found with the docker API. It is not the default to protect '
'legacy behavior.\n',
convert_to=str, default="docker_api")
define_config_option( __monitor__, 'metrics_only',
'Optional (defaults to False). If true, the docker monitor will only log docker metrics and not any other information '
'about running containers.\n',
convert_to=bool, default=False)
define_config_option( __monitor__, 'container_globs',
'Optional (defaults to None). If set, a list of glob patterns for container names. Only containers whose names '
'match one of the glob patterns will be monitored.',
default=None)
define_config_option( __monitor__, 'report_container_metrics',
'Optional (defaults to True). If true, metrics will be collected from the container and reported '
'to Scalyr. Note, metrics are only collected from those containers whose logs are being collected',
convert_to=bool, default=True)
define_config_option( __monitor__, 'report_k8s_metrics',
'Optional (defaults to True). If true and report_container_metrics is true, metrics will be '
'collected from the k8s and reported to Scalyr. ', convert_to=bool, default=False)
define_config_option( __monitor__, 'k8s_ignore_namespaces',
'Optional (defaults to "kube-system"). A comma-delimited list of the namespaces whose pods\'s '
'logs should not be collected and sent to Scalyr.', convert_to=str, default="kube-system")
define_config_option( __monitor__, 'k8s_ignore_pod_sandboxes',
'Optional (defaults to True). If True then all containers with the label '
'`io.kubernetes.docker.type` equal to `podsandbox` are excluded from the '
'logs being collected', convert_to=bool, default=True)
define_config_option( __monitor__, 'k8s_include_all_containers',
'Optional (defaults to True). If True, all containers in all pods will be monitored by the kubernetes monitor '
'unless they have an include: false or exclude: true annotation. '
'If false, only pods/containers with an include:true or exclude:false annotation '
'will be monitored. See documentation on annotations for further detail.', convert_to=bool, default=True)
define_config_option( __monitor__, 'k8s_use_v2_attributes',
'Optional (defaults to False). If True, will use v2 version of attribute names instead of '
'the names used with the original release of this monitor. This is a breaking change so could '
'break searches / alerts if you rely on the old names', convert_to=bool, default=False)
define_config_option( __monitor__, 'k8s_use_v1_and_v2_attributes',
'Optional (defaults to False). If True, send attributes using both v1 and v2 versions of their '
'names. This may be used to fix breakages when you relied on the v1 attribute names',
convert_to=bool, default=False)
define_config_option( __monitor__, 'k8s_api_url',
'Optional (defaults to "https://kubernetes.default"). The URL for the Kubernetes API server for '
'this cluster.', convert_to=str, default='https://kubernetes.default')
define_config_option( __monitor__, 'k8s_cache_expiry_secs',
'Optional (defaults to 30). The amount of time to wait between fully updating the k8s cache from the k8s api. '
'Increase this value if you want less network traffic from querying the k8s api. Decrease this value if you '
'want dynamic updates to annotation configuration values to be processed more quickly.',
convert_to=int, default=30)
define_config_option( __monitor__, 'k8s_cache_purge_secs',
'Optional (defaults to 300). The number of seconds to wait before purging unused items from the k8s cache',
convert_to=int, default=300)
define_config_option( __monitor__, 'k8s_cache_init_abort_delay',
'Optional (defaults to 20). The number of seconds to wait for initialization of the kubernetes cache before aborting '
'the kubernetes_monitor.',
convert_to=int, default=20)
define_config_option( __monitor__, 'k8s_parse_json',
'Optional (defaults to True). If True, the log files will be parsed as json before uploading to the server '
'to extract log and timestamp fields. If False, the raw json will be uploaded to Scalyr.',
convert_to=bool, default=True)
define_config_option( __monitor__, 'verify_k8s_api_queries',
'Optional (defaults to True). If true, then the ssl connection for all queries to the k8s API will be verified using '
'the ca.crt certificate found in the service account directory. If false, no verification will be performed. '
'This is useful for older k8s clusters where certificate verification can fail.',
convert_to=bool, default=True)
define_config_option( __monitor__, 'gather_k8s_pod_info',
'Optional (defaults to False). If true, then every gather_sample interval, metrics will be collected '
'from the docker and k8s APIs showing all discovered containers and pods. This is mostly a debugging aid '
'and there are performance implications to always leaving this enabled', convert_to=bool, default=False)
define_config_option( __monitor__, 'include_daemonsets_as_deployments',
'Deprecated',
convert_to=bool, default=True)
# for now, always log timestamps to help prevent a race condition
#define_config_option( __monitor__, 'log_timestamps',
# 'Optional (defaults to False). If true, stdout/stderr logs will contain docker timestamps at the beginning of the line\n',
# convert_to=bool, default=False)
define_metric( __monitor__, "docker.net.rx_bytes", "Total received bytes on the network interface", cumulative=True, unit="bytes", category="Network" )
define_metric( __monitor__, "docker.net.rx_dropped", "Total receive packets dropped on the network interface", cumulative=True, category="Network" )
define_metric( __monitor__, "docker.net.rx_errors", "Total receive errors on the network interface", cumulative=True, category="Network" )
define_metric( __monitor__, "docker.net.rx_packets", "Total received packets on the network interface", cumulative=True, category="Network" )
define_metric( __monitor__, "docker.net.tx_bytes", "Total transmitted bytes on the network interface", cumulative=True, unit="bytes", category="Network" )
define_metric( __monitor__, "docker.net.tx_dropped", "Total transmitted packets dropped on the network interface", cumulative=True, category="Network" )
define_metric( __monitor__, "docker.net.tx_errors", "Total transmission errors on the network interface", cumulative=True, category="Network" )
define_metric( __monitor__, "docker.net.tx_packets", "Total packets transmitted on the network intervace", cumulative=True, category="Network" )
define_metric( __monitor__, "docker.mem.stat.active_anon", "The number of bytes of active memory backed by anonymous pages, excluding sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.active_file", "The number of bytes of active memory backed by files, excluding sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.cache", "The number of bytes used for the cache, excluding sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.hierarchical_memory_limit", "The memory limit in bytes for the container.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.inactive_anon", "The number of bytes of inactive memory in anonymous pages, excluding sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.inactive_file", "The number of bytes of inactive memory in file pages, excluding sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.mapped_file", "The number of bytes of mapped files, excluding sub-groups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.pgfault", "The total number of page faults, excluding sub-cgroups.", cumulative=True, category="Memory" )
define_metric( __monitor__, "docker.mem.stat.pgmajfault", "The number of major page faults, excluding sub-cgroups", cumulative=True, category="Memory" )
define_metric( __monitor__, "docker.mem.stat.pgpgin", "The number of charging events, excluding sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.pgpgout", "The number of uncharging events, excluding sub-groups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.rss", "The number of bytes of anonymous and swap cache memory (includes transparent hugepages), excluding sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.rss_huge", "The number of bytes of anonymous transparent hugepages, excluding sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.unevictable", "The number of bytes of memory that cannot be reclaimed (mlocked etc), excluding sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.writeback", "The number of bytes being written back to disk, excluding sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_active_anon", "The number of bytes of active memory backed by anonymous pages, including sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_active_file", "The number of bytes of active memory backed by files, including sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_cache", "The number of bytes used for the cache, including sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_inactive_anon", "The number of bytes of inactive memory in anonymous pages, including sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_inactive_file", "The number of bytes of inactive memory in file pages, including sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_mapped_file", "The number of bytes of mapped files, including sub-groups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_pgfault", "The total number of page faults, including sub-cgroups.", cumulative=True, category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_pgmajfault","The number of major page faults, including sub-cgroups", cumulative=True, category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_pgpgin", "The number of charging events, including sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_pgpgout", "The number of uncharging events, including sub-groups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_rss", "The number of bytes of anonymous and swap cache memory (includes transparent hugepages), including sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_rss_huge", "The number of bytes of anonymous transparent hugepages, including sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_unevictable", "The number of bytes of memory that cannot be reclaimed (mlocked etc), including sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_writeback", "The number of bytes being written back to disk, including sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.max_usage", "The max amount of memory used by container in bytes.", unit="bytes", category="Memory" )
define_metric( __monitor__, "docker.mem.usage", "The current number of bytes used for memory including cache.", unit="bytes", category="Memory" )
define_metric( __monitor__, "docker.mem.fail_cnt", "The number of times the container hit its memory limit", category="Memory" )
define_metric( __monitor__, "docker.mem.limit", "The memory limit for the container in bytes.", unit="bytes", category="Memory")
define_metric( __monitor__, "docker.cpu.usage", "Total CPU consumed by container in nanoseconds", cumulative=True, category="CPU" )
define_metric( __monitor__, "docker.cpu.system_cpu_usage", "Total CPU consumed by container in kernel mode in nanoseconds", cumulative=True, category="CPU" )
define_metric( __monitor__, "docker.cpu.usage_in_usermode", "Total CPU consumed by tasks of the cgroup in user mode in nanoseconds", cumulative=True, category="CPU" )
define_metric( __monitor__, "docker.cpu.total_usage", "Total CPU consumed by tasks of the cgroup in nanoseconds", cumulative=True, category="CPU" )
define_metric( __monitor__, "docker.cpu.usage_in_kernelmode", "Total CPU consumed by tasks of the cgroup in kernel mode in nanoseconds", cumulative=True, category="CPU" )
define_metric( __monitor__, "docker.cpu.throttling.periods", "The number of of periods with throttling active.", cumulative=True, category="CPU" )
define_metric( __monitor__, "docker.cpu.throttling.throttled_periods", "The number of periods where the container hit its throttling limit", cumulative=True, category="CPU" )
define_metric( __monitor__, "docker.cpu.throttling.throttled_time", "The aggregate amount of time the container was throttled in nanoseconds", cumulative=True, category="CPU" )
define_metric( __monitor__, "k8s.pod.network.rx_bytes", "The total received bytes on a pod", cumulative=True, category="Network" )
define_metric( __monitor__, "k8s.pod.network.rx_errors", "The total received errors on a pod", cumulative=True, category="Network" )
define_metric( __monitor__, "k8s.pod.network.tx_bytes", "The total transmitted bytes on a pod", cumulative=True, category="Network" )
define_metric( __monitor__, "k8s.pod.network.tx_errors", "The total transmission errors on a pod", cumulative=True, category="Network" )
define_metric( __monitor__, "k8s.node.network.rx_bytes", "The total received bytes on a pod", cumulative=True, category="Network" )
define_metric( __monitor__, "k8s.node.network.rx_errors", "The total received errors on a pod", cumulative=True, category="Network" )
define_metric( __monitor__, "k8s.node.network.tx_bytes", "The total transmitted bytes on a pod", cumulative=True, category="Network" )
define_metric( __monitor__, "k8s.node.network.tx_errors", "The total transmission errors on a pod", cumulative=True, category="Network" )
# A mapping of k8s controller kinds to the appropriate field name
# passed to the scalyr server for metrics that originate from pods
# controlled by that object. See #API-62
_CONTROLLER_KEYS = {
'CronJob' : 'k8s-cron-job',
'DaemonSet' : 'k8s-daemon-set',
'Deployment' : 'k8s-deployment',
'Job' : 'k8s-job',
'ReplicaSet': 'k8s-replica-set',
'ReplicationController': 'k8s-replication-controller',
'StatefulSet': 'k8s-stateful-set'
}
class K8sInitException( Exception ):
""" Wrapper exception to indicate when the monitor failed to start due to
a problem with initializing the k8s cache
"""
class WrappedStreamResponse( object ):
""" Wrapper for generator returned by docker.Client._stream_helper
that gives us access to the response, and therefore the socket, so that
we can shutdown the socket from another thread if needed
"""
def __init__( self, client, response, decode ):
self.client = client
self.response = response
self.decode = decode
def __iter__( self ):
for item in super( DockerClient, self.client )._stream_helper( self.response, self.decode ):
yield item
class WrappedRawResponse( object ):
""" Wrapper for generator returned by docker.Client._stream_raw_result
that gives us access to the response, and therefore the socket, so that
we can shutdown the socket from another thread if needed
"""
def __init__( self, client, response ):
self.client = client
self.response = response
def __iter__( self ):
for item in super( DockerClient, self.client )._stream_raw_result( self.response ):
yield item
class WrappedMultiplexedStreamResponse( object ):
""" Wrapper for generator returned by docker.Client._multiplexed_response_stream_helper
that gives us access to the response, and therefore the socket, so that
we can shutdown the socket from another thread if needed
"""
def __init__( self, client, response ):
self.client = client
self.response = response
def __iter__( self ):
for item in super( DockerClient, self.client )._multiplexed_response_stream_helper( self.response ):
yield item
class DockerClient( docker.Client ):
""" Wrapper for docker.Client to return 'wrapped' versions of streamed responses
so that we can have access to the response object, which allows us to get the
socket in use, and shutdown the blocked socket from another thread (e.g. upon
shutdown
"""
def _stream_helper( self, response, decode=False ):
return WrappedStreamResponse( self, response, decode )
def _stream_raw_result( self, response ):
return WrappedRawResponse( self, response )
def _multiplexed_response_stream_helper( self, response ):
return WrappedMultiplexedStreamResponse( self, response )
def _get_raw_response_socket(self, response):
if response.raw._fp.fp:
return super( DockerClient, self )._get_raw_response_socket( response )
return None
def _split_datetime_from_line( line ):
"""Docker timestamps are in RFC3339 format: 2015-08-03T09:12:43.143757463Z, with everything up to the first space
being the timestamp.
"""
log_line = line
dt = datetime.datetime.utcnow()
pos = line.find( ' ' )
if pos > 0:
dt = scalyr_util.rfc3339_to_datetime( line[0:pos] )
log_line = line[pos+1:]
return (dt, log_line)
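# Hypothetical sketch (not part of the original module) of the split performed by
# _split_datetime_from_line above, assuming a microsecond-precision timestamp that
# datetime.strptime can parse directly; the real helper delegates full RFC3339
# (nanosecond) parsing to scalyr_util.rfc3339_to_datetime.
def _example_split_datetime_from_line(line='2015-08-03T09:12:43.143757Z hello from the container\n'):
    import datetime
    ts, msg = line.split(' ', 1)
    dt = datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.%fZ')
    return dt, msg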
def _get_short_cid( container_id ):
"""returns a shortened container id. Useful for logging, where using a full length container id
is not necessary and would just add noise to the log.
The shortened container id will contain enough information to uniquely
identify the container for most situations. Note: the returned value
should never be used as a key in a dict for containers because there is
always the remote possibility of a conflict (given a large enough number
of containers).
"""
# return the first 8 chars of the container id.
# we don't need to check for length because even if len( container_id ) < 8
# it's still valid to slice beyond the end of a string. See:
# https://docs.python.org/2/reference/expressions.html#slicings
return container_id[:8]
def _ignore_old_dead_container( container, created_before=None ):
"""
Returns True or False to determine whether we should ignore the
logs for a dead container, depending on whether the create time
of the container is before a certain threshold time (specified in
seconds since the epoch).
If the container was created before the threshold time, then the
container logs will be ignored.
Otherwise the logs of the dead container will be uploaded.
"""
# check for recently finished containers
if created_before is not None:
state = container.get( 'State', {} )
#ignore any that are finished and that are also too old
if state != 'running':
created = container.get( 'Created', 0 ) # default to a long time ago
if created < created_before:
return True
return False
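# Hypothetical usage sketch (not part of the original module): a container that exited and
# was created before the threshold is ignored, while a running container never is.
def _example_ignore_old_dead_container():
    old_dead = {'State': 'exited', 'Created': 100}
    still_running = {'State': 'running', 'Created': 100}
    assert _ignore_old_dead_container(old_dead, created_before=200) is True
    assert _ignore_old_dead_container(still_running, created_before=200) is False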
def _get_containers(client, ignore_container=None, restrict_to_container=None, logger=None,
only_running_containers=True, running_or_created_after=None, glob_list=None, include_log_path=False, k8s_cache=None,
k8s_include_by_default=True, k8s_namespaces_to_exclude=None, ignore_pod_sandboxes=True, current_time=None):
"""Queries the Docker API and returns a dict of running containers that maps container id to container name, and other info
@param client: A docker.Client object
@param ignore_container: String, a single container id to exclude from the results (useful for ignoring the scalyr_agent container)
@param restrict_to_container: String, a single container id that will be the only returned result
@param logger: scalyr_logging.Logger. Allows the caller to write logging output to a specific logger. If None the default agent.log
logger is used.
@param only_running_containers: Boolean. If true, will only return currently running containers
@param running_or_created_after: Unix timestamp. If specified, the results will include any currently running containers *and* any
dead containers that were created after the specified time. Used to pick up short-lived containers.
@param glob_list: List of glob strings that limits results to containers whose container names match one of the globs
@param include_log_path: Boolean. If true include the path to the raw log file on disk as part of the extra info mapped to the container id.
@param k8s_cache: KubernetesCache. If not None, k8s information (if it exists) for the container will be added as part of the extra info mapped to the container id
@param k8s_include_by_default: Boolean. If True, then all k8s containers are included by default, unless an include/exclude annotation excludes them.
If False, then all k8s containers are excluded by default, unless an include/exclude annotation includes them.
@param k8s_namespaces_to_exclude: List. The namespaces whose containers should be excluded.
@param ignore_pod_sandboxes: Boolean. If True then any k8s pod sandbox containers are ignored from the list of monitored containers
@param current_time: Timestamp since the epoch
"""
if logger is None:
logger = global_log
k8s_labels = {
'pod_uid': 'io.kubernetes.pod.uid',
'pod_name': 'io.kubernetes.pod.name',
'pod_namespace': 'io.kubernetes.pod.namespace',
'k8s_container_name': 'io.kubernetes.container.name'
}
if running_or_created_after is not None:
only_running_containers=False
result = {}
try:
filters = {"id": restrict_to_container} if restrict_to_container is not None else None
response = client.containers(filters=filters, all=not only_running_containers)
for container in response:
cid = container['Id']
short_cid = _get_short_cid( cid )
if ignore_container is not None and cid == ignore_container:
continue
# Note we need to *include* results that were created after the 'running_or_created_after' time.
# that means we need to *ignore* any containers created before that
# hence the reason 'created_before' is assigned to a value named '...created_after'
if _ignore_old_dead_container( container, created_before=running_or_created_after ):
continue
if len( container['Names'] ) > 0:
name = container['Names'][0].lstrip('/')
# ignore any pod sandbox containers
if ignore_pod_sandboxes:
container_type = container.get( 'Labels', {} ).get( 'io.kubernetes.docker.type', '' )
if container_type == 'podsandbox':
continue
add_container = True
if glob_list:
add_container = False
for glob in glob_list:
if fnmatch.fnmatch( name, glob ):
add_container = True
break
if add_container:
log_path = None
k8s_info = None
status = None
if include_log_path or k8s_cache is not None:
try:
info = client.inspect_container( cid )
log_path = info['LogPath'] if include_log_path and 'LogPath' in info else None
if not only_running_containers:
status = info['State']['Status']
if k8s_cache is not None:
config = info.get('Config', {} )
labels = config.get( 'Labels', {} )
k8s_info = {}
missing_field = False
for key, label in k8s_labels.iteritems():
value = labels.get( label )
if value:
k8s_info[key] = value
else:
missing_field = True
logger.warn( "Missing kubernetes label '%s' in container %s" % (label, short_cid), limit_once_per_x_secs=300,limit_key="docker-inspect-k8s-%s" % short_cid)
if missing_field:
logger.log( scalyr_logging.DEBUG_LEVEL_1, "Container Labels %s" % (scalyr_util.json_encode(labels)), limit_once_per_x_secs=300,limit_key="docker-inspect-container-dump-%s" % short_cid)
if 'pod_name' in k8s_info and 'pod_namespace' in k8s_info:
if k8s_namespaces_to_exclude is not None and k8s_info['pod_namespace'] in k8s_namespaces_to_exclude:
logger.log( scalyr_logging.DEBUG_LEVEL_2, "Excluding container '%s' based excluded namespaces" % short_cid)
continue
pod = k8s_cache.pod( k8s_info['pod_namespace'], k8s_info['pod_name'], current_time )
if pod:
k8s_info['pod_info'] = pod
k8s_container = k8s_info.get( 'k8s_container_name', None )
# check to see if we should exclude this container
default_exclude = not k8s_include_by_default
exclude = pod.exclude_pod( container_name=k8s_container, default=default_exclude)
if exclude:
if pod.annotations:
logger.log( scalyr_logging.DEBUG_LEVEL_2, "Excluding container '%s' based on pod annotations, %s" % (short_cid, str(pod.annotations)) )
continue
# add a debug message if containers are excluded by default but this container is included
if default_exclude and not exclude:
logger.log( scalyr_logging.DEBUG_LEVEL_2, "Including container '%s' based on pod annotations, %s" % (short_cid, str(pod.annotations)) )
except Exception, e:
logger.error("Error inspecting container '%s'" % cid, limit_once_per_x_secs=300,limit_key="docker-api-inspect")
result[cid] = {'name': name, 'log_path': log_path }
if status:
result[cid]['status'] = status
if k8s_info:
result[cid]['k8s_info'] = k8s_info
else:
result[cid] = {'name': cid, 'log_path': None}
except Exception, e: # container querying failed
logger.error("Error querying running containers", limit_once_per_x_secs=300,
limit_key='docker-api-running-containers' )
result = None
return result
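# Hypothetical usage sketch (not part of the original module): querying the local Docker
# daemon for running containers whose names match a glob, including their log paths. The
# socket path and glob are illustrative and must match the monitor's 'api_socket' option.
def _example_list_matching_containers():
    client = DockerClient(base_url='unix://var/scalyr/docker.sock', version='auto')
    return _get_containers(client, glob_list=['nginx*'], include_log_path=True)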
class ContainerChecker( StoppableThread ):
"""
Monitors containers to check when they start and stop running.
"""
def __init__( self, config, logger, socket_file, docker_api_version, host_hostname, data_path, log_path,
include_all, include_controller_info, namespaces_to_ignore,
ignore_pod_sandboxes ):
self._config = config
self._logger = logger
self.__delay = self._config.get( 'container_check_interval' )
self.__log_prefix = self._config.get( 'docker_log_prefix' )
self.__name = self._config.get( 'container_name' )
self.__use_v2_attributes = self._config.get('k8s_use_v2_attributes')
self.__use_v1_and_v2_attributes = self._config.get('k8s_use_v1_and_v2_attributes')
self.__parse_json = self._config.get( 'k8s_parse_json' )
self.__socket_file = socket_file
self.__docker_api_version = docker_api_version
self.__client = None
self.container_id = None
self.__log_path = log_path
self.__host_hostname = host_hostname
self.__readback_buffer_size = self._config.get( 'readback_buffer_size' )
self.__glob_list = config.get( 'container_globs' )
# The namespaces whose logs we should not collect.
self.__namespaces_to_ignore = namespaces_to_ignore
self.__ignore_pod_sandboxes = ignore_pod_sandboxes
# This is currently an experimental feature. Including controller information for every event uploaded about
# a pod (cluster name, controller name, controller labels)
self.__include_controller_info = include_controller_info
self.containers = {}
self.__include_all = include_all
self.__k8s = None
self.__k8s_cache_expiry_secs = self._config.get( 'k8s_cache_expiry_secs' )
self.__k8s_cache_purge_secs = self._config.get( 'k8s_cache_purge_secs' )
self.__k8s_cache_init_abort_delay = self._config.get( 'k8s_cache_init_abort_delay' )
self.k8s_cache = None
self.__log_watcher = None
self.__module = None
self.__start_time = time.time()
self.__thread = StoppableThread( target=self.check_containers, name="Container Checker" )
def start( self ):
try:
k8s_api_url = self._config.get('k8s_api_url')
if self._config.get( 'verify_k8s_api_queries' ):
self.__k8s = KubernetesApi(k8s_api_url=k8s_api_url)
else:
self.__k8s = KubernetesApi( ca_file=None, k8s_api_url=k8s_api_url)
self.__client = DockerClient( base_url=('unix:/%s'%self.__socket_file), version=self.__docker_api_version )
self.container_id = self.__get_scalyr_container_id( self.__client, self.__name )
# create the k8s cache
self.k8s_cache = KubernetesCache( self.__k8s, self._logger,
cache_expiry_secs=self.__k8s_cache_expiry_secs,
cache_purge_secs=self.__k8s_cache_purge_secs,
namespaces_to_ignore=self.__namespaces_to_ignore )
delay = 0.5
message_delay = 5
start_time = time.time()
message_time = start_time
abort = False
# wait until the k8s_cache is initialized before aborting
while not self.k8s_cache.is_initialized():
time.sleep( delay )
current_time = time.time()
# see if we need to print a message
elapsed = current_time - message_time
if elapsed > message_delay:
self._logger.log(scalyr_logging.DEBUG_LEVEL_0, 'start() - waiting for Kubernetes cache to be initialized' )
message_time = current_time
# see if we need to abort the monitor because we've been waiting too long for init
elapsed = current_time - start_time
if elapsed > self.__k8s_cache_init_abort_delay:
abort = True
break
if abort:
raise K8sInitException( "Unable to initialize kubernetes cache" )
# check to see if the user has manually specified a cluster name, and if so then
# force enable 'Starbuck' features
if self.k8s_cache.get_cluster_name() is not None:
self._logger.log( scalyr_logging.DEBUG_LEVEL_1, "ContainerChecker - cluster name detected, enabling v2 attributes and controller information" )
self.__use_v2_attributes = True
self.__include_controller_info = True
self._logger.log(scalyr_logging.DEBUG_LEVEL_2, 'Attempting to retrieve list of containers:' )
self.containers = _get_containers(self.__client, ignore_container=self.container_id,
glob_list=self.__glob_list, include_log_path=True,
k8s_cache=self.k8s_cache, k8s_include_by_default=self.__include_all,
k8s_namespaces_to_exclude=self.__namespaces_to_ignore)
# if querying the docker api fails, set the container list to empty
if self.containers is None:
self.containers = {}
self.raw_logs = []
self.docker_logs = self.__get_docker_logs( self.containers, self.k8s_cache )
#create and start the DockerLoggers
self.__start_docker_logs( self.docker_logs )
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, "Initialization complete. Starting k8s monitor for Scalyr" )
self.__thread.start()
except K8sInitException, e:
global_log.warn( "Failed to start container checker - %s. Aborting kubernetes_monitor" % (str(e)) )
raise
except Exception, e:
global_log.warn( "Failed to start container checker - %s\n%s" % (str(e), traceback.format_exc() ))
def stop( self, wait_on_join=True, join_timeout=5 ):
self.__thread.stop( wait_on_join=wait_on_join, join_timeout=join_timeout )
#stop the DockerLoggers
for logger in self.raw_logs:
path = logger['log_config']['path']
if self.__log_watcher:
self.__log_watcher.remove_log_path( self.__module.module_name, path )
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, "Stopping %s" % (path) )
self.raw_logs = []
def get_k8s_data( self ):
""" Convenience wrapper to query and process all pods
and pods retrieved by the k8s API.
A filter is used to limit pods returned to those that
are running on the current node
@return: a dict keyed by namespace, whose values are a dict of pods inside that namespace, keyed by pod name
"""
result = {}
try:
result = self.k8s_cache.pods_shallow_copy()
except Exception, e:
global_log.warn( "Failed to get k8s data: %s\n%s" % (str(e), traceback.format_exc() ),
limit_once_per_x_secs=300, limit_key='get_k8s_data' )
return result
def check_containers( self, run_state ):
"""Update thread for monitoring docker containers and the k8s info such as labels
"""
# Assert that the cache has been initialized
if not self.k8s_cache.is_initialized():
self._logger.log(scalyr_logging.DEBUG_LEVEL_0, 'container_checker - Kubernetes cache not initialized' )
raise K8sInitException( "check_container - Kubernetes cache not initialized. Aborting" )
# store the digests from the previous iteration of the main loop to see
# if any pod information has changed
prev_digests = {}
base_attributes = self.__get_base_attributes()
previous_time = time.time()
while run_state.is_running():
try:
self._logger.log(scalyr_logging.DEBUG_LEVEL_2, 'Attempting to retrieve list of containers:' )
current_time = time.time()
running_containers = _get_containers(
self.__client, ignore_container=self.container_id, running_or_created_after=previous_time,
glob_list=self.__glob_list, include_log_path=True, k8s_cache=self.k8s_cache,
k8s_include_by_default=self.__include_all, current_time=current_time,
k8s_namespaces_to_exclude=self.__namespaces_to_ignore)
previous_time = current_time - 1
# if running_containers is None, that means querying the docker api failed.
# rather than resetting the list of running containers to empty
# continue using the previous list of containers
if running_containers is None:
self._logger.log(scalyr_logging.DEBUG_LEVEL_2, 'Failed to get list of containers')
running_containers = self.containers
self._logger.log(scalyr_logging.DEBUG_LEVEL_2, 'Found %d containers' % len(running_containers))
#get the containers that have started since the last sample
starting = {}
changed = {}
digests = {}
for cid, info in running_containers.iteritems():
pod = None
if 'k8s_info' in info:
pod_name = info['k8s_info'].get( 'pod_name', 'invalid_pod' )
pod_namespace = info['k8s_info'].get( 'pod_namespace', 'invalid_namespace' )
pod = info['k8s_info'].get( 'pod_info', None )
if not pod:
self._logger.warning( "No pod info for container %s. pod: '%s/%s'" % (_get_short_cid( cid ), pod_namespace, pod_name),
limit_once_per_x_secs=300,
limit_key='check-container-pod-info-%s' % cid)
# start the container if have a container that wasn't running
if cid not in self.containers:
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, "Starting loggers for container '%s'" % info['name'] )
starting[cid] = info
elif cid in prev_digests:
# container was running and it exists in the previous digest dict, so see if
# it has changed
if pod and prev_digests[cid] != pod.digest:
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, "Pod digest changed for '%s'" % info['name'] )
changed[cid] = info
# store the digest from this iteration of the loop
if pod:
digests[cid] = pod.digest
#get the containers that have stopped
stopping = {}
for cid, info in self.containers.iteritems():
if cid not in running_containers:
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, "Stopping logger for container '%s' (%s)" % (info['name'], cid[:6] ) )
stopping[cid] = info
#stop the old loggers
self.__stop_loggers( stopping )
#update the list of running containers
#do this before starting new ones, as starting up new ones
#will access self.containers
self.containers = running_containers
#start the new ones
self.__start_loggers( starting, self.k8s_cache )
prev_digests = digests
# update the log config for any changed containers
if self.__log_watcher:
for logger in self.raw_logs:
if logger['cid'] in changed:
info = changed[logger['cid']]
new_config = self.__get_log_config_for_container( logger['cid'], info, self.k8s_cache, base_attributes )
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, "updating config for '%s'" % info['name'] )
self.__log_watcher.update_log_config( self.__module.module_name, new_config )
except Exception, e:
self._logger.warn( "Exception occurred when checking containers %s\n%s" % (str( e ), traceback.format_exc()) )
run_state.sleep_but_awaken_if_stopped( self.__delay )
def set_log_watcher( self, log_watcher, module ):
self.__log_watcher = log_watcher
self.__module = module
def __get_scalyr_container_id( self, client, name ):
"""Gets the container id of the scalyr-agent container
If the config option container_name is empty, then it is assumed that the scalyr agent is running
on the host and not in a container and None is returned.
"""
result = None
regex = None
if name is not None:
regex = re.compile( name )
# get all the containers
containers = client.containers()
for container in containers:
# see if we are checking on names
if name is not None:
# if so, loop over all container names for this container
# Note: containers should only have one name, but the 'Names' field
# is a list, so iterate over it just in case
for cname in container['Names']:
cname = cname.lstrip( '/' )
# check if the name regex matches
m = regex.match( cname )
if m:
result = container['Id']
break
# not checking container name, so check the Command instead to see if it's the agent
else:
if container['Command'].startswith( '/usr/sbin/scalyr-agent-2' ):
result = container['Id']
if result:
break
if not result:
# only raise an exception if we were looking for a specific name but couldn't find it
if name is not None:
raise Exception( "Unable to find a matching container id for container '%s'. Please make sure that a "
"container matching the regular expression '%s' is running." % (name, name) )
return result
def __stop_loggers( self, stopping ):
"""
Stops any DockerLoggers in the 'stopping' dict
@param: stopping - a dict of container ids => container names. Any running containers that have
the same container-id as a key in the dict will be stopped.
"""
if stopping:
self._logger.log(scalyr_logging.DEBUG_LEVEL_2, 'Stopping all docker loggers')
# go through all the raw logs and see if any of them exist in the stopping list, and if so, stop them
for logger in self.raw_logs:
cid = logger['cid']
if cid in stopping:
path = logger['log_config']['path']
if self.__log_watcher:
self.__log_watcher.schedule_log_path_for_removal( self.__module.module_name, path )
self.raw_logs[:] = [l for l in self.raw_logs if l['cid'] not in stopping]
self.docker_logs[:] = [l for l in self.docker_logs if l['cid'] not in stopping]
def __start_loggers( self, starting, k8s_cache ):
"""
Starts a list of DockerLoggers
@param: starting - a list of DockerLoggers to start
"""
if starting:
self._logger.log(scalyr_logging.DEBUG_LEVEL_2, 'Starting all docker loggers')
docker_logs = self.__get_docker_logs( starting, k8s_cache )
self.__start_docker_logs( docker_logs )
self.docker_logs.extend( docker_logs )
def __start_docker_logs( self, docker_logs ):
for log in docker_logs:
if self.__log_watcher:
log['log_config'] = self.__log_watcher.add_log_config( self.__module, log['log_config'] )
self.raw_logs.append( log )
def __get_last_request_for_log( self, path ):
result = datetime.datetime.fromtimestamp( self.__start_time )
try:
full_path = os.path.join( self.__log_path, path )
fp = open( full_path, 'r', self.__readback_buffer_size )
# seek readback buffer bytes from the end of the file
fp.seek( 0, os.SEEK_END )
size = fp.tell()
if size < self.__readback_buffer_size:
fp.seek( 0, os.SEEK_SET )
else:
fp.seek( size - self.__readback_buffer_size, os.SEEK_SET )
first = True
for line in fp:
# ignore the first line because it likely started somewhere randomly
# in the line
if first:
first = False
continue
dt, _ = _split_datetime_from_line( line )
if dt:
result = dt
fp.close()
except Exception, e:
global_log.info( "%s", str(e) )
return scalyr_util.seconds_since_epoch( result )
def __create_log_config( self, parser, path, attributes, parse_as_json=False ):
"""Convenience function to create a log_config dict from the parameters"""
return { 'parser': parser,
'path': path,
'parse_lines_as_json' : parse_as_json,
'attributes': attributes
}
def __get_base_attributes( self ):
attributes = None
try:
attributes = JsonObject( { "monitor": "agentKubernetes" } )
if self.__host_hostname:
attributes['serverHost'] = self.__host_hostname
except Exception, e:
self._logger.error( "Error setting monitor attribute in KubernetesMonitor" )
raise
return attributes
def __get_log_config_for_container( self, cid, info, k8s_cache, base_attributes ):
result = None
container_attributes = base_attributes.copy()
if not self.__use_v2_attributes or self.__use_v1_and_v2_attributes:
container_attributes['containerName'] = info['name']
container_attributes['containerId'] = cid
elif self.__use_v2_attributes or self.__use_v1_and_v2_attributes:
container_attributes['container_id'] = cid
parser = 'docker'
common_annotations = {}
container_annotations = {}
# pod name and namespace are set to an invalid value for cases where errors occur and a log
# message is produced, so that the log message has clearly invalid values for these rather
# than just being empty
pod_name = '--'
pod_namespace = '--'
short_cid = _get_short_cid( cid )
# dict of available substitutions for the rename_logfile field
rename_vars = {
'short_id' : short_cid,
'container_id' : cid,
'container_name' : info['name'],
}
k8s_info = info.get( 'k8s_info', {} )
if k8s_info:
pod_name = k8s_info.get('pod_name', 'invalid_pod')
pod_namespace = k8s_info.get('pod_namespace', 'invalid_namespace')
self._logger.log( scalyr_logging.DEBUG_LEVEL_1, "got k8s info for container %s, '%s/%s'" % (short_cid, pod_namespace, pod_name) )
pod = k8s_cache.pod( pod_namespace, pod_name )
if pod:
rename_vars['pod_name'] = pod.name
rename_vars['namespace'] = pod.namespace
rename_vars['node_name'] = pod.node_name
container_attributes['pod_name'] = pod.name
container_attributes['pod_namespace'] = pod.namespace
container_attributes['pod_uid'] = pod.uid
if not self.__use_v2_attributes or self.__use_v1_and_v2_attributes:
container_attributes['node_name'] = pod.node_name
elif self.__use_v2_attributes or self.__use_v1_and_v2_attributes:
container_attributes['k8s_node'] = pod.node_name
container_attributes['scalyr-category'] = 'log'
for label, value in pod.labels.iteritems():
container_attributes[label] = value
if 'parser' in pod.labels:
parser = pod.labels['parser']
# get the controller information if any
if pod.controller is not None:
controller = pod.controller
# for backwards compatibility allow both deployment_name and controller_name here
rename_vars['deployment_name'] = controller.name
rename_vars['controller_name'] = controller.name
if self.__include_controller_info:
container_attributes['_k8s_dn'] = controller.name
container_attributes['_k8s_dl'] = controller.flat_labels
container_attributes['_k8s_ck'] = controller.kind
# get the cluster name
cluster_name = k8s_cache.get_cluster_name()
if self.__include_controller_info and cluster_name is not None:
container_attributes['_k8s_cn'] = cluster_name
# get the annotations of this pod as a dict.
# by default all annotations will be applied to all containers
# in the pod
all_annotations = pod.annotations
container_specific_annotations = False
# get any common annotations for all containers
for annotation, value in all_annotations.iteritems():
if annotation in pod.container_names:
container_specific_annotations = True
else:
common_annotations[annotation] = value
# now get any container specific annotations
# for this container
if container_specific_annotations:
k8s_container_name = k8s_info.get('k8s_container_name', '')
if k8s_container_name in all_annotations:
# get the annotations for this container
container_annotations = all_annotations[k8s_container_name]
# sanity check to make sure annotations are either a JsonObject or dict
if not isinstance( container_annotations, JsonObject ) and not isinstance( container_annotations, dict ):
self._logger.warning( "Unexpected configuration found in annotations for pod '%s/%s'. Expected a dict for configuration of container '%s', but got a '%s' instead. No container specific configuration options applied." % ( pod.namespace, pod.name, k8s_container_name, str( type(container_annotations) ) ),
limit_once_per_x_secs=300,
limit_key='k8s-invalid-container-config-%s' % cid)
container_annotations = {}
else:
self._logger.warning( "Couldn't map container '%s' to pod '%s/%s'. Logging limited metadata from docker container labels instead." % ( short_cid, pod_namespace, pod_name ),
limit_once_per_x_secs=300,
limit_key='k8s-docker-mapping-%s' % cid)
container_attributes['pod_name'] = pod_name
container_attributes['pod_namespace'] = pod_namespace
container_attributes['pod_uid'] = k8s_info.get('pod_uid', 'invalid_uid')
container_attributes['k8s_container_name'] = k8s_info.get('k8s_container_name', 'invalid_container_name')
else:
self._logger.log( scalyr_logging.DEBUG_LEVEL_1, "no k8s info for container %s" % short_cid )
if 'log_path' in info and info['log_path']:
result = self.__create_log_config( parser=parser, path=info['log_path'], attributes=container_attributes, parse_as_json=self.__parse_json )
result['rename_logfile'] = '/docker/%s.log' % info['name']
# This is for a hack to prevent the original log file name from being added to the attributes.
if self.__use_v2_attributes and not self.__use_v1_and_v2_attributes:
result['rename_no_original'] = True
# apply common annotations first
annotations = common_annotations
# set/override any container specific annotations
annotations.update( container_annotations )
# ignore include/exclude options which have special
# handling in the log_config verification that expects a different type than the one used in the annotations
skip_keys = [ 'include', 'exclude' ]
# list of config items that cannot be updated via annotations
invalid_keys = [ 'path', 'lineGroupers' ]
# set config items, ignoring invalid options and taking care to
# handle attributes
for key, value in annotations.iteritems():
if key in skip_keys:
continue
if key in invalid_keys:
self._logger.warning( "Invalid key '%s' found in annotation config for '%s/%s'. Configuration of '%s' is not currently supported via annotations and has been ignored." % (key, pod_namespace, pod_name, key ),
limit_once_per_x_secs=300,
limit_key='k8s-invalid-annotation-config-key-%s' % key)
continue
# we need to make sure we update attributes rather
# than overriding the entire dict, otherwise we'll override pod_name, namespace etc
if key == 'attributes':
if 'attributes' not in result:
result['attributes'] = {}
attrs = result['attributes']
attrs.update( value )
# we also need to override the top level parser value if attributes['parser'] is set
if 'parser' in attrs:
result['parser'] = attrs['parser']
continue
elif key == 'rename_logfile':
# rename logfile supports string substitutions
# so update value if necessary
template = Template( value )
value = template.safe_substitute( rename_vars )
# everything else is added to the log_config result as is
result[key] = value
return result
def __get_docker_logs( self, containers, k8s_cache ):
"""Returns a list of dicts containing the container id, stream, and a log_config
for each container in the 'containers' param.
"""
result = []
attributes = self.__get_base_attributes()
prefix = self.__log_prefix + '-'
for cid, info in containers.iteritems():
log_config = self.__get_log_config_for_container( cid, info, k8s_cache, attributes )
if log_config:
result.append( { 'cid': cid, 'stream': 'raw', 'log_config': log_config } )
return result
class KubernetesMonitor( ScalyrMonitor ):
"""
# Kubernetes Monitor
This monitor is based on the docker_monitor plugin, and uses the raw logs mode of the docker
plugin to send Kubernetes logs to Scalyr. It also reads labels from the Kubernetes API and
associates them with the appropriate logs.
## Log Config via Annotations
The logs collected by the Kubernetes monitor can be configured via k8s pod annotations.
The monitor examines all annotations on all pods, and for any annotation that begins with the
prefix log.config.scalyr.com/, it extracts the
entries (minus the prefix) and maps them to the log_config stanza for that pod's containers.
The mapping is described below.
The following fields can be configured for a log via pod annotations:
* parser
* attributes
* sampling_rules
* rename_logfile
* redaction_rules
These behave in the same way as specified in the main [Scalyr help
docs](https://www.scalyr.com/help/scalyr-agent#logUpload). The following configuration
fields behave differently when configured via k8s annotations:
* exclude (see below)
* lineGroupers (not supported at all)
* path (the path is always fixed for k8s container logs)
### Excluding Logs
Containers and pods can be specifically included/excluded from having their logs collected and
sent to Scalyr. Unlike the normal log_config `exclude` option which takes an array of log path
exclusion globs, annotations simply support a Boolean true/false for a given container/pod.
Both `include` and `exclude` are supported, with `include` always overriding `exclude` if both
are set. e.g.
log.config.scalyr.com/exclude: true
has the same effect as
log.config.scalyr.com/include: false
By default the agent monitors the logs of all pods/containers, and you have to manually exclude
pods/containers you don't want. You can also set `k8s_include_all_containers: false` in the
kubernetes_monitor monitor config section of `agent.d/docker.json`, in which case all containers are
excluded by default and have to be manually included.
### Specifying Config Options
The Kubernetes monitor takes the string value of each annotation and maps it to a dict, or
array value according to the following format:
Values separated by a period are mapped to dict keys e.g. if one annotation on a given pod was
specified as:
log.config.scalyr.com/attributes.parser: accessLog
Then this would be mapped to the following dict, which would then be applied to the log config
for all containers in that pod:
{ "attributes": { "parser": "accessLog" } }
Arrays can be specified by using one or more digits as the key, e.g. if the annotation was
log.config.scalyr.com/sampling_rules.0.match_expression: INFO
log.config.scalyr.com/sampling_rules.0.sampling_rate: 0.1
log.config.scalyr.com/sampling_rules.1.match_expression: FINE
log.config.scalyr.com/sampling_rules.1.sampling_rate: 0
This will be mapped to the following structure:
{ "sampling_rules":
[
{ "match_expression": "INFO", "sampling_rate": 0.1 },
{ "match_expression": "FINE", "sampling_rate": 0 }
]
}
Array keys are sorted by numeric order before processing and unique objects need to have
different digits as the array key. If a sub-key has an identical array key as a previously seen
sub-key, then the previous value of the sub-key is overwritten
There is no guarantee about the order of processing for items with the same numeric array key,
so if the config was specified as:
log.config.scalyr.com/sampling_rules.0.match_expression: INFO
log.config.scalyr.com/sampling_rules.0.match_expression: FINE
It is not defined or guaranteed what the actual value will be (INFO or FINE).
### Applying config options to specific containers in a pod
If a pod has multiple containers and you only want to apply log configuration options to a
specific container you can do so by prefixing the option with the container name, e.g. if you
had a pod with two containers `nginx` and `helper1` and you wanted to exclude `helper1` logs you
could specify the following annotation:
log.config.scalyr.com/helper1.exclude: true
Config items specified without a container name are applied to all containers in the pod, but
container specific settings will override pod-level options, e.g. in this example:
log.config.scalyr.com/exclude: true
log.config.scalyr.com/nginx.include: true
All containers in the pod would be excluded *except* for the nginx container, which is included.
This technique is applicable for all log config options, not just include/exclude. For
example you could set the line sampling rules for all containers in a pod, but use a different set
of line sampling rules for one specific container in the pod if needed.
### Dynamic Updates
Currently all annotation config options except `exclude: true`/`include: false` can be
dynamically updated using the `kubectl annotate` command.
For `exclude: true`/`include: false` once a pod/container has started being logged, then while the
container is still running, there is currently no way to dynamically start/stop logging of that
container using annotations without updating the config yaml, and applying the updated config to the
cluster.
"""
def __get_socket_file( self ):
"""Gets the Docker API socket file and validates that it is a UNIX socket
"""
#make sure the API socket exists and is a valid socket
api_socket = self._config.get( 'api_socket' )
try:
st = os.stat( api_socket )
if not stat.S_ISSOCK( st.st_mode ):
raise Exception()
except:
raise Exception( "The file '%s' specified by the 'api_socket' configuration option does not exist or is not a socket.\n\tPlease make sure you have mapped the docker socket from the host to this container using the -v parameter.\n\tNote: Due to problems Docker has mapping symbolic links, you should specify the final file and not a path that contains a symbolic link, e.g. map /run/docker.sock rather than /var/run/docker.sock as on many unices /var/run is a symbolic link to the /run directory." % api_socket )
return api_socket
def _initialize( self ):
data_path = ""
log_path = ""
host_hostname = ""
# Since getting metrics from Docker takes a non-trivial amount of time, we will deduct the time spent
# in gathering the metric samples from the time we should sleep so that we do gather a sample once every
# sample_interval_secs
self._adjust_sleep_by_gather_time = True
# Override the default value for the rate limit for writing the metric logs. We override it to set no limit
# because it is fairly difficult to bound this since the log will emit X metrics for every pod being monitored.
self._log_write_rate = self._config.get('monitor_log_write_rate', convert_to=int, default=-1)
self._log_max_write_burst = self._config.get('monitor_log_max_write_burst', convert_to=int, default=-1)
if self._global_config:
data_path = self._global_config.agent_data_path
log_path = self._global_config.agent_log_path
if self._global_config.server_attributes:
if 'serverHost' in self._global_config.server_attributes:
host_hostname = self._global_config.server_attributes['serverHost']
else:
self._logger.info( "no server host in server attributes" )
else:
self._logger.info( "no server attributes in global config" )
# The namespace whose logs we should not collect.
self.__namespaces_to_ignore = []
for x in self._config.get('k8s_ignore_namespaces').split():
self.__namespaces_to_ignore.append(x.strip())
self.__ignore_pod_sandboxes = self._config.get('k8s_ignore_pod_sandboxes')
self.__socket_file = self.__get_socket_file()
self.__docker_api_version = self._config.get( 'docker_api_version' )
self.__k8s_api_url = self._config.get('k8s_api_url')
self.__client = DockerClient( base_url=('unix:/%s'%self.__socket_file), version=self.__docker_api_version )
self.__metric_fetcher = DockerMetricFetcher(self.__client, self._config.get('docker_max_parallel_stats'),
self._logger)
self.__glob_list = self._config.get( 'container_globs' )
self.__include_all = self._config.get( 'k8s_include_all_containers' )
self.__report_container_metrics = self._config.get('report_container_metrics')
self.__report_k8s_metrics = self._config.get('report_k8s_metrics') and self.__report_container_metrics
# Object for talking to the kubelet server running on this localhost. This is used to gather metrics only
# available via the kubelet.
self.__kubelet_api = None
self.__gather_k8s_pod_info = self._config.get('gather_k8s_pod_info')
# Including controller information for every event uploaded about a pod (cluster name, controller name,
# controller labels)
self.__include_controller_info = self._config.get('include_deployment_info', convert_to=bool, default=False)
self.__container_checker = None
if self._config.get('log_mode') != 'syslog':
self.__container_checker = ContainerChecker( self._config, self._logger, self.__socket_file,
self.__docker_api_version, host_hostname, data_path, log_path,
self.__include_all, self.__include_controller_info,
self.__namespaces_to_ignore, self.__ignore_pod_sandboxes )
# Metrics provided by the kubelet API.
self.__k8s_pod_network_metrics = {
'k8s.pod.network.rx_bytes': 'rxBytes',
'k8s.pod.network.rx_errors': 'rxErrors',
'k8s.pod.network.tx_bytes': 'txBytes',
'k8s.pod.network.tx_errors': 'txErrors',
}
        # Metrics provided by the kubelet API.
self.__k8s_node_network_metrics = {
'k8s.node.network.rx_bytes': 'rxBytes',
'k8s.node.network.rx_errors': 'rxErrors',
'k8s.node.network.tx_bytes': 'txBytes',
'k8s.node.network.tx_errors': 'txErrors',
}
# All the docker. metrics are provided by the docker API.
self.__network_metrics = self.__build_metric_dict( 'docker.net.', [
"rx_bytes",
"rx_dropped",
"rx_errors",
"rx_packets",
"tx_bytes",
"tx_dropped",
"tx_errors",
"tx_packets",
])
self.__mem_stat_metrics = self.__build_metric_dict( 'docker.mem.stat.', [
"total_pgmajfault",
"cache",
"mapped_file",
"total_inactive_file",
"pgpgout",
"rss",
"total_mapped_file",
"writeback",
"unevictable",
"pgpgin",
"total_unevictable",
"pgmajfault",
"total_rss",
"total_rss_huge",
"total_writeback",
"total_inactive_anon",
"rss_huge",
"hierarchical_memory_limit",
"total_pgfault",
"total_active_file",
"active_anon",
"total_active_anon",
"total_pgpgout",
"total_cache",
"inactive_anon",
"active_file",
"pgfault",
"inactive_file",
"total_pgpgin"
])
self.__mem_metrics = self.__build_metric_dict( 'docker.mem.', [
"max_usage",
"usage",
"fail_cnt",
"limit"
])
self.__cpu_usage_metrics = self.__build_metric_dict( 'docker.cpu.', [
"usage_in_usermode",
"total_usage",
"usage_in_kernelmode"
])
self.__cpu_throttling_metrics = self.__build_metric_dict( 'docker.cpu.throttling.', [
"periods",
"throttled_periods",
"throttled_time"
])
def set_log_watcher( self, log_watcher ):
"""Provides a log_watcher object that monitors can use to add/remove log files
"""
if self.__container_checker:
self.__container_checker.set_log_watcher( log_watcher, self )
def __build_metric_dict( self, prefix, names ):
result = {}
for name in names:
result["%s%s"%(prefix, name)] = name
return result
def __log_metrics( self, monitor_override, metrics_to_emit, metrics, extra=None ):
if metrics is None:
return
for key, value in metrics_to_emit.iteritems():
if value in metrics:
                # Note, we do a bit of a hack to pretend the monitor's name includes the container/pod's name. We take this
# approach because the Scalyr servers already have some special logic to collect monitor names and ids
# to help auto generate dashboards. So, we want a monitor name like `docker_monitor(foo_container)`
# for each running container.
self._logger.emit_value( key, metrics[value], extra, monitor_id_override=monitor_override )
def __log_network_interface_metrics( self, container, metrics, interface=None, k8s_extra={} ):
""" Logs network interface metrics
@param: container - name of the container the log originated from
@param: metrics - a dict of metrics keys/values to emit
@param: interface - an optional interface value to associate with each metric value emitted
@param: k8s_extra - extra k8s specific key/value pairs to associate with each metric value emitted
"""
extra = None
if interface:
if k8s_extra is None:
extra = {}
else:
extra = k8s_extra.copy()
extra['interface'] = interface
self.__log_metrics( container, self.__network_metrics, metrics, extra )
def __log_memory_stats_metrics( self, container, metrics, k8s_extra ):
""" Logs memory stats metrics
@param: container - name of the container the log originated from
@param: metrics - a dict of metrics keys/values to emit
@param: k8s_extra - extra k8s specific key/value pairs to associate with each metric value emitted
"""
if 'stats' in metrics:
self.__log_metrics( container, self.__mem_stat_metrics, metrics['stats'], k8s_extra )
self.__log_metrics( container, self.__mem_metrics, metrics, k8s_extra )
def __log_cpu_stats_metrics( self, container, metrics, k8s_extra ):
""" Logs cpu stats metrics
@param: container - name of the container the log originated from
@param: metrics - a dict of metrics keys/values to emit
@param: k8s_extra - extra k8s specific key/value pairs to associate with each metric value emitted
"""
if 'cpu_usage' in metrics:
cpu_usage = metrics['cpu_usage']
if 'percpu_usage' in cpu_usage:
percpu = cpu_usage['percpu_usage']
count = 1
if percpu:
for usage in percpu:
# Use dev for the CPU number since it is a known tag for Scalyr to use in delta computation.
extra = { 'dev' : count }
if k8s_extra is not None:
extra.update(k8s_extra)
self._logger.emit_value( 'docker.cpu.usage', usage, extra, monitor_id_override=container )
count += 1
self.__log_metrics( container, self.__cpu_usage_metrics, cpu_usage, k8s_extra )
if 'system_cpu_usage' in metrics:
self._logger.emit_value( 'docker.cpu.system_cpu_usage', metrics['system_cpu_usage'], k8s_extra,
monitor_id_override=container )
if 'throttling_data' in metrics:
self.__log_metrics( container, self.__cpu_throttling_metrics, metrics['throttling_data'], k8s_extra )
def __log_json_metrics( self, container, metrics, k8s_extra ):
""" Log docker metrics based on the JSON response returned from querying the Docker API
@param: container - name of the container the log originated from
@param: metrics - a dict of metrics keys/values to emit
@param: k8s_extra - extra k8s specific key/value pairs to associate with each metric value emitted
"""
for key, value in metrics.iteritems():
if value is None:
continue
if key == 'networks':
for interface, network_metrics in value.iteritems():
self.__log_network_interface_metrics( container, network_metrics, interface, k8s_extra=k8s_extra )
elif key == 'network':
                self.__log_network_interface_metrics( container, value, k8s_extra=k8s_extra )
elif key == 'memory_stats':
self.__log_memory_stats_metrics( container, value, k8s_extra )
elif key == 'cpu_stats':
self.__log_cpu_stats_metrics( container, value, k8s_extra )
def __gather_metrics_from_api_for_container( self, container, k8s_extra ):
""" Query the Docker API for container metrics
@param: container - name of the container to query
@param: k8s_extra - extra k8s specific key/value pairs to associate with each metric value emitted
"""
result = self.__metric_fetcher.get_metrics(container)
if result is not None:
self.__log_json_metrics( container, result, k8s_extra )
def __build_k8s_controller_info( self, pod ):
"""
Builds a dict containing information about the controller settings for a given pod
@param: pod - a PodInfo object containing basic information (namespace/name) about the pod to query
@return: a dict containing the controller name for the controller running
the specified pod, or an empty dict if the pod is not part of a controller
"""
k8s_extra = {}
if pod is not None:
            # default key and controller name
key = 'k8s-controller'
name = 'none'
# check if we have a controller, and if so use it
controller = pod.controller
if controller is not None:
# use one of the predefined key if this is a controller kind we know about
if controller.kind in _CONTROLLER_KEYS:
key = _CONTROLLER_KEYS[controller.kind]
name = controller.name
k8s_extra = {
key: name
}
return k8s_extra
def __get_k8s_controller_info( self, container ):
"""
Gets information about the kubernetes controller of a given container
@param: container - a dict containing information about a container, returned by _get_containers
"""
k8s_info = container.get( 'k8s_info', {} )
pod = k8s_info.get( 'pod_info', None )
if pod is None:
return None
return self.__build_k8s_controller_info( pod )
def __get_cluster_info( self, cluster_name ):
""" returns a dict of values about the cluster """
cluster_info = {}
if self.__include_controller_info and cluster_name is not None:
cluster_info['k8s-cluster'] = cluster_name
return cluster_info
def __gather_metrics_from_api( self, containers, cluster_name ):
cluster_info = self.__get_cluster_info( cluster_name )
for cid, info in containers.iteritems():
self.__metric_fetcher.prefetch_metrics(info['name'])
for cid, info in containers.iteritems():
k8s_extra = {}
if self.__include_controller_info:
k8s_extra = self.__get_k8s_controller_info( info )
if k8s_extra is not None:
k8s_extra.update( cluster_info )
k8s_extra.update({'pod_uid': info['name']})
self.__gather_metrics_from_api_for_container( info['name'], k8s_extra )
def __gather_k8s_metrics_for_node( self, node, extra ):
"""
        Gathers metrics from a Kubelet API response for the node
        @param: node - A JSON Object from a response to a Kubelet API query
@param: extra - Extra fields to append to each metric
"""
name = node.get( "nodeName", None )
if name is None:
return
node_extra = {
'node_name': name
}
node_extra.update(extra)
for key, metrics in node.iteritems():
if key == 'network':
self.__log_metrics( name, self.__k8s_node_network_metrics, metrics, node_extra )
def __gather_k8s_metrics_for_pod( self, pod_metrics, pod_info, k8s_extra ):
"""
Gathers metrics from a Kubelet API response for a specific pod
@param: pod_metrics - A JSON Object from a response to a Kubelet API query
@param: pod_info - A PodInfo structure regarding the pod in question
@param: k8s_extra - Extra k8s specific fields to append to each metric
"""
extra = {
'pod_uid': pod_info.uid
}
extra.update( k8s_extra )
for key, metrics in pod_metrics.iteritems():
if key == 'network':
self.__log_metrics( pod_info.uid, self.__k8s_pod_network_metrics, metrics, extra )
def __gather_k8s_metrics_from_kubelet( self, containers, kubelet_api, cluster_name ):
"""
Gathers k8s metrics from a response to a stats query of the Kubelet API
@param: containers - a dict returned by _get_containers with info for all containers we are interested in
@param: kubelet_api - a KubeletApi object for querying the KubeletApi
@param: cluster_name - the name of the k8s cluster
"""
cluster_info = self.__get_cluster_info( cluster_name )
# get set of pods we are interested in querying
pod_info = {}
for cid, info in containers.iteritems():
k8s_info = info.get( 'k8s_info', {} )
pod = k8s_info.get( 'pod_info', None )
if pod is None:
continue
pod_info[pod.uid] = pod
try:
stats = kubelet_api.query_stats()
node = stats.get( 'node', {} )
if node:
self.__gather_k8s_metrics_for_node( node, cluster_info )
pods = stats.get( 'pods', [] )
# process pod stats, skipping any that are not in our list
# of pod_info
for pod in pods:
pod_ref = pod.get( 'podRef', {} )
pod_uid = pod_ref.get( 'uid', '<invalid>' )
if pod_uid not in pod_info:
continue
info = pod_info[pod_uid]
controller_info = {}
if self.__include_controller_info:
controller_info = self.__build_k8s_controller_info( info )
controller_info.update( cluster_info )
self.__gather_k8s_metrics_for_pod( pod, info, controller_info )
except KubeletApiException, e:
self._logger.warning( "Error querying kubelet API: %s" % str( e ),
limit_once_per_x_secs=300,
limit_key='kubelet-api-query' )
def gather_sample( self ):
k8s_cache = None
if self.__container_checker:
k8s_cache = self.__container_checker.k8s_cache
cluster_name = None
if k8s_cache is not None:
cluster_name = k8s_cache.get_cluster_name()
# gather metrics
containers = None
if self.__report_container_metrics:
containers = _get_containers(self.__client, ignore_container=None, glob_list=self.__glob_list,
k8s_cache=k8s_cache, k8s_include_by_default=self.__include_all,
k8s_namespaces_to_exclude=self.__namespaces_to_ignore)
try:
if containers:
if self.__report_container_metrics:
self._logger.log(scalyr_logging.DEBUG_LEVEL_3, 'Attempting to retrieve metrics for %d containers' % len(containers))
self.__gather_metrics_from_api( containers, cluster_name )
if self.__report_k8s_metrics:
self._logger.log(scalyr_logging.DEBUG_LEVEL_3, 'Attempting to retrieve k8s metrics %d' % len(containers))
self.__gather_k8s_metrics_from_kubelet( containers, self.__kubelet_api, cluster_name )
except Exception, e:
self._logger.exception( "Unexpected error logging metrics: %s" %( str(e) ) )
if self.__gather_k8s_pod_info:
cluster_info = self.__get_cluster_info( cluster_name )
containers = _get_containers( self.__client, only_running_containers=False, k8s_cache=k8s_cache,
k8s_include_by_default=self.__include_all,
k8s_namespaces_to_exclude=self.__namespaces_to_ignore)
for cid, info in containers.iteritems():
try:
extra = info.get( 'k8s_info', {} )
extra['status'] = info.get('status', 'unknown')
if self.__include_controller_info:
controller = self.__get_k8s_controller_info( info )
extra.update( controller )
extra.update( cluster_info )
namespace = extra.get( 'pod_namespace', 'invalid-namespace' )
self._logger.emit_value( 'docker.container_name', info['name'], extra, monitor_id_override="namespace:%s" % namespace )
except Exception, e:
self._logger.error( "Error logging container information for %s: %s" % (_get_short_cid( cid ), str( e )) )
if self.__container_checker:
namespaces = self.__container_checker.get_k8s_data()
for namespace, pods in namespaces.iteritems():
for pod_name, pod in pods.iteritems():
try:
extra = { 'pod_uid': pod.uid,
'pod_namespace': pod.namespace,
'node_name': pod.node_name }
if self.__include_controller_info:
controller_info = self.__build_k8s_controller_info( pod )
if controller_info:
extra.update( controller_info )
extra.update( cluster_info )
self._logger.emit_value( 'k8s.pod', pod.name, extra, monitor_id_override="namespace:%s" % pod.namespace )
except Exception, e:
self._logger.error( "Error logging pod information for %s: %s" % (pod.name, str( e )) )
def run( self ):
# workaround a multithread initialization problem with time.strptime
# see: http://code-trick.com/python-bug-attribute-error-_strptime/
# we can ignore the result
tm = time.strptime( "2016-08-29", "%Y-%m-%d" )
if self.__container_checker:
self.__container_checker.start()
try:
# check to see if the user has manually specified a cluster name, and if so then
# force enable 'Starbuck' features
if self.__container_checker and self.__container_checker.k8s_cache.get_cluster_name() is not None:
self._logger.log( scalyr_logging.DEBUG_LEVEL_1, "Cluster name detected, enabling k8s metric reporting and controller information" )
self.__include_controller_info = True
self.__report_k8s_metrics = self.__report_container_metrics
if self.__report_k8s_metrics:
k8s = KubernetesApi(k8s_api_url=self.__k8s_api_url)
self.__kubelet_api = KubeletApi( k8s )
except Exception, e:
self._logger.error( "Error creating KubeletApi object. Kubernetes metrics will not be logged: %s" % str( e ) )
self.__report_k8s_metrics = False
global_log.info('kubernetes_monitor parameters: ignoring namespaces: %s, report_controllers %s, '
'report_metrics %s' % (','.join(self.__namespaces_to_ignore),
str(self.__include_controller_info),
str(self.__report_container_metrics)))
ScalyrMonitor.run( self )
def stop(self, wait_on_join=True, join_timeout=5):
#stop the main server
ScalyrMonitor.stop( self, wait_on_join=wait_on_join, join_timeout=join_timeout )
if self.__container_checker is not None:
self.__container_checker.stop( wait_on_join, join_timeout )
if self.__metric_fetcher is not None:
self.__metric_fetcher.stop()
| 51.106667
| 523
| 0.634251
|
071987ac59a87f2dda87de96212d27e8bcff982c
| 11,259
|
py
|
Python
|
flexget/plugins/output/subtitles_subliminal.py
|
tarzasai/Flexget
|
e5822874b2ee088b508390ff02c4eda9785596bc
|
[
"MIT"
] | 1
|
2018-05-02T21:14:50.000Z
|
2018-05-02T21:14:50.000Z
|
flexget/plugins/output/subtitles_subliminal.py
|
tarzasai/Flexget
|
e5822874b2ee088b508390ff02c4eda9785596bc
|
[
"MIT"
] | null | null | null |
flexget/plugins/output/subtitles_subliminal.py
|
tarzasai/Flexget
|
e5822874b2ee088b508390ff02c4eda9785596bc
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import collections
import logging
import os
import sys
import tempfile
from flexget import plugin
from flexget.event import event
log = logging.getLogger('subtitles')
try:
from subliminal.extensions import provider_manager
PROVIDERS = provider_manager.names()
except ImportError:
PROVIDERS = [
'opensubtitles',
'thesubdb',
'podnapisi',
'addic7ed',
'tvsubtitles'
]
AUTHENTICATION_SCHEMA = dict((provider, {'type': 'object'}) for provider in PROVIDERS)
class PluginSubliminal(object):
"""
Search and download subtitles using Subliminal by Antoine Bertin
(https://pypi.python.org/pypi/subliminal).
Example (complete task)::
subs:
find:
path:
- d:\media\incoming
regexp: '.*\.(avi|mkv|mp4)$'
recursive: yes
accept_all: yes
subliminal:
languages:
- ita
alternatives:
- eng
exact_match: no
providers: addic7ed, opensubtitles
single: no
directory: /disk/subtitles
hearing_impaired: yes
authentication:
addic7ed:
username: myuser
              password: mypassword
"""
schema = {
'type': 'object',
'properties': {
'languages': {'type': 'array', 'items': {'type': 'string'}, 'minItems': 1},
'alternatives': {'type': 'array', 'items': {'type': 'string'}},
'exact_match': {'type': 'boolean', 'default': True},
'providers': {'type': 'array', 'items': {'type': 'string', 'enum': PROVIDERS}},
'single': {'type': 'boolean', 'default': True},
            'directory': {'type': 'string'},
'hearing_impaired': {'type': 'boolean', 'default': False},
'authentication': {'type': 'object', 'properties': AUTHENTICATION_SCHEMA},
},
'required': ['languages'],
'additionalProperties': False
}
def on_task_start(self, task, config):
if list(sys.version_info) < [2, 7]:
raise plugin.DependencyError('subliminal', 'Python 2.7', 'Subliminal plugin requires python 2.7.')
try:
import babelfish
except ImportError as e:
log.debug('Error importing Babelfish: %s', e)
raise plugin.DependencyError('subliminal', 'babelfish', 'Babelfish module required. ImportError: %s' % e)
try:
import subliminal
except ImportError as e:
log.debug('Error importing Subliminal: %s', e)
raise plugin.DependencyError('subliminal', 'subliminal', 'Subliminal module required. ImportError: %s' % e)
def on_task_output(self, task, config):
"""
Configuration::
subliminal:
languages: List of languages (as IETF codes) in order of preference. At least one is required.
alternatives: List of second-choice languages; subs will be downloaded but entries rejected.
exact_match: Use file hash only to search for subs, otherwise Subliminal will try to guess by filename.
providers: List of providers from where to download subtitles.
single: Download subtitles in single mode (no language code added to subtitle filename).
directory: Path to directory where to save the subtitles, default is next to the video.
hearing_impaired: Prefer subtitles for the hearing impaired when available
authentication: >
Dictionary of configuration options for different providers.
Keys correspond to provider names, and values are dictionaries, usually specifying `username` and
`password`.
"""
if not task.accepted:
log.debug('nothing accepted, aborting')
return
from babelfish import Language
from dogpile.cache.exception import RegionAlreadyConfigured
import subliminal
from subliminal.cli import MutexLock
from subliminal.score import episode_scores, movie_scores
try:
subliminal.region.configure('dogpile.cache.dbm',
arguments={
'filename': os.path.join(tempfile.gettempdir(), 'cachefile.dbm'),
'lock_factory': MutexLock,
})
except RegionAlreadyConfigured:
pass
# Let subliminal be more verbose if our logger is set to DEBUG
if log.isEnabledFor(logging.DEBUG):
logging.getLogger("subliminal").setLevel(logging.INFO)
else:
logging.getLogger("subliminal").setLevel(logging.CRITICAL)
logging.getLogger("dogpile").setLevel(logging.CRITICAL)
logging.getLogger("enzyme").setLevel(logging.WARNING)
try:
languages = set([Language.fromietf(s) for s in config.get('languages', [])])
alternative_languages = set([Language.fromietf(s) for s in config.get('alternatives', [])])
except ValueError as e:
raise plugin.PluginError(e)
# keep all downloaded subtitles and save to disk when done (no need to write every time)
downloaded_subtitles = collections.defaultdict(list)
providers_list = config.get('providers', None)
provider_configs = config.get('authentication', None)
        # Determine whether to download in single mode (no language code appended to the
        # subtitle filename). Single mode is used only when it is enabled in the configuration
        # and at most one language (including alternatives) is requested; if 'single: yes' is
        # set but more than one language is chosen, the setting is ignored and the language
        # code is added to any downloaded files.
single_mode = config.get('single', '') and len(languages | alternative_languages) <= 1
hearing_impaired = config.get('hearing_impaired', False)
with subliminal.core.ProviderPool(providers=providers_list, provider_configs=provider_configs) as provider_pool:
for entry in task.accepted:
if 'location' not in entry:
log.warning('Cannot act on entries that do not represent a local file.')
continue
if not os.path.exists(entry['location']):
entry.fail('file not found: %s' % entry['location'])
continue
if '$RECYCLE.BIN' in entry['location']: # ignore deleted files in Windows shares
continue
try:
entry_languages = set(entry.get('subtitle_languages', [])) or languages
video = subliminal.scan_video(entry['location'])
# use metadata refiner to get mkv metadata
refiner = ('metadata',)
subliminal.core.refine(video, episode_refiners=refiner, movie_refiners=refiner)
existing_subtitles = set(subliminal.core.search_external_subtitles(entry['location']).values())
video.subtitle_languages |= existing_subtitles
if isinstance(video, subliminal.Episode):
title = video.series
hash_scores = episode_scores['hash']
else:
title = video.title
hash_scores = movie_scores['hash']
log.info('Name computed for %s was %s', entry['location'], title)
msc = hash_scores if config['exact_match'] else 0
if entry_languages.issubset(video.subtitle_languages) or (single_mode and video.subtitle_languages):
log.debug('All preferred languages already exist for "%s"', entry['title'])
entry['subtitles_missing'] = set()
continue # subs for preferred lang(s) already exists
else:
# Gather the subtitles for the alternative languages too, to avoid needing to search the sites
# again. They'll just be ignored if the main languages are found.
all_subtitles = provider_pool.list_subtitles(video, entry_languages | alternative_languages)
subtitles = provider_pool.download_best_subtitles(all_subtitles, video, entry_languages,
min_score=msc,
hearing_impaired=hearing_impaired)
if subtitles:
downloaded_subtitles[video].extend(subtitles)
log.info('Subtitles found for %s', entry['location'])
else:
                            # only try to download for alternatives that aren't already downloaded
subtitles = provider_pool.download_best_subtitles(all_subtitles, video,
alternative_languages, min_score=msc,
hearing_impaired=hearing_impaired)
if subtitles:
downloaded_subtitles[video].extend(subtitles)
entry.fail('subtitles found for a second-choice language.')
else:
entry.fail('cannot find any subtitles for now.')
downloaded_languages = set([Language.fromietf(str(l.language))
for l in subtitles])
if entry_languages:
entry['subtitles_missing'] = entry_languages - downloaded_languages
if len(entry['subtitles_missing']) > 0:
entry.fail('Subtitles for all primary languages not found')
except ValueError as e:
log.error('subliminal error: %s', e)
entry.fail()
if downloaded_subtitles:
if task.options.test:
log.verbose('Test mode. Found subtitles:')
# save subtitles to disk
for video, subtitle in downloaded_subtitles.items():
if subtitle:
_directory = config.get('directory')
if _directory:
_directory = os.path.expanduser(_directory)
if task.options.test:
log.verbose(' FOUND LANGUAGES %s for %s', [str(l.language) for l in subtitle], video.name)
continue
subliminal.save_subtitles(video, subtitle, single=single_mode, directory=_directory)
@event('plugin.register')
def register_plugin():
plugin.register(PluginSubliminal, 'subliminal', api_ver=2)
| 48.530172
| 120
| 0.564171
|
25377e975d806717769df5dbd3ddb7e2388846b8
| 633
|
py
|
Python
|
Chapter05/decorator_set.py
|
ibiscum/Learning-Concurrency-in-Python
|
d3f0320ad2a80c46b37de331bf335b80df0d3ed9
|
[
"MIT"
] | null | null | null |
Chapter05/decorator_set.py
|
ibiscum/Learning-Concurrency-in-Python
|
d3f0320ad2a80c46b37de331bf335b80df0d3ed9
|
[
"MIT"
] | null | null | null |
Chapter05/decorator_set.py
|
ibiscum/Learning-Concurrency-in-Python
|
d3f0320ad2a80c46b37de331bf335b80df0d3ed9
|
[
"MIT"
] | null | null | null |
from threading import Lock
def locked_method(method):
    """Method decorator. Requires a lock object at self._lock"""
    def new_method(self, *args, **kwargs):
        with self._lock:
            return method(self, *args, **kwargs)
    return new_method
class DecoratorLockedSet(set):
    def __init__(self, *args, **kwargs):
        self._lock = Lock()
        super(DecoratorLockedSet, self).__init__(*args, **kwargs)
    @locked_method
    def add(self, *args, **kwargs):
        return super(DecoratorLockedSet, self).add(*args, **kwargs)
    @locked_method
    def remove(self, *args, **kwargs):
        return super(DecoratorLockedSet, self).remove(*args, **kwargs)
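# A minimal usage sketch, assuming the DecoratorLockedSet class defined above;
# the thread count and element values are illustrative only.
import threading
shared = DecoratorLockedSet()
def worker(n):
    for i in range(100):
        shared.add((n, i))  # add() acquires self._lock via the locked_method decorator
threads = [threading.Thread(target=worker, args=(n,)) for n in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert len(shared) == 400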
| 27.521739
| 65
| 0.647709
|
18a053c19e1b2d061b7ab7bea1cc38d95c392c0b
| 413
|
py
|
Python
|
backend/indep_mobile_32141/wsgi.py
|
crowdbotics-apps/indep-mobile-32141
|
1c6a7799b449cd247f4c1ac58a4b43a6647f914e
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/indep_mobile_32141/wsgi.py
|
crowdbotics-apps/indep-mobile-32141
|
1c6a7799b449cd247f4c1ac58a4b43a6647f914e
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/indep_mobile_32141/wsgi.py
|
crowdbotics-apps/indep-mobile-32141
|
1c6a7799b449cd247f4c1ac58a4b43a6647f914e
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
WSGI config for indep_mobile_32141 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'indep_mobile_32141.settings')
application = get_wsgi_application()
| 24.294118
| 78
| 0.79661
|
94c89aec0f9a1b838b9408a5ca7d08b0be6f586f
| 1,381
|
py
|
Python
|
hw_15/embed.py
|
coinflip112/deep_reinforcment_learning
|
b7290b4be915e331c5aecb222c82c538cf50ef57
|
[
"MIT"
] | null | null | null |
hw_15/embed.py
|
coinflip112/deep_reinforcment_learning
|
b7290b4be915e331c5aecb222c82c538cf50ef57
|
[
"MIT"
] | null | null | null |
hw_15/embed.py
|
coinflip112/deep_reinforcment_learning
|
b7290b4be915e331c5aecb222c82c538cf50ef57
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Embed compressed data into a Python module."""
__version__ = "1.1.0"
__author__ = ("Milan Straka <straka@ufal.mff.cuni.cz>",)
import argparse
import base64
import io
import os
import sys
import tarfile
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument(
"--output",
default="embedded_baseline.py",
type=str,
help="Name of output Python file with embedded data.",
)
args = parser.parse_args()
print("Compressing given paths...", file=sys.stderr, end="")
tar_data = io.BytesIO()
with tarfile.open(fileobj=tar_data, mode="w:xz") as tar_file:
for path in [
Path("bipedal_walker_245.49905788070714", filename)
for filename in os.listdir("bipedal_walker_245.49905788070714")
]:
tar_file.add(path)
print("done.", file=sys.stderr)
with open(args.output, "w") as output_file:
print(
"""#!/usr/bin/env python3
def extract():
import base64
import io
import tarfile
data = """,
base64.b85encode(tar_data.getbuffer()),
"""
with io.BytesIO(base64.b85decode(data)) as tar_data:
with tarfile.open(fileobj=tar_data, mode="r") as tar_file:
tar_file.extractall()
extract()""",
file=output_file,
sep="",
)
print(
"Output file `{}` with embedded data created.".format(args.output), file=sys.stderr
)
| 24.660714
| 87
| 0.664012
|
e2f92c07c25bcc67308a2cb5dcae80b8b541482b
| 35
|
py
|
Python
|
junn/common/__init__.py
|
modsim/junn
|
a40423b98c6a3739dd0b2ba02d546a5db91f9215
|
[
"BSD-2-Clause"
] | null | null | null |
junn/common/__init__.py
|
modsim/junn
|
a40423b98c6a3739dd0b2ba02d546a5db91f9215
|
[
"BSD-2-Clause"
] | null | null | null |
junn/common/__init__.py
|
modsim/junn
|
a40423b98c6a3739dd0b2ba02d546a5db91f9215
|
[
"BSD-2-Clause"
] | null | null | null |
"""Common functionality module."""
| 17.5
| 34
| 0.714286
|
7fa56427b806defda1e661b69b5987b131d93397
| 17,037
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20171101/route_filter.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20171101/route_filter.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20171101/route_filter.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RouteFilterArgs', 'RouteFilter']
@pulumi.input_type
class RouteFilterArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
peerings: Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a RouteFilter resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]] peerings: A collection of references to express route circuit peerings.
:param pulumi.Input[str] route_filter_name: The name of the route filter.
:param pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]] rules: Collection of RouteFilterRules contained within a route filter.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if peerings is not None:
pulumi.set(__self__, "peerings", peerings)
if route_filter_name is not None:
pulumi.set(__self__, "route_filter_name", route_filter_name)
if rules is not None:
pulumi.set(__self__, "rules", rules)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def peerings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]]]:
"""
A collection of references to express route circuit peerings.
"""
return pulumi.get(self, "peerings")
@peerings.setter
def peerings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]]]):
pulumi.set(self, "peerings", value)
@property
@pulumi.getter(name="routeFilterName")
def route_filter_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the route filter.
"""
return pulumi.get(self, "route_filter_name")
@route_filter_name.setter
def route_filter_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "route_filter_name", value)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]]:
"""
Collection of RouteFilterRules contained within a route filter.
"""
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]]):
pulumi.set(self, "rules", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class RouteFilter(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
peerings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Route Filter Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringArgs']]]] peerings: A collection of references to express route circuit peerings.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] route_filter_name: The name of the route filter.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]] rules: Collection of RouteFilterRules contained within a route filter.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RouteFilterArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Route Filter Resource.
:param str resource_name: The name of the resource.
:param RouteFilterArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RouteFilterArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
peerings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RouteFilterArgs.__new__(RouteFilterArgs)
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
__props__.__dict__["peerings"] = peerings
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["route_filter_name"] = route_filter_name
__props__.__dict__["rules"] = rules
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20171101:RouteFilter"), pulumi.Alias(type_="azure-native:network:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20161201:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20161201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170301:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20170301:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170601:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20170601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170801:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20170801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170901:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20170901:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20171001:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20171001:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180101:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20180101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180201:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20180201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180401:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20180401:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180601:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20180601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180701:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20180701:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180801:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20180801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20181001:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20181001:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20181101:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20181101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20181201:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20181201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190201:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20190201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190401:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20190401:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190601:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20190601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190701:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20190701:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190801:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20190801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190901:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20190901:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20191101:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20191101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20191201:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20191201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200301:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20200301:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200401:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20200401:RouteFilter"), 
pulumi.Alias(type_="azure-native:network/v20200501:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20200501:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200601:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20200601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200701:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20200701:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200801:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20200801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20201101:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20201101:RouteFilter")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RouteFilter, __self__).__init__(
'azure-native:network/v20171101:RouteFilter',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RouteFilter':
"""
Get an existing RouteFilter resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = RouteFilterArgs.__new__(RouteFilterArgs)
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["peerings"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["rules"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return RouteFilter(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def peerings(self) -> pulumi.Output[Optional[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]]:
"""
A collection of references to express route circuit peerings.
"""
return pulumi.get(self, "peerings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def rules(self) -> pulumi.Output[Optional[Sequence['outputs.RouteFilterRuleResponse']]]:
"""
Collection of RouteFilterRules contained within a route filter.
"""
return pulumi.get(self, "rules")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| 54.085714
| 4,223
| 0.680871
|
d30626ebfae3d7032c8dd1caa66ccac60f3840bc
| 1,469
|
py
|
Python
|
image_generation/read_schematic.py
|
mihirp1998/blender_emblang
|
4b7092b8f4dfdc5240ed8ecf8e18ec75b9e0141c
|
[
"BSD-3-Clause"
] | null | null | null |
image_generation/read_schematic.py
|
mihirp1998/blender_emblang
|
4b7092b8f4dfdc5240ed8ecf8e18ec75b9e0141c
|
[
"BSD-3-Clause"
] | null | null | null |
image_generation/read_schematic.py
|
mihirp1998/blender_emblang
|
4b7092b8f4dfdc5240ed8ecf8e18ec75b9e0141c
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
from nbtschematic import SchematicFile
from mpl_toolkits.mplot3d import Axes3D
res = 64
vpath = 'output/blendfiles/train/CLEVR_new_000001.schematic'
vpath = '../output/CLEVR_' + str(res) + '_OBJ_FULL/voxels/train/CLEVR_new_00000%d.schematic'
for img in range(10):
sf = SchematicFile.load(vpath%img)
blocks = np.frombuffer(sf.blocks, dtype=sf.blocks.dtype)
data = np.frombuffer(sf.data, dtype=sf.data.dtype)
blocks = blocks.reshape((res,res,res))
# np.save('voxel.npy',blocks)
vals = np.unique(blocks)
print(vals)
colors = np.empty(blocks.shape, dtype=object)
colorname = ['red','blue','green','black','yellow','cyan','magenta']
for i,c in zip(vals, colorname):
colors[blocks == i] = c
# from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
# box = [108, 93, 0, 19, 20, 19]
# Z = np.array([[108,93,0],[108,93,19],[108,113,0],[127,93,0],[108,113,19],[127,113,0],[127,93,19],[127,113,19]])
# verts = [[Z[0],Z[1],Z[2],Z[3]],
# [Z[4],Z[5],Z[6],Z[7]],
# [Z[0],Z[1],Z[5],Z[4]],
# [Z[2],Z[3],Z[7],Z[6]],
# [Z[1],Z[2],Z[6],Z[5]],
# [Z[4],Z[7],Z[3],Z[0]]]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.voxels(blocks, facecolors=colors)
# ax.scatter3D(Z[:, 0], Z[:, 1], Z[:, 2])
# ax.add_collection3d(Poly3DCollection(verts, facecolors='cyan', linewidths=1, edgecolors='r', alpha=.25))
plt.show()
plt.close()
| 34.97619
| 114
| 0.655548
|
896c543aa30ffeec6f29997b5b73ee34d1b50833
| 1,170
|
py
|
Python
|
esmf_regrid/tests/unit/esmf_regridder/test_GridInfo.py
|
trexfeathers/iris-esmf-regrid
|
8d7121b3c099ef3aeca47e5cb4d581882322180f
|
[
"BSD-3-Clause"
] | null | null | null |
esmf_regrid/tests/unit/esmf_regridder/test_GridInfo.py
|
trexfeathers/iris-esmf-regrid
|
8d7121b3c099ef3aeca47e5cb4d581882322180f
|
[
"BSD-3-Clause"
] | 1
|
2021-05-18T13:44:16.000Z
|
2021-05-18T13:44:16.000Z
|
esmf_regrid/tests/unit/esmf_regridder/test_GridInfo.py
|
trexfeathers/iris-esmf-regrid
|
8d7121b3c099ef3aeca47e5cb4d581882322180f
|
[
"BSD-3-Clause"
] | null | null | null |
"""Unit tests for :class:`esmf_regrid.esmf_regridder.GridInfo`."""
import numpy as np
from esmf_regrid.esmf_regridder import GridInfo
import esmf_regrid.tests as tests
def _make_small_grid_args():
small_x = 2
small_y = 3
small_grid_lon = np.array(range(small_x)) / (small_x + 1)
small_grid_lat = np.array(range(small_y)) * 2 / (small_y + 1)
small_grid_lon_bounds = np.array(range(small_x + 1)) / (small_x + 1)
small_grid_lat_bounds = np.array(range(small_y + 1)) * 2 / (small_y + 1)
return (
small_grid_lon,
small_grid_lat,
small_grid_lon_bounds,
small_grid_lat_bounds,
)
def test_make_grid():
"""Basic test for :meth:`~esmf_regrid.esmf_regridder.GridInfo.make_esmf_field`."""
lon, lat, lon_bounds, lat_bounds = _make_small_grid_args()
grid = GridInfo(lon, lat, lon_bounds, lat_bounds)
esmf_grid = grid.make_esmf_field()
esmf_grid.data[:] = 0
relative_path = ("esmf_regridder", "test_GridInfo", "small_grid.txt")
fname = tests.get_result_path(relative_path)
with open(fname) as fi:
expected_repr = fi.read()
assert esmf_grid.__repr__() == expected_repr
| 30.789474
| 86
| 0.688889
|
7b61a52deadd07a3b2d9f3624f16931418f6ecb5
| 2,391
|
py
|
Python
|
analyze_results/visualize.py
|
robinhenry/gym-anm-exp
|
a24aa069f060ae6c8114f735438459b63afb98de
|
[
"MIT"
] | 5
|
2021-06-19T14:27:31.000Z
|
2021-12-23T12:04:49.000Z
|
analyze_results/visualize.py
|
robinhenry/gym-anm-exp
|
a24aa069f060ae6c8114f735438459b63afb98de
|
[
"MIT"
] | null | null | null |
analyze_results/visualize.py
|
robinhenry/gym-anm-exp
|
a24aa069f060ae6c8114f735438459b63afb98de
|
[
"MIT"
] | 2
|
2021-04-09T19:20:26.000Z
|
2021-09-09T04:53:22.000Z
|
"""
This script can be used to visualize a trained-agent interacting
with an environment, assuming the environment supports rendering.
Usage
-----
To visualize a trained agent <ALGO> (SAC or PPO) saved in folder <PATH>,
for <TIMESTEPS> timesteps, pausing for <SLEEP> seconds between each timesteps
(to make it easier to visualize): ::
python -m analyze_results.visualize <ALGO> -p <PATH> -s <SLEEP> -T <TIMESTEPS>
"""
import os
import time
import argparse
from stable_baselines3.sac import SAC
from stable_baselines3.ppo import PPO
from rl_agents.hyperparameters import ENV_ID
from rl_agents.utils import load_visualization_env
def visualize(path, algo, T, sleep_time):
"""
Visualize a trained agent.
Parameters
----------
path : str
The path to the folder in which the agent is saved.
algo : :py:class:`stable_baselines3.sac.SAC` or :py:class:`stable_baselines3.ppo.PPO`
The class of the trained RL agent.
T : int
The number of timesteps to simulate.
sleep_time : float
The amount of seconds to sleep between timesteps.
"""
# Load agent and environment.
model = algo.load(os.path.join(path, 'best_model'))
env = load_visualization_env(ENV_ID, os.path.join(path, 'training_vec_env'), 1)
# Enjoy trained agent
obs = env.reset()
done, state = False, None
for i in range(T):
action, state = model.predict(obs, state=state, deterministic=True)
obs, _, dones, info = env.step(action)
env.render()
time.sleep(sleep_time)
env.close()
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("agent", type=str, help="Class of agent to visualize")
parser.add_argument("--path", '-p', type=str, help='The path to the folder containing the trained agent')
parser.add_argument("--sleep", '-s', type=float, default=0.5, help='Sleep time between rendering updates')
parser.add_argument("-T", type=int, default=int(1e4), help='Number of timesteps to render')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
if args.agent == 'PPO':
ALGO = PPO
elif args.agent == 'SAC':
ALGO = SAC
else:
raise ValueError('Unimplemented agent ' + args.agent)
visualize(args.path, ALGO, args.T, args.sleep)
print('Done.')
| 31.051948
| 110
| 0.676704
|
4ffc7d8cf1f4de8293ee5b9f197ad111b33cea46
| 1,845
|
py
|
Python
|
watchman/tests/integration/test_perms.py
|
mathewhodson/watchman
|
7e831f2fe71deb6429819aa1f7856a5335b0111e
|
[
"Apache-2.0"
] | null | null | null |
watchman/tests/integration/test_perms.py
|
mathewhodson/watchman
|
7e831f2fe71deb6429819aa1f7856a5335b0111e
|
[
"Apache-2.0"
] | null | null | null |
watchman/tests/integration/test_perms.py
|
mathewhodson/watchman
|
7e831f2fe71deb6429819aa1f7856a5335b0111e
|
[
"Apache-2.0"
] | null | null | null |
# vim:ts=4:sw=4:et:
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# no unicode literals
from __future__ import absolute_import, division, print_function
import os
import pywatchman
import WatchmanTestCase
try:
import unittest2 as unittest
except ImportError:
import unittest
def is_root():
return hasattr(os, "geteuid") and os.geteuid() == 0
@WatchmanTestCase.expand_matrix
class TestPerms(WatchmanTestCase.WatchmanTestCase):
def checkOSApplicability(self):
if os.name == "nt":
self.skipTest("N/A on Windows")
@unittest.skipIf(is_root(), "N/A if root")
def test_permDeniedSubDir(self):
root = self.mkdtemp()
subdir = os.path.join(root, "subdir")
os.mkdir(subdir)
os.chmod(subdir, 0)
self.watchmanCommand("watch", root)
res = self.watchmanCommand(
"query", root, {"expression": ["exists"], "fields": ["name"]}
)
self.assertRegex(res["warning"], "Marking this portion of the tree deleted")
@unittest.skipIf(is_root(), "N/A if root")
def test_permDeniedRoot(self):
root = self.mkdtemp()
os.chmod(root, 0)
with self.assertRaisesRegex(pywatchman.CommandError, "(open|opendir|realpath)"):
self.watchmanCommand("watch", root)
| 31.271186
| 88
| 0.687263
|
4ec95c3d6a3bc774133347b7c98c8540d2e8770d
| 11,550
|
py
|
Python
|
2D/2DTruncated/DoubleG-SWG.py
|
LiuYangMage/GenerativeEnsembleRegression
|
90d9649b2566a69f4add5ab1c147293c7eb5e7c8
|
[
"MIT"
] | null | null | null |
2D/2DTruncated/DoubleG-SWG.py
|
LiuYangMage/GenerativeEnsembleRegression
|
90d9649b2566a69f4add5ab1c147293c7eb5e7c8
|
[
"MIT"
] | null | null | null |
2D/2DTruncated/DoubleG-SWG.py
|
LiuYangMage/GenerativeEnsembleRegression
|
90d9649b2566a69f4add5ab1c147293c7eb5e7c8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import os
from scipy import stats
import argparse
# In[2]:
parser = argparse.ArgumentParser(description='GAN-SODE')
parser.add_argument('--GPU', type=int, default=0, help='GPU ID')
parser.add_argument('-dim', '--dim', type = int, default= 2)
parser.add_argument('--GAN', choices=['SW','WGAN-GP'], default='WGAN-GP')
parser.add_argument('-trs', '--train_size', type=int, default= 100000)
parser.add_argument('-its', '--iterations', type=int, default=200000)
parser.add_argument('--bs', type=int, default= 1000)
parser.add_argument('-res', '--restore', type=int, default=-1)
parser.add_argument('--seed',type=int, default=0, help='random seed')
parser.add_argument('--lasso', type=float, default = 0.0, help='use L1 penalty on the terms, not for nn')
parser.add_argument('--drift', choices=['4term', 'nn'], default='4term')
parser.add_argument('--diff', choices=['const'], default='const')
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--float64', action= 'store_true')
parser.add_argument('--grad', action= 'store_true')
parser.add_argument('--frames', type=int, default=3)
args = parser.parse_args()
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID' # see issue #152
os.environ['CUDA_VISIBLE_DEVICES']= str(args.GPU)
bs = args.bs
seed = args.seed
lamda = 0.1
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
if args.float64:
dtype = tf.float64
else:
dtype = tf.float32
dim = args.dim
zdim = args.dim
dt = 0.01
if args.frames == 7:
steps = [0, 10, 20, 30, 50, 70, 100]
elif args.frames == 3:
steps = [20, 50, 100]
ref_steps = []
total_steps = 100
frames = len(steps)
ref_frames = len(ref_steps)
ref = {i: np.load('data{}D-s{}/ref_{}.npz'.format(dim,seed,i))['ref'] for i in ref_steps + steps}
Qdata = [ref[A] for A in steps]
def feed_NN(X, W, b, act = tf.nn.tanh):
A = X
L = len(W)
for i in range(L-1):
A = act(tf.add(tf.matmul(A, W[i]), b[i]))
return tf.add(tf.matmul(A, W[-1]), b[-1])
def initgenerator(X, W, b):
y = feed_NN(X,W,b, act= tf.nn.tanh)
return y
# In[4]:
def fun_diff(x):
if args.diff == 'const':
diff_raw = tf.concat([tf.nn.softplus(s_W[0]), s_W[1],
tf.zeros((1,), dtype = dtype), tf.nn.softplus(s_W[2])], axis = 0)
diff = tf.reshape(diff_raw, [2,2])
else:
raise NotImplementedError
return diff
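# For the 'const' case the diffusion is a fixed upper-triangular 2x2 matrix with
# softplus-positive diagonal entries, so dt * diff^T diff is a valid noise covariance.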
def fun_drift(x):
if args.drift == '4term':
po = -(x[:,0] + d_W[0])**2 * (x[:,1] + d_W[1])**2 \
-(x[:,0] + d_W[2])**2 * (x[:,1] + d_W[3])**2
drift = tf.gradients(po, x)[0]
elif args.drift == 'nn':
po = feed_NN(x, d_W, d_b, act= tf.nn.tanh)
drift = tf.gradients(po, x)[0]
else:
raise NotImplementedError
return drift
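# generator() below unrolls the SDE with an Euler-Maruyama scheme:
#     u_{k+1} = u_k + drift(u_k) * dt + sqrt(dt) * z_k @ diff,  z_k ~ N(0, I)
# so gradients flow through the whole simulated trajectory back to the drift and
# diffusion parameters.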
def generator(x, steps, dt, bs = bs):
'''
x shape: [bs, dim]
'''
u = [None for i in range(steps + 1)]
u[0] = x
print(0, end = ' ', flush = True)
for i in range(steps):
drift = fun_drift(u[i])
diff = fun_diff(u[i])
u[i+1] = u[i] + dt * drift + np.sqrt(dt) * tf.matmul(tf.random.normal([bs, dim], mean=0.0, stddev=1.0, dtype = dtype), diff)
print(i+1, end = ' ', flush = True)
return u[-1], u
def save_drift(title, dim1, dim2, sc = 0.1):
current_drift_x, current_drift_ref, current_drift = sess.run([vis_drift_x, vis_drift_ref, vis_drift])
np.savez(savedir + '/' + title + '.npz', x = current_drift_x,
drift = current_drift,
drift_ref = current_drift_ref)
def save_sample(title, steps, repeat = 100):
init = []
for s in steps:
init.append(np.concatenate([sess.run(Gs[s]) for i in range(repeat)], axis = 0))
np.savez(savedir + '/' + title + '.npz', steps = np.array(steps), Gdata = np.array(init))
class Net(object):
def __init__(self, W, b):
self.W = W
self.b = b
def __call__(self, x):
return feed_NN(x, self.W, self.b, act= tf.nn.leaky_relu)
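# Standard WGAN-GP gradient penalty: evaluate the critic f on random convex
# combinations of generated (G) and true (T) samples and penalise deviations of the
# critic's gradient norm from 1 (the original two-sided form of the penalty).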
def gradient_penalty(G, T, f, batch_size = bs):
zu = tf.random_uniform([batch_size,1], minval=0, maxval=1, dtype=dtype)
D_interpolates = zu * T + (1 - zu) * G
D_disc_interpolates = f(D_interpolates)
D_gradients = tf.gradients(D_disc_interpolates, [D_interpolates])[0]
D_slopes = tf.norm(D_gradients, axis = 1)
D_gradient_penalty = tf.reduce_mean((D_slopes-1)**2)
return D_gradient_penalty
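# mask() keeps only samples whose first coordinate is below 0.5 -- presumably the
# observable ("truncated") region suggested by the 2DTruncated/ directory; generated
# and observed samples are filtered the same way before the losses are computed.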
def mask(X):
maskX = X[:,0] < 0.5
maskedX = tf.boolean_mask(X, maskX)
return maskX, maskedX
layer_dims = [zdim] + 3*[128] + [dim]
L = len(layer_dims)
G_W = [tf.get_variable('G_W_{}'.format(l), [layer_dims[l-1], layer_dims[l]], dtype=dtype, initializer=tf.contrib.layers.xavier_initializer()) for l in range(1, L)]
G_b = [tf.get_variable('G_b_{}'.format(l), [1,layer_dims[l]], dtype=dtype, initializer=tf.zeros_initializer()) for l in range(1, L)]
if args.drift == '4term':
d_W = [tf.Variable(np.random.normal(0,1,(1,)), dtype = dtype) for i in range(4)]
d_b = []
elif args.drift == 'nn':
layer_dims = [dim] + 3*[128] + [1]
L = len(layer_dims)
d_W = [tf.get_variable('d_W_{}'.format(l), [layer_dims[l-1], layer_dims[l]], dtype=dtype, initializer=tf.contrib.layers.xavier_initializer()) for l in range(1, L)]
d_b = [tf.get_variable('d_b_{}'.format(l), [1,layer_dims[l]], dtype=dtype, initializer=tf.zeros_initializer()) for l in range(1, L)]
s_W = [tf.Variable(np.zeros((1,)), dtype = dtype) for i in range(3)]
Qs = [tf.placeholder(dtype, [bs,dim]) for i in range(frames)]
Zs = tf.random.normal([bs, zdim], 0, 1, dtype=dtype)
Is = initgenerator(Zs, G_W, G_b)
_, Gs = generator(Is, total_steps, dt, bs)
num_projections = 1000
loss_PQ = [None for i in range(frames)]
maskG = [None for i in range(frames)]
maskT = [None for i in range(frames)]
maskedG = [None for i in range(frames)]
maskedT = [None for i in range(frames)]
gp_bs = [None for i in range(frames)]
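# Sliced Wasserstein variant: project the (masked) true and generated samples onto
# num_projections random unit directions, sort each 1-D projection, and match the
# sorted values with a squared loss. This avoids training a separate critic network.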
if args.GAN == 'SW':
for i in range(frames):
maskG[i], maskedG[i] = mask(Gs[steps[i]])
maskT[i], maskedT[i] = mask(Qs[i])
gp_bs[i] = tf.minimum(tf.reduce_sum(tf.cast(maskG[i], tf.int32)), tf.reduce_sum(tf.cast(maskT[i], tf.int32)))
theta = tf.nn.l2_normalize(tf.random_normal(shape=[dim, num_projections], dtype = dtype), axis=0)
projected_true = tf.transpose(tf.matmul(maskedT[i][:gp_bs[i],:], theta))
projected_fake = tf.transpose(tf.matmul(maskedG[i][:gp_bs[i],:], theta))
sorted_true, true_indices = tf.nn.top_k(projected_true,gp_bs[i])
sorted_fake, fake_indices = tf.nn.top_k(projected_fake,gp_bs[i])
loss_PQ[i] = tf.reduce_mean(tf.square(sorted_true - sorted_fake))
print(i, end = ' ', flush = True)
loss_PQ_all = tf.reduce_sum(loss_PQ)
    if args.grad:  # --grad enables gradient-norm clipping on the generator update
        G_op_original = tf.train.AdamOptimizer(learning_rate = args.lr)
        G_op = tf.contrib.estimator.clip_gradients_by_norm(G_op_original, clip_norm=0.5).minimize(loss_PQ_all, var_list = G_W + G_b + d_W + d_b + s_W)
else:
G_op = tf.train.AdamOptimizer(learning_rate = args.lr).minimize(loss_PQ_all, var_list = G_W + G_b + d_W + d_b + s_W)
elif args.GAN == 'WGAN-GP':
loss_D = [None for i in range(frames)]
D_W = []
D_b = []
for i in range(frames):
layer_dims = [dim] + 3*[128] + [1]
L = len(layer_dims)
D_W.append([tf.get_variable("D_W_{}_{}".format(i,l), [layer_dims[l-1], layer_dims[l]], dtype=dtype, initializer=tf.contrib.layers.xavier_initializer()) for l in range(1, L)])
D_b.append([tf.get_variable("D_b_{}_{}".format(i,l), [1,layer_dims[l]], dtype=dtype, initializer=tf.zeros_initializer()) for l in range(1, L)])
d = Net(D_W[i], D_b[i])
maskG[i], maskedG[i] = mask(Gs[steps[i]])
maskT[i], maskedT[i] = mask(Qs[i])
gp_bs[i] = tf.minimum(tf.reduce_sum(tf.cast(maskG[i], tf.int32)), tf.reduce_sum(tf.cast(maskT[i], tf.int32)))
surrogate_loss = tf.reduce_mean(d(maskedT[i])) - tf.reduce_mean(d(maskedG[i]))
loss_PQ[i] = surrogate_loss
        loss_D[i] = - surrogate_loss + lamda * gradient_penalty(maskedG[i][:gp_bs[i],:], maskedT[i][:gp_bs[i],:], d, batch_size = gp_bs[i])
print(i, end = ' ', flush = True)
loss_PQ_all = tf.reduce_sum(loss_PQ)
G_op = tf.train.AdamOptimizer(learning_rate = args.lr, beta1=0.5, beta2=0.9).minimize(loss_PQ_all, var_list = G_W + G_b + d_W + d_b + s_W)
loss_D_all = tf.reduce_sum(loss_D)
D_vars = sum(D_W, []) + sum(D_b, [])
D_op = tf.train.AdamOptimizer(learning_rate = args.lr, beta1=0.5, beta2=0.9).minimize(loss_D_all, var_list = D_vars)
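# Visualisation helpers: evaluate the learned drift on a 101x101 grid over [-2, 2]^2.
# vis_drift_ref = x - x**3 (per coordinate) is presumably the ground-truth drift of the
# reference SDE, kept only for comparison in the saved results.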
drift_x = np.linspace(-2,2,101)
drift_x2, drift_x1 = np.meshgrid(drift_x, drift_x)
drift_x = np.concatenate([drift_x1.reshape(-1,1), drift_x2.reshape(-1,1)], axis = 1)
vis_drift_x = tf.constant(drift_x, dtype = dtype)
vis_drift_ref = vis_drift_x - vis_drift_x ** 3
vis_drift = fun_drift(vis_drift_x)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
# In[26]:
savedir = 'save-GAN{}-drift{}-diff{}-frames{}-float64{}-bs{}-seed{}'.format(
args.GAN, args.drift, args.diff, frames, args.float64, args.bs, args.seed)
if not os.path.exists(savedir):
os.mkdir(savedir)
saver = tf.train.Saver(max_to_keep=1000)
if args.restore >= 0:
it = args.restore
saver.restore(sess, savedir+'/' + str(it) + '.ckpt')
diff_history = [np.array(A) for A in np.load(savedir+'/diff_history.npz')['diff_history']][:-1]
if args.drift != 'nn':
drift_history = [np.array(A) for A in np.load(savedir+'/drift_history.npz')['drift_history']][:-1]
else:
np.savez(savedir + '/train.npz', Qdata = np.array(Qdata), steps = np.array(steps))
it = 0
diff_history = []
if args.drift != 'nn':
drift_history = []
fail = 0
for _ in range(args.iterations - it + 1):
if it % 1000 == 0:
save_path = saver.save(sess, savedir+'/' + str(it) + '.ckpt')
save_drift('drift{}'.format(it), 0, 1)
if it % 500 ==0:
print(it, flush = True)
print(sess.run(gp_bs, feed_dict= {Qs[t]: Qdata[t][np.random.choice(len(Qdata[t]), bs), :] for t in range(frames)}))
if args.drift != 'nn':
drift_history.append(sess.run(d_W))
np.savez(savedir+'/drift_history.npz', drift_history = np.array(drift_history))
print(drift_history[-1])
diff_history.append(sess.run(s_W))
np.savez(savedir+'/diff_history.npz', diff_history = np.array(diff_history))
print(diff_history[-1])
if args.GAN == 'WGAN-GP':
for _ in range(5):
sess.run(D_op, feed_dict= {Qs[t]: Qdata[t][np.random.choice(len(Qdata[t]), bs), :] for t in range(frames)})
feed_dict = {Qs[t]: Qdata[t][np.random.choice(len(Qdata[t]), bs), :] for t in range(frames)}
try:
sess.run(G_op, feed_dict = feed_dict)
except:
save_path = saver.save(sess, savedir+'/' + str(it) + '.ckpt')
print(sess.run(gp_bs, feed_dict = feed_dict))
np.savez(savedir+'/maskedG' + str(it) + '.npz', maskedG = np.array(sess.run(maskedG, feed_dict = feed_dict)))
fail+=1
if fail > 10:
break
it += 1
print('.', end = '', flush = True)
save_sample('samples', steps + ref_steps)
| 33.189655
| 182
| 0.620693
|
ce96e25bd0c02d6c6099bc0bd3e8347bc3f955d1
| 5,206
|
py
|
Python
|
onnxmltools/convert/sparkml/operator_converters/gbt_classifier.py
|
szha/onnxmltools
|
b04d05bda625cbc006955ce0a220277739a95825
|
[
"MIT"
] | 3
|
2019-02-27T21:03:43.000Z
|
2020-04-07T22:16:50.000Z
|
onnxmltools/convert/sparkml/operator_converters/gbt_classifier.py
|
szha/onnxmltools
|
b04d05bda625cbc006955ce0a220277739a95825
|
[
"MIT"
] | null | null | null |
onnxmltools/convert/sparkml/operator_converters/gbt_classifier.py
|
szha/onnxmltools
|
b04d05bda625cbc006955ce0a220277739a95825
|
[
"MIT"
] | 2
|
2020-10-01T09:24:55.000Z
|
2021-04-17T13:57:31.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from onnx import onnx_pb as onnx_proto
from pyspark.ml.classification import GBTClassificationModel
from ...common._apply_operation import apply_neg, apply_concat, apply_mul, apply_exp, apply_add, \
apply_argmax, apply_matmul
from ...common.data_types import Int64TensorType, FloatTensorType
from ...common.utils import check_input_and_output_numbers, check_input_and_output_types
from ...common._registration import register_converter, register_shape_calculator
from ..ops_names import get_sparkml_operator_name
from .decision_tree_regressor import convert_decision_tree_regressor
def convert_gbt_classifier(scope, operator, container):
op = operator.raw_operator
regressor_output_names = []
# spark implementation uses DecisionTreeRegressor (and not Classifier) for each tree in this forest
for tree_model in op.trees:
regressor_op = scope.declare_local_operator(get_sparkml_operator_name(type(tree_model)), tree_model)
regressor_op.raw_params = operator.raw_params
regressor_op.inputs = operator.inputs
regressor_output = scope.declare_local_variable('regressor_prediction', FloatTensorType())
regressor_output_names.append(regressor_output.full_name)
regressor_op.outputs.append(regressor_output)
convert_decision_tree_regressor(scope, regressor_op, container)
regressor_op.is_evaluated = True
targets_tensor = scope.get_unique_variable_name('target_tensor')
weights_tensor = scope.get_unique_variable_name('weights_tensor')
container.add_initializer(weights_tensor, onnx_proto.TensorProto.FLOAT, [len(op.treeWeights), 1], op.treeWeights)
concatenated_predictions = scope.get_unique_variable_name('concatenated_predictions_tensor')
apply_concat(scope, regressor_output_names, concatenated_predictions, container, axis=1)
apply_matmul(scope, [concatenated_predictions, weights_tensor], targets_tensor, container)
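    # targets_tensor now holds the weighted sum of the per-tree predictions, i.e. the
    # boosted ensemble margin for each sample ([N, num_trees] x [num_trees, 1] -> [N, 1]).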
# this is to calculate prediction and probability given the raw_prediction (= [-target, target])
targets_neg_tensor = scope.get_unique_variable_name('target_neg_tensor')
apply_neg(scope, targets_tensor, targets_neg_tensor, container)
raw_prediction_tensor = scope.get_unique_variable_name('raw_prediction_tensor')
apply_concat(scope, [targets_neg_tensor, targets_tensor], raw_prediction_tensor, container,
axis=1)
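    # Probability is then derived as 1 / (1 + exp(-2 * rawPrediction)), built below from
    # elementary ONNX ops (Mul, Exp, Add, Reciprocal); this mirrors the logistic
    # transform Spark applies to the GBT margin.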
if isinstance(op, GBTClassificationModel):
# this section is only for the classifier; for the regressor we don't calculate the probability
minus_two = scope.get_unique_variable_name('minus_two_tensor')
container.add_initializer(minus_two, onnx_proto.TensorProto.FLOAT, [1], [-2.0])
mul_output_tensor = scope.get_unique_variable_name('mul_output_tensor')
apply_mul(scope, [raw_prediction_tensor, minus_two], mul_output_tensor, container)
exp_output_tensor = scope.get_unique_variable_name('exp_output_tensor')
apply_exp(scope, mul_output_tensor, exp_output_tensor, container)
one_tensor = scope.get_unique_variable_name('one_tensor')
container.add_initializer(one_tensor, onnx_proto.TensorProto.FLOAT, [1], [1.0])
add_output_tensor = scope.get_unique_variable_name('add_output_tensor')
apply_add(scope, [exp_output_tensor, one_tensor], add_output_tensor, container)
container.add_node('Reciprocal', add_output_tensor, operator.outputs[1].full_name,
name=scope.get_unique_operator_name('Reciprocal'),
op_version=6)
# to get Prediction from rawPrediction (or probability)
apply_argmax(scope, raw_prediction_tensor, operator.outputs[0].full_name, container,
axis=1, keepdims=0)
register_converter('pyspark.ml.classification.GBTClassificationModel', convert_gbt_classifier)
register_converter('pyspark.ml.regression.GBTRegressionModel', convert_gbt_classifier)
def calculate_gbt_classifier_output_shapes(operator):
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=[1, 2])
check_input_and_output_types(operator, good_input_types=[FloatTensorType, Int64TensorType])
if len(operator.inputs[0].type.shape) != 2:
raise RuntimeError('Input must be a [N, C]-tensor')
N = operator.inputs[0].type.shape[0]
operator.outputs[0].type = Int64TensorType(shape=[N])
if isinstance(operator.raw_operator, GBTClassificationModel):
class_count = 2
operator.outputs[1].type = FloatTensorType([N, class_count])
register_shape_calculator('pyspark.ml.classification.GBTClassificationModel',
calculate_gbt_classifier_output_shapes)
register_shape_calculator('pyspark.ml.regression.GBTRegressionModel',
calculate_gbt_classifier_output_shapes)
| 60.534884
| 118
| 0.73761
|