Dataset schema (the fields of each row below, in order):
hexsha: string (length 40)
size: int64 (7 to 1.04M)
ext: string (10 classes)
lang: string (1 class)
max_stars_repo_path: string (length 4 to 247)
max_stars_repo_name: string (length 4 to 125)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 368k), nullable
max_stars_repo_stars_event_min_datetime: string (length 24), nullable
max_stars_repo_stars_event_max_datetime: string (length 24), nullable
max_issues_repo_path: string (length 4 to 247)
max_issues_repo_name: string (length 4 to 125)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 116k), nullable
max_issues_repo_issues_event_min_datetime: string (length 24), nullable
max_issues_repo_issues_event_max_datetime: string (length 24), nullable
max_forks_repo_path: string (length 4 to 247)
max_forks_repo_name: string (length 4 to 125)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k), nullable
max_forks_repo_forks_event_min_datetime: string (length 24), nullable
max_forks_repo_forks_event_max_datetime: string (length 24), nullable
content: string (length 1 to 1.04M)
avg_line_length: float64 (1.77 to 618k)
max_line_length: int64 (1 to 1.02M)
alphanum_fraction: float64 (0 to 1)
original_content: string (length 7 to 1.04M)
filtered:remove_function_no_docstring: int64 (-102 to 942k)
filtered:remove_class_no_docstring: int64 (-354 to 977k)
filtered:remove_delete_markers: int64 (0 to 60.1k)
e8a4d6853804f4590a0288c9c5eca52412b23f32
| 31,243
|
py
|
Python
|
augly/audio/transforms.py
|
Ierezell/AugLy
|
a7dca8c36bc05dbd7694373fe9b883d6ff720f56
|
[
"MIT"
] | 1
|
2021-09-29T21:27:50.000Z
|
2021-09-29T21:27:50.000Z
|
augly/audio/transforms.py
|
Ierezell/AugLy
|
a7dca8c36bc05dbd7694373fe9b883d6ff720f56
|
[
"MIT"
] | null | null | null |
augly/audio/transforms.py
|
Ierezell/AugLy
|
a7dca8c36bc05dbd7694373fe9b883d6ff720f56
|
[
"MIT"
] | 1
|
2021-07-02T13:08:55.000Z
|
2021-07-02T13:08:55.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import random
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import augly.audio.functional as F
import augly.utils as utils
import numpy as np
from augly.audio.utils import RNGSeed
"""
Base Classes for Transforms
"""
"""
Non-Random Transforms
These classes below are essentially class-based versions of the augmentation
functions previously defined. These classes were developed such that they can
be used with Composition operators (such as `torchvision`'s) and to support
use cases where a specific transform with specific attributes needs to be
applied multiple times.
Example:
>>> audio_array = np.array([...])
>>> pitch_shift_tsfm = PitchShift(n_steps=4.0, p=0.5)
>>> shifted_audio, shifted_sample_rate = pitch_shift_tsfm(audio_array, sample_rate)
"""
| 35.223224
| 92
| 0.625964
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import random
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import augly.audio.functional as F
import augly.utils as utils
import numpy as np
from augly.audio.utils import RNGSeed
"""
Base Classes for Transforms
"""
class BaseTransform(object):
def __init__(self, p: float = 1.0):
"""
@param p: the probability of the transform being applied; default value is 1.0
"""
assert 0 <= p <= 1.0, "p must be a value in the range [0, 1]"
self.p = p
def __call__(
self,
audio: np.ndarray,
sample_rate: int = utils.DEFAULT_SAMPLE_RATE,
metadata: Optional[List[Dict[str, Any]]] = None,
force: bool = False,
) -> Tuple[np.ndarray, int]:
"""
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@param force: if set to True, the transform will be applied. otherwise,
application is determined by the probability set
@returns: the augmented audio array and sample rate
"""
assert isinstance(audio, np.ndarray), "Audio passed in must be a np.ndarray"
assert type(force) == bool, "Expected type bool for variable `force`"
if not force and random.random() > self.p:
return audio, sample_rate
return self.apply_transform(audio, sample_rate, metadata)
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
This function is to be implemented in the child classes.
From this function, call the augmentation function with the
parameters specified
"""
raise NotImplementedError()
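# A minimal sketch of a custom child class (illustrative only; `GainTransform`
# is a hypothetical name and is not part of this file). Child classes store
# their parameters in __init__ and delegate to the functional API in
# apply_transform, exactly as the classes defined below do.
class GainTransform(BaseTransform):
    def __init__(self, volume_db: float = -6.0, p: float = 1.0):
        """
        @param volume_db: decibel change applied by this sketch transform
        @param p: the probability of the transform being applied; default value is 1.0
        """
        super().__init__(p)
        self.volume_db = volume_db
    def apply_transform(
        self,
        audio: np.ndarray,
        sample_rate: int,
        metadata: Optional[List[Dict[str, Any]]] = None,
    ) -> Tuple[np.ndarray, int]:
        # Delegate to the functional implementation (same call as ChangeVolume below)
        return F.change_volume(audio, sample_rate, self.volume_db, metadata=metadata)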
"""
Non-Random Transforms
These classes below are essentially class-based versions of the augmentation
functions previously defined. These classes were developed such that they can
be used with Composition operators (such as `torchvision`'s) and to support
use cases where a specific transform with specific attributes needs to be
applied multiple times.
Example:
>>> audio_array = np.array([...])
>>> pitch_shift_tsfm = PitchShift(n_steps=4.0, p=0.5)
>>> shifted_audio, shifted_sample_rate = pitch_shift_tsfm(audio_array, sample_rate)
"""
class AddBackgroundNoise(BaseTransform):
def __init__(
self,
background_audio: Optional[Union[str, np.ndarray]] = None,
snr_level_db: float = 10.0,
seed: Optional[RNGSeed] = None,
p: float = 1.0,
):
"""
@param background_audio: the path to the background audio or a variable of type
np.ndarray containing the background audio. If set to `None`, the background
audio will be white noise
@param snr_level_db: signal-to-noise ratio in dB
@param seed: a NumPy random generator (or seed) such that these results
remain reproducible
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.background_audio = background_audio
self.snr_level_db = snr_level_db
self.seed = seed
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Mixes in a background sound into the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.add_background_noise(
audio,
sample_rate,
self.background_audio,
self.snr_level_db,
self.seed,
metadata=metadata,
)
class ApplyLambda(BaseTransform):
def __init__(
self,
aug_function: Callable[..., Tuple[np.ndarray, int]] = lambda x, y: (x, y),
p: float = 1.0,
):
"""
@param aug_function: the augmentation function to be applied onto the audio
(should expect the audio np.ndarray & sample rate int as input, and return
the transformed audio & sample rate)
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.aug_function = aug_function
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Apply a user-defined lambda to the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.apply_lambda(audio, sample_rate, self.aug_function, metadata=metadata)
class ChangeVolume(BaseTransform):
def __init__(self, volume_db: float = 0.0, p: float = 1.0):
"""
@param volume_db: the decibel amount by which to either increase (positive
value) or decrease (negative value) the volume of the audio
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.volume_db = volume_db
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Changes the volume of the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.change_volume(audio, sample_rate, self.volume_db, metadata=metadata)
class Clicks(BaseTransform):
def __init__(self, seconds_between_clicks: float = 0.5, p: float = 1.0):
"""
@param seconds_between_clicks: the amount of time between each click that will
be added to the audio, in seconds
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.seconds_between_clicks = seconds_between_clicks
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Adds clicks to the audio at a given regular interval
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.clicks(
audio, sample_rate, self.seconds_between_clicks, metadata=metadata
)
class Clip(BaseTransform):
def __init__(
self, offset_factor: float = 0.0, duration_factor: float = 1.0, p: float = 1.0
):
"""
@param offset_factor: start point of the crop relative to the audio duration
(this parameter is multiplied by the audio duration)
@param duration_factor: the length of the crop relative to the audio duration
(this parameter is multiplied by the audio duration)
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.offset_factor = offset_factor
self.duration_factor = duration_factor
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Clips the audio using the specified offset and duration factors
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.clip(
audio,
sample_rate,
self.offset_factor,
self.duration_factor,
metadata=metadata,
)
class Harmonic(BaseTransform):
def __init__(
self,
kernel_size: int = 31,
power: float = 2.0,
margin: float = 1.0,
p: float = 1.0,
):
"""
@param kernel_size: kernel size for the median filters
@param power: exponent for the Wiener filter when constructing soft mask matrices
@param margin: margin size for the masks
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.kernel_size = kernel_size
self.power = power
self.margin = margin
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Extracts the harmonic part of the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.harmonic(
audio,
sample_rate,
self.kernel_size,
self.power,
self.margin,
metadata=metadata,
)
class HighPassFilter(BaseTransform):
def __init__(self, cutoff_hz: float = 3000.0, p: float = 1.0):
"""
@param cutoff_hz: frequency (in Hz) where signals with lower frequencies will
begin to be reduced by 6dB per octave (doubling in frequency) below this point
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.cutoff_hz = cutoff_hz
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Allows audio signals with a frequency higher than the given cutoff to pass
through and attenuates signals with frequencies lower than the cutoff frequency
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.high_pass_filter(audio, sample_rate, self.cutoff_hz, metadata=metadata)
class InsertInBackground(BaseTransform):
def __init__(
self,
offset_factor: float = 0.0,
background_audio: Optional[Union[str, np.ndarray]] = None,
seed: Optional[RNGSeed] = None,
p: float = 1.0,
):
"""
@param offset_factor: start point of the crop relative to the background duration
(this parameter is multiplied by the background duration)
@param background_audio: the path to the background audio or a variable of type
np.ndarray containing the background audio. If set to `None`, the background
audio will be white noise
@param seed: a NumPy random generator (or seed) such that these results
remain reproducible
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.offset_factor = offset_factor
self.background_audio = background_audio
self.seed = seed
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Inserts the audio into a background audio clip, without overlap.
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.insert_in_background(
audio,
sample_rate,
self.offset_factor,
self.background_audio,
self.seed,
metadata=metadata,
)
class InvertChannels(BaseTransform):
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Inverts the channels of the audio.
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.invert_channels(audio, sample_rate, metadata=metadata)
class LowPassFilter(BaseTransform):
def __init__(self, cutoff_hz: float = 500.0, p: float = 1.0):
"""
@param cutoff_hz: frequency (in Hz) where signals with higher frequencies will
begin to be reduced by 6dB per octave (doubling in frequency) above this point
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.cutoff_hz = cutoff_hz
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Allows audio signals with a frequency lower than the given cutoff to pass through
and attenuates signals with frequencies higher than the cutoff frequency
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.low_pass_filter(audio, sample_rate, self.cutoff_hz, metadata=metadata)
class Normalize(BaseTransform):
def __init__(
self,
norm: Optional[float] = np.inf,
axis: int = 0,
threshold: Optional[float] = None,
fill: Optional[bool] = None,
p: float = 1.0,
):
"""
@param norm: the type of norm to compute:
- np.inf: maximum absolute value
- -np.inf: minimum absolute value
- 0: number of non-zeros (the support)
- float: corresponding l_p norm
- None: no normalization is performed
@param axis: axis along which to compute the norm
@param threshold: if provided, only the columns (or rows) with norm of at
least `threshold` are normalized
@param fill: if None, then columns (or rows) with norm below `threshold` are
left as is. If False, then columns (rows) with norm below `threshold` are
set to 0. If True, then columns (rows) with norm below `threshold` are
filled uniformly such that the corresponding norm is 1
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.norm, self.axis = norm, axis
self.threshold, self.fill = threshold, fill
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Normalizes the audio array along the chosen axis (norm(audio, axis=axis) == 1)
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.normalize(
audio,
sample_rate,
self.norm,
self.axis,
self.threshold,
self.fill,
metadata=metadata,
)
class PeakingEqualizer(BaseTransform):
def __init__(
self,
center_hz: float = 500.0,
q: float = 1.0,
gain_db: float = -3.0,
p: float = 1.0,
):
"""
@param center_hz: point in the frequency spectrum at which EQ is applied
@param q: ratio of center frequency to bandwidth; bandwidth is inversely
proportional to Q, meaning that as you raise Q, you narrow the bandwidth
@param gain_db: amount of gain (boost) or reduction (cut) that is applied
at a given frequency. Beware of clipping when using positive gain
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.center_hz = center_hz
self.q = q
self.gain_db = gain_db
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Applies a two-pole peaking equalization filter. The signal-level at and around
`center_hz` can be increased or decreased, while all other frequencies are unchanged
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.peaking_equalizer(
audio, sample_rate, self.center_hz, self.q, self.gain_db, metadata=metadata
)
class Percussive(BaseTransform):
def __init__(
self,
kernel_size: int = 31,
power: float = 2.0,
margin: float = 1.0,
p: float = 1.0,
):
"""
@param kernel_size: kernel size for the median filters
@param power: exponent for the Wiener filter when constructing soft mask matrices
@param margin: margin size for the masks
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.kernel_size = kernel_size
self.power = power
self.margin = margin
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Extracts the percussive part of the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.percussive(
audio,
sample_rate,
self.kernel_size,
self.power,
self.margin,
metadata=metadata,
)
class PitchShift(BaseTransform):
def __init__(self, n_steps: float = 1.0, p: float = 1.0):
"""
@param n_steps: each step is equal to one semitone
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.n_steps = n_steps
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Shifts the pitch of the audio by `n_steps`
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.pitch_shift(audio, sample_rate, self.n_steps, metadata=metadata)
class Reverb(BaseTransform):
def __init__(
self,
reverberance: float = 50.0,
hf_damping: float = 50.0,
room_scale: float = 100.0,
stereo_depth: float = 100.0,
pre_delay: float = 0.0,
wet_gain: float = 0.0,
wet_only: bool = False,
p: float = 1.0,
):
"""
@param reverberance: (%) sets the length of the reverberation tail. This
determines how long the reverberation continues for after the original
sound being reverbed comes to an end, and so simulates the "liveliness"
of the room acoustics
@param hf_damping: (%) increasing the damping produces a more "muted" effect.
The reverberation does not build up as much, and the high frequencies decay
faster than the low frequencies
@param room_scale: (%) sets the size of the simulated room. A high value will
simulate the reverberation effect of a large room and a low value will
simulate the effect of a small room
@param stereo_depth: (%) sets the apparent "width" of the reverb effect for
stereo tracks only. Increasing this value applies more variation between
left and right channels, creating a more "spacious" effect. When set at
zero, the effect is applied independently to left and right channels
@param pre_delay: (ms) delays the onset of the reverberation for the set time
after the start of the original input. This also delays the onset of the
reverb tail
@param wet_gain: (db) applies volume adjustment to the reverberation ("wet")
component in the mix
@param wet_only: only the wet signal (added reverberation) will be in the
resulting output, and the original audio will be removed
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.reverberance = reverberance
self.hf_damping = hf_damping
self.room_scale = room_scale
self.stereo_depth = stereo_depth
self.pre_delay = pre_delay
self.wet_gain = wet_gain
self.wet_only = wet_only
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Adds reverberation to the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.reverb(
audio,
sample_rate,
self.reverberance,
self.hf_damping,
self.room_scale,
self.stereo_depth,
self.pre_delay,
self.wet_gain,
self.wet_only,
metadata=metadata,
)
class Speed(BaseTransform):
def __init__(self, factor: float = 2.0, p: float = 1.0):
"""
@param factor: the speed factor. If factor > 1 the audio will be sped up by that
factor; if factor < 1 the audio will be slowed down by that factor
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.factor = factor
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Changes the speed of the audio, affecting pitch as well
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.speed(audio, sample_rate, self.factor, metadata=metadata)
class Tempo(BaseTransform):
def __init__(self, factor: float = 2.0, p: float = 1.0):
"""
@param factor: the tempo factor. If factor > 1 the audio will be sped up by that
factor; if factor < 1 the audio will be slowed down by that factor, without
affecting the pitch
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.factor = factor
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Adjusts the tempo of the audio by a given factor
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.tempo(audio, sample_rate, self.factor, metadata=metadata)
class TimeStretch(BaseTransform):
def __init__(self, rate: float = 1.5, p: float = 1.0):
"""
@param rate: the time stretch factor
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.rate = rate
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Time-stretches the audio by a fixed rate
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.time_stretch(audio, sample_rate, self.rate, metadata=metadata)
class ToMono(BaseTransform):
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Converts the audio from stereo to mono by averaging samples across channels
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.to_mono(audio, sample_rate, metadata=metadata)
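# Usage sketch in the style of the module docstring above (the input array and
# parameter values are illustrative assumptions, not taken from this file):
# >>> audio = np.zeros(16000, dtype=np.float32)
# >>> meta = []
# >>> louder, sr = ChangeVolume(volume_db=6.0, p=1.0)(audio, 16000, metadata=meta)
# >>> mono, sr = ToMono()(louder, sr, metadata=meta)
# Each applied transform appends a metadata dict describing the call to `meta`.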
| 0
| 29,920
| 460
|
e0ef9bce5e87aa31386e4253a0d246ce6c621dd9
| 2,663
|
py
|
Python
|
python_code/easy/412_Fizz_Buzz_easy/solution.py
|
timshenkao/interview_coding_exercises
|
c531fa5e0c09faef976539275589e957fcb88393
|
[
"Apache-2.0"
] | null | null | null |
python_code/easy/412_Fizz_Buzz_easy/solution.py
|
timshenkao/interview_coding_exercises
|
c531fa5e0c09faef976539275589e957fcb88393
|
[
"Apache-2.0"
] | null | null | null |
python_code/easy/412_Fizz_Buzz_easy/solution.py
|
timshenkao/interview_coding_exercises
|
c531fa5e0c09faef976539275589e957fcb88393
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 - present, Timur Shenkao
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from collections import OrderedDict
from typing import List
# 412. Fizz Buzz https://leetcode.com/problems/fizz-buzz/
# Given an integer n, return a string array result (1-indexed) where:
# result[i] == "FizzBuzz" if i is divisible by 3 and 5.
# result[i] == "Fizz" if i is divisible by 3.
# result[i] == "Buzz" if i is divisible by 5.
# result[i] == i (as a string) if none of the above conditions are true.
# 1 <= n <= 10^4
| 37.507042
| 117
| 0.579046
|
# Copyright (c) 2021 - present, Timur Shenkao
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from collections import OrderedDict
from typing import List
# 412. Fizz Buzz https://leetcode.com/problems/fizz-buzz/
# Given an integer n, return a string array result (1-indexed) where:
# result[i] == "FizzBuzz" if i is divisible by 3 and 5.
# result[i] == "Fizz" if i is divisible by 3.
# result[i] == "Buzz" if i is divisible by 5.
# result[i] == i (as a string) if none of the above conditions are true.
# 1 <= n <= 10^4
class Solution:
def fizz_buzz(self, n: int) -> List[str]:
""" Time complexity: O(n). We iterate from 1 to n.
Space complexity: O(n). We create an output list of strings.
"""
fizz = 'Fizz'
buzz = 'Buzz'
fizz_buzz = 'FizzBuzz'
result = list()
for i in range(1, n + 1):
if (i % 3 == 0) and (i % 5 == 0):
result.append(fizz_buzz)
elif i % 3 == 0:
result.append(fizz)
elif i % 5 == 0:
result.append(buzz)
else:
result.append(str(i))
return result
def fizz_buzz_lookup(self, n: int) -> List[str]:
""" Time complexity: O(n). We iterate from 1 to n. We perform fixed amount of computations on each iteration.
Space complexity: O(n). We create output list of strings.
"""
# Lookup for all fizzbuzz mappings
fizz_buzz_dict = OrderedDict({3 : "Fizz", 5 : "Buzz"})
result = list()
i_result = list()
for i in range(1, n + 1):
i_result.clear()
for key in fizz_buzz_dict.keys():
# If the number is divisible by key,
# then add the corresponding string mapping to current i_result
if i % key == 0:
i_result.append(fizz_buzz_dict[key])
if not i_result:
i_result.append(str(i))
result.append(''.join(i_result))
return result
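# Illustrative checks of both implementations (a hypothetical driver, not part
# of the original file):
# >>> Solution().fizz_buzz(5)
# ['1', '2', 'Fizz', '4', 'Buzz']
# >>> Solution().fizz_buzz_lookup(15)[-1]
# 'FizzBuzz'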
| 0
| 1,490
| 23
|
aa8755d6383871ea335e941c7857fa0ecccd50d3
| 1,458
|
py
|
Python
|
heltour/tournament/migrations/0110_scheduledevent.py
|
lenguyenthanh/heltour
|
13018b1905539de0b273370a76f6aa1d1ebbb01a
|
[
"MIT"
] | null | null | null |
heltour/tournament/migrations/0110_scheduledevent.py
|
lenguyenthanh/heltour
|
13018b1905539de0b273370a76f6aa1d1ebbb01a
|
[
"MIT"
] | null | null | null |
heltour/tournament/migrations/0110_scheduledevent.py
|
lenguyenthanh/heltour
|
13018b1905539de0b273370a76f6aa1d1ebbb01a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-08 18:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 42.882353
| 138
| 0.613855
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-08 18:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tournament', '0109_auto_20161108_0128'),
]
operations = [
migrations.CreateModel(
name='ScheduledEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True)),
('type', models.CharField(choices=[('notify_mods_unscheduled', 'Notify mods of unscheduled games')], max_length=255)),
('offset', models.DurationField()),
('relative_to', models.CharField(choices=[('round_start', 'Round start'), ('round_end', 'Round end')], max_length=255)),
('last_run', models.DateTimeField(blank=True, null=True)),
('league', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.League')),
('season', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.Season')),
],
options={
'abstract': False,
},
),
]
| 0
| 1,247
| 23
|
09cf4e8dc61f85f2ebaa498eb81cabb195f04722
| 3,358
|
py
|
Python
|
Sketches/MH/Layout/Visualisation/Graph/RenderingParticle.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 12
|
2015-10-20T10:22:01.000Z
|
2021-07-19T10:09:44.000Z
|
Sketches/MH/Layout/Visualisation/Graph/RenderingParticle.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 2
|
2015-10-20T10:22:55.000Z
|
2017-02-13T11:05:25.000Z
|
Sketches/MH/Layout/Visualisation/Graph/RenderingParticle.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 6
|
2015-03-09T12:51:59.000Z
|
2020-03-01T13:06:21.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Simple topography viewer server - takes textual commands from a single socket
# and renders the appropriate graph
import pygame
from Physics import Particle as BaseParticle
class RenderingParticle(BaseParticle):
"""Version of Physics.Particle with added rendering functions. """
def render(self, surface):
"""Rendering passes. A generator method that renders in multiple passes.
Use yield to specify the pass at which the next stage of rendering
should take place.
Example that renders bonds 'behind' the blobs:
def render(self, surface):
yield 1
self.renderBonds(surface) # render bonds on pass 1
yield 5
self.renderSelf(surface) # render 'blob' on pass 5
If another particle type were rendered on, for example, pass 3, then it
would be rendered on top of the bonds, but behind the blobs.
Use this mechanism to order rendering into layers.
"""
x = int(self.pos[0]) - self.left
y = int(self.pos[1]) - self.top
yield 1
for p in self.bondedTo:
pygame.draw.line(surface, (128,128,255), (x,y), (int(p.pos[0] -self.left),int(p.pos[1] - self.top)) )
yield 2
pygame.draw.circle(surface, (255,128,128), (x,y), self.radius)
if self.selected:
pygame.draw.circle(surface, (0,0,0), (x,y), self.radius, 2)
surface.blit(self.label, (x - self.label.get_width()/2, y - self.label.get_height()/2))
def setOffset( self, (left,top) ):
"""Inform of a change to the coords of the top left of the drawing surface,
so that this entity can render, as if the top left had moved
"""
self.left = left
self.top = top
def select( self ):
"""Tell this particle it is selected"""
self.selected = True
def deselect( self ):
"""Tell this particle it is selected"""
self.selected = False
| 37.730337
| 114
| 0.611674
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Simple topography viewer server - takes textual commands from a single socket
# and renders the appropriate graph
import pygame
from Physics import Particle as BaseParticle
class RenderingParticle(BaseParticle):
"""Version of Physics.Particle with added rendering functions. """
def __init__(self, ID, position, name):
super(RenderingParticle,self).__init__(position=position, ID = ID )
self.radius = 20
self.labelText = name
font = pygame.font.Font(None, 24)
self.label = font.render(self.labelText, False, (0,0,0))
self.left = 0
self.top = 0
self.selected = False
def render(self, surface):
"""Rendering passes. A generator method that renders in multiple passes.
Use yield to specify the pass at which the next stage of rendering
should take place.
Example that renders bonds 'behind' the blobs:
def render(self, surface):
yield 1
self.renderBonds(surface) # render bonds on pass 1
yield 5
self.renderSelf(surface) # render 'blob' on pass 5
If another particle type were rendered on, for example, pass 3, then it
would be rendered on top of the bonds, but behind the blobs.
Use this mechanism to order rendering into layers.
"""
x = int(self.pos[0]) - self.left
y = int(self.pos[1]) - self.top
yield 1
for p in self.bondedTo:
pygame.draw.line(surface, (128,128,255), (x,y), (int(p.pos[0] -self.left),int(p.pos[1] - self.top)) )
yield 2
pygame.draw.circle(surface, (255,128,128), (x,y), self.radius)
if self.selected:
pygame.draw.circle(surface, (0,0,0), (x,y), self.radius, 2)
surface.blit(self.label, (x - self.label.get_width()/2, y - self.label.get_height()/2))
def setOffset( self, (left,top) ):
"""Inform of a change to the coords of the top left of the drawing surface,
so that this entity can render, as if the top left had moved
"""
self.left = left
self.top = top
def select( self ):
"""Tell this particle it is selected"""
self.selected = True
def deselect( self ):
"""Tell this particle it is selected"""
self.selected = False
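# Hypothetical caller sketch (an assumption; the surrounding viewer code is not
# shown in this file): every value yielded by render() is the pass number at
# which the next drawing stage should run, so a viewer can interleave many
# particles into layered passes.
# for next_pass in particle.render(surface):
#     pass  # a real renderer would defer the next stage until pass `next_pass`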
| 339
| 0
| 27
|
3ccdf549310d1c10291d371e3807c060ab2fe1c2
| 2,130
|
py
|
Python
|
bindings/python/benchmark.py
|
wangjia3015/marisa-trie
|
da2924831c1e8f90dae7223cfe7a2bc1bd8b5132
|
[
"BSD-2-Clause"
] | 388
|
2016-01-28T15:16:43.000Z
|
2022-03-28T08:18:07.000Z
|
bindings/python/benchmark.py
|
wangjia3015/marisa-trie
|
da2924831c1e8f90dae7223cfe7a2bc1bd8b5132
|
[
"BSD-2-Clause"
] | 38
|
2016-02-12T14:51:12.000Z
|
2022-02-12T09:10:25.000Z
|
bindings/python/benchmark.py
|
wangjia3015/marisa-trie
|
da2924831c1e8f90dae7223cfe7a2bc1bd8b5132
|
[
"BSD-2-Clause"
] | 79
|
2016-03-16T15:47:50.000Z
|
2022-03-15T22:21:08.000Z
|
import datetime
import marisa
import sys
time_begin = datetime.datetime.now()
keys = []
for line in sys.stdin:
keys.append(line.rstrip())
time_end = datetime.datetime.now()
print "input:", time_end - time_begin
time_begin = datetime.datetime.now()
dic = dict()
for i in range(len(keys)):
dic[keys[i]] = i
time_end = datetime.datetime.now()
print "dict_build:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
dic.get(key)
time_end = datetime.datetime.now()
print "dict_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
keyset = marisa.Keyset()
for key in keys:
keyset.push_back(key)
time_end = datetime.datetime.now()
print "keyset_build:", time_end - time_begin
time_begin = datetime.datetime.now()
trie = marisa.Trie()
trie.build(keyset)
time_end = datetime.datetime.now()
print "trie_build:", time_end - time_begin
time_begin = datetime.datetime.now()
agent = marisa.Agent()
for key in keys:
agent.set_query(key)
trie.lookup(agent)
agent.key_id()
time_end = datetime.datetime.now()
print "trie_agent_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
trie.lookup(key)
time_end = datetime.datetime.now()
print "trie_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for i in range(len(keys)):
agent.set_query(i)
trie.reverse_lookup(agent)
agent.key_str()
time_end = datetime.datetime.now()
print "trie_agent_reverse_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for i in range(len(keys)):
trie.reverse_lookup(i)
time_end = datetime.datetime.now()
print "trie_reverse_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
agent.set_query(key)
while trie.common_prefix_search(agent):
agent.key_str()
time_end = datetime.datetime.now()
print "trie_agent_common_prefix_search:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
agent.set_query(key)
while trie.predictive_search(agent):
agent.key_str()
time_end = datetime.datetime.now()
print "trie_agent_predictive_search:", time_end - time_begin
| 25.97561
| 63
| 0.753052
|
import datetime
import marisa
import sys
time_begin = datetime.datetime.now()
keys = []
for line in sys.stdin:
keys.append(line.rstrip())
time_end = datetime.datetime.now()
print "input:", time_end - time_begin
time_begin = datetime.datetime.now()
dic = dict()
for i in range(len(keys)):
dic[keys[i]] = i
time_end = datetime.datetime.now()
print "dict_build:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
dic.get(key)
time_end = datetime.datetime.now()
print "dict_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
keyset = marisa.Keyset()
for key in keys:
keyset.push_back(key)
time_end = datetime.datetime.now()
print "keyset_build:", time_end - time_begin
time_begin = datetime.datetime.now()
trie = marisa.Trie()
trie.build(keyset)
time_end = datetime.datetime.now()
print "trie_build:", time_end - time_begin
time_begin = datetime.datetime.now()
agent = marisa.Agent()
for key in keys:
agent.set_query(key)
trie.lookup(agent)
agent.key_id()
time_end = datetime.datetime.now()
print "trie_agent_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
trie.lookup(key)
time_end = datetime.datetime.now()
print "trie_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for i in range(len(keys)):
agent.set_query(i)
trie.reverse_lookup(agent)
agent.key_str()
time_end = datetime.datetime.now()
print "trie_agent_reverse_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for i in range(len(keys)):
trie.reverse_lookup(i)
time_end = datetime.datetime.now()
print "trie_reverse_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
agent.set_query(key)
while trie.common_prefix_search(agent):
agent.key_str()
time_end = datetime.datetime.now()
print "trie_agent_common_prefix_search:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
agent.set_query(key)
while trie.predictive_search(agent):
agent.key_str()
time_end = datetime.datetime.now()
print "trie_agent_predictive_search:", time_end - time_begin
| 0
| 0
| 0
|
7f9c95359486993b762a90f03b356fc2e537a3c5
| 5,868
|
py
|
Python
|
SPACE/random_concept_building.py
|
lkreiskoether/SPACE
|
ba7e697bd10c5881cd6a87f9f877664978436597
|
[
"Apache-2.0"
] | null | null | null |
SPACE/random_concept_building.py
|
lkreiskoether/SPACE
|
ba7e697bd10c5881cd6a87f9f877664978436597
|
[
"Apache-2.0"
] | null | null | null |
SPACE/random_concept_building.py
|
lkreiskoether/SPACE
|
ba7e697bd10c5881cd6a87f9f877664978436597
|
[
"Apache-2.0"
] | 1
|
2021-10-05T09:07:36.000Z
|
2021-10-05T09:07:36.000Z
|
"""
Copyright 2021 Lukas Kreisköther
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import PIL.Image
import numpy as np
import os
import random
class RandomConceptBuilder:
"""RandomConceptBuilder objects capsule the functionality for building random concept images necessary for using the
TCAV framework in industrial usecases. For that random crops from defined sets of images (e.g. from good class
when testing the bad class) with size crop_size are build. The random concept images are stored in folders
with name prefix 'random500_' so that they can be used for the TCAV framework.
"""
def __init__(self, path, folders_for_building, store_fmt, image_shape, crop_size, num_fold=30,
num_imgs_per_fold=100):
"""Initializes a RandomConceptBuilder object.
Args:
path (str): path to the directory containing the folders from which the random
concept images should be built (e.g. '/home/lukas/Documents/02_Data/FGUSS_subsets_grey/').
folders_for_building (list of str): list of strings for all folders in the directory from which the algorithm should
choose images to build the random concept images (e.g. ['good'] or ['one', 'two', 'three'])
image_shape (list of int): list with len=2 which defines the shape the produced images should have
(normally equals the input size of the model to investigate).
crop_size (list of int): list with len=3 defining the size of the random crops (e.g. [56, 56, 3]).
num_fold (int): number of folders of random concept images the algorithm should build.
num_imgs_per_fold (int): number of images per folder for the folders of random concept images.
store_fmt (str): store format of produced images.
"""
self.path = path
self.folders_for_building = folders_for_building
self.name_prefix = 'random500_'
self.store_fmt = store_fmt
self.image_shape = image_shape
self.crop_size = crop_size
self.num_fold = num_fold
self.num_imgs_per_fold = num_imgs_per_fold
if len(self.folders_for_building) == 1:
self.X_names = [str(self.folders_for_building[0] + '/' + name) for name in
os.listdir(self.path + self.folders_for_building[0])
if not os.path.isdir(self.path + self.folders_for_building[0] + '/' + name)]
else:
X_temp = []
for folder_name in self.folders_for_building:
X_temp = X_temp + ([str(folder_name + '/' + name) for name in os.listdir(self.path + folder_name)
if not os.path.isdir(self.path + self.folders_for_building[0] + '/' + name)])
self.X_names = X_temp
np.random.shuffle(self.X_names)
self.img_tensor = tf.placeholder(tf.float32, shape=(self.image_shape[0], self.image_shape[1], 3))
self.out = tf.image.random_crop(value=self.img_tensor, size=self.crop_size)
def build_random_concept_image(self, img):
"""Method for building the random concept image from an input image.
Args:
img (numpy.ndarray[float]): image to build a random concept image from.
Returns: PIL.Image: Random concept image as PIL.Image.
"""
img = np.array(img, dtype=np.float32)
with tf.Session():
i = self.out.eval(feed_dict={self.img_tensor: img})
i = np.tile(i, (int(img.shape[0] / i.shape[0]), int(img.shape[1] / i.shape[1]), 1))
img = np.pad(array=i, pad_width=((0, img.shape[0] % i.shape[0]), (0, img.shape[1] % i.shape[1]), (0, 0)),
mode='wrap')
return PIL.Image.fromarray(img.astype(np.uint8))
def build(self):
"""Method to call to start building the concept images. Function looks how many
images are already in the folders and fills the folders respectively.
"""
for i in range(self.num_fold):
sub_fold = self.name_prefix + str(i)
if not os.path.isdir(self.path + sub_fold):
try:
os.mkdir(self.path + sub_fold + '/')
except Exception as e:
print("Creation of the directory %s failed" % sub_fold)
print(e)
else:
print("Successfully created the directory %s " % sub_fold)
num_files = len([name for name in os.listdir(self.path + sub_fold) if
os.path.isfile(os.path.join(self.path + sub_fold, name))])
if not (num_files == self.num_imgs_per_fold):
for j in range(self.num_imgs_per_fold - num_files):
img = random.choice(self.X_names)
img = np.array(PIL.Image.open(tf.gfile.Open(self.path + '/' + img, 'rb')).convert('RGB'),
dtype=np.float32)
# todo: resize (right now, we don't do it since images have to be in right size for TCAV anyway)
img_ran = self.build_random_concept_image(img)
img_ran.save(self.path + sub_fold + '/' + str(num_files + j) + '.' + self.store_fmt)
| 54.333333
| 128
| 0.63514
|
"""
Copyright 2021 Lukas Kreisköther
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import PIL.Image
import numpy as np
import os
import random
class RandomConceptBuilder:
"""RandomConceptBuilder objects capsule the functionality for building random concept images necessary for using the
TCAV framework in industrial usecases. For that random crops from defined sets of images (e.g. from good class
when testing the bad class) with size crop_size are build. The random concept images are stored in folders
with name prefix 'random500_' so that they can be used for the TCAV framework.
"""
def __init__(self, path, folders_for_building, store_fmt, image_shape, crop_size, num_fold=30,
num_imgs_per_fold=100):
"""Initializes a RandomConceptBuilder object.
Args:
path (str): path to the directory containing the folders from which the random
concept images should be built (e.g. '/home/lukas/Documents/02_Data/FGUSS_subsets_grey/').
folders_for_building (list of str): list of strings for all folders in the directory from which the algorithm should
choose images to build the random concept images (e.g. ['good'] or ['one', 'two', 'three'])
image_shape (list of int): list with len=2 which defines the shape the produced images should have
(normally equals the input size of the model to investigate).
crop_size (list of int): list with len=3 defining the size of the random crops (e.g. [56, 56, 3]).
num_fold (int): number of folders of random concept images the algorithm should build.
num_imgs_per_fold (int): number of images per folder for the folders of random concept images.
store_fmt (str): store format of produced images.
"""
self.path = path
self.folders_for_building = folders_for_building
self.name_prefix = 'random500_'
self.store_fmt = store_fmt
self.image_shape = image_shape
self.crop_size = crop_size
self.num_fold = num_fold
self.num_imgs_per_fold = num_imgs_per_fold
if len(self.folders_for_building) == 1:
self.X_names = [str(self.folders_for_building[0] + '/' + name) for name in
os.listdir(self.path + self.folders_for_building[0])
if not os.path.isdir(self.path + self.folders_for_building[0] + '/' + name)]
else:
X_temp = []
for folder_name in self.folders_for_building:
X_temp = X_temp + ([str(folder_name + '/' + name) for name in os.listdir(self.path + folder_name)
if not os.path.isdir(self.path + self.folders_for_building[0] + '/' + name)])
self.X_names = X_temp
np.random.shuffle(self.X_names)
self.img_tensor = tf.placeholder(tf.float32, shape=(self.image_shape[0], self.image_shape[1], 3))
self.out = tf.image.random_crop(value=self.img_tensor, size=self.crop_size)
def build_random_concept_image(self, img):
"""Method for building the random concept image from an input image.
Args:
img (numpy.ndarray[float]): image to build a random concept image from.
Returns: PIL.Image: Random concept image as PIL.Image.
"""
img = np.array(img, dtype=np.float32)
with tf.Session():
i = self.out.eval(feed_dict={self.img_tensor: img})
i = np.tile(i, (int(img.shape[0] / i.shape[0]), int(img.shape[1] / i.shape[1]), 1))
img = np.pad(array=i, pad_width=((0, img.shape[0] % i.shape[0]), (0, img.shape[1] % i.shape[1]), (0, 0)),
mode='wrap')
return PIL.Image.fromarray(img.astype(np.uint8))
def build(self):
"""Method to call to start building the concept images. Function looks how many
images are already in the folders and fills the folders respectively.
"""
for i in range(self.num_fold):
sub_fold = self.name_prefix + str(i)
if not os.path.isdir(self.path + sub_fold):
try:
os.mkdir(self.path + sub_fold + '/')
except Exception as e:
print("Creation of the directory %s failed" % sub_fold)
print(e)
else:
print("Successfully created the directory %s " % sub_fold)
num_files = len([name for name in os.listdir(self.path + sub_fold) if
os.path.isfile(os.path.join(self.path + sub_fold, name))])
if not (num_files == self.num_imgs_per_fold):
for j in range(self.num_imgs_per_fold - num_files):
img = random.choice(self.X_names)
img = np.array(PIL.Image.open(tf.gfile.Open(self.path + '/' + img, 'rb')).convert('RGB'),
dtype=np.float32)
# todo: resize (right now, we don't do it since images have to be in right size for TCAV anyway)
img_ran = self.build_random_concept_image(img)
img_ran.save(self.path + sub_fold + '/' + str(num_files + j) + '.' + self.store_fmt)
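# Usage sketch (every path and parameter value below is an illustrative assumption):
# builder = RandomConceptBuilder(path='/data/concepts/',
#                                folders_for_building=['good'],
#                                store_fmt='png',
#                                image_shape=[224, 224],
#                                crop_size=[56, 56, 3],
#                                num_fold=30,
#                                num_imgs_per_fold=100)
# builder.build()  # fills /data/concepts/random500_0 ... random500_29 with 100 crops each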
| 0
| 0
| 0
|
3178a9110e2900570b8a0543edc4ea7b69019a8e
| 13,724
|
py
|
Python
|
examples/model_compression/distill_lstm/data.py
|
wzzju/PaddleNLP
|
1757a4fc2a3cd5a45f75c6482746777752b414d8
|
[
"Apache-2.0"
] | 1
|
2021-07-13T02:21:15.000Z
|
2021-07-13T02:21:15.000Z
|
examples/model_compression/distill_lstm/data.py
|
wzzju/PaddleNLP
|
1757a4fc2a3cd5a45f75c6482746777752b414d8
|
[
"Apache-2.0"
] | null | null | null |
examples/model_compression/distill_lstm/data.py
|
wzzju/PaddleNLP
|
1757a4fc2a3cd5a45f75c6482746777752b414d8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
from functools import partial
import numpy as np
import jieba
import paddle
from paddlenlp.data import Stack, Tuple, Pad, Vocab
from paddlenlp.transformers import BertTokenizer
from paddlenlp.datasets import load_dataset
from utils import convert_example_for_lstm, convert_example_for_distill, convert_pair_example
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = {}
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n").split("\t")[0]
vocab[token] = index
return vocab
def apply_data_augmentation(data,
task_name,
tokenizer,
n_iter=20,
p_mask=0.1,
p_ng=0.25,
ngram_range=(2, 6),
whole_word_mask=False,
seed=0):
"""
Data Augmentation contains Masking and n-gram sampling. Tokenization and
Masking are performed at the same time, so that the masked token can be
    directly replaced by `mask_token`, after which sampling is performed.
"""
np.random.seed(seed)
new_data = []
for example in data:
if task_name == 'qqp':
data_list = tokenizer.tokenize(example['sentence1'])
data_list_2 = tokenizer.tokenize(example['sentence2'])
new_data.append({
"sentence1": data_list,
"sentence2": data_list_2,
"labels": example['labels']
})
else:
data_list = tokenizer.tokenize(example['sentence'])
new_data.append({
"sentence": data_list,
"labels": example['labels']
})
for example in data:
for _ in range(n_iter):
if task_name == 'qqp':
words = _data_augmentation(example['sentence1'], data_list)
words_2 = _data_augmentation(example['sentence2'], data_list_2)
new_data.append({
"sentence1": words,
"sentence2": words_2,
"labels": example['labels']
})
else:
words = _data_augmentation(example['sentence'], data_list)
new_data.append({
"sentence": words,
"labels": example['labels']
})
return new_data
def apply_data_augmentation_for_cn(data,
tokenizer,
vocab,
n_iter=20,
p_mask=0.1,
p_ng=0.25,
ngram_range=(2, 10),
seed=0):
"""
    Because BERT and jieba have different `tokenize` functions, it returns
    jieba_tokenizer(example['text']), bert_tokenizer(example['text']) and
    example['label'] for each example in data.
jieba tokenization and Masking are performed at the same time, so that the
masked token can be directly replaced by `mask_token`, and other tokens
could be tokenized by BERT's tokenizer, from which tokenized example for
student model and teacher model would get at the same time.
"""
np.random.seed(seed)
new_data = []
for example in data:
text_tokenized = list(jieba.cut(example['text']))
lstm_tokens = text_tokenized
bert_tokens = tokenizer.tokenize(example['text'])
new_data.append({
"lstm_tokens": lstm_tokens,
"bert_tokens": bert_tokens,
"label": example['label']
})
for _ in range(n_iter):
# 1. Masking
lstm_tokens, bert_tokens = [], []
for word in text_tokenized:
if np.random.rand() < p_mask:
lstm_tokens.append([vocab.unk_token])
bert_tokens.append([tokenizer.unk_token])
else:
lstm_tokens.append([word])
bert_tokens.append(tokenizer.tokenize(word))
# 2. N-gram sampling
lstm_tokens, bert_tokens = ngram_sampling(lstm_tokens, bert_tokens,
p_ng, ngram_range)
lstm_tokens, bert_tokens = flatten(lstm_tokens), flatten(
bert_tokens)
new_data.append({
"lstm_tokens": lstm_tokens,
"bert_tokens": bert_tokens,
"label": example['label']
})
return new_data
def create_data_loader_for_small_model(task_name,
vocab_path,
model_name=None,
batch_size=64,
max_seq_length=128,
shuffle=True):
"""Data loader for bi-lstm, not bert."""
if task_name == 'chnsenticorp':
train_ds, dev_ds = load_dataset(task_name, splits=["train", "dev"])
else:
train_ds, dev_ds = load_dataset(
'glue', task_name, splits=["train", "dev"])
if task_name == 'chnsenticorp':
vocab = Vocab.load_vocabulary(
vocab_path,
unk_token='[UNK]',
pad_token='[PAD]',
bos_token=None,
eos_token=None, )
pad_val = vocab['[PAD]']
else:
vocab = BertTokenizer.from_pretrained(model_name)
pad_val = vocab.pad_token_id
trans_fn = partial(
convert_example_for_lstm,
task_name=task_name,
vocab=vocab,
max_seq_length=max_seq_length,
is_test=False)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=pad_val), # input_ids
Stack(dtype="int64"), # seq len
Stack(dtype="int64") # label
): fn(samples)
train_ds = train_ds.map(trans_fn, lazy=True)
dev_ds = dev_ds.map(trans_fn, lazy=True)
train_data_loader, dev_data_loader = create_dataloader(
train_ds, dev_ds, batch_size, batchify_fn, shuffle)
return train_data_loader, dev_data_loader
def create_distill_loader(task_name,
model_name,
vocab_path,
batch_size=64,
max_seq_length=128,
shuffle=True,
n_iter=20,
whole_word_mask=False,
seed=0):
"""
Returns batch data for bert and small model.
Bert and small model have different input representations.
"""
tokenizer = BertTokenizer.from_pretrained(model_name)
if task_name == 'chnsenticorp':
train_ds, dev_ds = load_dataset(task_name, splits=["train", "dev"])
vocab = Vocab.load_vocabulary(
vocab_path,
unk_token='[UNK]',
pad_token='[PAD]',
bos_token=None,
eos_token=None, )
pad_val = vocab['[PAD]']
data_aug_fn = partial(
apply_data_augmentation_for_cn,
tokenizer=tokenizer,
vocab=vocab,
n_iter=n_iter,
seed=seed)
else:
train_ds, dev_ds = load_dataset(
'glue', task_name, splits=["train", "dev"])
vocab = tokenizer
pad_val = tokenizer.pad_token_id
data_aug_fn = partial(
apply_data_augmentation,
task_name=task_name,
tokenizer=tokenizer,
n_iter=n_iter,
whole_word_mask=whole_word_mask,
seed=seed)
train_ds = train_ds.map(data_aug_fn, batched=True)
print("Data augmentation has been applied.")
trans_fn = partial(
convert_example_for_distill,
task_name=task_name,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=max_seq_length,
vocab=vocab)
trans_fn_dev = partial(
convert_example_for_distill,
task_name=task_name,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=max_seq_length,
vocab=vocab,
is_tokenized=False)
if task_name == 'qqp':
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # bert input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # bert segment
Pad(axis=0, pad_val=pad_val), # small input_ids
Stack(dtype="int64"), # small seq len
Pad(axis=0, pad_val=pad_val), # small input_ids
Stack(dtype="int64"), # small seq len
Stack(dtype="int64") # small label
): fn(samples)
else:
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # bert input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # bert segment
Pad(axis=0, pad_val=pad_val), # small input_ids
Stack(dtype="int64"), # small seq len
Stack(dtype="int64") # small label
): fn(samples)
train_ds = train_ds.map(trans_fn, lazy=True)
dev_ds = dev_ds.map(trans_fn_dev, lazy=True)
train_data_loader, dev_data_loader = create_dataloader(
train_ds, dev_ds, batch_size, batchify_fn, shuffle)
return train_data_loader, dev_data_loader
def create_pair_loader_for_small_model(task_name,
model_name,
vocab_path,
batch_size=64,
max_seq_length=128,
shuffle=True,
is_test=False):
"""Only support QQP now."""
tokenizer = BertTokenizer.from_pretrained(model_name)
train_ds, dev_ds = load_dataset('glue', task_name, splits=["train", "dev"])
vocab = Vocab.load_vocabulary(
vocab_path,
unk_token='[UNK]',
pad_token='[PAD]',
bos_token=None,
eos_token=None, )
trans_func = partial(
convert_pair_example,
task_name=task_name,
vocab=tokenizer,
is_tokenized=False,
max_seq_length=max_seq_length,
is_test=is_test)
train_ds = train_ds.map(trans_func, lazy=True)
dev_ds = dev_ds.map(trans_func, lazy=True)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=vocab['[PAD]']), # input
Stack(), # length
Pad(axis=0, pad_val=vocab['[PAD]']), # input
Stack(), # length
Stack(dtype="int64" if train_ds.label_list else "float32") # label
): fn(samples)
train_data_loader, dev_data_loader = create_dataloader(
train_ds, dev_ds, batch_size, batchify_fn, shuffle)
return train_data_loader, dev_data_loader
| 36.403183
| 93
| 0.573302
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
from functools import partial
import numpy as np
import jieba
import paddle
from paddlenlp.data import Stack, Tuple, Pad, Vocab
from paddlenlp.transformers import BertTokenizer
from paddlenlp.datasets import load_dataset
from utils import convert_example_for_lstm, convert_example_for_distill, convert_pair_example
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = {}
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n").split("\t")[0]
vocab[token] = index
return vocab
def ngram_sampling(words, words_2=None, p_ng=0.25, ngram_range=(2, 6)):
if np.random.rand() < p_ng:
ngram_len = np.random.randint(ngram_range[0], ngram_range[1] + 1)
ngram_len = min(ngram_len, len(words))
start = np.random.randint(0, len(words) - ngram_len + 1)
words = words[start:start + ngram_len]
if words_2:
words_2 = words_2[start:start + ngram_len]
return words if not words_2 else (words, words_2)
def flatten(list_of_list):
final_list = []
for each_list in list_of_list:
final_list += each_list
return final_list
def apply_data_augmentation(data,
task_name,
tokenizer,
n_iter=20,
p_mask=0.1,
p_ng=0.25,
ngram_range=(2, 6),
whole_word_mask=False,
seed=0):
"""
Data Augmentation contains Masking and n-gram sampling. Tokenization and
Masking are performed at the same time, so that the masked token can be
    directly replaced by `mask_token`, after which sampling is performed.
"""
def _data_augmentation(data,
tokenized_list,
whole_word_mask=whole_word_mask):
# 1. Masking
words = []
if not whole_word_mask:
words = [
tokenizer.mask_token if np.random.rand() < p_mask else word
for word in tokenized_list
]
else:
for word in data.split():
words += [[tokenizer.mask_token]] if np.random.rand(
) < p_mask else [tokenizer.tokenize(word)]
# 2. N-gram sampling
words = ngram_sampling(words, p_ng=p_ng, ngram_range=ngram_range)
words = flatten(words) if isinstance(words[0], list) else words
return words
np.random.seed(seed)
new_data = []
for example in data:
if task_name == 'qqp':
data_list = tokenizer.tokenize(example['sentence1'])
data_list_2 = tokenizer.tokenize(example['sentence2'])
new_data.append({
"sentence1": data_list,
"sentence2": data_list_2,
"labels": example['labels']
})
else:
data_list = tokenizer.tokenize(example['sentence'])
new_data.append({
"sentence": data_list,
"labels": example['labels']
})
for example in data:
for _ in range(n_iter):
if task_name == 'qqp':
words = _data_augmentation(example['sentence1'], data_list)
words_2 = _data_augmentation(example['sentence2'], data_list_2)
new_data.append({
"sentence1": words,
"sentence2": words_2,
"labels": example['labels']
})
else:
words = _data_augmentation(example['sentence'], data_list)
new_data.append({
"sentence": words,
"labels": example['labels']
})
return new_data
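# A hedged sketch of calling `apply_data_augmentation` for an English GLUE-style
# task; the toy example and the tokenizer checkpoint name are illustrative
# assumptions, not values taken from this pipeline.
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   data = [{"sentence": "the movie was great", "labels": 1}]
#   augmented = apply_data_augmentation(
#       data, task_name="sst-2", tokenizer=tokenizer, n_iter=2, seed=0)
#   # -> the tokenized original plus n_iter masked / n-gram-sampled variants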
def apply_data_augmentation_for_cn(data,
tokenizer,
vocab,
n_iter=20,
p_mask=0.1,
p_ng=0.25,
ngram_range=(2, 10),
seed=0):
"""
    Because BERT and jieba have different `tokenize` functions, it returns
    jieba_tokenizer(example['text']), bert_tokenizer(example['text']) and
    example['label'] for each example in data.
jieba tokenization and Masking are performed at the same time, so that the
masked token can be directly replaced by `mask_token`, and other tokens
could be tokenized by BERT's tokenizer, from which tokenized example for
student model and teacher model would get at the same time.
"""
np.random.seed(seed)
new_data = []
for example in data:
text_tokenized = list(jieba.cut(example['text']))
lstm_tokens = text_tokenized
bert_tokens = tokenizer.tokenize(example['text'])
new_data.append({
"lstm_tokens": lstm_tokens,
"bert_tokens": bert_tokens,
"label": example['label']
})
for _ in range(n_iter):
# 1. Masking
lstm_tokens, bert_tokens = [], []
for word in text_tokenized:
if np.random.rand() < p_mask:
lstm_tokens.append([vocab.unk_token])
bert_tokens.append([tokenizer.unk_token])
else:
lstm_tokens.append([word])
bert_tokens.append(tokenizer.tokenize(word))
# 2. N-gram sampling
lstm_tokens, bert_tokens = ngram_sampling(lstm_tokens, bert_tokens,
p_ng, ngram_range)
lstm_tokens, bert_tokens = flatten(lstm_tokens), flatten(
bert_tokens)
new_data.append({
"lstm_tokens": lstm_tokens,
"bert_tokens": bert_tokens,
"label": example['label']
})
return new_data
def create_data_loader_for_small_model(task_name,
vocab_path,
model_name=None,
batch_size=64,
max_seq_length=128,
shuffle=True):
"""Data loader for bi-lstm, not bert."""
if task_name == 'chnsenticorp':
train_ds, dev_ds = load_dataset(task_name, splits=["train", "dev"])
else:
train_ds, dev_ds = load_dataset(
'glue', task_name, splits=["train", "dev"])
if task_name == 'chnsenticorp':
vocab = Vocab.load_vocabulary(
vocab_path,
unk_token='[UNK]',
pad_token='[PAD]',
bos_token=None,
eos_token=None, )
pad_val = vocab['[PAD]']
else:
vocab = BertTokenizer.from_pretrained(model_name)
pad_val = vocab.pad_token_id
trans_fn = partial(
convert_example_for_lstm,
task_name=task_name,
vocab=vocab,
max_seq_length=max_seq_length,
is_test=False)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=pad_val), # input_ids
Stack(dtype="int64"), # seq len
Stack(dtype="int64") # label
): fn(samples)
train_ds = train_ds.map(trans_fn, lazy=True)
dev_ds = dev_ds.map(trans_fn, lazy=True)
train_data_loader, dev_data_loader = create_dataloader(
train_ds, dev_ds, batch_size, batchify_fn, shuffle)
return train_data_loader, dev_data_loader
def create_distill_loader(task_name,
model_name,
vocab_path,
batch_size=64,
max_seq_length=128,
shuffle=True,
n_iter=20,
whole_word_mask=False,
seed=0):
"""
Returns batch data for bert and small model.
Bert and small model have different input representations.
"""
tokenizer = BertTokenizer.from_pretrained(model_name)
if task_name == 'chnsenticorp':
train_ds, dev_ds = load_dataset(task_name, splits=["train", "dev"])
vocab = Vocab.load_vocabulary(
vocab_path,
unk_token='[UNK]',
pad_token='[PAD]',
bos_token=None,
eos_token=None, )
pad_val = vocab['[PAD]']
data_aug_fn = partial(
apply_data_augmentation_for_cn,
tokenizer=tokenizer,
vocab=vocab,
n_iter=n_iter,
seed=seed)
else:
train_ds, dev_ds = load_dataset(
'glue', task_name, splits=["train", "dev"])
vocab = tokenizer
pad_val = tokenizer.pad_token_id
data_aug_fn = partial(
apply_data_augmentation,
task_name=task_name,
tokenizer=tokenizer,
n_iter=n_iter,
whole_word_mask=whole_word_mask,
seed=seed)
train_ds = train_ds.map(data_aug_fn, batched=True)
print("Data augmentation has been applied.")
trans_fn = partial(
convert_example_for_distill,
task_name=task_name,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=max_seq_length,
vocab=vocab)
trans_fn_dev = partial(
convert_example_for_distill,
task_name=task_name,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=max_seq_length,
vocab=vocab,
is_tokenized=False)
if task_name == 'qqp':
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # bert input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # bert segment
Pad(axis=0, pad_val=pad_val), # small input_ids
Stack(dtype="int64"), # small seq len
Pad(axis=0, pad_val=pad_val), # small input_ids
Stack(dtype="int64"), # small seq len
Stack(dtype="int64") # small label
): fn(samples)
else:
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # bert input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # bert segment
Pad(axis=0, pad_val=pad_val), # small input_ids
Stack(dtype="int64"), # small seq len
Stack(dtype="int64") # small label
): fn(samples)
train_ds = train_ds.map(trans_fn, lazy=True)
dev_ds = dev_ds.map(trans_fn_dev, lazy=True)
train_data_loader, dev_data_loader = create_dataloader(
train_ds, dev_ds, batch_size, batchify_fn, shuffle)
return train_data_loader, dev_data_loader
def create_pair_loader_for_small_model(task_name,
model_name,
vocab_path,
batch_size=64,
max_seq_length=128,
shuffle=True,
is_test=False):
"""Only support QQP now."""
tokenizer = BertTokenizer.from_pretrained(model_name)
train_ds, dev_ds = load_dataset('glue', task_name, splits=["train", "dev"])
vocab = Vocab.load_vocabulary(
vocab_path,
unk_token='[UNK]',
pad_token='[PAD]',
bos_token=None,
eos_token=None, )
trans_func = partial(
convert_pair_example,
task_name=task_name,
vocab=tokenizer,
is_tokenized=False,
max_seq_length=max_seq_length,
is_test=is_test)
train_ds = train_ds.map(trans_func, lazy=True)
dev_ds = dev_ds.map(trans_func, lazy=True)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=vocab['[PAD]']), # input
Stack(), # length
Pad(axis=0, pad_val=vocab['[PAD]']), # input
Stack(), # length
Stack(dtype="int64" if train_ds.label_list else "float32") # label
): fn(samples)
train_data_loader, dev_data_loader = create_dataloader(
train_ds, dev_ds, batch_size, batchify_fn, shuffle)
return train_data_loader, dev_data_loader
def create_dataloader(train_ds, dev_ds, batch_size, batchify_fn, shuffle=True):
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_ds, batch_size=batch_size, shuffle=shuffle)
dev_batch_sampler = paddle.io.BatchSampler(
dev_ds, batch_size=batch_size, shuffle=False)
train_data_loader = paddle.io.DataLoader(
dataset=train_ds,
batch_sampler=train_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=True)
dev_data_loader = paddle.io.DataLoader(
dataset=dev_ds,
batch_sampler=dev_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=True)
return train_data_loader, dev_data_loader
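# A hedged end-to-end sketch of wiring the loaders above together for
# distillation; the task and model names are illustrative assumptions.
#
#   train_loader, dev_loader = create_distill_loader(
#       task_name="sst-2",
#       model_name="bert-base-uncased",
#       vocab_path=None,          # only consulted on the chnsenticorp branch
#       batch_size=64,
#       max_seq_length=128)
#   for step, batch in enumerate(train_loader):
#       bert_input_ids, bert_segment_ids, input_ids, seq_len, labels = batch
#       ...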
| 1,987
| 0
| 96
|
b3cfc2a4680ba5fb688c8c605dffbd3378abcff5
| 472
|
py
|
Python
|
pametis/__init__.py
|
avnr/pametis
|
1037c7b50e5825770f2296761f3a0ad3cb37eae4
|
[
"MIT"
] | null | null | null |
pametis/__init__.py
|
avnr/pametis
|
1037c7b50e5825770f2296761f3a0ad3cb37eae4
|
[
"MIT"
] | null | null | null |
pametis/__init__.py
|
avnr/pametis
|
1037c7b50e5825770f2296761f3a0ad3cb37eae4
|
[
"MIT"
] | null | null | null |
from .pametis import *
__all__ = [
'OPT',
'configure',
'reset',
'sitemap',
'PametisException',
'AmbiguousOptions',
'BadParam',
'PametisCacheError',
'BadDomain',
'CantRemove',
'Pametis_cache',
'Sql_cache',
'postgres',
'sqlite',
'Pametis_spider',
'file_spider',
'sitemap_spider',
]
__version__ = "0.4"
__version_info__ = ( 0, 4, 0 )
| 19.666667
| 30
| 0.489407
|
from .pametis import *
__all__ = [
'OPT',
'configure',
'reset',
'sitemap',
'PametisException',
'AmbiguousOptions',
'BadParam',
'PametisCacheError',
'BadDomain',
'CantRemove',
'Pametis_cache',
'Sql_cache',
'postgres',
'sqlite',
'Pametis_spider',
'file_spider',
'sitemap_spider',
]
__version__ = "0.4"
__version_info__ = ( 0, 4, 0 )
| 0
| 0
| 0
|
727b56502133746fee15b7edcec9513b698ea9ac
| 513
|
py
|
Python
|
proxy/parser.py
|
GavinHan/sina_weibo_crawler
|
5fcbd4007fb8d2fad1aa3ad68b73aec6b7669b49
|
[
"BSD-2-Clause"
] | 1
|
2016-03-15T16:21:28.000Z
|
2016-03-15T16:21:28.000Z
|
proxy/parser.py
|
GavinHan/sina_weibo_crawler
|
5fcbd4007fb8d2fad1aa3ad68b73aec6b7669b49
|
[
"BSD-2-Clause"
] | null | null | null |
proxy/parser.py
|
GavinHan/sina_weibo_crawler
|
5fcbd4007fb8d2fad1aa3ad68b73aec6b7669b49
|
[
"BSD-2-Clause"
] | null | null | null |
#coding: utf-8
import re
from pyquery import PyQuery as pq
from lxml import etree
page = '''
'''
doc = pq(page)
div = doc('div').find('.proxylistitem')
div.each(perser)
#print d('p') #返回<p>test 1</p><p>test 2</p>
#print d('p').html() #返回test 1
#print d('p').eq(1).html() #返回test 2
| 19.730769
| 65
| 0.608187
|
#coding: utf-8
import re
from pyquery import PyQuery as pq
from lxml import etree
page = '''
'''
def perser(i):
    # pyquery's .each() injects `this` (the current element) into the
    # callback's globals, so `pq(this)` wraps the element being iterated.
    node = pq(this)
    #import pdb; pdb.set_trace()
    ip = node.find('.tbBottomLine:first').html().strip()
    port = node.find('.tbBottomLine:first').next().html().strip()
    print('%s:%s' % (ip, port))
doc = pq(page)
div = doc('div').find('.proxylistitem')
div.each(perser)
#print d('p') #返回<p>test 1</p><p>test 2</p>
#print d('p').html() #返回test 1
#print d('p').eq(1).html() #返回test 2
| 203
| 0
| 23
|
a1b30ecc1b479a04796b2d974aafc93c7541b6f8
| 2,964
|
py
|
Python
|
picoCTF-web/api/common.py
|
minhnq1618/picoCTF
|
f634f0e55be6b1a8552a33e4f94e7487142e8bce
|
[
"MIT"
] | 280
|
2016-03-23T05:16:07.000Z
|
2022-03-25T10:45:33.000Z
|
picoCTF-web/api/common.py
|
minhnq1618/picoCTF
|
f634f0e55be6b1a8552a33e4f94e7487142e8bce
|
[
"MIT"
] | 384
|
2016-03-22T05:14:47.000Z
|
2021-09-13T23:46:14.000Z
|
picoCTF-web/api/common.py
|
minhnq1618/picoCTF
|
f634f0e55be6b1a8552a33e4f94e7487142e8bce
|
[
"MIT"
] | 142
|
2016-03-15T16:27:21.000Z
|
2022-02-23T23:41:28.000Z
|
"""Classes and functions used by multiple modules in the system."""
import uuid
from hashlib import md5
import bcrypt
from voluptuous import Invalid, MultipleInvalid
def token():
"""
Generate a random but insecure token.
Returns:
The randomly generated token
"""
return str(uuid.uuid4().hex)
def hash(string):
"""
Hash a string.
Args:
string: string to be hashed.
Returns:
The hex digest of the string.
"""
return md5(string.encode("utf-8")).hexdigest()
class PicoException(Exception):
"""
General class for exceptions in the picoCTF API.
Allows specification of a message and response code to display to the
client, as well as an optional field for arbitrary data.
The 'data' field will not be displayed to clients but will be stored
in the database, making it ideal for storing stack traces, etc.
"""
def __init__(self, message, status_code=500, data=None):
"""Initialize a new PicoException."""
Exception.__init__(self)
self.message = message
self.status_code = status_code
self.data = data
def to_dict(self):
"""Convert a PicoException to a dict for serialization."""
rv = dict()
rv["message"] = self.message
return rv
def check(*callback_tuples):
"""
Voluptuous wrapper function to raise our PicoException.
Args:
        callback_tuples: each callback_tuple should contain
                         (msg, callbacks)
Returns:
Returns a function callback for the Schema
"""
def v(value):
"""
Try to validate the value with the given callbacks.
Args:
value: the item to validate
Raises:
PicoException with 400 status code and error msg.
Returns:
The value if the validation callbacks are satisfied.
"""
for msg, callbacks in callback_tuples:
for callback in callbacks:
try:
result = callback(value)
if not result and type(result) == bool:
raise Invalid()
except Exception:
raise PicoException(msg, 400)
return value
return v
def validate(schema, data):
"""
Wrap the call to voluptuous schema to raise the proper exception.
Args:
schema: The voluptuous Schema object
data: The validation data for the schema object
Raises:
PicoException with 400 status code and the voluptuous error message
"""
try:
schema(data)
except MultipleInvalid as error:
raise PicoException(error.msg, 400)
def hash_password(password):
"""
Hash plaintext password.
Args:
password: plaintext password
Returns:
Secure hash of password.
"""
return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt(8))
| 23.903226
| 75
| 0.612686
|
"""Classes and functions used by multiple modules in the system."""
import uuid
from hashlib import md5
import bcrypt
from voluptuous import Invalid, MultipleInvalid
def token():
"""
Generate a random but insecure token.
Returns:
The randomly generated token
"""
return str(uuid.uuid4().hex)
def hash(string):
"""
Hash a string.
Args:
string: string to be hashed.
Returns:
The hex digest of the string.
"""
return md5(string.encode("utf-8")).hexdigest()
class PicoException(Exception):
"""
General class for exceptions in the picoCTF API.
Allows specification of a message and response code to display to the
client, as well as an optional field for arbitrary data.
The 'data' field will not be displayed to clients but will be stored
in the database, making it ideal for storing stack traces, etc.
"""
def __init__(self, message, status_code=500, data=None):
"""Initialize a new PicoException."""
Exception.__init__(self)
self.message = message
self.status_code = status_code
self.data = data
def to_dict(self):
"""Convert a PicoException to a dict for serialization."""
rv = dict()
rv["message"] = self.message
return rv
def check(*callback_tuples):
"""
Voluptuous wrapper function to raise our PicoException.
Args:
        callback_tuples: each callback_tuple should contain
                         (msg, callbacks)
Returns:
Returns a function callback for the Schema
"""
def v(value):
"""
Try to validate the value with the given callbacks.
Args:
value: the item to validate
Raises:
PicoException with 400 status code and error msg.
Returns:
The value if the validation callbacks are satisfied.
"""
for msg, callbacks in callback_tuples:
for callback in callbacks:
try:
result = callback(value)
if not result and type(result) == bool:
raise Invalid()
except Exception:
raise PicoException(msg, 400)
return value
return v
def validate(schema, data):
"""
Wrap the call to voluptuous schema to raise the proper exception.
Args:
schema: The voluptuous Schema object
data: The validation data for the schema object
Raises:
PicoException with 400 status code and the voluptuous error message
"""
try:
schema(data)
except MultipleInvalid as error:
raise PicoException(error.msg, 400)
def hash_password(password):
"""
Hash plaintext password.
Args:
password: plaintext password
Returns:
Secure hash of password.
"""
return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt(8))
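# A hedged sketch of combining `check` and `validate` with a voluptuous Schema;
# the field name and length rule below are illustrative assumptions, not
# schemas defined in this module.
#
#   from voluptuous import Schema, Required
#
#   user_schema = Schema({
#       Required("username"): check(
#           ("Usernames must be between 3 and 20 characters.",
#            [str, lambda name: 3 <= len(name) <= 20]),
#       )
#   })
#   validate(user_schema, {"username": "ab"})  # raises PicoException with a 400 status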
| 0
| 0
| 0
|
83823ad614f73f1c0d4cb2b4bbf506ba88b266f8
| 573
|
py
|
Python
|
src/active_learner.py
|
shenw33/ML_DLD
|
e83b5237a6f8dce6f9b347258f04b59345c59678
|
[
"BSD-3-Clause"
] | null | null | null |
src/active_learner.py
|
shenw33/ML_DLD
|
e83b5237a6f8dce6f9b347258f04b59345c59678
|
[
"BSD-3-Clause"
] | null | null | null |
src/active_learner.py
|
shenw33/ML_DLD
|
e83b5237a6f8dce6f9b347258f04b59345c59678
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import os
from keras import layers, optimizers
from keras.models import Model, Sequential
from keras.layers import Dense, LSTM, Dropout
from keras import optimizers, regularizers
from tensorflow import keras
from tensorflow.keras import layers
from train_model import *
if __name__ == "__main__":
learning_cycle = 0
for _ in range(learning_cycle):
mymodel = train()
multi_step_inference()
new_exp()
query_new_data()
| 20.464286
| 45
| 0.710297
|
import numpy as np
import matplotlib.pyplot as plt
import os
from keras import layers, optimizers
from keras.models import Model, Sequential
from keras.layers import Dense, LSTM, Dropout
from keras import optimizers, regularizers
from tensorflow import keras
from tensorflow.keras import layers
from train_model import *
def new_exp():
get_model()
load_data()
if __name__ == "__main__":
learning_cycle = 0
for _ in range(learning_cycle):
mymodel = train()
multi_step_inference()
new_exp()
query_new_data()
| 25
| 0
| 23
|
db1ede0805cc9287d3f9c347c54ac16a2d07b94d
| 374
|
py
|
Python
|
server/src/api/usecases/firebase.py
|
Mokumoku-Rin/client
|
7352c651edff79a28a0056d60d483a1753a7354d
|
[
"Apache-2.0"
] | null | null | null |
server/src/api/usecases/firebase.py
|
Mokumoku-Rin/client
|
7352c651edff79a28a0056d60d483a1753a7354d
|
[
"Apache-2.0"
] | 109
|
2020-03-02T08:44:44.000Z
|
2022-02-27T00:32:30.000Z
|
server/src/api/usecases/firebase.py
|
Mokumoku-Rin/client
|
7352c651edff79a28a0056d60d483a1753a7354d
|
[
"Apache-2.0"
] | 1
|
2020-03-19T06:13:33.000Z
|
2020-03-19T06:13:33.000Z
|
from firebase_admin import storage
| 31.166667
| 62
| 0.756684
|
from firebase_admin import storage
def fetch_landmark_image(filename: str) -> bytes:
bucket = storage.bucket()
blob = bucket.blob(filename)
b = blob.download_as_string(raw_download=True)
return b
def upload_landmark_image(filename: str, data: bytes) -> bool:
    blob = storage.bucket().blob(filename)
blob.upload_from_string(data, content_type="image/jpeg")
return True
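# A hedged usage sketch: firebase_admin must be initialised with a default
# storage bucket before these helpers are used; the credential path and bucket
# name below are placeholders, not values from this project.
#
#   import firebase_admin
#   from firebase_admin import credentials
#
#   cred = credentials.Certificate("path/to/serviceAccount.json")
#   firebase_admin.initialize_app(cred, {"storageBucket": "my-project.appspot.com"})
#
#   data = fetch_landmark_image("landmarks/sample.jpg")
#   upload_landmark_image("landmarks/sample_copy.jpg", data)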
| 294
| 0
| 46
|
6bbbf7ab3429580d63cba356479214e356e08185
| 16
|
py
|
Python
|
shapelet_features/shapelets/__init__.py
|
ratschlab/circEWS
|
b2b1f00dac4f5d46856a2c7abe2ca4f12d4c612d
|
[
"MIT"
] | 34
|
2020-03-17T16:42:00.000Z
|
2022-03-29T15:53:24.000Z
|
shapelet_features/utils/__init__.py
|
ranxiao/circEWS
|
1e52880c268f8f763bbc16763131634ffc217153
|
[
"MIT"
] | 3
|
2020-07-30T22:37:10.000Z
|
2021-08-10T00:02:30.000Z
|
shapelet_features/utils/__init__.py
|
ranxiao/circEWS
|
1e52880c268f8f763bbc16763131634ffc217153
|
[
"MIT"
] | 14
|
2020-04-22T01:13:54.000Z
|
2021-11-27T20:23:41.000Z
|
# Do not REMOVE
| 8
| 15
| 0.6875
|
# Do not REMOVE
| 0
| 0
| 0
|
f706c175ce9374eac08af908306915c436098b21
| 239
|
py
|
Python
|
gym_simpleflappy/__init__.py
|
jmathison/gym-simpleflappy
|
54acd54346f0ba4a611120a9ebba69acf0bae8b5
|
[
"MIT"
] | null | null | null |
gym_simpleflappy/__init__.py
|
jmathison/gym-simpleflappy
|
54acd54346f0ba4a611120a9ebba69acf0bae8b5
|
[
"MIT"
] | null | null | null |
gym_simpleflappy/__init__.py
|
jmathison/gym-simpleflappy
|
54acd54346f0ba4a611120a9ebba69acf0bae8b5
|
[
"MIT"
] | 1
|
2019-09-19T05:26:02.000Z
|
2019-09-19T05:26:02.000Z
|
from gym.envs.registration import register
register(
id='SimpleFlappy-v0',
entry_point='gym_simpleflappy.envs:FlappyEnv',
)
register(
id='SimpleFlappyDistance-v0',
entry_point='gym_simpleflappy.envs:FlappyEnvDistance',
)
| 19.916667
| 58
| 0.757322
|
from gym.envs.registration import register
register(
id='SimpleFlappy-v0',
entry_point='gym_simpleflappy.envs:FlappyEnv',
)
register(
id='SimpleFlappyDistance-v0',
entry_point='gym_simpleflappy.envs:FlappyEnvDistance',
)
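# A hedged usage sketch: importing this package runs the register() calls
# above, after which the ids can be created through the standard Gym API
# (the environment classes live in gym_simpleflappy.envs and are not shown here).
#
#   import gym
#   import gym_simpleflappy  # noqa: F401  -- import triggers registration
#
#   env = gym.make("SimpleFlappy-v0")
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())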
| 0
| 0
| 0
|
96c6bc44c1be86be9f511ff65006a4d582768b84
| 10,154
|
py
|
Python
|
gym_bandits/scoreboard.py
|
ThomasLecat/gym-bandits-environments
|
adafed5952e00f1601e8a5294078cf7a2e83c836
|
[
"MIT"
] | 11
|
2018-06-10T18:20:26.000Z
|
2021-09-02T03:25:29.000Z
|
gym_bandits/scoreboard.py
|
ThomasLecat/gym-bandits-environments
|
adafed5952e00f1601e8a5294078cf7a2e83c836
|
[
"MIT"
] | null | null | null |
gym_bandits/scoreboard.py
|
ThomasLecat/gym-bandits-environments
|
adafed5952e00f1601e8a5294078cf7a2e83c836
|
[
"MIT"
] | 4
|
2019-05-07T17:41:26.000Z
|
2020-10-08T21:02:40.000Z
|
from gym.scoreboard.registration import add_task, add_group
add_group(
id='bandits',
name='Bandits',
description='Various N-Armed Bandit environments'
)
add_task(
id='BanditTwoArmedDeterministicFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Simplest bandit where one action always pays, and the other never does.",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [1, 0]
r_dist = [1, 1]
""",
background=""
)
add_task(
id='BanditTwoArmedHighHighFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Stochastic version with a small difference between which bandit pays where both are likely",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [0.8, 0.9]
r_dist = [1, 1]
""",
background="Bandit B Figure 2.3 from Reinforcement Learning: An Introduction (Sutton & Barto) [link](https://webdocs.cs.ualberta.ca/~sutton/book/ebook/node18.html)"
)
add_task(
id='BanditTwoArmedLowLowFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Stochastic version with a small difference between which bandit pays where both are unlikley",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [0.1, 0.2]
r_dist = [1, 1]
""",
background="Bandit A Figure 2.3 from Reinforcement Learning: An Introduction (Sutton & Barto) [link](https://webdocs.cs.ualberta.ca/~sutton/book/ebook/node18.html)"
)
add_task(
id='BanditTwoArmedHighLowFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Stochastic version with a large difference between which bandit pays out of two choices",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [0.8, 0.2]
r_dist = [1, 1]
""",
background=""
)
add_task(
id='BanditTenArmedGaussian-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit mentioned with reward based on a Gaussian distribution",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [1] (* 10)
r_dist = [numpy.random.normal(0, 1), 1] (* 10)
Every bandit always pays out
Each action has a reward mean (selected from a normal distribution with mean 0 and std 1), and the actual
reward returns is selected with a std of 1 around the selected mean
""",
background="Described on page 30 of Sutton and Barto's [Reinforcement Learning: An Introduction](https://www.dropbox.com/s/b3psxv2r0ccmf80/book2015oct.pdf?dl=0)"
)
add_task(
id='BanditTenArmedRandomRandom-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit with random probabilities assigned to both payouts and rewards",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = numpy.random.uniform(size=10)
r_dist = numpy.random.uniform(size=10)
Bandits have uniform probability of paying out and payout a reward of uniform probability
""",
background=""
)
add_task(
id='BanditTenArmedRandomFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit with random probabilities assigned to how often the action will provide a reward",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = numpy.random.uniform(size=10)
r_dist = numpy.full(bandits, 1)
Bandits have a uniform probability of rewarding and always reward 1
""",
background=""
)
add_task(
id='BanditTenArmedUniformDistributedReward-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit with that always pays out with a reward selected from a uniform distribution",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = numpy.full(bandits, 1)
r_dist = numpy.random.uniform(size=10)
Bandits always pay out. Reward is selected from uniform distribution
""",
background="Based on comparisons from http://sudeepraja.github.io/Bandits/"
)
add_task(
id='BanditTwoArmedIndependentUniform-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Simple two independent armed bandit giving a reward of one with probabilities p_1 and p_2",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = np.random.uniform(2)
r_dist = [1, 1]
""",
    background="For the first experience, called 'Bandit with independent arms' of https://arxiv.org/abs/1611.05763"
)
add_task(
id='BanditTwoArmedDependentUniform-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0,1] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p = np.random.uniform()
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
    background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
)
add_task(
id='BanditTwoArmedDependentEasy-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0.1,0.9] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p = [0.1,0,9][np.random.randint(0,2)]
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
    background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
)
add_task(
id='BanditTwoArmedDependentMedium-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0.25,0.75] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p = [0.25,0,75][np.random.randint(0,2)]
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
    background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
)
add_task(
id='BanditTwoArmedDependentHard-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0.4,0.6] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p = [0.4,0,6][np.random.randint(0,2)]
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
    background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
)
add_task(
id='BanditEleveArmedWithIndex-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="11 armed bandit with deterministic payouts. \
Nine 'non-target' return a reward of 1.1, \
one 'target' returns a reward of 5, \
the 11th arm has reward = 0.1 * index of the target arm (ranging from 0.1 to 1.0)",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
index = np.random.randint(0,10)
p_dist = np.full(11,1)
r_dist = np.full(11,1.1)
r_dist[index] = 5
r_dist[-1] = 0.1*index
BanditEnv.__init__(self, p_dist = p_dist, r_dist = r_dist)
""",
    background="For the experience called 'Bandits with dependent arms (II)' of https://arxiv.org/abs/1611.05763"
)
| 38.755725
| 168
| 0.701792
|
from gym.scoreboard.registration import add_task, add_group
add_group(
id='bandits',
name='Bandits',
description='Various N-Armed Bandit environments'
)
add_task(
id='BanditTwoArmedDeterministicFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Simplest bandit where one action always pays, and the other never does.",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [1, 0]
r_dist = [1, 1]
""",
background=""
)
add_task(
id='BanditTwoArmedHighHighFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Stochastic version with a small difference between which bandit pays where both are likely",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [0.8, 0.9]
r_dist = [1, 1]
""",
background="Bandit B Figure 2.3 from Reinforcement Learning: An Introduction (Sutton & Barto) [link](https://webdocs.cs.ualberta.ca/~sutton/book/ebook/node18.html)"
)
add_task(
id='BanditTwoArmedLowLowFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Stochastic version with a small difference between which bandit pays where both are unlikley",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [0.1, 0.2]
r_dist = [1, 1]
""",
background="Bandit A Figure 2.3 from Reinforcement Learning: An Introduction (Sutton & Barto) [link](https://webdocs.cs.ualberta.ca/~sutton/book/ebook/node18.html)"
)
add_task(
id='BanditTwoArmedHighLowFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Stochastic version with a large difference between which bandit pays out of two choices",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [0.8, 0.2]
r_dist = [1, 1]
""",
background=""
)
add_task(
id='BanditTenArmedGaussian-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit mentioned with reward based on a Gaussian distribution",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [1] (* 10)
r_dist = [numpy.random.normal(0, 1), 1] (* 10)
Every bandit always pays out
Each action has a reward mean (selected from a normal distribution with mean 0 and std 1), and the actual
reward returns is selected with a std of 1 around the selected mean
""",
background="Described on page 30 of Sutton and Barto's [Reinforcement Learning: An Introduction](https://www.dropbox.com/s/b3psxv2r0ccmf80/book2015oct.pdf?dl=0)"
)
add_task(
id='BanditTenArmedRandomRandom-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit with random probabilities assigned to both payouts and rewards",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = numpy.random.uniform(size=10)
r_dist = numpy.random.uniform(size=10)
Bandits have uniform probability of paying out and payout a reward of uniform probability
""",
background=""
)
add_task(
id='BanditTenArmedRandomFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit with random probabilities assigned to how often the action will provide a reward",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = numpy.random.uniform(size=10)
r_dist = numpy.full(bandits, 1)
Bandits have a uniform probability of rewarding and always reward 1
""",
background=""
)
add_task(
id='BanditTenArmedUniformDistributedReward-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit with that always pays out with a reward selected from a uniform distribution",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = numpy.full(bandits, 1)
r_dist = numpy.random.uniform(size=10)
Bandits always pay out. Reward is selected from uniform distribution
""",
background="Based on comparisons from http://sudeepraja.github.io/Bandits/"
)
add_task(
id='BanditTwoArmedIndependentUniform-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Simple two independent armed bandit giving a reward of one with probabilities p_1 and p_2",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = np.random.uniform(2)
r_dist = [1, 1]
""",
    background="For the first experience, called 'Bandit with independent arms' of https://arxiv.org/abs/1611.05763"
)
add_task(
id='BanditTwoArmedDependentUniform-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0,1] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p = np.random.uniform()
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
    background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
)
add_task(
id='BanditTwoArmedDependentEasy-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0.1,0.9] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p = [0.1,0,9][np.random.randint(0,2)]
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
    background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
)
add_task(
id='BanditTwoArmedDependentMedium-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0.25,0.75] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p = [0.25,0,75][np.random.randint(0,2)]
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
    background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
)
add_task(
id='BanditTwoArmedDependentHard-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0.4,0.6] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p = [0.4,0,6][np.random.randint(0,2)]
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
    background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
)
add_task(
id='BanditEleveArmedWithIndex-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="11 armed bandit with deterministic payouts. \
Nine 'non-target' return a reward of 1.1, \
one 'target' returns a reward of 5, \
the 11th arm has reward = 0.1 * index of the target arm (ranging from 0.1 to 1.0)",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
index = np.random.randint(0,10)
p_dist = np.full(11,1)
r_dist = np.full(11,1.1)
r_dist[index] = 5
r_dist[-1] = 0.1*index
BanditEnv.__init__(self, p_dist = p_dist, r_dist = r_dist)
""",
    background="For the experience called 'Bandits with dependent arms (II)' of https://arxiv.org/abs/1611.05763"
)
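# A hedged note: add_task/add_group only attach scoreboard metadata; the ids
# are assumed to be registered as Gym environments elsewhere in gym_bandits,
# e.g. (illustrative only):
#
#   import gym
#   import gym_bandits  # noqa: F401
#
#   env = gym.make("BanditTenArmedGaussian-v0")
#   observation = env.reset()
#   observation, reward, done, info = env.step(env.action_space.sample())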
| 0
| 0
| 0
|
dbe5e2a057d822c9b7627e69b232caa21bca193c
| 5,368
|
py
|
Python
|
books/forms.py
|
burhan/hellowebbooks-website
|
96ca56f6d32716e5ce694664c760aa6a7bfce419
|
[
"MIT"
] | null | null | null |
books/forms.py
|
burhan/hellowebbooks-website
|
96ca56f6d32716e5ce694664c760aa6a7bfce419
|
[
"MIT"
] | null | null | null |
books/forms.py
|
burhan/hellowebbooks-website
|
96ca56f6d32716e5ce694664c760aa6a7bfce419
|
[
"MIT"
] | null | null | null |
from django import forms
from django.contrib.auth import forms as auth_forms
from django.contrib.auth.models import User
from books.widgets import NoNameTextInput
# TODO: Might be good to update this later to update the username too so we aren't doing two database saves
PRODUCTS = [
('ebook', 'eBook Only'),
('paperback', 'Paperback'),
('video', 'Video'),
]
| 47.087719
| 126
| 0.698398
|
from django import forms
from django.contrib.auth import forms as auth_forms
from django.contrib.auth.models import User
from django.core.exceptions import NON_FIELD_ERRORS
from books.widgets import NoNameTextInput
# TODO: Might be good to update this later to update the username too so we aren't doing two database saves
class EditEmailForm(forms.ModelForm):
class Meta:
model = User
fields = ('email',)
def __init__(self, *args, **kwargs):
super(EditEmailForm, self).__init__(*args, **kwargs)
self.fields['email'].label = "Update your email address"
class AddEmailForm(forms.Form):
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput)
def __init__(self, *args, **kwargs):
super(AddEmailForm, self).__init__(*args, **kwargs)
self.fields['email'].widget.attrs['class'] = 'form-control'
self.fields['email'].widget.attrs['placeholder'] = 'buttercup@florin.com'
self.fields['password'].widget.attrs['class'] = 'form-control'
def clean_email(self):
email = self.data['email']
if "@" not in email:
raise forms.ValidationError("Please enter a valid email address.")
return email
class CardForm(forms.Form):
last_4_digits = forms.CharField(required=True, min_length=4, max_length=4, widget=forms.HiddenInput())
stripe_token = forms.CharField(required=True, widget=forms.HiddenInput())
coupon = forms.CharField(required=False, widget=forms.HiddenInput())
def addError(self, message):
self._errors[NON_FIELD_ERRORS] = self.error_class([message])
class StripePaymentForm(CardForm):
card_number = forms.CharField(required=False, max_length=20, widget=NoNameTextInput())
card_cvc = forms.CharField(required=False, max_length=4, widget=NoNameTextInput())
card_expiry_month = forms.CharField(required=False, max_length=2, widget=NoNameTextInput())
card_expiry_year = forms.CharField(required=False, max_length=4, widget=NoNameTextInput())
#card_address_zip = forms.CharField(required=False, max_length=10, widget=NoNameTextInput(attrs={'style':'width:100px'}))
coupon_code = forms.CharField(required=False, max_length=20, widget=NoNameTextInput())
def __init__(self, *args, **kwargs):
super(StripePaymentForm, self).__init__(*args, **kwargs)
self.fields['card_number'].label = "Credit card number:"
self.fields['card_number'].widget.attrs['autocompletetype'] = 'cc-number'
self.fields['card_number'].widget.attrs['class'] = 'form-control card-number'
self.fields['card_cvc'].label = "Credit card CVC:"
self.fields['card_cvc'].widget.attrs['autocomplete'] = 'off'
self.fields['card_cvc'].widget.attrs['autocompletetype'] = 'cc-csc'
self.fields['card_cvc'].widget.attrs['pattern'] = '\d*'
self.fields['card_cvc'].widget.attrs['class'] = 'form-control card-cvc'
self.fields['card_cvc'].widget.attrs['style'] = 'display:inline-block;width:80px'
self.fields['card_expiry_month'].widget.attrs['placeholder'] = 'MM'
self.fields['card_expiry_month'].widget.attrs['pattern'] = '\d*'
self.fields['card_expiry_month'].widget.attrs['class'] = 'form-control card-expiry-month'
self.fields['card_expiry_month'].widget.attrs['style'] = 'display:inline-block;width:63px'
self.fields['card_expiry_year'].widget.attrs['placeholder'] = 'YYYY'
self.fields['card_expiry_year'].widget.attrs['pattern'] = '\d*'
self.fields['card_expiry_year'].widget.attrs['class'] = 'form-control card-expiry-year'
self.fields['card_expiry_year'].widget.attrs['style'] = 'display:inline-block;width:76px'
self.fields['coupon_code'].label = "Coupon code (optional):"
self.fields['coupon_code'].widget.attrs['class'] = 'form-control coupon-code'
def clean_card_number(self):
card_number = self.cleaned_data['card_number'].replace("-","").replace(" ","")
return card_number
class MyAuthenticationForm(auth_forms.AuthenticationForm):
def __init__(self, request=None, *args, **kwargs):
super(MyAuthenticationForm, self).__init__(*args, **kwargs)
self.fields['username'].label = "Email"
def clean_username(self):
# use the email address to get the username for the account
email = self.cleaned_data.get('username')
username = email.replace("@", "").replace(".", "")
return username
PRODUCTS = [
('ebook', 'eBook Only'),
('paperback', 'Paperback'),
('video', 'Video'),
]
class AdminAddCustomerForm(forms.Form):
email = forms.EmailField()
hello_web_app = forms.MultipleChoiceField(choices=PRODUCTS, widget=forms.CheckboxSelectMultiple(), required=False)
hello_web_design = forms.MultipleChoiceField(choices=PRODUCTS, widget=forms.CheckboxSelectMultiple(), required=False)
class AdminAddCustomerBulkForm(forms.Form):
emails = forms.CharField(widget=forms.Textarea)
hello_web_app = forms.MultipleChoiceField(choices=PRODUCTS, widget=forms.CheckboxSelectMultiple(), required=False)
hello_web_design = forms.MultipleChoiceField(choices=PRODUCTS, widget=forms.CheckboxSelectMultiple(), required=False)
def __init__(self, *args, **kwargs):
super(AdminAddCustomerBulkForm, self).__init__(*args, **kwargs)
self.fields['emails'].label = "Comma delimited emails"
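# A hedged sketch of MyAuthenticationForm's email-as-username convention; the
# credentials below are illustrative assumptions.
#
#   form = MyAuthenticationForm(data={
#       "username": "buttercup@florin.com",   # cleaned to "buttercupflorincom"
#       "password": "as-you-wish",
#   })
#   if form.is_valid():
#       user = form.get_user()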
| 2,895
| 1,879
| 213
|
9cfd1b79359a086dbd7fc0769ab8bcefa649fbcf
| 5,944
|
py
|
Python
|
test_discretization/test_reduction_diff_by_class.py
|
wsgan001/AnomalyDetection
|
397673dc6ce978361a3fc6f2fd34879f69bc962a
|
[
"MIT"
] | null | null | null |
test_discretization/test_reduction_diff_by_class.py
|
wsgan001/AnomalyDetection
|
397673dc6ce978361a3fc6f2fd34879f69bc962a
|
[
"MIT"
] | null | null | null |
test_discretization/test_reduction_diff_by_class.py
|
wsgan001/AnomalyDetection
|
397673dc6ce978361a3fc6f2fd34879f69bc962a
|
[
"MIT"
] | 1
|
2020-03-16T21:50:52.000Z
|
2020-03-16T21:50:52.000Z
|
# -*- coding: utf-8 -*-
"""
It generates plots that shows similarity for anomalies in each dataset.
"""
import copy
import math
import numpy as np
import matplotlib
import matplotlib.mlab
import matplotlib.pyplot as plt
from matplotlib import gridspec
import nslkdd.preprocessing as preprocessing
import nslkdd.data.model as model
if __name__ == '__main__':
import time
start = time.time()
df_training_20, df_training_full, gmms_training_20, gmms_training_full = preprocessing.get_preprocessed_training_data()
df_test_plus, df_test_21, gmms_test_plus, gmms_test_21 = preprocessing.get_preprocessed_test_data()
generate_plots_for_df(df_training_20, gmms_training_20, "training20")
generate_plots_for_df(df_training_full, gmms_training_full, "trainingfull")
generate_plots_for_df(df_test_plus, gmms_test_plus, "testplus")
generate_plots_for_df(df_test_21, gmms_test_21, "test21")
| 33.965714
| 126
| 0.621803
|
# -*- coding: utf-8 -*-
"""
It generates plots that shows similarity for anomalies in each dataset.
"""
import copy
import math
import numpy as np
import matplotlib
import matplotlib.mlab
import matplotlib.pyplot as plt
from matplotlib import gridspec
import nslkdd.preprocessing as preprocessing
import nslkdd.data.model as model
def get_score(gmm, value):
minval = 1e+20
minidx = -1
# one of distribution
# or density of distributions
for mi, _ in enumerate(gmm.means_):
det = abs(mi - value)
m1 = gmm.means_[mi]
c1 = gmm.covars_[mi]
if minval > det :
minval = matplotlib.mlab.normpdf(value,m1,np.sqrt(c1))[0][0]
minval = minval*len(gmm.means_)
sums = 0
for mi, _ in enumerate(gmm.means_) :
m1 = gmm.means_[mi]
c1 = gmm.covars_[mi]
w1 = gmm.weights_[mi]
ys = matplotlib.mlab.normpdf(value,m1,np.sqrt(c1))[0]*w1
sums = sums + ys[0]
# if sums > minval :
# print "=== sums ==="
# else :
# print "=== minval ==="
# print minval
# print sums
score = max(sums, minval)
if score == 0:
score = 1e-20
# print "score : " + str(score)
score = math.log(score)
return score
def generate_plots(df_abnormal, df_normal, headers, gmms, title, path="", protcls_name=""):
proj = []
gmm_normals = gmms[0]
gmm_abnormals = gmms[1]
fig, ax = plt.subplots()
plt.subplot(2, 1, 1)
plt.title("normal scores")
plt.subplot(2, 1, 2)
plt.title("abnormal scores")
for di, d in df_normal.iterrows() :
# print str(di) + "/" + str(len(df_normal))
normal_score = 0
abnormal_score = 0
normal_scores = []
abnormal_scores = []
for hi, header in enumerate(headers) :
if header in ["protocol_type", "attack", "difficulty"] :
continue
val = d[header]
gmm_normal = gmm_normals[hi]
gmm_abnormal = gmm_abnormals[hi]
score = get_score(gmm_normal,val)
normal_scores.append(score)
score = get_score(gmm_abnormal,val)
abnormal_scores.append(score)
xs = range(len(headers))
plt.subplot(2, 1, 1)
plt.plot(xs,normal_scores,color='y', lw=3)
plt.subplot(2, 1, 2)
plt.plot(xs,abnormal_scores,color='y', lw=3)
for di, d in df_abnormal.iterrows() :
        print(str(di) + "/" + str(len(df_abnormal)))
normal_score = 0
abnormal_score = 0
normal_scores = []
abnormal_scores = []
for hi, header in enumerate(headers) :
if header in ["protocol_type", "attack", "difficulty"] :
continue
val = d[header]
gmm_normal = gmm_normals[hi]
gmm_abnormal = gmm_abnormals[hi]
score = get_score(gmm_normal,val)
normal_scores.append(score)
score = get_score(gmm_abnormal,val)
abnormal_scores.append(score)
xs = range(len(headers))
plt.subplot(2, 1, 1)
plt.plot(xs,normal_scores,color='b', lw=1)
plt.subplot(2, 1, 2)
plt.plot(xs,abnormal_scores,color='b', lw=1)
# save and close
filename = "./plots/" + path + "/" + title + "_" + protcls_name + "_" + path + ".png"
    print(filename)
fig.savefig(filename)
plt.close()
def generate_plots_for_df(df, gmms, path="") :
headers, _ = preprocessing.get_header_data()
headers.remove('protocol_type')
headers.remove('attack')
headers.remove('difficulty')
# plot for classes
protocol_types = model.protocol_types #["udp","tcp","icmp"]
for protocol_index, protocol_type in enumerate(protocol_types):
gmm_normals = gmms[0][protocol_index]
gmm_abnormals = gmms[1][protocol_index]
# normal data
df_normal = copy.deepcopy(df)
df_normal = df_normal[(df_normal["attack"] == 11)] # only select for 1 class
df_normal = df_normal[(df_normal["protocol_type"] == protocol_index)]
df_normal.drop('attack',1,inplace=True) # remove useless
df_normal.drop('difficulty',1,inplace=True) # remove useless
df_normal.drop('protocol_type',1,inplace=True)
df_normal.reset_index(drop=True)
df_normal = df_normal[0:10]
# abnormal data
for i, attack_type in enumerate(model.attack_types) :
if i == 11 :
continue
df_abnormal = copy.deepcopy(df)
df_abnormal = df_abnormal[(df_abnormal["attack"] == i)] # only select for 1 class
df_abnormal = df_abnormal[(df_abnormal["protocol_type"] == protocol_index)]
if 1 > len(df_abnormal) :
continue
df_abnormal.drop('attack',1,inplace=True) # remove useless
df_abnormal.drop('difficulty',1,inplace=True) # remove useless
df_abnormal.drop('protocol_type',1,inplace=True)
df_abnormal.reset_index(drop=True)
df_abnormal = df_abnormal[0:10]
gmm_normals_protcl = gmms[0][protocol_index]
gmm_abnormals_protcl = gmms[1][protocol_index]
gmms_protcl = [gmm_normals_protcl, gmm_abnormals_protcl]
generate_plots(df_abnormal, df_normal, headers, gmms_protcl, attack_type, path=path, protcls_name = protocol_type)
if __name__ == '__main__':
import time
start = time.time()
df_training_20, df_training_full, gmms_training_20, gmms_training_full = preprocessing.get_preprocessed_training_data()
df_test_plus, df_test_21, gmms_test_plus, gmms_test_21 = preprocessing.get_preprocessed_test_data()
generate_plots_for_df(df_training_20, gmms_training_20, "training20")
generate_plots_for_df(df_training_full, gmms_training_full, "trainingfull")
generate_plots_for_df(df_test_plus, gmms_test_plus, "testplus")
generate_plots_for_df(df_test_21, gmms_test_21, "test21")
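# Note for reuse: matplotlib.mlab.normpdf, which get_score() above relies on,
# has been removed from recent matplotlib releases. A minimal equivalent sketch
# using scipy.stats.norm (assumes scipy is available); numerically it matches
# the old normpdf(x, mu, sigma) call.
from scipy.stats import norm

def normpdf(x, mu, sigma):
    return norm.pdf(x, loc=mu, scale=sigma)

# e.g. density of one GMM component at a value:
# normpdf(0.3, gmm.means_[0], np.sqrt(gmm.covars_[0]))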
| 4,959 | 0 | 69 |
dfa8e89860cca2eb77dbf1ea4151fd82a146dc80 | 7,277 | py | Python |
src/tests/test_aligngraph.py | dthadi3/pbdagcon | c14c422e609a914f0139f7222202ac1bce7e3ef1 | ["BSD-3-Clause-Clear"] | 31 | 2015-02-24T19:17:22.000Z | 2022-02-23T18:45:36.000Z |
src/tests/test_aligngraph.py | dthadi3/pbdagcon | c14c422e609a914f0139f7222202ac1bce7e3ef1 | ["BSD-3-Clause-Clear"] | 44 | 2015-06-04T00:03:39.000Z | 2018-04-27T05:16:59.000Z |
src/tests/test_aligngraph.py | dthadi3/pbdagcon | c14c422e609a914f0139f7222202ac1bce7e3ef1 | ["BSD-3-Clause-Clear"] | 31 | 2015-01-26T09:59:29.000Z | 2022-02-23T18:45:40.000Z |
from nose.tools import assert_equal
from nose import SkipTest
import random
from pbtools.pbdagcon.aligngraph import *
| 35.497561 | 86 | 0.615776 |
from nose.tools import assert_equal
from nose import SkipTest
import random
from pbtools.pbdagcon.aligngraph import *
def generate_simulated_reads(pi=None, pd=None, n = 4):
import random
random.seed(42)
seq = "ATATTTGGC"
seq1 = "ATAGCCGGC"
seq2 = "ATACCCGGC"
seq3 = "ATATCCGGC"
seq4 = "ATATCGGC"
    if pi is None:
        pi = 0.03
    if pd is None:
        pd = 0.03
out_seq = []
for i in range(n):
c = 0
s = []
if i % 4 == 0:
ss = seq1
elif i % 4 == 1:
ss = seq2
elif i % 4 == 2:
ss = seq3
else:
ss = seq4
while 1:
if random.uniform(0,1) < pi:
s.append(random.choice( ("A","G","C","T") ) )
continue
if random.uniform(0,1) < pd:
c += 1
continue
if c < len(ss):
s.append(ss[c])
c += 1
else:
break
out_seq.append( "".join(s) )
return seq, out_seq
class TestPhiCoeff:
def test_phi_coeff(self):
# assert_equal(expected, phi_coeff(xvec, yvec))
raise SkipTest # TODO: implement your test here
class TestConvertMismatches:
def test_convert_mismatches(self):
assert_equal( ('C-AC', 'CG-C'), convert_mismatches("CAC","CGC") )
assert_equal( ('CAACAT', 'CAA--T'), convert_mismatches("CAACAT","C-A-AT" ) )
assert_equal( ('CCG--T', 'CCGACT'), convert_mismatches("-C--CGT","CCGAC-T") )
class TestAlnEdge:
def test___init__(self):
# aln_edge = AlnEdge(in_node, out_node)
raise SkipTest # TODO: implement your test here
def test___repr__(self):
# aln_edge = AlnEdge(in_node, out_node)
# assert_equal(expected, aln_edge.__repr__())
raise SkipTest # TODO: implement your test here
def test_add_to_score(self):
# aln_edge = AlnEdge(in_node, out_node)
# assert_equal(expected, aln_edge.add_to_score(s))
raise SkipTest # TODO: implement your test here
def test_increase_count(self):
# aln_edge = AlnEdge(in_node, out_node)
# assert_equal(expected, aln_edge.increase_count())
raise SkipTest # TODO: implement your test here
def test_set_score(self):
# aln_edge = AlnEdge(in_node, out_node)
# assert_equal(expected, aln_edge.set_score(s))
raise SkipTest # TODO: implement your test here
class TestAlnNode:
def test___init__(self):
# aln_node = AlnNode(base)
raise SkipTest # TODO: implement your test here
def test___repr__(self):
# aln_node = AlnNode(base)
# assert_equal(expected, aln_node.__repr__())
raise SkipTest # TODO: implement your test here
def test_add_in_edge(self):
# aln_node = AlnNode(base)
# assert_equal(expected, aln_node.add_in_edge(in_edge))
raise SkipTest # TODO: implement your test here
def test_addout_edge(self):
# aln_node = AlnNode(base)
# assert_equal(expected, aln_node.addout_edge(out_edge))
raise SkipTest # TODO: implement your test here
def test_increase_weight(self):
# aln_node = AlnNode(base)
# assert_equal(expected, aln_node.increase_weight(w))
raise SkipTest # TODO: implement your test here
class TestAlnGraph:
def test___init__(self):
backbone_seq, reads = generate_simulated_reads()
aln_graph = AlnGraph(backbone_seq)
assert len(aln_graph.nodes) == len(backbone_seq) + 2
def test_add_alignment(self):
aln_graph = AlnGraph("ATATTAGGC")
alns = [((0, 9, 'A-TAGCCGGC'), (2, 9, 'ATTA---GGC')),
((0, 10, 'ATA-TACCGAG-'), (0, 9, 'ATATTA--G-GC')),
((0, 10, 'ATCATCC--GGC'), (0, 9, 'AT-AT--TAGGC')),
((0, 9, 'ATA-TACGGC'), (0, 9, 'ATATTA-GGC'))]
for aln in alns:
aln_graph.add_alignment( aln )
assert len(aln_graph.nodes) != 0
assert len(aln_graph.edges) != 0
def test_add_edge(self):
# aln_graph = AlnGraph(backbone_seq)
# assert_equal(expected, aln_graph.add_edge(edge))
raise SkipTest # TODO: implement your test here
def test_add_node(self):
# aln_graph = AlnGraph(backbone_seq)
# assert_equal(expected, aln_graph.add_node(node))
raise SkipTest # TODO: implement your test here
def test_delete_edge(self):
# aln_graph = AlnGraph(backbone_seq)
# assert_equal(expected, aln_graph.delete_edge(edge))
raise SkipTest # TODO: implement your test here
def test_delete_node(self):
# aln_graph = AlnGraph(backbone_seq)
# assert_equal(expected, aln_graph.delete_node(node))
raise SkipTest # TODO: implement your test here
def test_find_best_path(self):
# aln_graph = AlnGraph(backbone_seq)
# assert_equal(expected, aln_graph.find_best_path())
raise SkipTest # TODO: implement your test here
def test_generate_consensus(self):
# aln_graph = AlnGraph(backbone_seq)
# assert_equal(expected, aln_graph.generate_consensus())
raise SkipTest # TODO: implement your test here
def test_merge_in_nodes(self):
# aln_graph = AlnGraph(backbone_seq)
# assert_equal(expected, aln_graph.merge_in_nodes(nodes, node))
raise SkipTest # TODO: implement your test here
def test_merge_nodes(self):
aln_graph = AlnGraph("ATAATTGGC")
alns = [((0, 9, 'ATAG--CTGGC'), (0, 9, 'ATA-AT-TGGC')),
((0, 9, 'ATAG--CTGGC'), (0, 9, 'ATA-AT-TGGC')),
((0, 9, 'ATAG-TTGGC'), (0, 9, 'ATA-ATTGGC')),
((0, 9, 'ATAG-TTGGC'), (0, 9, 'ATA-ATTGGC')),
((0, 9, 'ATAG--CTGGC'), (0, 9, 'ATA-AT-TGGC'))]
for aln in alns:
aln_graph.add_alignment( aln )
aln_graph.merge_nodes()
def test_merge_out_nodes(self):
# aln_graph = AlnGraph(backbone_seq)
# assert_equal(expected, aln_graph.merge_out_nodes(node, nodes))
raise SkipTest # TODO: implement your test here
def test_output_consensus_fasta(self):
# aln_graph = AlnGraph(backbone_seq)
# assert_equal(expected, aln_graph.output_consensus_fasta(fn, rID))
raise SkipTest # TODO: implement your test here
def test_track_path(self):
# aln_graph = AlnGraph(backbone_seq)
# assert_equal(expected, aln_graph.track_path(seq, node))
raise SkipTest # TODO: implement your test here
class TestOutputDot:
def test_output_dot(self):
# assert_equal(expected, output_dot(aln_graph, fn, r))
raise SkipTest # TODO: implement your test here
class TestOutputDot2:
def test_output_dot_2(self):
# assert_equal(expected, output_dot_2(aln_graph, fn))
raise SkipTest # TODO: implement your test here
class TestGenerateSimulatedReads:
def test_generate_simulated_reads(self):
# assert_equal(expected, generate_simulated_reads())
raise SkipTest # TODO: implement your test here
class TestSimpleTest:
def test_simple_test(self):
# assert_equal(expected, simple_test())
raise SkipTest # TODO: implement your test here
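# A small, self-contained usage sketch of the read simulator defined above --
# not part of the original test module, and it assumes the module's imports
# resolve. It only shows the shape of the return value: a reference sequence
# plus n noisy reads.
if __name__ == "__main__":
    ref, reads = generate_simulated_reads(pi=0.05, pd=0.05, n=4)
    print("reference:", ref)
    for read in reads:
        print("simulated read:", read)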
| 6,144 | 8 | 1,005 |
d8a9c2251631c9b59e6ef4e09bf81e4fe06fc445 | 2,005 | py | Python |
ggongsul/visitation/serializers.py | blc-cruise/ggongsul-api | 0cdfc09ea75688ffc297bc0c0f08897091896f3e | ["MIT"] | 2 | 2021-05-22T07:33:34.000Z | 2021-09-18T04:22:25.000Z |
ggongsul/visitation/serializers.py | blc-cruise/ggongsul-api | 0cdfc09ea75688ffc297bc0c0f08897091896f3e | ["MIT"] | null | null | null |
ggongsul/visitation/serializers.py | blc-cruise/ggongsul-api | 0cdfc09ea75688ffc297bc0c0f08897091896f3e | ["MIT"] | null | null | null |
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from ggongsul.member.models import Member
from ggongsul.member.serializers import MemberSerializer
from ggongsul.partner.models import Partner
from ggongsul.partner.serializers import PartnerShortInfoSerializer
from ggongsul.visitation.models import Visitation
| 33.416667 | 82 | 0.672319 |
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from ggongsul.member.models import Member
from ggongsul.member.serializers import MemberSerializer
from ggongsul.partner.models import Partner
from ggongsul.partner.serializers import PartnerShortInfoSerializer
from ggongsul.visitation.models import Visitation
class VisitationSerializer(serializers.ModelSerializer):
cert_num = serializers.CharField(
required=True, allow_null=False, max_length=10, write_only=True
)
def validate(self, attrs: dict):
attrs["member"] = self.context["request"].user
member: Member = attrs["member"]
        # partner password validation
        partner: Partner = attrs["partner"]
        if not partner.is_active:
            raise ValidationError(_("활성화 되지 않은 업체입니다."))  # "This partner is not active."
        cert_num = attrs["cert_num"]
        if partner.cert_num != cert_num:
            raise ValidationError(_("인증번호가 일치하지 않습니다."))  # "The certification number does not match."
        del attrs["cert_num"]
        # a member may certify a visit to a given partner only once per day
        today_datetime = timezone.now().replace(
            hour=0, minute=0, second=0, microsecond=0
        )
        if member.visitations.filter(
            partner=partner, created_on__gt=today_datetime
        ).exists():
            raise ValidationError(_("오늘 이미 방문한 업체입니다."))  # "This partner has already been visited today."
return attrs
class Meta:
model = Visitation
fields = ["id", "partner", "member", "cert_num"]
extra_kwargs = {
"partner": {"required": True, "allow_null": False},
"member": {"required": True, "allow_null": False, "write_only": True},
}
class VisitationInfoSerializer(serializers.ModelSerializer):
member = MemberSerializer(read_only=True)
partner = PartnerShortInfoSerializer(read_only=True)
class Meta:
model = Visitation
fields = ["id", "member", "partner", "is_reviewed", "created_on"]
| 897 | 721 | 46 |
84d6e4d646bae0fc1e7b329a0d2484ed91b465ac | 6,596 | py | Python |
app/main.py | Ackaman/starter-snake-python | 450d24c72b9f3af6bffaef2369913bd4d827acf2 | ["MIT"] | null | null | null |
app/main.py | Ackaman/starter-snake-python | 450d24c72b9f3af6bffaef2369913bd4d827acf2 | ["MIT"] | null | null | null |
app/main.py | Ackaman/starter-snake-python | 450d24c72b9f3af6bffaef2369913bd4d827acf2 | ["MIT"] | null | null | null |
import json
import os
import random
import bottle
from api import ping_response, start_response, move_response, end_response
# Moving towards a tail is safe as long as that snake does not have food within reach.
# If it is the only possible move, that move should be made anyway.
@bottle.route('/')
@bottle.route('/static/<path:path>')
def static(path):
"""
Given a path, return the static file located relative
to the static folder.
This can be used to return the snake head URL in an API response.
"""
return bottle.static_file(path, root='static/')
@bottle.post('/ping')
def ping():
"""
A keep-alive endpoint used to prevent cloud application platforms,
such as Heroku, from sleeping the application instance.
"""
return ping_response()
@bottle.post('/start')
@bottle.post('/move')
# int x,y or tuple (NEXT STEP)
##Only looks for dead end
##def snake_head_area(snake_heads, my_head):
## avoid_heads = []
## snake_heads1 = snake_heads
## snake_heads1.remove(my_head)
##
## for heads in snake_heads1:
## avoid_heads.append((heads[0]+1, heads[1]))
## avoid_heads.append((heads[0] - 1, heads[1]))
## avoid_heads.append((heads[0], heads[1] + 1))
## avoid_heads.append((heads[0], heads[1] - 1))
##
## return avoid_heads
# def safetyLevel(x,y, stuffToAvoid):
@bottle.post('/end')
# Expose WSGI app (so gunicorn can find it)
application = bottle.default_app()
if __name__ == '__main__':
bottle.run(
application,
host=os.getenv('IP', '0.0.0.0'),
port=os.getenv('PORT', '8080'),
debug=os.getenv('DEBUG', True)
)
| 27.831224 | 111 | 0.626289 |
import json
import os
import random
import bottle
from api import ping_response, start_response, move_response, end_response
# Moving towards a tail is safe as long as that snake does not have food within reach.
# If it is the only possible move, that move should be made anyway.
@bottle.route('/')
def index():
return '''
Battlesnake documentation can be found at
<a href="https://docs.battlesnake.io">https://docs.battlesnake.io</a>.
'''
@bottle.route('/static/<path:path>')
def static(path):
"""
Given a path, return the static file located relative
to the static folder.
This can be used to return the snake head URL in an API response.
"""
return bottle.static_file(path, root='static/')
@bottle.post('/ping')
def ping():
"""
A keep-alive endpoint used to prevent cloud application platforms,
such as Heroku, from sleeping the application instance.
"""
return ping_response()
@bottle.post('/start')
def start():
data = bottle.request.json
"""
TODO: If you intend to have a stateful snake AI,
initialize your snake state here using the
request's data if necessary.
"""
print(json.dumps(data))
color = "#00FF00"
return start_response(color)
@bottle.post('/move')
def move():
data = bottle.request.json
foodposition = []
for food in data['food']['data']:
foodposition.append((food['x'], food['y']))
my_head = (data['you']['body']['data'][0]['x'], data['you']['body']['data'][0]['y'])
my_length = len((data['you']['body']['data']))
snakePositions = []
myPositions = []
for pos in data['you']['body']['data']:
myPositions.append((pos['x'], pos['y']))
snake_heads = []
    for snakes in data['snakes']['data']:  # all snakes
        x = snakes['body']['data'][0]['x']
        y = snakes['body']['data'][0]['y']
        snake_heads.append((x, y))
        for pos in snakes['body']['data']:  # all of this snake's positions
            snakePositions.append((pos['x'], pos['y']))
snake_heads.remove(my_head)
snake_head_area = []
for snake_head in snake_heads:
snake_head_area.append((snake_head[0]-1, snake_head[1]))
snake_head_area.append((snake_head[0]+1, snake_head[1]))
snake_head_area.append((snake_head[0], snake_head[1]+1))
snake_head_area.append((snake_head[0], snake_head[1]-1))
walls = []
width = data['height']
for i in range(width + 1):
walls.append((0 - 1, i))
walls.append((i, 0 - 1))
walls.append((width, i))
walls.append((i, width))
stuffToAvoid = []
for position in myPositions:
stuffToAvoid.append(position)
for position in walls:
stuffToAvoid.append(position)
for position in snakePositions:
stuffToAvoid.append(position)
xhead = my_head[0]
yhead = my_head[1]
possiblemoves = []
if (xhead + 1, yhead) not in stuffToAvoid and safe_path(xhead + 1, yhead, stuffToAvoid):
possiblemoves.append('right')
if (xhead, yhead + 1) not in stuffToAvoid and safe_path(xhead, yhead + 1, stuffToAvoid):
possiblemoves.append('down')
if (xhead - 1, yhead) not in stuffToAvoid and safe_path(xhead - 1, yhead, stuffToAvoid):
possiblemoves.append('left')
if (xhead, yhead - 1) not in stuffToAvoid and safe_path(xhead, yhead - 1, stuffToAvoid):
possiblemoves.append('up')
##Find closest food
currentDist = 1000000
for i in foodposition:
xfood = i[0]
yfood = i[1]
dist = ((abs(xhead - xfood)) + (abs(yhead - yfood)))
if (dist < currentDist):
closestFoodPos = (xfood, yfood)
currentDist = dist
xdistancetofood = abs(xhead - closestFoodPos[0])
ydistancetofood = abs(yhead - closestFoodPos[1])
# foodtotheright = ((xhead - closestFoodPos[0]) < 0)
# foodtothetop = ((yhead - closestFoodPos[1]) > 0)
prioritymoves = []
if (xdistancetofood >= ydistancetofood) and ((xhead - closestFoodPos[0]) < 0) and 'right' in possiblemoves:
prioritymoves.append('right')
if (xdistancetofood >= ydistancetofood) and ((xhead - closestFoodPos[0]) > 0) and 'left' in possiblemoves:
prioritymoves.append('left')
if (ydistancetofood >= xdistancetofood) and ((yhead - closestFoodPos[1]) > 0) and 'up' in possiblemoves:
prioritymoves.append('up')
if (ydistancetofood >= xdistancetofood) and ((yhead - closestFoodPos[1]) < 0) and 'down' in possiblemoves:
prioritymoves.append('down')
if (xhead + 1, yhead) in snake_head_area and 'right' in prioritymoves:
prioritymoves.remove('right')
# prioritymoves.append('right')
if (xhead - 1, yhead) in snake_head_area and 'left' in prioritymoves:
prioritymoves.remove('left')
# prioritymoves.append('left')
if (xhead, yhead + 1) in snake_head_area and 'down' in prioritymoves:
prioritymoves.remove('down')
# prioritymoves.append('down')
if (xhead, yhead - 1) in snake_head_area and 'up' in prioritymoves:
prioritymoves.remove('up')
# prioritymoves.append('up')
prioritymoves.append(random.choice(possiblemoves))
direction = prioritymoves[0]
return move_response(direction)
# int x,y or tuple (NEXT STEP)
##Only looks for dead end
def safe_path(x, y, stuffToAvoid):
right = (x + 1, y)
left = (x - 1, y)
down = (x, y + 1)
up = (x, y - 1)
if right in stuffToAvoid and left in stuffToAvoid and down in stuffToAvoid and up in stuffToAvoid:
safe = False
else:
safe = True
return safe
##def snake_head_area(snake_heads, my_head):
## avoid_heads = []
## snake_heads1 = snake_heads
## snake_heads1.remove(my_head)
##
## for heads in snake_heads1:
## avoid_heads.append((heads[0]+1, heads[1]))
## avoid_heads.append((heads[0] - 1, heads[1]))
## avoid_heads.append((heads[0], heads[1] + 1))
## avoid_heads.append((heads[0], heads[1] - 1))
##
## return avoid_heads
# def safetyLevel(x,y, stuffToAvoid):
@bottle.post('/end')
def end():
data = bottle.request.json
"""
TODO: If your snake AI was stateful,
clean up any stateful objects here.
"""
print(json.dumps(data))
return end_response()
# Expose WSGI app (so gunicorn can find it)
application = bottle.default_app()
if __name__ == '__main__':
bottle.run(
application,
host=os.getenv('IP', '0.0.0.0'),
port=os.getenv('PORT', '8080'),
debug=os.getenv('DEBUG', True)
)
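# safe_path() above only rejects immediate dead ends (see the "NEXT STEP"
# comment). A minimal sketch of a flood-fill that counts reachable free squares,
# which could replace that check; board_width and stuff_to_avoid follow the same
# conventions as the code above, and the threshold choice is an assumption.
def reachable_area(start, stuff_to_avoid, board_width, limit=None):
    blocked = set(stuff_to_avoid)
    seen = {start}
    stack = [start]
    count = 0
    while stack:
        x, y = stack.pop()
        count += 1
        if limit is not None and count >= limit:
            break
        for nxt in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if (0 <= nxt[0] < board_width and 0 <= nxt[1] < board_width
                    and nxt not in blocked and nxt not in seen):
                seen.add(nxt)
                stack.append(nxt)
    return count

# e.g. treat a move as safe only if at least my_length squares stay reachable:
# safe = reachable_area((xhead + 1, yhead), stuffToAvoid, width, limit=my_length) >= my_length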
| 4,830 | 0 | 110 |
83b39d103baf95f5e28b9da6371e0b29b75f4428 | 17,696 | py | Python |
pygsti/report/report.py | drewrisinger/pyGSTi | dd4ad669931c7f75e026456470cf33ac5b682d0d | ["Apache-2.0"] | 1 | 2021-12-19T15:11:09.000Z | 2021-12-19T15:11:09.000Z |
pygsti/report/report.py | drewrisinger/pyGSTi | dd4ad669931c7f75e026456470cf33ac5b682d0d | ["Apache-2.0"] | null | null | null |
pygsti/report/report.py | drewrisinger/pyGSTi | dd4ad669931c7f75e026456470cf33ac5b682d0d | ["Apache-2.0"] | null | null | null |
""" Internal model of a report during generation """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import time as _time
import warnings as _warnings
from pathlib import Path as _Path
import shutil as _shutil
from collections import defaultdict as _defaultdict, OrderedDict as _OrderedDict
import pickle as _pickle
from . import autotitle as _autotitle
from . import merge_helpers as _merge
from .. import _version, tools as _tools
from ..objects import VerbosityPrinter as _VerbosityPrinter, ExplicitOpModel as _ExplicitOpModel
from . import workspace as _ws
from .notebook import Notebook as _Notebook
# TODO this whole thing needs to be rewritten with different reports as derived classes
class Report:
""" The internal model of a report.
This class should never be instantiated directly. Instead, users
should use the appropriate factory method in
`pygsti.report.factory`.
"""
def _build(self, build_options=None):
""" Render all sections to a map of report elements for templating """
full_params = {
'results': self._results,
**self._report_params
}
full_params.update(self._build_defaults)
full_params.update(build_options or {})
qtys = self._global_qtys.copy()
for section in self._sections:
qtys.update(section.render(self._workspace, **full_params))
return qtys
def write_html(self, path, auto_open=False, link_to=None,
connected=False, build_options=None, brevity=0,
precision=None, resizable=True, autosize='initial',
single_file=False, verbosity=0):
""" Write this report to the disk as a collection of HTML documents.
Parameters
----------
path : str or path-like object
The filesystem path of a directory to write the report
to. If the specified directory does not exist, it will be
created automatically
auto_open : bool, optional
Whether the output file should be automatically opened in a web browser.
link_to : list, optional
If not None, a list of one or more items from the set
{"tex", "pdf", "pkl"} indicating whether or not to
create and include links to Latex, PDF, and Python pickle
files, respectively.
connected : bool, optional
Whether output HTML should assume an active internet connection. If
True, then the resulting HTML file size will be reduced because it
will link to web resources (e.g. CDN libraries) instead of embedding
them.
build_options : dict
Dict of options for building plots. Expected values are
defined during construction of this report object.
brevity : int, optional
            Amount of detail to include in the report. Larger values mean
            smaller, briefer reports, which reduce generation time, load time, and
            disk space consumption. In particular:
            - 1: Plots showing per-sequence quantities disappear at brevity=1
- 2: Reference sections disappear at brevity=2
- 3: Germ-level estimate tables disappear at brevity=3
- 4: Everything but summary figures disappears at brevity=4
precision : int or dict, optional
The amount of precision to display. A dictionary with keys
"polar", "sci", and "normal" can separately specify the
precision for complex angles, numbers in scientific notation, and
            everything else, respectively. If an integer is given, this
same value is taken for all precision types. If None, then
`{'normal': 6, 'polar': 3, 'sci': 0}` is used.
resizable : bool, optional
Whether plots and tables are made with resize handles and can be
resized within the report.
autosize : {'none', 'initial', 'continual'}
Whether tables and plots should be resized, either initially --
i.e. just upon first rendering (`"initial"`) -- or whenever
the browser window is resized (`"continual"`).
single_file : bool, optional
If true, the report will be written to a single HTML
document, with external dependencies baked-in. This mode
is not recommended for large reports, because this file
can grow large enough that major web browsers may struggle
to render it.
verbosity : int, optional
Amount of detail to print to stdout.
"""
build_options = build_options or {}
toggles = _defaultdict(lambda: False)
toggles.update(
{k: True for k in self._flags}
)
for k in range(brevity, 4):
toggles['BrevityLT' + str(k + 1)] = True
# Render sections
qtys = self._build(build_options)
# TODO this really should be a parameter of this method
embed_figures = self._report_params.get('embed_figures', True)
if single_file:
assert(embed_figures), \
"Single-file mode requires `embed_figures` to be True"
_merge.merge_jinja_template(
qtys, path, templateDir=self._templates['html'],
auto_open=auto_open, precision=precision,
link_to=link_to, connected=connected, toggles=toggles,
renderMath=True, resizable=resizable,
autosize=autosize, verbosity=verbosity
)
else:
_merge.merge_jinja_template_dir(
qtys, path, templateDir=self._templates['html'],
auto_open=auto_open, precision=precision,
link_to=link_to, connected=connected, toggles=toggles,
renderMath=True, resizable=resizable,
autosize=autosize, embed_figures=embed_figures,
verbosity=verbosity
)
def write_notebook(self, path, auto_open=False, connected=False, verbosity=0):
""" Write this report to the disk as an IPython notebook
A notebook report allows the user to interact more flexibly with the data
underlying the figures, and to easily generate customized variants on the
figures. As such, this type of report will be most useful for experts
who want to tinker with the standard analysis presented in the static
HTML or LaTeX format reports.
Parameters
----------
path : str or path-like object
The filesystem path to write the report to. By convention,
this should use the `.ipynb` file extension.
auto_open : bool, optional
If True, automatically open the report in a web browser after it
has been generated.
connected : bool, optional
Whether output notebook should assume an active internet connection. If
True, then the resulting file size will be reduced because it will link
to web resources (e.g. CDN libraries) instead of embedding them.
verbosity : int, optional
How much detail to send to stdout.
"""
# TODO this only applies to standard reports; rewrite generally
title = self._global_qtys['title']
confidenceLevel = self._report_params['confidence_level']
path = _Path(path)
printer = _VerbosityPrinter.build_printer(verbosity)
templatePath = _Path(__file__).parent / 'templates' / self._templates['notebook']
outputDir = path.parent
#Copy offline directory into position
if not connected:
_merge.rsync_offline_dir(outputDir)
#Save results to file
# basename = _os.path.splitext(_os.path.basename(filename))[0]
basename = path.stem
results_file_base = basename + '_results.pkl'
results_file = outputDir / results_file_base
with open(str(results_file), 'wb') as f:
_pickle.dump(self._results, f)
nb = _Notebook()
nb.add_markdown('# {title}\n(Created on {date})'.format(
title=title, date=_time.strftime("%B %d, %Y")))
nb.add_code("""\
import pickle
import pygsti""")
dsKeys = list(self._results.keys())
results = self._results[dsKeys[0]]
#Note: `results` is always a single Results obj from here down
nb.add_code("""\
#Load results dictionary
with open('{infile}', 'rb') as infile:
results_dict = pickle.load(infile)
print("Available dataset keys: ", ', '.join(results_dict.keys()))\
""".format(infile=results_file_base))
nb.add_code("""\
#Set which dataset should be used below
results = results_dict['{dsKey}']
print("Available estimates: ", ', '.join(results.estimates.keys()))\
""".format(dsKey=dsKeys[0]))
estLabels = list(results.estimates.keys())
estimate = results.estimates[estLabels[0]]
nb.add_code("""\
#Set which estimate is to be used below
estimate = results.estimates['{estLabel}']
print("Available gauge opts: ", ', '.join(estimate.goparameters.keys()))\
""".format(estLabel=estLabels[0]))
goLabels = list(estimate.goparameters.keys())
nb.add_code("""\
gopt = '{goLabel}'
ds = results.dataset
gssFinal = results.circuit_structs['final']
Ls = results.circuit_structs['final'].Ls
gssPerIter = results.circuit_structs['iteration'] #ALL_L
prepStrs = results.circuit_lists['prep fiducials']
effectStrs = results.circuit_lists['meas fiducials']
germs = results.circuit_lists['germs']
strs = (prepStrs, effectStrs)
params = estimate.parameters
objective = estimate.parameters['objective']
if objective == "logl":
mpc = estimate.parameters['minProbClip']
else:
mpc = estimate.parameters['minProbClipForWeighting']
clifford_compilation = estimate.parameters.get('clifford_compilation',None)
effective_ds, scale_subMxs = estimate.get_effective_dataset(True)
scaledSubMxsDict = {{'scaling': scale_subMxs, 'scaling.colormap': "revseq"}}
models = estimate.models
mdl = models[gopt] #FINAL
mdl_final = models['final iteration estimate'] #ITER
target_model = models['target']
mdlPerIter = models['iteration estimates']
mdl_eigenspace_projected = pygsti.tools.project_to_target_eigenspace(mdl, target_model)
goparams = estimate.goparameters[gopt]
confidenceLevel = {CL}
if confidenceLevel is None:
cri = None
else:
crfactory = estimate.get_confidence_region_factory(gopt)
region_type = "normal" if confidenceLevel >= 0 else "non-markovian"
cri = crfactory.view(abs(confidenceLevel), region_type)\
""".format(goLabel=goLabels[0], CL=confidenceLevel))
nb.add_code("""\
from pygsti.report import Workspace
ws = Workspace()
ws.init_notebook_mode(connected={conn}, autodisplay=True)\
""".format(conn=str(connected)))
nb.add_notebook_text_files([
templatePath / 'summary.txt',
templatePath / 'goodness.txt',
templatePath / 'gauge_invariant.txt',
templatePath / 'gauge_variant.txt'])
#Insert multi-dataset specific analysis
if len(dsKeys) > 1:
nb.add_markdown(('# Dataset comparisons\n'
'This report contains information for more than one data set.'
'This page shows comparisons between different data sets.'))
nb.add_code("""\
dslbl1 = '{dsLbl1}'
dslbl2 = '{dsLbl2}'
dscmp_gss = results_dict[dslbl1].circuit_structs['final']
ds1 = results_dict[dslbl1].dataset
ds2 = results_dict[dslbl2].dataset
dscmp = pygsti.obj.DataComparator([ds1, ds2], DS_names=[dslbl1, dslbl2])
""".format(dsLbl1=dsKeys[0], dsLbl2=dsKeys[1]))
nb.add_notebook_text_files([
templatePath / 'data_comparison.txt'])
#Add reference material
nb.add_notebook_text_files([
templatePath / 'input.txt',
templatePath / 'meta.txt'])
printer.log("Report Notebook created as %s" % path)
if auto_open:
port = "auto" if auto_open is True else int(auto_open)
nb.launch(str(path), port=port)
else:
nb.save_to(str(path))
def write_pdf(self, path, latex_cmd='pdflatex', latex_flags=None,
build_options=None,
brevity=0, precision=None, auto_open=False,
comm=None, verbosity=0):
""" Write this report to the disk as a PDF document.
Parameters
----------
path : str or path-like object
The filesystem path to write the report to. By convention,
this should use the `.pdf` file extension.
latex_cmd : str, optional
Shell command to run to compile a PDF document from the
generated LaTeX source.
latex_flags : [str], optional
List of flags to pass when calling `latex_cmd`.
build_options : dict
Dict of options for building plots. Expected values are
defined during construction of this report object.
brevity : int, optional
            Amount of detail to include in the report. Larger values mean
            smaller, briefer reports, which reduce generation time, load time, and
            disk space consumption. In particular:
            - 1: Plots showing per-sequence quantities disappear at brevity=1
- 2: Reference sections disappear at brevity=2
- 3: Germ-level estimate tables disappear at brevity=3
- 4: Everything but summary figures disappears at brevity=4
precision : int or dict, optional
The amount of precision to display. A dictionary with keys
"polar", "sci", and "normal" can separately specify the
precision for complex angles, numbers in scientific notation, and
            everything else, respectively. If an integer is given, this
same value is taken for all precision types. If None, then
`{'normal': 6, 'polar': 3, 'sci': 0}` is used.
auto_open : bool, optional
Whether the output file should be automatically opened in a web browser.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors.
verbosity : int, optional
Amount of detail to print to stdout.
"""
if not self._pdf_available:
raise ValueError(("PDF output unavailable. (Usually this is because this report"
" has multiple gauge optimizations and/or datasets.)"))
toggles = _defaultdict(lambda: False)
toggles.update(
{k: True for k in self._flags}
)
for k in range(brevity, 4):
toggles['BrevityLT' + str(k + 1)] = True
printer = _VerbosityPrinter.build_printer(verbosity, comm=comm)
path = _Path(path)
latex_flags = latex_flags or ["-interaction=nonstopmode", "-halt-on-error", "-shell-escape"]
# Render sections
qtys = self._build(build_options)
# TODO: filter while generating plots to remove need for sanitization
qtys = {k: v for k, v in qtys.items()
if not(isinstance(v, _ws.Switchboard) or isinstance(v, _ws.SwitchboardView))}
printer.log("Generating LaTeX source...")
_merge.merge_latex_template(
qtys, self._templates['pdf'], str(path.with_suffix('.tex')),
toggles, precision, printer
)
printer.log("Compiling with `{} {}`".format(latex_cmd, ' '.join(latex_flags)))
_merge.compile_latex_report(str(path.parent / path.stem), [latex_cmd] + latex_flags, printer, auto_open)
| 42.640964 | 112 | 0.61415 |
""" Internal model of a report during generation """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import time as _time
import warnings as _warnings
from pathlib import Path as _Path
import shutil as _shutil
from collections import defaultdict as _defaultdict, OrderedDict as _OrderedDict
import pickle as _pickle
from . import autotitle as _autotitle
from . import merge_helpers as _merge
from .. import _version, tools as _tools
from ..objects import VerbosityPrinter as _VerbosityPrinter, ExplicitOpModel as _ExplicitOpModel
from . import workspace as _ws
from .notebook import Notebook as _Notebook
# TODO this whole thing needs to be rewritten with different reports as derived classes
class Report:
""" The internal model of a report.
This class should never be instantiated directly. Instead, users
should use the appropriate factory method in
`pygsti.report.factory`.
"""
def __init__(self, templates, results, sections, flags,
global_qtys, report_params, build_defaults=None,
pdf_available=True, workspace=None):
self._templates = templates
self._results = results
self._sections = sections
self._flags = flags
self._global_qtys = global_qtys
self._report_params = report_params
self._workspace = workspace or _ws.Workspace()
self._build_defaults = build_defaults or {}
self._pdf_available = pdf_available
def _build(self, build_options=None):
""" Render all sections to a map of report elements for templating """
full_params = {
'results': self._results,
**self._report_params
}
full_params.update(self._build_defaults)
full_params.update(build_options or {})
qtys = self._global_qtys.copy()
for section in self._sections:
qtys.update(section.render(self._workspace, **full_params))
return qtys
def write_html(self, path, auto_open=False, link_to=None,
connected=False, build_options=None, brevity=0,
precision=None, resizable=True, autosize='initial',
single_file=False, verbosity=0):
""" Write this report to the disk as a collection of HTML documents.
Parameters
----------
path : str or path-like object
The filesystem path of a directory to write the report
to. If the specified directory does not exist, it will be
created automatically
auto_open : bool, optional
Whether the output file should be automatically opened in a web browser.
link_to : list, optional
If not None, a list of one or more items from the set
{"tex", "pdf", "pkl"} indicating whether or not to
create and include links to Latex, PDF, and Python pickle
files, respectively.
connected : bool, optional
Whether output HTML should assume an active internet connection. If
True, then the resulting HTML file size will be reduced because it
will link to web resources (e.g. CDN libraries) instead of embedding
them.
build_options : dict
Dict of options for building plots. Expected values are
defined during construction of this report object.
brevity : int, optional
            Amount of detail to include in the report. Larger values mean
            smaller, briefer reports, which reduce generation time, load time, and
            disk space consumption. In particular:
            - 1: Plots showing per-sequence quantities disappear at brevity=1
- 2: Reference sections disappear at brevity=2
- 3: Germ-level estimate tables disappear at brevity=3
- 4: Everything but summary figures disappears at brevity=4
precision : int or dict, optional
The amount of precision to display. A dictionary with keys
"polar", "sci", and "normal" can separately specify the
precision for complex angles, numbers in scientific notation, and
            everything else, respectively. If an integer is given, this
same value is taken for all precision types. If None, then
`{'normal': 6, 'polar': 3, 'sci': 0}` is used.
resizable : bool, optional
Whether plots and tables are made with resize handles and can be
resized within the report.
autosize : {'none', 'initial', 'continual'}
Whether tables and plots should be resized, either initially --
i.e. just upon first rendering (`"initial"`) -- or whenever
the browser window is resized (`"continual"`).
single_file : bool, optional
If true, the report will be written to a single HTML
document, with external dependencies baked-in. This mode
is not recommended for large reports, because this file
can grow large enough that major web browsers may struggle
to render it.
verbosity : int, optional
Amount of detail to print to stdout.
"""
build_options = build_options or {}
toggles = _defaultdict(lambda: False)
toggles.update(
{k: True for k in self._flags}
)
for k in range(brevity, 4):
toggles['BrevityLT' + str(k + 1)] = True
# Render sections
qtys = self._build(build_options)
# TODO this really should be a parameter of this method
embed_figures = self._report_params.get('embed_figures', True)
if single_file:
assert(embed_figures), \
"Single-file mode requires `embed_figures` to be True"
_merge.merge_jinja_template(
qtys, path, templateDir=self._templates['html'],
auto_open=auto_open, precision=precision,
link_to=link_to, connected=connected, toggles=toggles,
renderMath=True, resizable=resizable,
autosize=autosize, verbosity=verbosity
)
else:
_merge.merge_jinja_template_dir(
qtys, path, templateDir=self._templates['html'],
auto_open=auto_open, precision=precision,
link_to=link_to, connected=connected, toggles=toggles,
renderMath=True, resizable=resizable,
autosize=autosize, embed_figures=embed_figures,
verbosity=verbosity
)
def write_notebook(self, path, auto_open=False, connected=False, verbosity=0):
""" Write this report to the disk as an IPython notebook
A notebook report allows the user to interact more flexibly with the data
underlying the figures, and to easily generate customized variants on the
figures. As such, this type of report will be most useful for experts
who want to tinker with the standard analysis presented in the static
HTML or LaTeX format reports.
Parameters
----------
path : str or path-like object
The filesystem path to write the report to. By convention,
this should use the `.ipynb` file extension.
auto_open : bool, optional
If True, automatically open the report in a web browser after it
has been generated.
connected : bool, optional
Whether output notebook should assume an active internet connection. If
True, then the resulting file size will be reduced because it will link
to web resources (e.g. CDN libraries) instead of embedding them.
verbosity : int, optional
How much detail to send to stdout.
"""
# TODO this only applies to standard reports; rewrite generally
title = self._global_qtys['title']
confidenceLevel = self._report_params['confidence_level']
path = _Path(path)
printer = _VerbosityPrinter.build_printer(verbosity)
templatePath = _Path(__file__).parent / 'templates' / self._templates['notebook']
outputDir = path.parent
#Copy offline directory into position
if not connected:
_merge.rsync_offline_dir(outputDir)
#Save results to file
# basename = _os.path.splitext(_os.path.basename(filename))[0]
basename = path.stem
results_file_base = basename + '_results.pkl'
results_file = outputDir / results_file_base
with open(str(results_file), 'wb') as f:
_pickle.dump(self._results, f)
nb = _Notebook()
nb.add_markdown('# {title}\n(Created on {date})'.format(
title=title, date=_time.strftime("%B %d, %Y")))
nb.add_code("""\
import pickle
import pygsti""")
dsKeys = list(self._results.keys())
results = self._results[dsKeys[0]]
#Note: `results` is always a single Results obj from here down
nb.add_code("""\
#Load results dictionary
with open('{infile}', 'rb') as infile:
results_dict = pickle.load(infile)
print("Available dataset keys: ", ', '.join(results_dict.keys()))\
""".format(infile=results_file_base))
nb.add_code("""\
#Set which dataset should be used below
results = results_dict['{dsKey}']
print("Available estimates: ", ', '.join(results.estimates.keys()))\
""".format(dsKey=dsKeys[0]))
estLabels = list(results.estimates.keys())
estimate = results.estimates[estLabels[0]]
nb.add_code("""\
#Set which estimate is to be used below
estimate = results.estimates['{estLabel}']
print("Available gauge opts: ", ', '.join(estimate.goparameters.keys()))\
""".format(estLabel=estLabels[0]))
goLabels = list(estimate.goparameters.keys())
nb.add_code("""\
gopt = '{goLabel}'
ds = results.dataset
gssFinal = results.circuit_structs['final']
Ls = results.circuit_structs['final'].Ls
gssPerIter = results.circuit_structs['iteration'] #ALL_L
prepStrs = results.circuit_lists['prep fiducials']
effectStrs = results.circuit_lists['meas fiducials']
germs = results.circuit_lists['germs']
strs = (prepStrs, effectStrs)
params = estimate.parameters
objective = estimate.parameters['objective']
if objective == "logl":
mpc = estimate.parameters['minProbClip']
else:
mpc = estimate.parameters['minProbClipForWeighting']
clifford_compilation = estimate.parameters.get('clifford_compilation',None)
effective_ds, scale_subMxs = estimate.get_effective_dataset(True)
scaledSubMxsDict = {{'scaling': scale_subMxs, 'scaling.colormap': "revseq"}}
models = estimate.models
mdl = models[gopt] #FINAL
mdl_final = models['final iteration estimate'] #ITER
target_model = models['target']
mdlPerIter = models['iteration estimates']
mdl_eigenspace_projected = pygsti.tools.project_to_target_eigenspace(mdl, target_model)
goparams = estimate.goparameters[gopt]
confidenceLevel = {CL}
if confidenceLevel is None:
cri = None
else:
crfactory = estimate.get_confidence_region_factory(gopt)
region_type = "normal" if confidenceLevel >= 0 else "non-markovian"
cri = crfactory.view(abs(confidenceLevel), region_type)\
""".format(goLabel=goLabels[0], CL=confidenceLevel))
nb.add_code("""\
from pygsti.report import Workspace
ws = Workspace()
ws.init_notebook_mode(connected={conn}, autodisplay=True)\
""".format(conn=str(connected)))
nb.add_notebook_text_files([
templatePath / 'summary.txt',
templatePath / 'goodness.txt',
templatePath / 'gauge_invariant.txt',
templatePath / 'gauge_variant.txt'])
#Insert multi-dataset specific analysis
if len(dsKeys) > 1:
nb.add_markdown(('# Dataset comparisons\n'
'This report contains information for more than one data set.'
'This page shows comparisons between different data sets.'))
nb.add_code("""\
dslbl1 = '{dsLbl1}'
dslbl2 = '{dsLbl2}'
dscmp_gss = results_dict[dslbl1].circuit_structs['final']
ds1 = results_dict[dslbl1].dataset
ds2 = results_dict[dslbl2].dataset
dscmp = pygsti.obj.DataComparator([ds1, ds2], DS_names=[dslbl1, dslbl2])
""".format(dsLbl1=dsKeys[0], dsLbl2=dsKeys[1]))
nb.add_notebook_text_files([
templatePath / 'data_comparison.txt'])
#Add reference material
nb.add_notebook_text_files([
templatePath / 'input.txt',
templatePath / 'meta.txt'])
printer.log("Report Notebook created as %s" % path)
if auto_open:
port = "auto" if auto_open is True else int(auto_open)
nb.launch(str(path), port=port)
else:
nb.save_to(str(path))
def write_pdf(self, path, latex_cmd='pdflatex', latex_flags=None,
build_options=None,
brevity=0, precision=None, auto_open=False,
comm=None, verbosity=0):
""" Write this report to the disk as a PDF document.
Parameters
----------
path : str or path-like object
The filesystem path to write the report to. By convention,
this should use the `.pdf` file extension.
latex_cmd : str, optional
Shell command to run to compile a PDF document from the
generated LaTeX source.
latex_flags : [str], optional
List of flags to pass when calling `latex_cmd`.
build_options : dict
Dict of options for building plots. Expected values are
defined during construction of this report object.
brevity : int, optional
            Amount of detail to include in the report. Larger values mean
            smaller, briefer reports, which reduce generation time, load time, and
            disk space consumption. In particular:
            - 1: Plots showing per-sequence quantities disappear at brevity=1
- 2: Reference sections disappear at brevity=2
- 3: Germ-level estimate tables disappear at brevity=3
- 4: Everything but summary figures disappears at brevity=4
precision : int or dict, optional
The amount of precision to display. A dictionary with keys
"polar", "sci", and "normal" can separately specify the
precision for complex angles, numbers in scientific notation, and
            everything else, respectively. If an integer is given, this
same value is taken for all precision types. If None, then
`{'normal': 6, 'polar': 3, 'sci': 0}` is used.
auto_open : bool, optional
Whether the output file should be automatically opened in a web browser.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors.
verbosity : int, optional
Amount of detail to print to stdout.
"""
if not self._pdf_available:
raise ValueError(("PDF output unavailable. (Usually this is because this report"
" has multiple gauge optimizations and/or datasets.)"))
toggles = _defaultdict(lambda: False)
toggles.update(
{k: True for k in self._flags}
)
for k in range(brevity, 4):
toggles['BrevityLT' + str(k + 1)] = True
printer = _VerbosityPrinter.build_printer(verbosity, comm=comm)
path = _Path(path)
latex_flags = latex_flags or ["-interaction=nonstopmode", "-halt-on-error", "-shell-escape"]
# Render sections
qtys = self._build(build_options)
# TODO: filter while generating plots to remove need for sanitization
qtys = {k: v for k, v in qtys.items()
if not(isinstance(v, _ws.Switchboard) or isinstance(v, _ws.SwitchboardView))}
printer.log("Generating LaTeX source...")
_merge.merge_latex_template(
qtys, self._templates['pdf'], str(path.with_suffix('.tex')),
toggles, precision, printer
)
printer.log("Compiling with `{} {}`".format(latex_cmd, ' '.join(latex_flags)))
_merge.compile_latex_report(str(path.parent / path.stem), [latex_cmd] + latex_flags, printer, auto_open)
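# A self-contained sketch of the brevity/flag toggle pattern used by
# Report.write_html and Report.write_pdf above (every flag is switched on, and
# BrevityLT{k+1} is set for k in [brevity, 4)); the flag name in the example is
# an arbitrary placeholder.
from collections import defaultdict

def brevity_toggles(flags, brevity):
    toggles = defaultdict(lambda: False)
    toggles.update({k: True for k in flags})
    for k in range(brevity, 4):
        toggles['BrevityLT' + str(k + 1)] = True
    return toggles

print(dict(brevity_toggles({'CompareDatasets'}, brevity=2)))
# {'CompareDatasets': True, 'BrevityLT3': True, 'BrevityLT4': True}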
| 519 | 0 | 26 |
056e5bfd74cdb3c57ea5d1772797214b876ae034 | 6,878 | py | Python |
function/tests/testTask/test_update.py | kohski/serverless_todo | 60e90caf86f5d921150193beac4acbd90752c814 | ["MIT"] | null | null | null |
function/tests/testTask/test_update.py | kohski/serverless_todo | 60e90caf86f5d921150193beac4acbd90752c814 | ["MIT"] | 32 | 2021-02-25T01:18:20.000Z | 2021-03-03T23:42:27.000Z |
function/tests/testTask/test_update.py | kohski/serverless_todo | 60e90caf86f5d921150193beac4acbd90752c814 | ["MIT"] | null | null | null |
import pytest
from datetime import datetime
from update import lambda_handler
import boto3
import os
import json
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['TABLE_NAME'])
# ------------------------------------------
# valid pattern
# ------------------------------------------
@pytest.mark.parametrize("word,is_done,priority", [
(word, is_done, priority)
for word in [None, "", "修正後内容"]
for is_done in ['true', 'false', True, False]
for priority in ['high', 'medium', 'low']
])
# ------------------------------------------
# not found pattern
# ------------------------------------------
@pytest.fixture()
# ------------------------------------------
# invalid pattern
# ------------------------------------------
INVALID_PAYLOAD_LIST = [
{
"title": ""
},
{
"title": None
},
{
"title": "a" * 101
},
{
"content": "a" * 2001
},
{
"priority": "invalid_priority_value"
},
{
"is_done": "invalid_is_done_value"
},
]
@pytest.fixture(params=INVALID_PAYLOAD_LIST)
| 32.443396 | 109 | 0.486915 |
import pytest
from datetime import datetime
from update import lambda_handler
import boto3
import os
import json
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['TABLE_NAME'])
# ------------------------------------------
# valid pattern
# ------------------------------------------
@pytest.mark.parametrize("word,is_done,priority", [
(word, is_done, priority)
for word in [None, "", "修正後内容"]
for is_done in ['true', 'false', True, False]
for priority in ['high', 'medium', 'low']
])
def test_existing_task_and_requested_by_task_owner(word, is_done, priority, context, ulid_mock):
event = {
"resource": "/task/",
"path": "/task",
"httpMethod": 'POST',
"headers": {},
"multiValueHeaders": {},
"queryStringParameters": None,
"multiValueQueryStringParameters": None,
"pathParameters": {
"task_id": "ABCDEFGHIJKLMNOPQRSTUVW000"
},
"stageVariables": None,
"requestContext": {
"authorizer": {
"claims": {
"sub": "68f2ed3a-3726-439d-81cb-171dab716733",
"aud": "19gqr90c608tn8gp7j9nvop7h7",
"event_id": "55536ceb-c042-4c18-8a25-8bd4e4c2b28d",
"token_use": "id",
"auth_time": str(int(datetime.now().timestamp())),
"iss": "https://cognito-idp.ap-northeast-1.amazonaws.com/ap-northeast-*",
"cognito:username": "existing_user_id",
"exp": "Sun Feb 28 01:38:19 UTC 2021",
"iat": "Sun Feb 28 00:38:20 UTC 2021",
"email": "test@gmail.com"
}
},
"resourcePath": "/task",
"httpMethod": "GET",
"path": "/prod/task/123456",
"requestTimeEpoch": int(datetime.now().timestamp()),
"identity": {}
},
"body": json.dumps({
"title": "修正後タイトル",
"priority": priority,
"is_done": is_done,
"content": word
}),
"isBase64Encoded": False
}
response = lambda_handler(event, context)
del response['body']
assert response == {
'statusCode': 201,
'isBase64Encoded': False
}
item = table.get_item(
Key={
'id': 'Task:ABCDEFGHIJKLMNOPQRSTUVW000',
'meta': 'latest'
}
)
assert item['Item']['title'] == '修正後タイトル'
# ------------------------------------------
# not found pattern
# ------------------------------------------
@pytest.fixture()
def not_found_event():
return {
"resource": "/task/",
"path": "/task",
"httpMethod": 'POST',
"headers": {},
"multiValueHeaders": {},
"queryStringParameters": None,
"multiValueQueryStringParameters": None,
"pathParameters": {
'task_id': 'NOTEXISTINGTASK'
},
"stageVariables": None,
"requestContext": {
"authorizer": {
"claims": {
"sub": "68f2ed3a-3726-439d-81cb-171dab716733",
"aud": "19gqr90c608tn8gp7j9nvop7h7",
"event_id": "55536ceb-c042-4c18-8a25-8bd4e4c2b28d",
"token_use": "id",
"auth_time": str(int(datetime.now().timestamp())),
"iss": "https://cognito-idp.ap-northeast-1.amazonaws.com/ap-northeast-*",
"cognito:username": "existing_user_id",
"exp": "Sun Feb 28 01:38:19 UTC 2021",
"iat": "Sun Feb 28 00:38:20 UTC 2021",
"email": "test@gmail.com"
}
},
"resourcePath": "/task",
"httpMethod": "GET",
"path": "/prod/task/123456",
"requestTimeEpoch": int(datetime.now().timestamp()),
"identity": {}
},
"body": json.dumps({
"title": "タイトル",
"priority": "medium",
"is_done": 'true',
"content": "内容"
}),
"isBase64Encoded": False
}
def test_raise_not_found_case(not_found_event, context):
response = lambda_handler(not_found_event, context)
assert response == {
'statusCode': 404,
'body': 'task is not found',
'isBase64Encoded': False
}
# ------------------------------------------
# invalid pattern
# ------------------------------------------
INVALID_PAYLOAD_LIST = [
{
"title": ""
},
{
"title": None
},
{
"title": "a" * 101
},
{
"content": "a" * 2001
},
{
"priority": "invalid_priority_value"
},
{
"is_done": "invalid_is_done_value"
},
]
@pytest.fixture(params=INVALID_PAYLOAD_LIST)
def invalid_event(request):
return {
"resource": "/task/",
"path": "/task",
"httpMethod": 'POST',
"headers": {},
"multiValueHeaders": {},
"queryStringParameters": None,
"multiValueQueryStringParameters": None,
"pathParameters": {},
"stageVariables": None,
"requestContext": {
"authorizer": {
"claims": {
"sub": "68f2ed3a-3726-439d-81cb-171dab716733",
"aud": "19gqr90c608tn8gp7j9nvop7h7",
"event_id": "55536ceb-c042-4c18-8a25-8bd4e4c2b28d",
"token_use": "id",
"auth_time": str(int(datetime.now().timestamp())),
"iss": "https://cognito-idp.ap-northeast-1.amazonaws.com/ap-northeast-*",
"cognito:username": "existing_user_id",
"exp": "Sun Feb 28 01:38:19 UTC 2021",
"iat": "Sun Feb 28 00:38:20 UTC 2021",
"email": "test@gmail.com"
}
},
"resourcePath": "/task",
"httpMethod": "GET",
"path": "/prod/task/123456",
"requestTimeEpoch": int(datetime.now().timestamp()),
"identity": {}
},
"body": json.dumps({
"title": request.param.get('title'),
"priority": "medium" if request.param.get('priority') is None else request.param.get('priority'),
"is_done": 'true' if request.param.get('is_done') is None else request.param.get('is_done'),
"content": "内容" if request.param.get('content') is None else request.param.get('content')
}),
"isBase64Encoded": False
}
def test_raise_invalid_case(invalid_event, context, ulid_mock):
response = lambda_handler(invalid_event, context)
assert response == {
'statusCode': 400,
'body': 'invalid parameter',
'isBase64Encoded': False
}
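# The tests above rely on `context` and `ulid_mock` fixtures that are not shown
# in this file (presumably defined in a conftest.py). A hypothetical minimal
# sketch of what they could look like; the patch target "update.ULID" and the
# dummy context attributes are assumptions, not the repository's real fixtures.
from unittest.mock import patch
import pytest

@pytest.fixture()
def context():
    class DummyContext:
        function_name = "update"
        memory_limit_in_mb = 128
        aws_request_id = "dummy-request-id"
    return DummyContext()

@pytest.fixture()
def ulid_mock():
    # Freeze whatever id generator update.py uses so generated ids are deterministic.
    with patch("update.ULID", create=True) as mocked:
        mocked.return_value = "ABCDEFGHIJKLMNOPQRSTUVW000"
        yield mocked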
| 5,659 | 0 | 112 |
877b0945aabc974e781666ff3dc8ba39b0ce5990 | 3,195 | py | Python |
bot/helper/mirror_utils/status_utils/qbit_download_status.py | vincreator/Eunha | 85a702a5b5f30ccea1798122c261d4ff07fe0c0c | ["Apache-2.0"] | null | null | null |
bot/helper/mirror_utils/status_utils/qbit_download_status.py | vincreator/Eunha | 85a702a5b5f30ccea1798122c261d4ff07fe0c0c | ["Apache-2.0"] | null | null | null |
bot/helper/mirror_utils/status_utils/qbit_download_status.py | vincreator/Eunha | 85a702a5b5f30ccea1798122c261d4ff07fe0c0c | ["Apache-2.0"] | null | null | null |
from bot import DOWNLOAD_DIR, LOGGER
from bot.helper.ext_utils.bot_utils import MirrorStatus, get_readable_file_size, get_readable_time
from time import sleep
| 29.859813
| 98
| 0.634429
|
from bot import DOWNLOAD_DIR, LOGGER
from bot.helper.ext_utils.bot_utils import MirrorStatus, get_readable_file_size, get_readable_time
from time import sleep
def get_download(client, hash_):
try:
return client.torrents_info(torrent_hashes=hash_)[0]
    except Exception:
        # the torrent may not be registered with qBittorrent yet; callers treat None as "not found"
        pass
class QbDownloadStatus:
def __init__(self, listener, client, hash_, select):
self.__gid = hash_[:12]
self.__hash = hash_
self.__select = select
self.__client = client
self.__listener = listener
self.__uid = listener.uid
self.__info = get_download(client, hash_)
self.message = listener.message
def __update(self):
self.__info = get_download(self.__client, self.__hash)
def progress(self):
"""
Calculates the progress of the mirror (upload or download)
:return: returns progress in percentage
"""
return f'{round(self.__info.progress*100, 2)}%'
def size_raw(self):
"""
Gets total size of the mirror file/folder
:return: total size of mirror
"""
if self.__select:
return self.__info.size
else:
return self.__info.total_size
def processed_bytes(self):
return self.__info.downloaded
def speed(self):
return f"{get_readable_file_size(self.__info.dlspeed)}/s"
def name(self):
self.__update()
return self.__info.name
def path(self):
return f"{DOWNLOAD_DIR}{self.__uid}"
def size(self):
return get_readable_file_size(self.__info.size)
def eta(self):
return get_readable_time(self.__info.eta)
def status(self):
download = self.__info.state
if download in ["queuedDL", "queuedUP"]:
return MirrorStatus.STATUS_WAITING
elif download in ["metaDL", "checkingResumeData"]:
return MirrorStatus.STATUS_DOWNLOADING + " (Metadata)"
elif download in ["pausedDL", "pausedUP"]:
return MirrorStatus.STATUS_PAUSE
elif download in ["checkingUP", "checkingDL"]:
return MirrorStatus.STATUS_CHECKING
elif download in ["stalledUP", "uploading", "forcedUP"]:
return MirrorStatus.STATUS_SEEDING
else:
return MirrorStatus.STATUS_DOWNLOADING
def torrent_info(self):
return self.__info
def download(self):
return self
def uid(self):
return self.__uid
def gid(self):
return self.__gid
def client(self):
return self.__client
def listener(self):
return self.__listener
def cancel_download(self):
self.__update()
if self.status() == MirrorStatus.STATUS_SEEDING:
LOGGER.info(f"Cancelling Seed: {self.name()}")
self.__client.torrents_pause(torrent_hashes=self.__hash)
else:
LOGGER.info(f"Cancelling Download: {self.name()}")
self.__client.torrents_pause(torrent_hashes=self.__hash)
sleep(0.3)
self.__listener.onDownloadError('Download stopped by user!')
self.__client.torrents_delete(torrent_hashes=self.__hash, delete_files=True)
| 2,080
| 909
| 46
|
339c00d28f3cce8e0930a2efcf6717be89f5a16d
| 642
|
py
|
Python
|
micron/tests/audiodevice.test.py
|
zhengyang-c/photonLauncher
|
76215f47ccd1178f1826834533f5702c4b8f2c35
|
[
"Apache-2.0"
] | 6
|
2015-11-26T15:03:38.000Z
|
2020-10-05T14:08:54.000Z
|
micron/tests/audiodevice.test.py
|
zhengyang-c/photonLauncher
|
76215f47ccd1178f1826834533f5702c4b8f2c35
|
[
"Apache-2.0"
] | 7
|
2015-12-09T06:44:34.000Z
|
2021-12-14T15:51:28.000Z
|
micron/tests/audiodevice.test.py
|
zhengyang-c/photonLauncher
|
76215f47ccd1178f1826834533f5702c4b8f2c35
|
[
"Apache-2.0"
] | 3
|
2016-07-25T10:43:21.000Z
|
2021-12-07T14:12:47.000Z
|
#!/usr/bin/env python3
import pyaudio
import sys
sys.path.insert(0, "../")
from pwmaudio import noALSAerror
with noALSAerror():
p = pyaudio.PyAudio()
info = p.get_host_api_info_by_index(0)
print(p.get_host_api_count())
print(info)
numdevices = info.get('deviceCount')
for i in range(0, numdevices):
if (p.get_device_info_by_host_api_device_index(0, i).get('maxOutputChannels')) > 0:
# print("Output Device id ", i, " - ", p.get_device_info_by_host_api_device_index(0, i).get('name'))
print("Output Device id ", i, " - ", p.get_device_info_by_host_api_device_index(0, i))
| 33.789474
| 116
| 0.661994
|
#!/usr/bin/env python3
import pyaudio
import sys
sys.path.insert(0, "../")
from pwmaudio import noALSAerror
with noALSAerror():
p = pyaudio.PyAudio()
info = p.get_host_api_info_by_index(0)
print(p.get_host_api_count())
print(info)
numdevices = info.get('deviceCount')
for i in range(0, numdevices):
if (p.get_device_info_by_host_api_device_index(0, i).get('maxOutputChannels')) > 0:
# print("Output Device id ", i, " - ", p.get_device_info_by_host_api_device_index(0, i).get('name'))
print("Output Device id ", i, " - ", p.get_device_info_by_host_api_device_index(0, i))
| 0
| 0
| 0
|
38f4cfc7cbdf2d4521cd2cce0f4533e5bc58ff43
| 2,384
|
py
|
Python
|
app/src/main/assets/code/controller/controller.py
|
tongjinlv/py_and
|
a069336c47dd233648fbbadee7275ef188696a44
|
[
"Apache-2.0"
] | null | null | null |
app/src/main/assets/code/controller/controller.py
|
tongjinlv/py_and
|
a069336c47dd233648fbbadee7275ef188696a44
|
[
"Apache-2.0"
] | null | null | null |
app/src/main/assets/code/controller/controller.py
|
tongjinlv/py_and
|
a069336c47dd233648fbbadee7275ef188696a44
|
[
"Apache-2.0"
] | null | null | null |
import sys
import math
import random
import imp
from java import jclass
from controller.leds import leds
from controller.message import message
from controller.sensor import sensor
from controller.motion_sensor import motion_sensor
from controller.button import button
from controller.color_sensor import color_sensor
from controller.infrared_sensor import infrared_sensor
from controller.sound_sensor import sound_sensor
from controller.timer import timer
imp.load_source('controllerleds', '/data/data/com.matatalab.matatacode/run/controller/leds.py')
imp.load_source('controllermessage', '/data/data/com.matatalab.matatacode/run/controller/message.py')
imp.load_source('controllesensor', '/data/data/com.matatalab.matatacode/run/controller/sensor.py')
imp.load_source('controllemotion_sensor', '/data/data/com.matatalab.matatacode/run/controller/motion_sensor.py')
imp.load_source('controllebutton', '/data/data/com.matatalab.matatacode/run/controller/button.py')
imp.load_source('controllecolor_sensor', '/data/data/com.matatalab.matatacode/run/controller/color_sensor.py')
imp.load_source('controlleinfrared_sensor', '/data/data/com.matatalab.matatacode/run/controller/infrared_sensor.py')
imp.load_source('controllesound_sensor', '/data/data/com.matatalab.matatacode/run/controller/sound_sensor.py')
imp.load_source('controlletimer', '/data/data/com.matatalab.matatacode/run/controller/timer.py')
| 41.824561
| 116
| 0.75797
|
import sys
import math
import random
import imp
from java import jclass
from controller.leds import leds
from controller.message import message
from controller.sensor import sensor
from controller.motion_sensor import motion_sensor
from controller.button import button
from controller.color_sensor import color_sensor
from controller.infrared_sensor import infrared_sensor
from controller.sound_sensor import sound_sensor
from controller.timer import timer
imp.load_source('controllerleds', '/data/data/com.matatalab.matatacode/run/controller/leds.py')
imp.load_source('controllermessage', '/data/data/com.matatalab.matatacode/run/controller/message.py')
imp.load_source('controllesensor', '/data/data/com.matatalab.matatacode/run/controller/sensor.py')
imp.load_source('controllemotion_sensor', '/data/data/com.matatalab.matatacode/run/controller/motion_sensor.py')
imp.load_source('controllebutton', '/data/data/com.matatalab.matatacode/run/controller/button.py')
imp.load_source('controllecolor_sensor', '/data/data/com.matatalab.matatacode/run/controller/color_sensor.py')
imp.load_source('controlleinfrared_sensor', '/data/data/com.matatalab.matatacode/run/controller/infrared_sensor.py')
imp.load_source('controllesound_sensor', '/data/data/com.matatalab.matatacode/run/controller/sound_sensor.py')
imp.load_source('controlletimer', '/data/data/com.matatalab.matatacode/run/controller/timer.py')
class controller:
call=None
leds=None
message=None
sensor=None
motion_sensor=None
button=None
color_sensor=None
infrared_sensor=None
sound_sensor=None
timer=None
def __init__(self):
Python2Java = jclass("com.matatalab.matatacode.model.Python2Java")
self.call = Python2Java("python")
self.leds=leds(self.call)
self.message=message(self.call)
self.sensor=sensor(self.call)
self.motion_sensor=motion_sensor(self.call)
self.button=button(self.call)
self.color_sensor=color_sensor(self.call)
self.infrared_sensor=infrared_sensor(self.call)
self.sound_sensor=sound_sensor(self.call)
self.timer=timer(self.call)
#data = [0x7e,0x02,0x02,0x00,0x00]
#print("控制器 设置为新协议")
#self.call.blewrite(data)
#self.call.blewait()
def test(self):
data = [0x39,0x04]
self.call.blewrite(data)
self.call.blewait()
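# Editorial note (not from the original source): the `imp` module used above is deprecated
# and removed in Python 3.12. A rough, assumed sketch of an equivalent loader built on
# importlib; the example path is the same device-specific location the original uses and
# is not verified here:
import importlib.util
import sys


def _load_source_py3(module_name, path):
    """Load a module from an explicit file path, roughly like imp.load_source."""
    spec = importlib.util.spec_from_file_location(module_name, path)
    module = importlib.util.module_from_spec(spec)
    sys.modules[module_name] = module
    spec.loader.exec_module(module)
    return module


# Example (only works on the target device where this path exists):
# _load_source_py3('controllerleds', '/data/data/com.matatalab.matatacode/run/controller/leds.py')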
| 744
| 232
| 23
|
9126ebac0a1ed3389e3b8adbc570ccd9cc668771
| 4,684
|
py
|
Python
|
crowdstrike/src/crowdstrike/actor/importer.py
|
galonsososa/connectors
|
6272128a2ca69ffca13cec63ff0f7bc55ee902a5
|
[
"Apache-2.0"
] | null | null | null |
crowdstrike/src/crowdstrike/actor/importer.py
|
galonsososa/connectors
|
6272128a2ca69ffca13cec63ff0f7bc55ee902a5
|
[
"Apache-2.0"
] | 2
|
2021-02-16T20:48:43.000Z
|
2021-03-03T06:20:13.000Z
|
crowdstrike/src/crowdstrike/actor/importer.py
|
galonsososa/connectors
|
6272128a2ca69ffca13cec63ff0f7bc55ee902a5
|
[
"Apache-2.0"
] | 2
|
2021-02-16T20:45:11.000Z
|
2021-03-03T05:47:53.000Z
|
# -*- coding: utf-8 -*-
"""OpenCTI CrowdStrike actor importer module."""
from typing import Any, Generator, List, Mapping, Optional
from crowdstrike_client.api.intel.actors import Actors
from crowdstrike_client.api.models import Response
from crowdstrike_client.api.models.actor import Actor
from pycti.connector.opencti_connector_helper import OpenCTIConnectorHelper # type: ignore # noqa: E501
from stix2 import Bundle, Identity, MarkingDefinition # type: ignore
from crowdstrike.actor.builder import ActorBundleBuilder
from crowdstrike.importer import BaseImporter
from crowdstrike.utils import datetime_to_timestamp, paginate, timestamp_to_datetime
class ActorImporter(BaseImporter):
"""CrowdStrike actor importer."""
_LATEST_ACTOR_TIMESTAMP = "latest_actor_timestamp"
def __init__(
self,
helper: OpenCTIConnectorHelper,
actors_api: Actors,
update_existing_data: bool,
author: Identity,
default_latest_timestamp: int,
tlp_marking: MarkingDefinition,
) -> None:
"""Initialize CrowdStrike actor importer."""
super().__init__(helper, author, tlp_marking, update_existing_data)
self.actors_api = actors_api
self.default_latest_timestamp = default_latest_timestamp
def run(self, state: Mapping[str, Any]) -> Mapping[str, Any]:
"""Run importer."""
self._info("Running actor importer with state: {0}...", state)
fetch_timestamp = state.get(
self._LATEST_ACTOR_TIMESTAMP, self.default_latest_timestamp
)
latest_fetched_actor_timestamp = None
for actors_batch in self._fetch_actors(fetch_timestamp):
if not actors_batch:
break
if latest_fetched_actor_timestamp is None:
first_in_batch = actors_batch[0]
created_date = first_in_batch.created_date
if created_date is None:
self._error(
"Missing created date for actor {0} ({1})",
first_in_batch.name,
first_in_batch.id,
)
break
latest_fetched_actor_timestamp = datetime_to_timestamp(created_date)
self._process_actors(actors_batch)
state_timestamp = latest_fetched_actor_timestamp or fetch_timestamp
self._info(
"Actor importer completed, latest fetch {0}.",
timestamp_to_datetime(state_timestamp),
)
return {self._LATEST_ACTOR_TIMESTAMP: state_timestamp}
| 33.219858
| 105
| 0.638343
|
# -*- coding: utf-8 -*-
"""OpenCTI CrowdStrike actor importer module."""
from typing import Any, Generator, List, Mapping, Optional
from crowdstrike_client.api.intel.actors import Actors
from crowdstrike_client.api.models import Response
from crowdstrike_client.api.models.actor import Actor
from pycti.connector.opencti_connector_helper import OpenCTIConnectorHelper # type: ignore # noqa: E501
from stix2 import Bundle, Identity, MarkingDefinition # type: ignore
from crowdstrike.actor.builder import ActorBundleBuilder
from crowdstrike.importer import BaseImporter
from crowdstrike.utils import datetime_to_timestamp, paginate, timestamp_to_datetime
class ActorImporter(BaseImporter):
"""CrowdStrike actor importer."""
_LATEST_ACTOR_TIMESTAMP = "latest_actor_timestamp"
def __init__(
self,
helper: OpenCTIConnectorHelper,
actors_api: Actors,
update_existing_data: bool,
author: Identity,
default_latest_timestamp: int,
tlp_marking: MarkingDefinition,
) -> None:
"""Initialize CrowdStrike actor importer."""
super().__init__(helper, author, tlp_marking, update_existing_data)
self.actors_api = actors_api
self.default_latest_timestamp = default_latest_timestamp
def run(self, state: Mapping[str, Any]) -> Mapping[str, Any]:
"""Run importer."""
self._info("Running actor importer with state: {0}...", state)
fetch_timestamp = state.get(
self._LATEST_ACTOR_TIMESTAMP, self.default_latest_timestamp
)
latest_fetched_actor_timestamp = None
for actors_batch in self._fetch_actors(fetch_timestamp):
if not actors_batch:
break
if latest_fetched_actor_timestamp is None:
first_in_batch = actors_batch[0]
created_date = first_in_batch.created_date
if created_date is None:
self._error(
"Missing created date for actor {0} ({1})",
first_in_batch.name,
first_in_batch.id,
)
break
latest_fetched_actor_timestamp = datetime_to_timestamp(created_date)
self._process_actors(actors_batch)
state_timestamp = latest_fetched_actor_timestamp or fetch_timestamp
self._info(
"Actor importer completed, latest fetch {0}.",
timestamp_to_datetime(state_timestamp),
)
return {self._LATEST_ACTOR_TIMESTAMP: state_timestamp}
def _fetch_actors(self, start_timestamp: int) -> Generator[List[Actor], None, None]:
limit = 50
sort = "created_date|desc"
fql_filter = f"created_date:>{start_timestamp}"
fields = ["__full__"]
paginated_query = paginate(self._query_actor_entities)
return paginated_query(
limit=limit, sort=sort, fql_filter=fql_filter, fields=fields
)
def _query_actor_entities(
self,
limit: int = 50,
offset: int = 0,
sort: Optional[str] = None,
fql_filter: Optional[str] = None,
fields: Optional[List[str]] = None,
) -> Response[Actor]:
self._info(
"Query actors limit: {0}, offset: {1}, sort: {2}, filter: {3}, fields: {4}",
limit,
offset,
sort,
fql_filter,
fields,
)
return self.actors_api.query_entities(
limit=limit, offset=offset, sort=sort, fql_filter=fql_filter, fields=fields
)
def _process_actors(self, actors: List[Actor]) -> None:
actor_count = len(actors)
self._info("Processing {0} actors...", actor_count)
for actor in actors:
self._process_actor(actor)
self._info("Processing actors completed (imported: {0})", actor_count)
def _process_actor(self, actor: Actor) -> None:
self._info("Processing actor {0} ({1})...", actor.name, actor.id)
actor_bundle = self._create_actor_bundle(actor)
# with open(f"actor_bundle_{actor.id}.json", "w") as f:
# f.write(actor_bundle.serialize(pretty=True))
self._send_bundle(actor_bundle)
def _create_actor_bundle(self, actor: Actor) -> Optional[Bundle]:
author = self.author
source_name = self._source_name()
object_marking_refs = [self.tlp_marking]
confidence_level = self._confidence_level()
bundle_builder = ActorBundleBuilder(
actor, author, source_name, object_marking_refs, confidence_level
)
return bundle_builder.build()
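# Illustrative aside (not part of the connector): _fetch_actors above relies on
# crowdstrike.utils.paginate, whose exact behaviour isn't shown here. A minimal, assumed
# sketch of the offset-based pagination pattern it suggests, using a plain list in place
# of the Actors API:
from typing import Callable, Generator, List


def _paginate_stub(query: Callable[[int, int], List[int]],
                   limit: int = 50) -> Generator[List[int], None, None]:
    """Yield batches from query(limit, offset), stopping after a short (or empty) batch."""
    offset = 0
    while True:
        batch = query(limit, offset)
        yield batch
        if len(batch) < limit:
            break
        offset += limit


if __name__ == "__main__":
    data = list(range(7))  # pretend these are actor records
    fake_query = lambda limit, offset: data[offset:offset + limit]
    for batch in _paginate_stub(fake_query, limit=3):
        print(batch)  # [0, 1, 2], then [3, 4, 5], then [6]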
| 1,953
| 0
| 135
|
f6bfb6ffbc2d0285ca49b8bc43649c6454ef1f28
| 3,024
|
py
|
Python
|
Sample.py
|
zerobounce-llc/zero-bounce-python-sdk-setup
|
46dcdce9ece529e23d65fa92fb81f8ac19ce5c2e
|
[
"MIT"
] | null | null | null |
Sample.py
|
zerobounce-llc/zero-bounce-python-sdk-setup
|
46dcdce9ece529e23d65fa92fb81f8ac19ce5c2e
|
[
"MIT"
] | null | null | null |
Sample.py
|
zerobounce-llc/zero-bounce-python-sdk-setup
|
46dcdce9ece529e23d65fa92fb81f8ac19ce5c2e
|
[
"MIT"
] | null | null | null |
from datetime import date
from zerobouncesdk import zerobouncesdk, ZBApiException, \
ZBMissingApiKeyException
test()
| 29.076923
| 85
| 0.652116
|
from datetime import date
from zerobouncesdk import zerobouncesdk, ZBApiException, \
ZBMissingApiKeyException
def test_validate():
try:
response = zerobouncesdk.validate(email="<EMAIL_TO_TEST>")
print("validate success response: " + str(response))
except ZBApiException as e:
print("validate error message: " + str(e))
except ZBMissingApiKeyException as e:
print("get_credits error message: " + str(e))
def test_get_credits():
try:
response = zerobouncesdk.get_credits()
print("get_credits success response: " + str(response))
except ZBApiException as e:
print("get_credits error message: " + str(e))
except ZBMissingApiKeyException as e:
print("get_credits error message: " + str(e))
def test_send_file():
try:
response = zerobouncesdk.send_file(
file_path='./email_file.csv',
email_address_column=1,
return_url=None,
first_name_column=2,
last_name_column=3,
has_header_row=True)
print("sendfile success response: " + str(response))
except ZBApiException as e:
print("sendfile error message: " + str(e))
except ZBMissingApiKeyException as e:
print("get_credits error message: " + str(e))
def test_file_status():
try:
response = zerobouncesdk.file_status("<YOUR_FILE_ID>")
print("file_status success response: " + str(response))
except ZBApiException as e:
print("file_status error message: " + str(e))
except ZBMissingApiKeyException as e:
print("file_status error message: " + str(e))
def test_delete_file():
try:
response = zerobouncesdk.delete_file("<YOUR_FILE_ID>")
print("delete_file success response: " + str(response))
except ZBApiException as e:
print("delete_file error message: " + str(e))
except ZBMissingApiKeyException as e:
print("delete_file error message: " + str(e))
def test_get_api_usage():
try:
start_date = date(2019, 7, 5)
end_date = date(2019, 7, 15)
response = zerobouncesdk.get_api_usage(start_date, end_date)
print("get_api_usage success response: " + str(response))
except ZBApiException as e:
print("get_api_usage error message: " + str(e))
except ZBMissingApiKeyException as e:
print("get_api_usage error message: " + str(e))
def test_get_file():
try:
response = zerobouncesdk.get_file("<YOUR_FILE_ID>", "./downloads/emails.csv")
print("get_file success response: " + str(response))
except ZBApiException as e:
print("get_file error message: " + str(e))
except ZBMissingApiKeyException as e:
print("get_file error message: " + str(e))
def test():
zerobouncesdk.initialize("<YOUR_API_KEY>")
# test_validate()
# test_send_file()
# test_get_credits()
# test_file_status()
# test_delete_file()
# test_get_api_usage()
test_get_file()
test()
| 2,709
| 0
| 184
|
7bc2b1b8575ca1fb963e3e27a6dc57290ad35330
| 2,305
|
py
|
Python
|
Software/Sensors/IAQ_SCD30.py
|
xJohnnyBravo/zephyrus-iaq
|
31d39ae21080de55d39bc0dde6e49f5749d39477
|
[
"MIT"
] | 2
|
2019-10-01T23:08:25.000Z
|
2019-11-05T23:37:38.000Z
|
Software/Sensors/IAQ_SCD30.py
|
aaronjense/raspberrypi-indoor-air-quality-pcb
|
7e1fc68b31dea88229866c8cbc6b221a4a679134
|
[
"MIT"
] | 1
|
2019-11-14T02:28:30.000Z
|
2019-11-14T02:28:30.000Z
|
Software/Sensors/IAQ_SCD30.py
|
aaronjense/raspberrypi-indoor-air-quality-pcb
|
7e1fc68b31dea88229866c8cbc6b221a4a679134
|
[
"MIT"
] | 6
|
2019-10-01T22:44:44.000Z
|
2019-11-14T20:19:46.000Z
|
#!/usr/bin/python
#################################################################################
# MIT License
#
# Copyright (c) 2019 Aaron Jense, Amy Heidner, Dennis Heidner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#################################################################################
from third_party.Adafruit_I2C import *
from IAQ_Exceptions import *
import struct
| 39.741379
| 82
| 0.6282
|
#!/usr/bin/python
#################################################################################
# MIT License
#
# Copyright (c) 2019 Aaron Jense, Amy Heidner, Dennis Heidner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#################################################################################
from third_party.Adafruit_I2C import *
from IAQ_Exceptions import *
import struct
class IAQ_SCD30():
sid = None
i2c = None
I2C_ADDRESS = 0x61
def __init__(self, sensor_id=None):
self.sid = sensor_id
try:
self.i2c = Adafruit_I2C(self.I2C_ADDRESS, debug=False)
except IOError:
raise SensorSetupError('Could not setup SCD30 I2C.')
def getData(self):
try:
self.i2c.write8(0x0,0)
self.i2c.write8(0x46,2)
rawdata = self.i2c.readList(0x03,18)
struct_co2 = struct.pack('BBBB', rawdata[0], rawdata[1],
rawdata[3], rawdata[4])
float_co2 = struct.unpack('>f', struct_co2)
data = "%.4f"%float_co2
except IOError:
raise SensorReadError('Unable to read SCD30.')
except TypeError:
raise SensorReadError('Unable to read SCD30.')
return data
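# Illustrative aside (not part of the driver): getData above rebuilds the CO2 reading by
# concatenating the sensor's data bytes (skipping the CRC byte that follows each 16-bit
# word) and unpacking them as a big-endian IEEE-754 float. A round-trip of that '>f'
# packing/unpacking with an assumed 412.5 ppm value, reusing the struct import above:
if __name__ == "__main__":
    raw = struct.pack('>f', 412.5)        # four bytes, as reassembled in getData
    value = struct.unpack('>f', raw)[0]
    print("%.4f" % value)                 # -> 412.5000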
| 736
| 112
| 24
|
64b668a6aa6c762d7927caa38bc992ca22f6db7c
| 3,424
|
py
|
Python
|
dc_gym/iroko_reward.py
|
matthieu637/iroko
|
3905caa46328a7c011762f8a96a15fbde9826899
|
[
"Apache-2.0"
] | 56
|
2018-12-01T00:11:27.000Z
|
2022-03-08T04:10:10.000Z
|
dc_gym/iroko_reward.py
|
matthieu637/iroko
|
3905caa46328a7c011762f8a96a15fbde9826899
|
[
"Apache-2.0"
] | 33
|
2018-12-13T20:18:07.000Z
|
2022-03-23T16:03:26.000Z
|
dc_gym/iroko_reward.py
|
matthieu637/iroko
|
3905caa46328a7c011762f8a96a15fbde9826899
|
[
"Apache-2.0"
] | 17
|
2019-02-19T05:31:23.000Z
|
2022-03-14T15:20:00.000Z
|
import numpy as np
import math
import logging
log = logging.getLogger(__name__)
def fairness_reward(actions, queues=None):
"""Compute Jain"s fairness index for a list of values.
See http://en.wikipedia.org/wiki/Fairness_measure for fairness equations.
@param values: list of values
@return fairness: JFI
"""
if len(actions) == 0:
return 1.0
num = sum(actions) ** 2
denom = len(actions) * sum([i ** 2 for i in actions])
return num / float(denom)
def gini_reward(actions, queues=None):
"""Calculate the Gini coefficient of a numpy array."""
# based on bottom eq:
# http://www.statsdirect.com/help/generatedimages/equations/equation154.svg
# from:
# http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
# All values are treated equally, arrays must be 1d:
# Values must be sorted:
actions = np.sort(actions)
# Number of array elements:
n = actions.shape[0]
# Index per array element:
index = np.arange(1, n + 1)
# Gini coefficient:
return ((np.sum((2 * index - n - 1) * actions)) / (n * np.sum(actions)))
# small script to visualize the reward output
if __name__ == "__main__":
import matplotlib.pyplot as plt
queues = [i * 0.1 for i in range(0, 11)]
actions = [i * .001 for i in range(0, 1000)]
for queue in queues:
rewards = []
queue_input = np.array([queue])
for action in actions:
action_input = np.array([action])
rewards.append((joint_queue_reward(action_input, queue_input)))
plt.plot(actions, rewards, label="Queue Size %f" % queue)
plt.xlabel("Action Input")
plt.ylabel("Reward")
plt.legend()
plt.show()
| 29.264957
| 80
| 0.651577
|
import numpy as np
import math
import logging
log = logging.getLogger(__name__)
def fairness_reward(actions, queues=None):
"""Compute Jain"s fairness index for a list of values.
See http://en.wikipedia.org/wiki/Fairness_measure for fairness equations.
@param values: list of values
@return fairness: JFI
"""
if len(actions) == 0:
return 1.0
num = sum(actions) ** 2
denom = len(actions) * sum([i ** 2 for i in actions])
return num / float(denom)
def gini_reward(actions, queues=None):
"""Calculate the Gini coefficient of a numpy array."""
# based on bottom eq:
# http://www.statsdirect.com/help/generatedimages/equations/equation154.svg
# from:
# http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
# All values are treated equally, arrays must be 1d:
# Values must be sorted:
actions = np.sort(actions)
# Number of array elements:
n = actions.shape[0]
# Index per array element:
index = np.arange(1, n + 1)
# Gini coefficient:
return ((np.sum((2 * index - n - 1) * actions)) / (n * np.sum(actions)))
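# Illustrative aside (not in the original module): a quick numeric check of the two
# fairness measures above. Jain's index J = (sum x_i)^2 / (n * sum x_i^2) is 1.0 for
# perfectly equal allocations and 1/n when a single flow takes everything; the Gini
# coefficient moves the opposite way (0.0 means perfect equality).
def _fairness_demo():
    equal = np.array([0.5, 0.5, 0.5, 0.5])
    skewed = np.array([1.0, 0.0, 0.0, 0.0])
    assert fairness_reward(equal) == 1.0      # equal shares -> J = 1
    assert fairness_reward(skewed) == 0.25    # single hog -> J = 1/n with n = 4
    assert gini_reward(equal) == 0.0          # perfect equality
    assert gini_reward(skewed) == 0.75        # strongly unequal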
def action_reward(actions, queues=None):
return np.mean(actions)
def fair_queue_reward(actions, queues):
queue = np.max(queues)
action = np.mean(actions)
fairness = fairness_reward(actions[actions < 1.0])
reward = action - queue * action + (fairness * (1 - queue))
return reward
def joint_queue_reward(actions, queues):
queue = np.max(queues)
action = np.mean(actions)
reward = action - 2 * (queue * action)
return reward
def step_reward(actions, queues):
queue = np.max(queues)
action = np.mean(actions)
if queue > 0.30:
return -action - queue
else:
return action * (1 + (1 - gini_reward(actions))) - queue
def std_dev_reward(actions, queues=None):
return -np.std(actions)
def queue_reward(actions, queues):
queue_reward = -np.sum(queues)**2
return queue_reward
def selu_reward(reward):
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
return scale * (max(0, reward) + min(0, alpha * (math.exp(reward) - 1)))
class RewardFunction:
__slots__ = ["stats_dict", "reward_funs"]
def __init__(self, reward_models, stats_dict):
self.stats_dict = stats_dict
self.reward_funs = self._set_reward(reward_models)
def _set_reward(self, reward_models):
reward_funs = []
for model in reward_models:
reward_funs.append(globals()["%s_reward" % model])
return reward_funs
def get_reward(self, stats, actions):
queues = stats[self.stats_dict["backlog"]]
reward = 0.0
for reward_fun in self.reward_funs:
reward += reward_fun(actions, queues)
return reward
# small script to visualize the reward output
if __name__ == "__main__":
import matplotlib.pyplot as plt
queues = [i * 0.1 for i in range(0, 11)]
actions = [i * .001 for i in range(0, 1000)]
for queue in queues:
rewards = []
queue_input = np.array([queue])
for action in actions:
action_input = np.array([action])
rewards.append((joint_queue_reward(action_input, queue_input)))
plt.plot(actions, rewards, label="Queue Size %f" % queue)
plt.xlabel("Action Input")
plt.ylabel("Reward")
plt.legend()
plt.show()
| 1,379
| 127
| 184
|
e25ff3df493ac431d6d60b22582cb70b4670f2a3
| 727
|
py
|
Python
|
cellar/fs.py
|
JonathanHuot/cellarpy
|
74fe9f144b63b891d6cda45f10f63d310c0d0f58
|
[
"MIT"
] | null | null | null |
cellar/fs.py
|
JonathanHuot/cellarpy
|
74fe9f144b63b891d6cda45f10f63d310c0d0f58
|
[
"MIT"
] | 4
|
2018-03-03T22:08:22.000Z
|
2021-09-07T23:44:54.000Z
|
cellar/fs.py
|
JonathanHuot/cellarpy
|
74fe9f144b63b891d6cda45f10f63d310c0d0f58
|
[
"MIT"
] | 1
|
2017-06-08T13:01:02.000Z
|
2017-06-08T13:01:02.000Z
|
# -*- coding: utf-8 -*-
import os
| 25.964286
| 73
| 0.639615
|
# -*- coding: utf-8 -*-
import os
def iswritable(directory):
if not os.path.exists(directory):
try:
os.makedirs(directory)
except:
return False
return os.access(directory, os.W_OK | os.X_OK | os.R_OK)
def static_file_path(root, filename):
root = os.path.abspath(root) + os.sep
return os.path.abspath(os.path.join(root, filename.strip('/\\')))
def static_file_exists(root, filename):
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
if not filename.startswith(root):
return False
if not os.path.exists(filename) or not os.path.isfile(filename):
return False
return True
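# Illustrative aside (not from the original module): static_file_exists above guards
# against path traversal by requiring the resolved path to still start with the root
# directory. A small demonstration with assumed paths, reusing the os import above:
if __name__ == "__main__":
    root = os.path.abspath("public") + os.sep
    inside = os.path.abspath(os.path.join(root, "css/site.css".strip('/\\')))
    outside = os.path.abspath(os.path.join(root, "../../etc/passwd".strip('/\\')))
    print(inside.startswith(root))    # True  -> may be served
    print(outside.startswith(root))   # False -> rejected before any file I/O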
| 621
| 0
| 69
|
3c950daaa32f79dd0904fd8cf520966d77491761
| 371
|
py
|
Python
|
ex043.py
|
GuilhermeAntony14/Estudando-Python
|
b020f6d2625e7fcc42d30658bcbd881b093434dd
|
[
"MIT"
] | null | null | null |
ex043.py
|
GuilhermeAntony14/Estudando-Python
|
b020f6d2625e7fcc42d30658bcbd881b093434dd
|
[
"MIT"
] | null | null | null |
ex043.py
|
GuilhermeAntony14/Estudando-Python
|
b020f6d2625e7fcc42d30658bcbd881b093434dd
|
[
"MIT"
] | null | null | null |
print('vamos calcular seu IMC')
a = float(input('Sua altura: '))
p = float(input('Seu peso: '))
n = (p/(a**2))
print(f'Seu IMC e: {n:.1f}')
if n < 18.5:
print('Abaixo do peso.')
elif n <= 25 and n > 18.5:
print('Peso ideal.')
elif n < 30 and n > 25:
print('Sobrepeso.')
elif n <= 40 and 30 < n:
    print('obesidade.')
else:
    print('obesidade mórbida.')
| 23.1875
| 32
| 0.566038
|
print('vamos calcular seu IMC')
a = float(input('Sua altura: '))
p = float(input('Seu peso: '))
n = (p/(a**2))
print(f'Seu IMC e: {n:.1f}')
if n < 18.5:
print('Abaixo do peso.')
elif n <= 25 and n > 18.5:
print('Peso ideal.')
elif n < 30 and n > 25:
print('Sobrepeso.')
elif n <= 40 and 30 < n:
    print('obesidade.')
else:
    print('obesidade mórbida.')
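# Worked example (editorial addition, not part of the original exercise): IMC is the
# body-mass index, peso / altura^2 (weight over height squared). For an assumed height
# of 1.75 m and weight of 70 kg:
#   IMC = 70 / 1.75**2 = 70 / 3.0625 ≈ 22.9
# which lies in the 18.5-25 band, so the script above would print 'Peso ideal.'.
imc_exemplo = 70 / (1.75 ** 2)   # -> 22.857..., shown as 22.9 by the f'{n:.1f}' format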
| 0
| 0
| 0
|
e5bd5f40b426cef7283c560a2796fb22b549035d
| 667
|
py
|
Python
|
tests/test_data_cleanser.py
|
luisccalves/supplychainpy
|
63a10b77ffdcc5bca71e815c70667c819d8f9af0
|
[
"BSD-3-Clause"
] | 231
|
2016-05-30T02:34:45.000Z
|
2022-03-28T17:00:29.000Z
|
tests/test_data_cleanser.py
|
luisccalves/supplychainpy
|
63a10b77ffdcc5bca71e815c70667c819d8f9af0
|
[
"BSD-3-Clause"
] | 77
|
2016-03-23T16:28:34.000Z
|
2021-09-30T22:08:03.000Z
|
tests/test_data_cleanser.py
|
luisccalves/supplychainpy
|
63a10b77ffdcc5bca71e815c70667c819d8f9af0
|
[
"BSD-3-Clause"
] | 103
|
2016-08-10T19:53:09.000Z
|
2022-03-16T16:34:38.000Z
|
from unittest import TestCase
import logging
from supplychainpy._helpers import _data_cleansing
from supplychainpy.sample_data.config import ABS_FILE_PATH
#logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
| 39.235294
| 104
| 0.728636
|
from unittest import TestCase
import logging
from supplychainpy._helpers import _data_cleansing
from supplychainpy.sample_data.config import ABS_FILE_PATH
#logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class TestCleanser(TestCase):
def test_incorrect_row_length(self):
""" Tests for incorrect specification of number of columns after initial SKU identification. """
with open(ABS_FILE_PATH['COMPLETE_CSV_XSM']) as f:
for i in range(0, 11):
with self.assertRaises(expected_exception=Exception):
_data_cleansing.clean_orders_data_row_csv(f, length=i)
| 0
| 393
| 23
|
e738317c6f5cf90c7eb6eb5ab706d4e66f9a907d
| 438
|
py
|
Python
|
sikuli-ide/sample-scripts/mute.sikuli/mute.py
|
mgrundy/sikuli
|
4adaab7880d2f3e14702ca7287ae9c9e4f4de9ab
|
[
"MIT"
] | 1,292
|
2015-01-09T17:48:46.000Z
|
2022-03-30T20:08:15.000Z
|
sikuli-ide/sample-scripts/mute.sikuli/mute.py
|
mgrundy/sikuli
|
4adaab7880d2f3e14702ca7287ae9c9e4f4de9ab
|
[
"MIT"
] | 31
|
2015-01-20T15:01:24.000Z
|
2022-03-03T11:02:06.000Z
|
sikuli-ide/sample-scripts/mute.sikuli/mute.py
|
mgrundy/sikuli
|
4adaab7880d2f3e14702ca7287ae9c9e4f4de9ab
|
[
"MIT"
] | 267
|
2015-02-08T19:51:25.000Z
|
2022-03-19T22:16:01.000Z
|
switchApp("System Preferences.app")
click("1273526123226.png")
click("1273526171905.png")
thumbs = findAll("1273527194228.png")
for t in list(thumbs)[:2]: # only take the first two
dragLeft(t) # off
#dragRight(t) # on
#dragToMute(t)
| 23.052632
| 58
| 0.691781
|
def dragLeft(t):
dragDrop(t, t.getCenter().left(200))
def dragRight(t):
dragDrop(t, t.getCenter().right(200))
def dragToMute(t):
dragDrop(t, t.nearby().left().find("1273527108356.png"))
switchApp("System Preferences.app")
click("1273526123226.png")
click("1273526171905.png")
thumbs = findAll("1273527194228.png")
for t in list(thumbs)[:2]: # only take the first two
dragLeft(t) # off
#dragRight(t) # on
#dragToMute(t)
| 127
| 0
| 68
|
5a5a83ad47518f4946babfacb96a920c30542e02
| 4,869
|
py
|
Python
|
pytest_func_cov/plugin.py
|
radug0314/pytest_func_cov
|
cc689ba68d083e69d399baad83189861a7adb199
|
[
"MIT"
] | 4
|
2020-04-03T19:36:51.000Z
|
2021-04-11T23:41:59.000Z
|
pytest_func_cov/plugin.py
|
RaduG/pytest_func_cov
|
cc689ba68d083e69d399baad83189861a7adb199
|
[
"MIT"
] | 5
|
2020-02-23T20:37:04.000Z
|
2021-07-07T07:53:39.000Z
|
pytest_func_cov/plugin.py
|
radug0314/pytest_func_cov
|
cc689ba68d083e69d399baad83189861a7adb199
|
[
"MIT"
] | 1
|
2021-04-05T15:36:54.000Z
|
2021-04-05T15:36:54.000Z
|
import os
import sys
from .tracking import FunctionIndexer, get_full_function_name
def pytest_addoption(parser):
"""
Pytest hook - register command line arguments. We want to register the
--func_cov argument to explicitly pass the location of the package to
discover and the ignore_func_names ini setting.
Args:
parser:
"""
group = parser.getgroup("func_cov")
group.addoption(
"--func_cov",
dest="func_cov_source",
action="append",
default=[],
metavar="SOURCE",
nargs="?",
const=True,
)
group.addoption(
"--func_cov_report",
dest="func_cov_report",
action="append",
default=[],
metavar="SOURCE",
nargs="?",
const=True,
)
parser.addini("ignore_func_names", "function names to ignore", "linelist", [])
| 30.622642
| 82
| 0.586979
|
import os
import sys
from .tracking import FunctionIndexer, get_full_function_name
def pytest_addoption(parser):
"""
Pytest hook - register command line arguments. We want to register the
--func_cov argument to explicitly pass the location of the package to
discover and the ignore_func_names ini setting.
Args:
parser:
"""
group = parser.getgroup("func_cov")
group.addoption(
"--func_cov",
dest="func_cov_source",
action="append",
default=[],
metavar="SOURCE",
nargs="?",
const=True,
)
group.addoption(
"--func_cov_report",
dest="func_cov_report",
action="append",
default=[],
metavar="SOURCE",
nargs="?",
const=True,
)
parser.addini("ignore_func_names", "function names to ignore", "linelist", [])
def pytest_load_initial_conftests(early_config, parser, args):
if early_config.known_args_namespace.func_cov_source:
plugin = FuncCovPlugin(early_config)
early_config.pluginmanager.register(plugin, "_func_cov")
class FuncCovPlugin:
def __init__(self, args):
self.args = args
self.indexer = FunctionIndexer(args.getini("ignore_func_names"))
def pytest_sessionstart(self, session):
"""
Pytest hook - called when the pytest session is created. At this point,
we need to run a full module discovery and register all functions
prior to initiating the collection. If the PYTEST_FUNC_COV environment
variable is set, use that as the root discovery path, relative to the
session fspath.
Args:
session: Pytest session
"""
# Add current folder to sys.path if it is not already in
cwd = os.getcwd()
if cwd not in sys.path:
sys.path.append(cwd)
pytest_cov_paths = self.args.known_args_namespace.func_cov_source
if len(pytest_cov_paths) == 0:
pytest_cov_paths = [session.fspath]
else:
pytest_cov_paths = [
os.path.join(session.fspath, path.rstrip("/\\"))
for path in pytest_cov_paths
]
for package_path in pytest_cov_paths:
self.indexer.index_package(package_path)
def pytest_collect_file(self, path):
"""
Pytest hook - called before the collection of a file. At this point
we need to register the current test file as a valid function call
origin.
Args:
path (str): Path to test file
"""
self.indexer.register_source_module(str(path))
def pytest_terminal_summary(self, terminalreporter):
"""
Pytest hook - called when the test summary is outputted. Here we
output basic statistics of the number of functions registered and called,
as well as a function call test coverage (in percentage).
Args:
terminalreporter:
"""
output_options = self.args.known_args_namespace.func_cov_report
include_missing = "term-missing" in output_options
tr = terminalreporter
cwd = os.getcwd()
found = self.indexer.monitor.registered_functions
called = self.indexer.monitor.called_functions
missed = self.indexer.monitor.missed_functions
module_paths = [sys.modules[m].__file__[len(cwd) + 1 :] for m, _ in found]
max_name_len = max([len(mp) for mp in module_paths] + [5])
fmt_name = "%%- %ds " % max_name_len
header = (fmt_name % "Name") + " Funcs Miss" + "%*s" % (10, "Cover")
if include_missing:
header += "%*s" % (10, "Missing")
fmt_coverage = fmt_name + "%6d %6d" + "%%%ds%%%%" % (9,)
if include_missing:
fmt_coverage += " %s"
msg = "pytest_func_cov"
tr.write("-" * 20 + msg + "-" * 20 + "\n")
tr.write(header + "\n")
tr.write("-" * len(header) + "\n")
total_funcs = 0
total_miss = 0
for i, mp in enumerate(module_paths):
funcs = len(found[i][1])
miss = len(missed[i][1])
cover = int(((funcs - miss) / funcs) * 100)
total_funcs += funcs
total_miss += miss
args = (mp, funcs, miss, cover)
if include_missing:
args += (", ".join([f.__qualname__ for f in missed[i][1]]),)
tr.write(fmt_coverage % args)
tr.write("\n")
tr.write("-" * len(header) + "\n")
if total_funcs != 0:
total_cover = int(((total_funcs - total_miss) / total_funcs) * 100)
else:
total_cover = 0
args = ("TOTAL", total_funcs, total_miss, total_cover)
if include_missing:
args += ("",)
tr.write(fmt_coverage % args + "\n")
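# Illustrative aside (not part of the plugin): the "Cover" column above is plain
# integer-truncated percentage arithmetic. With assumed counts of 8 discovered functions
# of which 2 were never called:
#   cover = int(((8 - 2) / 8) * 100) = int(75.0) = 75
assert int(((8 - 2) / 8) * 100) == 75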
| 311
| 3,637
| 46
|
df932a1d318345b8235882775e0cd92939917f5c
| 1,968
|
py
|
Python
|
lib/tests/test_runner.py
|
xuzhiying9510/ncflow
|
3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e
|
[
"Artistic-1.0-cl8"
] | 10
|
2021-02-09T19:25:46.000Z
|
2022-03-29T13:49:23.000Z
|
lib/tests/test_runner.py
|
xuzhiying9510/ncflow
|
3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e
|
[
"Artistic-1.0-cl8"
] | null | null | null |
lib/tests/test_runner.py
|
xuzhiying9510/ncflow
|
3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e
|
[
"Artistic-1.0-cl8"
] | 5
|
2020-12-23T15:24:40.000Z
|
2022-01-06T09:42:38.000Z
|
#! /usr/bin/env python
from .toy_problem_test import ToyProblemTest
from .reconciliation_problem_test import ReconciliationProblemTest
from .reconciliation_problem_2_test import ReconciliationProblem2Test
from .recon3_test import Recon3Test
from .optgapc1_test import OptGapC1Test
from .optgapc2_test import OptGapC2Test
from .optgapc3_test import OptGapC3Test
from .optgap4_test import OptGap4Test
from .single_edge_b import SingleEdgeBTest
from .feasibility_test import FeasibilityTest
from .flow_path_construction_test import FlowPathConstructionTest
from .we_need_to_fix_this_test import WeNeedToFixThisTest
from .abstract_test import bcolors
import argparse
ALL_TESTS = [ToyProblemTest(), ReconciliationProblemTest(),
ReconciliationProblem2Test(), Recon3Test(), OptGapC1Test(),
OptGapC2Test(), OptGapC3Test(), FeasibilityTest(),
OptGap4Test(), FlowPathConstructionTest(), WeNeedToFixThisTest(),
SingleEdgeBTest()]
TEST_NAME_DICT = {test.name: test for test in ALL_TESTS}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--tests', nargs='+', required=False)
args = parser.parse_args()
if args.tests is not None:
tests_to_run = [TEST_NAME_DICT[name] for name in args.tests]
else:
tests_to_run = ALL_TESTS
print('RUNNING THE FOLLOWING TESTS: {}'.format(
[test.name for test in tests_to_run]))
run_tests(tests_to_run)
| 33.931034
| 102
| 0.723069
|
#! /usr/bin/env python
from .toy_problem_test import ToyProblemTest
from .reconciliation_problem_test import ReconciliationProblemTest
from .reconciliation_problem_2_test import ReconciliationProblem2Test
from .recon3_test import Recon3Test
from .optgapc1_test import OptGapC1Test
from .optgapc2_test import OptGapC2Test
from .optgapc3_test import OptGapC3Test
from .optgap4_test import OptGap4Test
from .single_edge_b import SingleEdgeBTest
from .feasibility_test import FeasibilityTest
from .flow_path_construction_test import FlowPathConstructionTest
from .we_need_to_fix_this_test import WeNeedToFixThisTest
from .abstract_test import bcolors
import argparse
ALL_TESTS = [ToyProblemTest(), ReconciliationProblemTest(),
ReconciliationProblem2Test(), Recon3Test(), OptGapC1Test(),
OptGapC2Test(), OptGapC3Test(), FeasibilityTest(),
OptGap4Test(), FlowPathConstructionTest(), WeNeedToFixThisTest(),
SingleEdgeBTest()]
TEST_NAME_DICT = {test.name: test for test in ALL_TESTS}
def run_tests(tests_to_run):
tests_that_failed = []
for test in tests_to_run:
print('\n\n---{} TEST---\n\n'.format(test.name.upper()))
test.run()
if test.has_error:
tests_that_failed.append(test)
for test in tests_that_failed:
print()
print(bcolors.ERROR + '\n\n---{} TEST failed---\n\n'.format(test.name.upper()) + bcolors.ENDC)
if len(tests_that_failed) == 0:
print(bcolors.OKGREEN + 'All tests passed!' + bcolors.ENDC)
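# Illustrative aside (not part of the test runner): run_tests above only requires objects
# exposing .name, .run() and .has_error. A minimal assumed stub showing that contract,
# without the real problem-solving tests:
class _StubTest:
    def __init__(self, name, should_fail=False):
        self.name = name
        self.has_error = False
        self._should_fail = should_fail

    def run(self):
        # a real test would solve an instance and compare against a reference here
        self.has_error = self._should_fail


def _stub_demo():
    demo = [_StubTest("always-passes"), _StubTest("always-fails", should_fail=True)]
    for t in demo:
        t.run()
    print([t.name for t in demo if t.has_error])   # ['always-fails']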
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--tests', nargs='+', required=False)
args = parser.parse_args()
if args.tests is not None:
tests_to_run = [TEST_NAME_DICT[name] for name in args.tests]
else:
tests_to_run = ALL_TESTS
print('RUNNING THE FOLLOWING TESTS: {}'.format(
[test.name for test in tests_to_run]))
run_tests(tests_to_run)
| 476
| 0
| 23
|
5eef5e446e1922c169ce5770f96bdb08b8933d69
| 17,847
|
py
|
Python
|
openmdao/core/tests/test_getset_vars.py
|
friedenhe/OpenMDAO
|
db1d7e22a8bf9f66afa82ec3544b7244d5545f6d
|
[
"Apache-2.0"
] | 451
|
2015-07-20T11:52:35.000Z
|
2022-03-28T08:04:56.000Z
|
openmdao/core/tests/test_getset_vars.py
|
friedenhe/OpenMDAO
|
db1d7e22a8bf9f66afa82ec3544b7244d5545f6d
|
[
"Apache-2.0"
] | 1,096
|
2015-07-21T03:08:26.000Z
|
2022-03-31T11:59:17.000Z
|
openmdao/core/tests/test_getset_vars.py
|
friedenhe/OpenMDAO
|
db1d7e22a8bf9f66afa82ec3544b7244d5545f6d
|
[
"Apache-2.0"
] | 301
|
2015-07-16T20:02:11.000Z
|
2022-03-28T08:04:39.000Z
|
"""Test getting/setting variables and subjacs with promoted/relative/absolute names."""
import unittest
import numpy as np
from openmdao.api import Problem, Group, ExecComp, IndepVarComp, DirectSolver, ParallelGroup
from openmdao.utils.mpi import MPI
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
if __name__ == '__main__':
unittest.main()
| 39.837054
| 292
| 0.518462
|
"""Test getting/setting variables and subjacs with promoted/relative/absolute names."""
import unittest
import numpy as np
from openmdao.api import Problem, Group, ExecComp, IndepVarComp, DirectSolver, ParallelGroup
from openmdao.utils.mpi import MPI
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
class TestGetSetVariables(unittest.TestCase):
def test_no_promotion(self):
"""
Illustrative examples showing how to access variables and subjacs.
"""
c = ExecComp('y=2*x')
g = Group()
g.add_subsystem('c', c)
model = Group()
model.add_subsystem('g', g)
p = Problem(model)
p.setup()
# -------------------------------------------------------------------
# inputs
p['g.c.x'] = 5.0
self.assertEqual(p['g.c.x'], 5.0)
# outputs
p['g.c.y'] = 5.0
self.assertEqual(p['g.c.y'], 5.0)
# Conclude setup but don't run model.
p.final_setup()
inputs, outputs, residuals = g.get_nonlinear_vectors()
# inputs
inputs['c.x'] = 5.0
self.assertEqual(inputs['c.x'], 5.0)
# outputs
outputs['c.y'] = 5.0
self.assertEqual(outputs['c.y'], 5.0)
# Removed part of test where we set values into the jacobian willy-nilly.
# You can only set declared values now.
def test_with_promotion(self):
"""
Illustrative examples showing how to access variables and subjacs.
"""
c1 = IndepVarComp('x')
c2 = ExecComp('y=2*x')
c3 = ExecComp('z=3*x')
g = Group()
g.add_subsystem('c1', c1, promotes=['*'])
g.add_subsystem('c2', c2, promotes=['*'])
g.add_subsystem('c3', c3, promotes=['*'])
model = Group()
model.add_subsystem('g', g, promotes=['*'])
p = Problem(model)
p.setup()
# -------------------------------------------------------------------
# inputs
p['g.c2.x'] = 5.0
self.assertEqual(p['g.c2.x'], 5.0)
# outputs
p['g.c2.y'] = 5.0
self.assertEqual(p['g.c2.y'], 5.0)
p['y'] = 5.0
self.assertEqual(p['y'], 5.0)
# Conclude setup but don't run model.
p.final_setup()
inputs, outputs, residuals = g.get_nonlinear_vectors()
# inputs
inputs['c2.x'] = 5.0
self.assertEqual(inputs['c2.x'], 5.0)
# outputs
outputs['c2.y'] = 5.0
self.assertEqual(outputs['c2.y'], 5.0)
outputs['y'] = 5.0
self.assertEqual(outputs['y'], 5.0)
# Removed part of test where we set values into the jacobian willy-nilly. You can only set
# declared values now.
def test_no_promotion_errors(self):
"""
Tests for error-handling for invalid variable names and keys.
"""
g = Group(assembled_jac_type='dense')
g.linear_solver = DirectSolver(assemble_jac=True)
g.add_subsystem('c', ExecComp('y=2*x'))
p = Problem()
model = p.model
model.add_subsystem('g', g)
p.setup()
# -------------------------------------------------------------------
msg = '\'<model> <class Group>: Variable "{}" not found.\''
# inputs
with self.assertRaises(KeyError) as ctx:
p['x'] = 5.0
self.assertEqual(str(ctx.exception), msg.format('x'))
p._initial_condition_cache = {}
with self.assertRaises(KeyError) as ctx:
p['x']
self.assertEqual(str(ctx.exception), msg.format('x'))
# outputs
with self.assertRaises(KeyError) as ctx:
p['y'] = 5.0
self.assertEqual(str(ctx.exception), msg.format('y'))
p._initial_condition_cache = {}
with self.assertRaises(KeyError) as ctx:
p['y']
self.assertEqual(str(ctx.exception), msg.format('y'))
p.final_setup()
msg = "'g' <class Group>: Variable name '{}' not found."
inputs, outputs, residuals = g.get_nonlinear_vectors()
# inputs
for vname in ['x', 'g.c.x']:
with self.assertRaises(KeyError) as cm:
inputs[vname] = 5.0
self.assertEqual(cm.exception.args[0], f"'g' <class Group>: Variable name '{vname}' not found.")
with self.assertRaises(KeyError) as cm:
inputs[vname]
self.assertEqual(cm.exception.args[0], f"'g' <class Group>: Variable name '{vname}' not found.")
# outputs
for vname in ['y', 'g.c.y']:
with self.assertRaises(KeyError) as cm:
outputs[vname] = 5.0
self.assertEqual(cm.exception.args[0], f"'g' <class Group>: Variable name '{vname}' not found.")
with self.assertRaises(KeyError) as cm:
outputs[vname]
self.assertEqual(cm.exception.args[0], f"'g' <class Group>: Variable name '{vname}' not found.")
msg = r'Variable name pair \("{}", "{}"\) not found.'
jac = g.linear_solver._assembled_jac
# d(output)/d(input)
with self.assertRaisesRegex(KeyError, msg.format('y', 'x')):
jac['y', 'x'] = 5.0
with self.assertRaisesRegex(KeyError, msg.format('y', 'x')):
jac['y', 'x']
# allow absolute keys now
# with self.assertRaisesRegex(KeyError, msg.format('g.c.y', 'g.c.x')):
# jac['g.c.y', 'g.c.x'] = 5.0
# with self.assertRaisesRegex(KeyError, msg.format('g.c.y', 'g.c.x')):
# deriv = jac['g.c.y', 'g.c.x']
# d(output)/d(output)
with self.assertRaisesRegex(KeyError, msg.format('y', 'y')):
jac['y', 'y'] = 5.0
with self.assertRaisesRegex(KeyError, msg.format('y', 'y')):
jac['y', 'y']
        # allow absolute keys now
# with self.assertRaisesRegex(KeyError, msg.format('g.c.y', 'g.c.y')):
# jac['g.c.y', 'g.c.y'] = 5.0
# with self.assertRaisesRegex(KeyError, msg.format('g.c.y', 'g.c.y')):
# deriv = jac['g.c.y', 'g.c.y']
def test_with_promotion_errors(self):
"""
Tests for error-handling for invalid variable names and keys.
"""
c1 = IndepVarComp('x')
c2 = ExecComp('y=2*x')
c3 = ExecComp('z=3*x')
g = Group(assembled_jac_type='dense')
g.add_subsystem('c1', c1, promotes=['*'])
g.add_subsystem('c2', c2, promotes=['*'])
g.add_subsystem('c3', c3, promotes=['*'])
g.linear_solver = DirectSolver(assemble_jac=True)
model = Group()
model.add_subsystem('g', g, promotes=['*'])
p = Problem(model)
p.setup()
# Conclude setup but don't run model.
p.final_setup()
# -------------------------------------------------------------------
msg1 = "'g' <class Group>: Variable name '{}' not found."
msg2 = "The promoted name x is invalid because it refers to multiple inputs: " \
"[g.c2.x ,g.c3.x]. Access the value from the connected output variable x instead."
inputs, outputs, residuals = g.get_nonlinear_vectors()
# inputs
with self.assertRaises(Exception) as context:
inputs['x'] = 5.0
self.assertEqual(str(context.exception), msg2)
with self.assertRaises(Exception) as context:
self.assertEqual(inputs['x'], 5.0)
self.assertEqual(str(context.exception), msg2)
with self.assertRaises(KeyError) as cm:
inputs['g.c2.x'] = 5.0
self.assertEqual(cm.exception.args[0], msg1.format('g.c2.x'))
with self.assertRaises(KeyError) as cm:
inputs['g.c2.x']
self.assertEqual(cm.exception.args[0], msg1.format('g.c2.x'))
# outputs
with self.assertRaises(KeyError) as cm:
outputs['g.c2.y'] = 5.0
self.assertEqual(cm.exception.args[0], msg1.format('g.c2.y'))
with self.assertRaises(KeyError) as cm:
outputs['g.c2.y']
self.assertEqual(cm.exception.args[0], msg1.format('g.c2.y'))
msg1 = r'Variable name pair \("{}", "{}"\) not found.'
jac = g.linear_solver._assembled_jac
# d(outputs)/d(inputs)
with self.assertRaises(Exception) as context:
jac['y', 'x'] = 5.0
self.assertEqual(str(context.exception), msg2)
with self.assertRaises(Exception) as context:
self.assertEqual(jac['y', 'x'], 5.0)
self.assertEqual(str(context.exception), msg2)
def test_serial_multi_src_inds(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x', val=np.ones(10)))
p.model.add_subsystem('C1', ExecComp('y=x*2.', x=np.zeros(7), y=np.zeros(7)))
p.model.add_subsystem('C2', ExecComp('y=x*3.', x=np.zeros(3), y=np.zeros(3)))
p.model.connect('indep.x', 'C1.x', src_indices=list(range(7)))
p.model.connect('indep.x', 'C2.x', src_indices=list(range(7, 10)))
p.setup()
p['C1.x'] = (np.arange(7) + 1.) * 2.
p['C2.x'] = (np.arange(7,10) + 1.) * 3.
p.run_model()
np.testing.assert_allclose(p['indep.x'][:7], (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p['indep.x'][7:10], (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p['C1.x'], (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p['C2.x'], (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p['C1.y'], (np.arange(7) + 1.) * 4.)
np.testing.assert_allclose(p['C2.y'], (np.arange(7,10) + 1.) * 9.)
def test_serial_multi_src_inds_promoted(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x', val=np.ones(10)), promotes=['x'])
p.model.add_subsystem('C1', ExecComp('y=x*2.',
x={'val': np.zeros(7)},
y={'val': np.zeros(7)}))
p.model.add_subsystem('C2', ExecComp('y=x*3.',
x={'val': np.zeros(3)},
y={'val': np.zeros(3)}))
p.model.promotes('C1', inputs=['x'], src_indices=list(range(7)))
p.model.promotes('C2', inputs=['x'], src_indices=list(range(7, 10)))
p.setup()
p['C1.x'] = (np.arange(7) + 1.) * 2.
p['C2.x'] = (np.arange(7,10) + 1.) * 3.
p.run_model()
np.testing.assert_allclose(p['indep.x'][:7], (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p['indep.x'][7:10], (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p['C1.x'], (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p['C2.x'], (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p['C1.y'], (np.arange(7) + 1.) * 4.)
np.testing.assert_allclose(p['C2.y'], (np.arange(7,10) + 1.) * 9.)
def test_serial_multi_src_inds_units_promoted(self):
p = Problem()
indep = p.model.add_subsystem('indep', IndepVarComp(), promotes=['x'])
indep.add_output('x', units='inch', val=np.ones(10))
p.model.add_subsystem('C1', ExecComp('y=x*2.',
x={'val': np.zeros(7),
'units': 'ft'},
y={'val': np.zeros(7), 'units': 'ft'}))
p.model.add_subsystem('C2', ExecComp('y=x*3.',
x={'val': np.zeros(3),
'units': 'inch'},
y={'val': np.zeros(3), 'units': 'inch'}))
p.model.promotes('C1', inputs=['x'], src_indices=list(range(7)))
p.model.promotes('C2', inputs=['x'], src_indices=list(range(7, 10)))
p.setup()
p['C1.x'] = np.ones(7) * 2.
p['C2.x'] = np.ones(3) * 3.
p.run_model()
np.testing.assert_allclose(p['indep.x'][:7], np.ones(7) * 24.)
np.testing.assert_allclose(p['indep.x'][7:10], np.ones(3) * 3.)
np.testing.assert_allclose(p['C1.x'], np.ones(7) * 2.)
np.testing.assert_allclose(p['C1.y'], np.ones(7) * 4.)
np.testing.assert_allclose(p['C2.x'], np.ones(3) * 3.)
np.testing.assert_allclose(p['C2.y'], np.ones(3) * 9.)
def test_serial_multi_src_inds_units_promoted_no_src(self):
p = Problem()
p.model.add_subsystem('C1', ExecComp('y=x*2.',
x={'val': np.zeros(7),
'units': 'ft'},
y={'val': np.zeros(7), 'units': 'ft'}))
p.model.add_subsystem('C2', ExecComp('y=x*3.',
x={'val': np.zeros(3),
'units': 'inch'},
y={'val': np.zeros(3), 'units': 'inch'}))
p.model.add_subsystem('C3', ExecComp('y=x*4.',
x={'val': np.zeros(10), 'units': 'mm'},
y={'val': np.zeros(10), 'units': 'mm'}),
promotes=['x'])
p.model.promotes('C1', inputs=['x'], src_indices=list(range(7)))
p.model.promotes('C2', inputs=['x'], src_indices=list(range(7, 10)))
with self.assertRaises(RuntimeError) as cm:
p.setup()
self.assertEqual(str(cm.exception), "<model> <class Group>: The following inputs, ['C1.x', 'C2.x', 'C3.x'], promoted to 'x', are connected but their metadata entries ['units'] differ. Call <group>.set_input_defaults('x', units=?), where <group> is the model to remove the ambiguity.")
def test_serial_multi_src_inds_units_setval_promoted(self):
p = Problem()
indep = p.model.add_subsystem('indep', IndepVarComp(), promotes=['x'])
indep.add_output('x', units='inch', val=np.ones(10))
p.model.add_subsystem('C1', ExecComp('y=x*2.',
x={'val': np.zeros(7),
'units': 'ft'},
y={'val': np.zeros(7), 'units': 'ft'}))
p.model.add_subsystem('C2', ExecComp('y=x*3.',
x={'val': np.zeros(3),
'units': 'inch'},
y={'val': np.zeros(3), 'units': 'inch'}))
p.model.promotes('C1', inputs=['x'], src_indices=list(range(7)))
p.model.promotes('C2', inputs=['x'], src_indices=list(range(7, 10)))
p.setup()
p.set_val('C1.x', np.ones(7) * 24., units='inch')
p.set_val('C2.x', np.ones(3) * 3., units='inch')
p.run_model()
np.testing.assert_allclose(p['indep.x'][:7], np.ones(7) * 24.)
np.testing.assert_allclose(p['indep.x'][7:10], np.ones(3) * 3.)
np.testing.assert_allclose(p['C1.x'], np.ones(7) * 2.)
np.testing.assert_allclose(p['C1.y'], np.ones(7) * 4.)
np.testing.assert_allclose(p['C2.x'], np.ones(3) * 3.)
np.testing.assert_allclose(p['C2.y'], np.ones(3) * 9.)
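# Illustrative aside (not part of the OpenMDAO test suite): the assertions above hinge on
# two small pieces of arithmetic - splitting a 10-wide source array with src_indices, and
# the inch -> ft conversion (24 inch == 2 ft, i.e. divide by 12). A plain NumPy sketch of
# both, with assumed values:
def _src_indices_demo():
    src = np.ones(10)
    src[:7] = (np.arange(7) + 1.) * 2.       # the slice C1 reads via src_indices=range(7)
    src[7:] = (np.arange(7, 10) + 1.) * 3.   # the slice C2 reads via src_indices=range(7, 10)
    c1_x_ft = src[:7] / 12.                  # inch -> ft, the conversion behind the units tests
    np.testing.assert_allclose(c1_x_ft, (np.arange(7) + 1.) / 6.)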
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
class ParTestCase(unittest.TestCase):
N_PROCS = 2
def test_par_multi_src_inds(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x', val=np.ones(10)))
par = p.model.add_subsystem('par', ParallelGroup())
par.add_subsystem('C1', ExecComp('y=x*2.', x=np.zeros(7), y=np.zeros(7)))
par.add_subsystem('C2', ExecComp('y=x*3.', x=np.zeros(3), y=np.zeros(3)))
p.model.connect('indep.x', 'par.C1.x', src_indices=list(range(7)))
p.model.connect('indep.x', 'par.C2.x', src_indices=list(range(7, 10)))
p.setup()
p['indep.x'] = np.concatenate([(np.arange(7) + 1.) * 2., (np.arange(7, 10) + 1.) * 3.])
p.run_model()
np.testing.assert_allclose(p['indep.x'][:7], (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p['indep.x'][7:10], (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p.get_val('par.C1.x', get_remote=True), (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p.get_val('par.C2.x', get_remote=True), (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p.get_val('par.C1.y', get_remote=True), (np.arange(7) + 1.) * 4.)
np.testing.assert_allclose(p.get_val('par.C2.y', get_remote=True), (np.arange(7,10) + 1.) * 9.)
@unittest.expectedFailure
def test_par_multi_src_inds_fail(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x', val=np.ones(10)))
par = p.model.add_subsystem('par', ParallelGroup())
par.add_subsystem('C1', ExecComp('y=x*2.', x=np.zeros(7), y=np.zeros(7)))
par.add_subsystem('C2', ExecComp('y=x*3.', x=np.zeros(3), y=np.zeros(3)))
p.model.connect('indep.x', 'par.C1.x', src_indices=list(range(7)))
p.model.connect('indep.x', 'par.C2.x', src_indices=list(range(7, 10)))
p.setup()
p['par.C1.x'] = (np.arange(7) + 1.) * 2.
p['par.C2.x'] = (np.arange(7,10) + 1.) * 3.
p.run_model()
np.testing.assert_allclose(p['indep.x'][:7], (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p['indep.x'][7:10], (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p['par.C1.x'], (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p['par.C2.x'], (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p['par.C1.y'], (np.arange(7) + 1.) * 4.)
np.testing.assert_allclose(p['par.C2.y'], (np.arange(7,10) + 1.) * 9.)
if __name__ == '__main__':
unittest.main()
| 8,858
| 8,460
| 45
|
c4ac532576ad2e3296ef052f13dff92d03c958af
| 6,299
|
py
|
Python
|
heltour/tournament/tests/test_views.py
|
lenguyenthanh/heltour
|
13018b1905539de0b273370a76f6aa1d1ebbb01a
|
[
"MIT"
] | null | null | null |
heltour/tournament/tests/test_views.py
|
lenguyenthanh/heltour
|
13018b1905539de0b273370a76f6aa1d1ebbb01a
|
[
"MIT"
] | null | null | null |
heltour/tournament/tests/test_views.py
|
lenguyenthanh/heltour
|
13018b1905539de0b273370a76f6aa1d1ebbb01a
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from heltour.tournament.models import *
from django.core.urlresolvers import reverse
# For now we just have sanity checks for the templates used
# This could be enhanced by verifying the context data
| 41.993333
| 123
| 0.699476
|
from django.test import TestCase
from heltour.tournament.models import *
from django.core.urlresolvers import reverse
# For now we just have sanity checks for the templates used
# This could be enhanced by verifying the context data
def createCommonLeagueData():
team_count = 4
round_count = 3
board_count = 2
league = League.objects.create(name='Team League', tag='team', competitor_type='team')
season = Season.objects.create(league=league, name='Team Season', tag='team', rounds=round_count, boards=board_count)
league2 = League.objects.create(name='Lone League', tag='lone')
season2 = Season.objects.create(league=league2, name='Lone Season', tag='lone', rounds=round_count, boards=board_count)
player_num = 1
for n in range(1, team_count + 1):
team = Team.objects.create(season=season, number=n, name='Team %s' % n)
TeamScore.objects.create(team=team)
for b in range(1, board_count + 1):
player = Player.objects.create(lichess_username='Player%d' % player_num)
player_num += 1
TeamMember.objects.create(team=team, player=player, board_number=b)
class HomeTestCase(TestCase):
def setUp(self):
pass
def test_template(self):
response = self.client.get(reverse('home'))
self.assertTemplateUsed(response, 'tournament/home.html')
class LeagueHomeTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:league_home', args=['team']))
self.assertTemplateUsed(response, 'tournament/team_league_home.html')
response = self.client.get(reverse('by_league:league_home', args=['lone']))
self.assertTemplateUsed(response, 'tournament/lone_league_home.html')
class SeasonLandingTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:season_landing', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/team_season_landing.html')
response = self.client.get(reverse('by_league:by_season:season_landing', args=['lone', 'lone']))
self.assertTemplateUsed(response, 'tournament/lone_season_landing.html')
for s in Season.objects.all():
s.is_completed = True
s.save()
response = self.client.get(reverse('by_league:by_season:season_landing', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/team_completed_season_landing.html')
response = self.client.get(reverse('by_league:by_season:season_landing', args=['lone', 'lone']))
self.assertTemplateUsed(response, 'tournament/lone_completed_season_landing.html')
class RostersTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:rosters', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/team_rosters.html')
response = self.client.get(reverse('by_league:by_season:rosters', args=['lone', 'lone']))
self.assertEqual(404, response.status_code)
class StandingsTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:standings', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/team_standings.html')
response = self.client.get(reverse('by_league:by_season:standings', args=['lone', 'lone']))
self.assertTemplateUsed(response, 'tournament/lone_standings.html')
class CrosstableTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:crosstable', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/team_crosstable.html')
response = self.client.get(reverse('by_league:by_season:crosstable', args=['lone', 'lone']))
self.assertEqual(404, response.status_code)
class WallchartTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:wallchart', args=['team', 'team']))
self.assertEqual(404, response.status_code)
response = self.client.get(reverse('by_league:by_season:wallchart', args=['lone', 'lone']))
self.assertTemplateUsed(response, 'tournament/lone_wallchart.html')
class PairingsTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:pairings', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/team_pairings.html')
response = self.client.get(reverse('by_league:by_season:pairings', args=['lone', 'lone']))
self.assertTemplateUsed(response, 'tournament/lone_pairings.html')
class StatsTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:stats', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/team_stats.html')
response = self.client.get(reverse('by_league:by_season:stats', args=['lone', 'lone']))
self.assertTemplateUsed(response, 'tournament/lone_stats.html')
class RegisterTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:register', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/registration_closed.html')
season = Season.objects.all()[0]
season.registration_open = True
season.save()
response = self.client.get(reverse('by_league:by_season:register', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/register.html')
response = self.client.get(reverse('by_league:by_season:registration_success', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/registration_success.html')
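# Sketch of the enhancement suggested by the comment near the top of this file
# (verifying context data rather than only the template used). The 'season'
# context key is an assumption about what the standings view actually exposes.
class StandingsContextTestCase(TestCase):
    def setUp(self):
        createCommonLeagueData()
    def test_standings_context(self):
        response = self.client.get(reverse('by_league:by_season:standings', args=['team', 'team']))
        self.assertEqual(response.status_code, 200)
        self.assertIn('season', response.context)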
| 5,159
| 123
| 783
|
f99a17b230e8119aba628bdff0bc3af92b2d5225
| 765
|
py
|
Python
|
tests/test_mass_fetch.py
|
dominik7680/py-skydb
|
ddf2c6c993cc75398bc3dfcf41954b6793bf3349
|
[
"MIT"
] | 14
|
2020-11-23T17:05:27.000Z
|
2022-03-22T01:52:09.000Z
|
tests/test_mass_fetch.py
|
dominik7680/py-skydb
|
ddf2c6c993cc75398bc3dfcf41954b6793bf3349
|
[
"MIT"
] | 2
|
2020-11-25T12:00:24.000Z
|
2020-12-09T18:10:50.000Z
|
tests/test_mass_fetch.py
|
dominik7680/py-skydb
|
ddf2c6c993cc75398bc3dfcf41954b6793bf3349
|
[
"MIT"
] | 5
|
2020-12-09T15:57:11.000Z
|
2022-01-30T13:17:14.000Z
|
from skydb import SkydbTable
from random import choice
from string import ascii_letters
table_name = ''.join([choice(ascii_letters) for i in range(20)])
import time
print("Creating table")
table = SkydbTable(table_name, columns=['c1','c2','c3'], seed="some_random", verbose=1)
print("Added table successfully")
| 24.677419
| 87
| 0.678431
|
from skydb import SkydbTable
from random import choice
from string import ascii_letters
table_name = ''.join([choice(ascii_letters) for i in range(20)])
import time
print("Creating table")
table = SkydbTable(table_name, columns=['c1','c2','c3'], seed="some_random", verbose=1)
print("Added table successfully")
def test_mass_fetch():
global table
rows = []
for i in range(20):
row = {}
for c in ['c1', 'c2','c3']:
row['c1'] = ''.join([choice(ascii_letters) for i in range(5)])
row['c2'] = ''.join([choice(ascii_letters) for i in range(5)])
row['c3'] = ''.join([choice(ascii_letters) for i in range(5)])
rows.append(row)
print("Adding rows")
table.add_rows(rows)
print("Successfully added rows")
out = table.fetch_rows(list(range(10)))
| 427
| 0
| 23
|
2fbbfb33fb8b668b247f87dcbbe55fe6c26a1b4c
| 545
|
py
|
Python
|
tests/db_engine_specs/athena_tests.py
|
psbsgic/rabbitai
|
769e120ba605d56ac076f810a549c38dac410c8e
|
[
"Apache-2.0"
] | null | null | null |
tests/db_engine_specs/athena_tests.py
|
psbsgic/rabbitai
|
769e120ba605d56ac076f810a549c38dac410c8e
|
[
"Apache-2.0"
] | null | null | null |
tests/db_engine_specs/athena_tests.py
|
psbsgic/rabbitai
|
769e120ba605d56ac076f810a549c38dac410c8e
|
[
"Apache-2.0"
] | 1
|
2021-07-09T16:29:50.000Z
|
2021-07-09T16:29:50.000Z
|
from rabbitai.db_engine_specs.athena import AthenaEngineSpec
from tests.db_engine_specs.base_tests import TestDbEngineSpec
| 30.277778
| 67
| 0.677064
|
from rabbitai.db_engine_specs.athena import AthenaEngineSpec
from tests.db_engine_specs.base_tests import TestDbEngineSpec
class TestAthenaDbEngineSpec(TestDbEngineSpec):
def test_convert_dttm(self):
dttm = self.get_dttm()
self.assertEqual(
AthenaEngineSpec.convert_dttm("DATE", dttm),
"from_iso8601_date('2019-01-02')",
)
self.assertEqual(
AthenaEngineSpec.convert_dttm("TIMESTAMP", dttm),
"from_iso8601_timestamp('2019-01-02T03:04:05.678900')",
)
| 346
| 26
| 49
|
f47c4459ec95c272bab38550541eef155938f3cc
| 1,733
|
py
|
Python
|
api/tests/integration/tests/layout/template_layout.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 204
|
2015-11-06T21:34:34.000Z
|
2022-03-30T16:17:01.000Z
|
api/tests/integration/tests/layout/template_layout.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 509
|
2015-11-05T13:54:43.000Z
|
2022-03-30T22:15:30.000Z
|
api/tests/integration/tests/layout/template_layout.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 89
|
2015-11-17T08:22:54.000Z
|
2022-03-17T04:26:28.000Z
|
import os
import sys
import errno
import math
from math import *
sys.path.append('../../common')
from env_indigo import *
if not os.path.exists(joinPathPy("out", __file__)):
try:
os.makedirs(joinPathPy("out", __file__))
except OSError as e:
if e.errno != errno.EEXIST:
raise
indigo = Indigo()
indigo.setOption("molfile-saving-skip-date", "1")
indigo.setOption("treat-x-as-pseudoatom", "1")
indigo.setOption("smart-layout", "1")
ref_path = getRefFilepath("template_layout.sdf")
ref = indigo.iterateSDFile(ref_path)
print("**** Test template layout *****")
saver = indigo.writeFile(joinPathPy("out/template_layout.sdf", __file__))
for idx, item in enumerate(indigo.iterateSDFile(joinPathPy("molecules/template_layout.sdf", __file__))):
try:
mol = item.clone()
mol.layout()
res = moleculeLayoutDiff(indigo, mol, ref.at(idx).rawData(), ref_is_file = False)
print(" Item #{}: Result: {}".format(idx, res))
saver.sdfAppend(mol)
except IndigoException as e:
        print("Exception for #%s: %s" % (idx, getIndigoExceptionText(e)))
print("**** Test rings templates layout *****")
ref_path = getRefFilepath("rings_templates.sdf")
ref = indigo.iterateSDFile(ref_path)
saver = indigo.writeFile(joinPathPy("out/rings_templates.sdf", __file__))
for idx, item in enumerate(ref):
try:
mol = item.clone()
mol.layout()
res = moleculeLayoutDiff(indigo, mol, item.rawData(), ref_is_file = False)
print(" Item #{}: Result: {}".format(idx, res))
saver.sdfAppend(mol)
except IndigoException as e:
        print("Exception for #%s: %s" % (idx, getIndigoExceptionText(e)))
| 31.509091
| 104
| 0.669359
|
import os
import sys
import errno
import math
from math import *
sys.path.append('../../common')
from env_indigo import *
if not os.path.exists(joinPathPy("out", __file__)):
try:
os.makedirs(joinPathPy("out", __file__))
except OSError as e:
if e.errno != errno.EEXIST:
raise
indigo = Indigo()
indigo.setOption("molfile-saving-skip-date", "1")
indigo.setOption("treat-x-as-pseudoatom", "1")
indigo.setOption("smart-layout", "1")
ref_path = getRefFilepath("template_layout.sdf")
ref = indigo.iterateSDFile(ref_path)
print("**** Test template layout *****")
saver = indigo.writeFile(joinPathPy("out/template_layout.sdf", __file__))
for idx, item in enumerate(indigo.iterateSDFile(joinPathPy("molecules/template_layout.sdf", __file__))):
try:
mol = item.clone()
mol.layout()
res = moleculeLayoutDiff(indigo, mol, ref.at(idx).rawData(), ref_is_file = False)
print(" Item #{}: Result: {}".format(idx, res))
saver.sdfAppend(mol)
except IndigoException as e:
        print("Exception for #%s: %s" % (idx, getIndigoExceptionText(e)))
print("**** Test rings templates layout *****")
ref_path = getRefFilepath("rings_templates.sdf")
ref = indigo.iterateSDFile(ref_path)
saver = indigo.writeFile(joinPathPy("out/rings_templates.sdf", __file__))
for idx, item in enumerate(ref):
try:
mol = item.clone()
mol.layout()
res = moleculeLayoutDiff(indigo, mol, item.rawData(), ref_is_file = False)
print(" Item #{}: Result: {}".format(idx, res))
saver.sdfAppend(mol)
except IndigoException as e:
        print("Exception for #%s: %s" % (idx, getIndigoExceptionText(e)))
| 0
| 0
| 0
|
90c360f1f9de7372f63bd4be872c6b44a814289d
| 1,019
|
py
|
Python
|
versionfield/forms.py
|
willseward/django-versionfield3
|
4ae365fb42f38a02c77d317cba3fef22806f2b24
|
[
"Unlicense"
] | null | null | null |
versionfield/forms.py
|
willseward/django-versionfield3
|
4ae365fb42f38a02c77d317cba3fef22806f2b24
|
[
"Unlicense"
] | null | null | null |
versionfield/forms.py
|
willseward/django-versionfield3
|
4ae365fb42f38a02c77d317cba3fef22806f2b24
|
[
"Unlicense"
] | null | null | null |
from builtins import str
from django import forms
from django.forms.widgets import TextInput
from .version import Version
from .constants import DEFAULT_NUMBER_BITS
from .utils import convert_version_int_to_string
| 30.878788
| 96
| 0.675172
|
from builtins import str
from django import forms
from django.forms.widgets import TextInput
from .version import Version
from .constants import DEFAULT_NUMBER_BITS
from .utils import convert_version_int_to_string
class VersionField(forms.IntegerField):
widget = TextInput
def __init__(self, number_bits=DEFAULT_NUMBER_BITS, **kwargs):
self.number_bits = number_bits
        super(VersionField, self).__init__(**kwargs)
def to_python(self, value):
"""
Verifies that value can be converted to a Version object
"""
if not value:
return None
if isinstance(value, str):
return int(Version(value, self.number_bits))
return Version(convert_version_int_to_string(value, self.number_bits), self.number_bits)
def widget_attrs(self, widget):
attrs = super(VersionField, self).widget_attrs(widget)
attrs['pattern'] = '^' + (r'(\d+\.)?' * (len(self.number_bits) - 1)) + r'(\*|\d+)$'
return attrs
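# Worked example of the pattern built in widget_attrs above: assuming a
# hypothetical number_bits=(8, 8, 16), len(self.number_bits) - 1 == 2, so the
# rendered HTML pattern is '^(\d+\.)?(\d+\.)?(\*|\d+)$', which accepts inputs
# such as '1.2.3', '1.2.*' or a bare '3'.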
| 326
| 454
| 23
|
b76165b2825027823939347d4036fc0458906b68
| 605
|
py
|
Python
|
src/tblink_rpc_utils/input_reader_yaml.py
|
tblink-rpc/tblink-rpc-utils
|
48a731cb8c6201e1975ba18f43737228eb1f7dee
|
[
"Apache-2.0"
] | null | null | null |
src/tblink_rpc_utils/input_reader_yaml.py
|
tblink-rpc/tblink-rpc-utils
|
48a731cb8c6201e1975ba18f43737228eb1f7dee
|
[
"Apache-2.0"
] | null | null | null |
src/tblink_rpc_utils/input_reader_yaml.py
|
tblink-rpc/tblink-rpc-utils
|
48a731cb8c6201e1975ba18f43737228eb1f7dee
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Oct 20, 2021
@author: mballance
'''
from tblink_rpc_utils.idl_spec import IDLSpec
from tblink_rpc_utils.input_reader import InputReader
from tblink_rpc_utils.input_spec import InputSpec
from tblink_rpc_utils.yaml_idl_parser import YamlIDLParser
| 25.208333
| 58
| 0.66281
|
'''
Created on Oct 20, 2021
@author: mballance
'''
from tblink_rpc_utils.idl_spec import IDLSpec
from tblink_rpc_utils.input_reader import InputReader
from tblink_rpc_utils.input_spec import InputSpec
from tblink_rpc_utils.yaml_idl_parser import YamlIDLParser
class InputReaderYaml(InputReader):
def __init__(self):
super().__init__()
def read(self, in_spec:InputSpec)->IDLSpec:
yaml_p = YamlIDLParser()
for file in in_spec.files:
with open(file, "r") as fp:
yaml_p.parse(fp)
return yaml_p.spec
| 241
| 14
| 89
|
9734061ff8c9ce101186289f0971e8af178cbcda
| 518
|
py
|
Python
|
escola/templatetags/markdown.py
|
vini84200/medusa2
|
37cf33d05be8b0195b10845061ca893ba5e814dd
|
[
"MIT"
] | 1
|
2019-03-15T18:04:24.000Z
|
2019-03-15T18:04:24.000Z
|
escola/templatetags/markdown.py
|
vini84200/medusa2
|
37cf33d05be8b0195b10845061ca893ba5e814dd
|
[
"MIT"
] | 22
|
2019-03-17T21:53:50.000Z
|
2021-03-31T19:12:19.000Z
|
escola/templatetags/markdown.py
|
vini84200/medusa2
|
37cf33d05be8b0195b10845061ca893ba5e814dd
|
[
"MIT"
] | 1
|
2018-11-25T03:05:23.000Z
|
2018-11-25T03:05:23.000Z
|
import misaka as m
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from MedusaII.settings import MARKDOWNX_MARKDOWN_EXTENSIONS
register = template.Library()
@register.filter(is_safe=True)
@stringfilter
| 28.777778
| 78
| 0.702703
|
import misaka as m
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from MedusaII.settings import MARKDOWNX_MARKDOWN_EXTENSIONS
register = template.Library()
@register.filter(is_safe=True)
@stringfilter
def md(value):
rendered_text = mark_safe(m.html(value,
extensions=MARKDOWNX_MARKDOWN_EXTENSIONS,
render_flags=('skip-html',)))
return rendered_text
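# Usage sketch in a template (illustrative; the tag library is loaded under this
# module's file name, and 'post.body' is a hypothetical context variable):
# {% load markdown %}
# {{ post.body|md }}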
| 208
| 0
| 22
|
297983f1224fa368c806bb3709b78cd221f4c7f1
| 1,720
|
py
|
Python
|
targetrsqueak-embedded.py
|
shiplift/RSqueakOnABoat
|
ac449758ddb7aef1721e65a13171547761dd6e39
|
[
"BSD-3-Clause"
] | 44
|
2015-02-08T09:38:46.000Z
|
2017-11-15T01:19:40.000Z
|
targetrsqueak-embedded.py
|
shiplift/RSqueakOnABoat
|
ac449758ddb7aef1721e65a13171547761dd6e39
|
[
"BSD-3-Clause"
] | 112
|
2015-02-08T09:34:40.000Z
|
2017-04-10T19:06:30.000Z
|
targetrsqueak-embedded.py
|
shiplift/RSqueakOnABoat
|
ac449758ddb7aef1721e65a13171547761dd6e39
|
[
"BSD-3-Clause"
] | 7
|
2015-04-08T11:49:10.000Z
|
2017-01-19T06:36:27.000Z
|
#! /usr/bin/env python
import sys
from rpython.jit.codewriter.policy import JitPolicy
from rsqueakvm import model, objspace, interpreter, squeakimage
# This loads an image file in advance and includes it in the
# translation-output. At run-time, the defined selector is sent
# to the defined SmallInteger. This way we get an RPython
# "image" frozen into the executable, mmap'ed by the OS from
# there and loaded lazily when needed :-)
# Besides testing etc., this can be used to create standalone
# binaries executing a smalltalk program.
sys.setrecursionlimit(100000)
imagefile = "images/mini.image"
selector = "loopTest"
receiver = 0
interp, s_frame = setup()
# _____ Define and setup target ___
if __name__ == "__main__":
entry_point(sys.argv)
| 30.714286
| 85
| 0.73314
|
#! /usr/bin/env python
import sys
from rpython.jit.codewriter.policy import JitPolicy
from rsqueakvm import model, objspace, interpreter, squeakimage
# This loads an image file in advance and includes it in the
# translation-output. At run-time, the defined selector is sent
# to the defined SmallInteger. This way we get an RPython
# "image" frozen into the executable, mmap'ed by the OS from
# there and loaded lazily when needed :-)
# Besides testing etc., this can be used to create standalone
# binaries executing a smalltalk program.
sys.setrecursionlimit(100000)
imagefile = "images/mini.image"
selector = "loopTest"
receiver = 0
def setup():
space = objspace.ObjSpace()
stream = squeakimage.Stream(filename=imagefile)
image = squeakimage.ImageReader(space, stream).create_image()
interp = interpreter.Interpreter(space, image)
w_selector = interp.perform(space.wrap_string(selector), "asSymbol")
w_object = model.W_SmallInteger(receiver)
s_class = w_object.class_shadow(space)
w_method = s_class.lookup(w_selector)
s_frame = w_method.create_frame(space, w_object)
return interp, s_frame
interp, s_frame = setup()
def entry_point(argv):
if len(argv) > 1:
print "This RSqueak VM has an embedded image and ignores all cli-parameters."
try:
interp.loop(s_frame.w_self())
except interpreter.ReturnFromTopLevel, e:
w_result = e.object
print interp.space.unwrap_string(w_result)
return 0
# _____ Define and setup target ___
def target(driver, *args):
driver.exe_name = "rsqueak-embedded"
return entry_point, None
def jitpolicy(driver):
return JitPolicy()
if __name__ == "__main__":
entry_point(sys.argv)
| 869
| 0
| 92
|
1422dc0df676bf0e1144a0249d8dca0784d485b3
| 32
|
py
|
Python
|
model/__init__.py
|
Khurramjaved96/Dicta
|
416638a3d1ad851b00394e55a7574ec978080d51
|
[
"Apache-2.0"
] | 60
|
2019-05-29T17:09:15.000Z
|
2022-03-30T15:35:57.000Z
|
model/__init__.py
|
Khurramjaved96/Dicta
|
416638a3d1ad851b00394e55a7574ec978080d51
|
[
"Apache-2.0"
] | 6
|
2018-06-08T14:32:34.000Z
|
2019-05-20T05:34:39.000Z
|
model/__init__.py
|
Khurramjaved96/Dicta
|
416638a3d1ad851b00394e55a7574ec978080d51
|
[
"Apache-2.0"
] | 28
|
2019-06-10T04:07:24.000Z
|
2022-01-12T19:21:49.000Z
|
from model.modelfactory import *
| 32
| 32
| 0.84375
|
from model.modelfactory import *
| 0
| 0
| 0
|
3e4408ccc1944b2314adbe4828cdf345edcdd816
| 6,087
|
py
|
Python
|
doublyLinkedList.py
|
nemodesouza/ed-trabalho-01-1-doubly-linked-list
|
b86689a52837ee7d9cb7a6cde7a420ffeb809fb6
|
[
"CC0-1.0"
] | null | null | null |
doublyLinkedList.py
|
nemodesouza/ed-trabalho-01-1-doubly-linked-list
|
b86689a52837ee7d9cb7a6cde7a420ffeb809fb6
|
[
"CC0-1.0"
] | null | null | null |
doublyLinkedList.py
|
nemodesouza/ed-trabalho-01-1-doubly-linked-list
|
b86689a52837ee7d9cb7a6cde7a420ffeb809fb6
|
[
"CC0-1.0"
] | null | null | null |
from cursor import Cursor
from node import Node
| 33.629834
| 91
| 0.55676
|
from cursor import Cursor
from node import Node
class DoublyLinkedList:
def __init__(self, limit=None):
self.__cursor = Cursor()
        self.__limit = limit
self.__elements = 0
@property
def element(self):
return self.__elements
def accessCurrent(self):
"""(elemento) acessarAtual()"""
return self.__cursor.current
def insertBeforeCurrent(self, data):
"""(void) InserirAntesDoAtual ( novo )"""
new_element = Node(data)
if self.isFull():
print("A lista está cheia!")
elif self.isEmpty():
self.__cursor.current = new_element
self.__elements += 1
elif self.__cursor.current.previous is None:
self.__cursor.current.previous = new_element
new_element.next = self.__cursor.current
self.__elements += 1
else:
self.__cursor.current.previous.next = new_element
new_element.previous = self.__cursor.current.previous
self.__cursor.current.previous = new_element
new_element.next = self.__cursor.current
self.__elements += 1
    def insertAfterCurrent(self, data):
        """(void) InserirApósAtual ( novo )"""
        new_element = Node(data)
        if self.isFull():
            print("A lista está cheia!")
        elif self.isEmpty():
            self.__cursor.current = new_element
            self.__elements += 1
        elif self.__cursor.current.next is None:
            self.__cursor.current.next = new_element
            new_element.previous = self.__cursor.current
            self.__elements += 1
        else:
            # insert between the current node and its successor
            new_element.next = self.__cursor.current.next
            new_element.previous = self.__cursor.current
            self.__cursor.current.next.previous = new_element
            self.__cursor.current.next = new_element
            self.__elements += 1
def insertInTheEnd(self, data):
"""(void) inserirNoFim ( novo )"""
if self.isEmpty():
new_element = Node(data)
self.__cursor.current = new_element
self.__elements += 1
else:
self.__cursor.goToLast()
self.insertAfterCurrent(data)
def insertInFront(self, data):
"""(void) inserirNaFrente ( novo )"""
if self.isEmpty():
new_element = Node(data)
self.__cursor.current = new_element
self.__elements += 1
else:
self.__cursor.goToFirst()
self.insertBeforeCurrent(data)
def insertInPositionK(self, k, data):
"""(void) inserirNaPosicao ( k, novo )"""
if self.isFull():
print("A lista está cheia!")
elif k < 0 or k > self.__elements:
print("Essa posição não existe!")
else:
self.__cursor.goToFirst()
self.__cursor.advanceKPos(k-1)
self.insertBeforeCurrent(data)
def popCurrent(self):
"""(void) ExcluirAtual ()"""
if self.isEmpty():
print("A lista está vazia!")
elif self.__cursor.current.previous is None and self.__cursor.current.next is None:
            self.__cursor.current.next = None  # check whether this is really necessary
            self.__cursor.current.previous = None  # check whether this is really necessary
self.__cursor.current = None
self.__elements -= 1
elif self.__cursor.current.previous is None:
self.__cursor.current.next.previous = None
self.__cursor.current = self.__cursor.current.next
self.__elements -= 1
elif self.__cursor.current.next is None:
self.__cursor.current.previous.next = None
self.__cursor.current = self.__cursor.current.previous
self.__elements -= 1
else:
self.__cursor.current.previous.next = self.__cursor.current.next
self.__cursor.current.next.previous = self.__cursor.current.previous
self.__cursor.current = self.__cursor.current.next
self.__elements -= 1
def popFirst(self):
"""(void) ExcluirPrim ()"""
self.__cursor.goToFirst()
self.popCurrent()
def popLast(self):
"""(void) ExcluirUlt ()"""
self.__cursor.goToLast()
self.popCurrent()
def popElement(self, element):
"""(void) ExcluirElem ( chave )"""
self.search(element)
self.popCurrent()
print(f"O elemento {element} foi excluído!")
def excludeFromPos(self, k):
"""(void) ExcluirDaPos ( k )"""
if self.isEmpty():
print("A lista está vazia!")
elif k > self.__elements or k < 0:
print("A posição não existe!")
else:
k-=1
self.__cursor.goToFirst()
self.__cursor.advanceKPos(k)
self.popCurrent()
def search(self, key):
"""(boolean) Buscar ( chave )"""
self.__cursor.goToFirst()
element = self.__cursor.current
while element.data != key:
if self.__cursor.current.next is None:
print("Print: False, elemento inexistente!")
return False
else:
self.__cursor.advanceKPos(1)
element = self.__cursor.current
print("Print: True, o elemento informado existe na lista!")
return True
# Outras (de apoio):
def isEmpty(self):
"""(boolean) Vazia()"""
return self.__elements == 0
def isFull(self):
"""(boolean) Cheia()"""
return self.__elements == self.__limit
def positionOf(self, key):
"""(INT) posiçãoDe(chave)"""
if self.isEmpty():
print("A lista está vazia!")
else:
self.__cursor.goToFirst()
current = self.__cursor.current
position = 1
while current.data != key:
if self.__cursor.current.next is None:
print("Elemento inexistente!")
return None
else:
self.__cursor.advanceKPos(1)
current = self.__cursor.current
position += 1
print(f"A posição do elemento {key} é a de n. {position}!")
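# Usage sketch (illustrative; relies on the Cursor and Node helpers imported above):
if __name__ == "__main__":
    dll = DoublyLinkedList()
    dll.insertInTheEnd(10)
    dll.insertInTheEnd(20)
    dll.insertInFront(5)   # list is now 5, 10, 20
    dll.search(20)         # prints that the element exists
    dll.positionOf(20)     # prints its 1-based position
    dll.popFirst()         # removes 5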
| 127
| 5,891
| 23
|
d5764f7267401bb5f87916e35baf7cbfc9aaaca4
| 96
|
bzl
|
Python
|
tools/bzl/classpath.bzl
|
jinrongc1986/events-log
|
37371e72e9604cc637d0a96ebc91e5a53d420e2c
|
[
"Apache-2.0"
] | null | null | null |
tools/bzl/classpath.bzl
|
jinrongc1986/events-log
|
37371e72e9604cc637d0a96ebc91e5a53d420e2c
|
[
"Apache-2.0"
] | null | null | null |
tools/bzl/classpath.bzl
|
jinrongc1986/events-log
|
37371e72e9604cc637d0a96ebc91e5a53d420e2c
|
[
"Apache-2.0"
] | null | null | null |
load(
"@com_googlesource_gerrit_bazlets//tools:classpath.bzl",
"classpath_collector",
)
| 19.2
| 60
| 0.739583
|
load(
"@com_googlesource_gerrit_bazlets//tools:classpath.bzl",
"classpath_collector",
)
| 0
| 0
| 0
|
9daee1667658b8ded3cb5aa585437a23a6547b46
| 9,652
|
py
|
Python
|
wrap/pyllbc/script/common/Stream.py
|
caochunxi/llbc
|
2ff4af937f1635be67a7e24602d0a3e87c708ba7
|
[
"MIT"
] | 83
|
2015-11-10T09:52:56.000Z
|
2022-01-12T11:53:01.000Z
|
wrap/pyllbc/script/common/Stream.py
|
lailongwei/llbc
|
ec7e69bfa1f0afece8bb19dfa9a0a4578508a077
|
[
"MIT"
] | 30
|
2017-09-30T07:43:20.000Z
|
2022-01-23T13:18:48.000Z
|
wrap/pyllbc/script/common/Stream.py
|
caochunxi/llbc
|
2ff4af937f1635be67a7e24602d0a3e87c708ba7
|
[
"MIT"
] | 34
|
2015-11-14T12:37:44.000Z
|
2021-12-16T02:38:36.000Z
|
# -*- coding: utf-8 -*-
import inspect
import llbc
class pyllbcStream(object):
"""
    Stream class encapsulation, used to pack/unpack data sequences.
"""
@property
def endian(self):
"""
Get stream endian setting(see llbc.Endian module).
"""
return llbc.inl.GetPyStreamEndian(self.__c_obj)
@endian.setter
def endian(self, e):
"""
Set stream endian(see llbc.Endian module).
"""
llbc.inl.SetPyStreamEndian(self.__c_obj, e)
@property
def pos(self):
"""
Get stream current reading/writing position.
"""
return llbc.inl.GetPyStreamPos(self.__c_obj)
@pos.setter
def pos(self, p):
"""
Set stream current reading/writing position.
"""
llbc.inl.SetPyStreamPos(self.__c_obj, p)
@property
def size(self):
"""
        Get stream size (unsafe method; the size is adjusted automatically by the stream).
"""
return llbc.inl.GetPyStreamSize(self.__c_obj)
@size.setter
def size(self, s):
"""
        Set stream size (unsafe method; the size is adjusted automatically by the stream).
"""
llbc.inl.SetPyStreamSize(self.__c_obj, s)
@property
def raw(self):
"""
        Get stream memory view as buffer.
"""
return llbc.inl.PyStreamGetRaw(self.__c_obj)
@raw.setter
def raw(self, r):
"""
Set stream raw memory from str/buffer/bytearray.
"""
llbc.inl.PyStreamSetRaw(self.__c_obj, r)
@property
def cobj(self):
"""
Get raw pyllbc stream object(calling by c/c++ layer).
"""
return self.__c_obj
def __str__(self):
"""
Get human readable stream data's string representation.
"""
import binascii
return binascii.hexlify(self.raw)
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
def unpack(self, fmt):
"""
        Unpack data according to the given format. The result is a tuple even if it contains exactly one item.
format strings:
c: char value(like b).
b: byte value(like c).
B: boolean value.
s: short value.
i: integer value.
q: signed long long value.
f: float value.
            d: double value (only supported by the Format method).
S: string value.
S#: string value, use another pack/unpack algorithm, 4 bytes length + string content(not include NULL character).
S$: string value, will read stream to end as string content, write like 'S', but not append string end character '\0'.
U: unicode value.
A: byte array value.
F: buffer value.
N: None value.
C: class type, will automatic call class.encode() method to decode must tell stream this class name,
use C<ClassName> semantic.
(): tuple type, if only has one element, it represent tuple all element type is the given type, otherwise
the tuple size must equal your given element count.
[]: list type, the same as tuple type: ().
{key:value}: dictionary type.
The format examples:
iiS
(i)
(U)
[i]
{i:S}
{i:(C<int>)}
([SC<int>NA(i)]{int:S}B
"""
return self.__unpack(fmt)
def pack(self, fmt, *values):
"""
Pack values according to the given format, the arguments must match the values required by the format exactly.
format strings:
c: char value(like b).
b: byte value(like c).
B: boolean value.
s: short value.
i: integer value.
q: signed long long value.
f: float value.
            d: double value (only supported by the Format method).
S: string value.
S#: string value, use another pack/unpack algorithm, 4 bytes length + string content(not include NULL character).
S$: string value, will read stream to end as string content, write like 'S', but not append string end character '\0'.
U: unicode value.
A: byte array value.
F: buffer value.
N: None value.
C: class type, will automatic call class.encode() method to decode, must tell stream this class name,
use C<ClassName> semantic.
(): tuple type, if only has one element, it represent tuple all element type is the given type, otherwise
the tuple size must equal your given element count.
[]: list type, the same as tuple type: ().
{key:value}: dictionary type.
"""
caller_env = None
if fmt.find('C') >= 0 and not llbc.inl.PyStreamIsExprCompiled(fmt):
caller_env = inspect.stack()[1][0].f_globals
return llbc.inl.PyStreamFmtWrite(self.__c_obj, fmt, values, caller_env)
llbc.Stream = pyllbcStream
| 30.544304
| 129
| 0.623809
|
# -*- coding: utf-8 -*-
import inspect
import llbc
class pyllbcStream(object):
"""
    Stream class encapsulation, used to pack/unpack data sequences.
"""
def __init__(self, size=0, init_obj=None, endian=llbc.Endian.MachineEndian):
self.__c_obj = llbc.inl.NewPyStream(self, size, endian)
self.packobj(init_obj)
def __del__(self):
llbc.inl.DelPyStream(self.__c_obj)
@property
def endian(self):
"""
Get stream endian setting(see llbc.Endian module).
"""
return llbc.inl.GetPyStreamEndian(self.__c_obj)
@endian.setter
def endian(self, e):
"""
Set stream endian(see llbc.Endian module).
"""
llbc.inl.SetPyStreamEndian(self.__c_obj, e)
@property
def pos(self):
"""
Get stream current reading/writing position.
"""
return llbc.inl.GetPyStreamPos(self.__c_obj)
@pos.setter
def pos(self, p):
"""
Set stream current reading/writing position.
"""
llbc.inl.SetPyStreamPos(self.__c_obj, p)
@property
def size(self):
"""
        Get stream size (unsafe method; the size is adjusted automatically by the stream).
"""
return llbc.inl.GetPyStreamSize(self.__c_obj)
@size.setter
def size(self, s):
"""
        Set stream size (unsafe method; the size is adjusted automatically by the stream).
"""
llbc.inl.SetPyStreamSize(self.__c_obj, s)
@property
def raw(self):
"""
        Get stream memory view as buffer.
"""
return llbc.inl.PyStreamGetRaw(self.__c_obj)
@raw.setter
def raw(self, r):
"""
Set stream raw memory from str/buffer/bytearray.
"""
llbc.inl.PyStreamSetRaw(self.__c_obj, r)
@property
def cobj(self):
"""
Get raw pyllbc stream object(calling by c/c++ layer).
"""
return self.__c_obj
def __str__(self):
"""
Get human readable stream data's string representation.
"""
import binascii
return binascii.hexlify(self.raw)
@staticmethod
def getcachedsize():
return llbc.inl.PyStreamGetCachedSize()
@staticmethod
def getcachelimit():
return llbc.inl.PyStreamGetCacheLimit()
@staticmethod
def setcachelimit(lmt):
llbc.inl.PyStreamSetCacheLimit(lmt)
@staticmethod
def discardexpr(expr):
llbc.inl.PyStreamDiscardExpr(expr)
@staticmethod
def discardallexprs():
llbc.inl.PyStreamDiscardAllExprs()
def unpack(self, fmt):
"""
        Unpack data according to the given format. The result is a tuple even if it contains exactly one item.
format strings:
c: char value(like b).
b: byte value(like c).
B: boolean value.
s: short value.
i: integer value.
q: signed long long value.
f: float value.
            d: double value (only supported by the Format method).
S: string value.
S#: string value, use another pack/unpack algorithm, 4 bytes length + string content(not include NULL character).
S$: string value, will read stream to end as string content, write like 'S', but not append string end character '\0'.
U: unicode value.
A: byte array value.
F: buffer value.
N: None value.
C: class type, will automatic call class.encode() method to decode must tell stream this class name,
use C<ClassName> semantic.
(): tuple type, if only has one element, it represent tuple all element type is the given type, otherwise
the tuple size must equal your given element count.
[]: list type, the same as tuple type: ().
{key:value}: dictionary type.
The format examples:
iiS
(i)
(U)
[i]
{i:S}
{i:(C<int>)}
([SC<int>NA(i)]{int:S}B
"""
return self.__unpack(fmt)
def unpackone(self, fmt):
return self.__unpack(fmt)[0]
def unpackcls(self, cls):
return llbc.inl.PyStreamRead(self.__c_obj, cls)
def unpacknone(self):
return llbc.inl.PyStreamRead_None(self.__c_obj)
def unpackbyte(self):
return llbc.inl.PyStreamRead_Byte(self.__c_obj)
def unpackbool(self):
return llbc.inl.PyStreamRead_Bool(self.__c_obj)
def unpackint16(self):
return llbc.inl.PyStreamRead_Int16(self.__c_obj)
def unpackint32(self):
return llbc.inl.PyStreamRead_Int32(self.__c_obj)
def unpackint64(self):
return llbc.inl.PyStreamRead_Int64(self.__c_obj)
def unpackfloat(self):
return llbc.inl.PyStreamRead_Float(self.__c_obj)
def unpackdouble(self):
return llbc.inl.PyStreamRead_Double(self.__c_obj)
def unpackstr(self):
return llbc.inl.PyStreamRead_Str(self.__c_obj)
def unpackstr2(self):
return llbc.inl.PyStreamRead_Str2(self.__c_obj)
def unpackstr3(self):
return llbc.inl.PyStreamRead_Str3(self.__c_obj)
def unpackunicode(self):
return llbc.inl.PyStreamRead_Unicode(self.__c_obj)
def unpackbytearray(self):
return llbc.inl.PyStreamRead_ByteArray(self.__c_obj)
def unpackbuffer(self):
return llbc.inl.PyStreamRead_Buffer(self.__c_obj)
def unpackstream(self, begin=0, end=-1):
return llbc.inl.PyStreamRead_Stream(self.__c_obj, begin, end)
def pack(self, fmt, *values):
"""
Pack values according to the given format, the arguments must match the values required by the format exactly.
format strings:
c: char value(like b).
b: byte value(like c).
B: boolean value.
s: short value.
i: integer value.
q: signed long long value.
f: float value.
            d: double value (only supported by the Format method).
S: string value.
S#: string value, use another pack/unpack algorithm, 4 bytes length + string content(not include NULL character).
S$: string value, will read stream to end as string content, write like 'S', but not append string end character '\0'.
U: unicode value.
A: byte array value.
F: buffer value.
N: None value.
C: class type, will automatic call class.encode() method to decode, must tell stream this class name,
use C<ClassName> semantic.
(): tuple type, if only has one element, it represent tuple all element type is the given type, otherwise
the tuple size must equal your given element count.
[]: list type, the same as tuple type: ().
{key:value}: dictionary type.
"""
caller_env = None
if fmt.find('C') >= 0 and not llbc.inl.PyStreamIsExprCompiled(fmt):
caller_env = inspect.stack()[1][0].f_globals
return llbc.inl.PyStreamFmtWrite(self.__c_obj, fmt, values, caller_env)
def packobj(self, obj):
return llbc.inl.PyStreamWrite(self.__c_obj, obj)
def packnone(self):
return llbc.inl.PyStreamWrite_None(self.__c_obj, None)
def packbyte(self, obj):
return llbc.inl.PyStreamWrite_Byte(self.__c_obj, obj)
def packbool(self, obj):
return llbc.inl.PyStreamWrite_Bool(self.__c_obj, obj)
def packint16(self, obj):
return llbc.inl.PyStreamWrite_Int16(self.__c_obj, obj)
def packint32(self, obj):
return llbc.inl.PyStreamWrite_Int32(self.__c_obj, obj)
def packint64(self, obj):
return llbc.inl.PyStreamWrite_Int64(self.__c_obj, obj)
def packfloat(self, obj):
return llbc.inl.PyStreamWrite_Float(self.__c_obj, obj)
def packdouble(self, obj):
return llbc.inl.PyStreamWrite_Double(self.__c_obj, obj)
def packstr(self, obj):
return llbc.inl.PyStreamWrite_Str(self.__c_obj, obj)
def packstr2(self, obj):
return llbc.inl.PyStreamWrite_Str2(self.__c_obj, obj)
def packstr3(self, obj):
return llbc.inl.PyStreamWrite_Str3(self.__c_obj, obj)
def packunicode(self, obj):
return llbc.inl.PyStreamWrite_Unicode(self.__c_obj, obj)
def packbytearray(self, obj):
return llbc.inl.PyStreamWrite_ByteArray(self.__c_obj, obj)
def packbuffer(self, obj):
return llbc.inl.PyStreamWrite_Buffer(self.__c_obj, obj)
def packtuple(self, obj):
return llbc.inl.PyStreamWrite_Tuple(self.__c_obj, obj)
def packlist(self, obj):
return llbc.inl.PyStreamWrite_List(self.__c_obj, obj)
def packsequence(self, obj):
return llbc.inl.PyStreamWrite_Sequence(self.__c_obj, obj)
def packdict(self, obj):
return llbc.inl.PyStreamWrite_Dict(self.__c_obj, obj)
def packstream(self, s, begin=0, to=-1):
if not isinstance(s, pyllbcStream):
raise TypeError('pack argument "s" must be stream type')
return llbc.inl.PyStreamWrite_Stream(self.__c_obj, s.cobj, begin, to)
def encode(self, s):
if not isinstance(s, pyllbcStream):
raise TypeError('encode argument not Stream type')
return llbc.inl.PyStreamEncodeSelf(self.__c_obj, s.cobj)
def __unpack(self, fmt, stack_idx=1):
caller_env = None
if fmt.find('C') >= 0 and not llbc.inl.PyStreamIsExprCompiled(fmt):
caller_env = inspect.stack()[stack_idx + 1][0].f_globals
return llbc.inl.PyStreamFmtRead(self.__c_obj, fmt, caller_env)
llbc.Stream = pyllbcStream
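# Usage sketch (illustrative only; requires the compiled llbc extension so that
# llbc.inl.* and llbc.Endian exist as referenced above; 'iS[i]' follows the
# format mini-language documented in pack()/unpack()):
def _stream_example():
    s = pyllbcStream()
    start = s.pos          # skip whatever the constructor packed for init_obj
    s.pack('iS[i]', 42, 'hello', [1, 2, 3])
    s.pos = start
    return s.unpack('iS[i]')  # expected: (42, 'hello', [1, 2, 3])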
| 3,328
| 0
| 1,236
|
793dabc069adbb525fbf397b7888dc9fdb942b2b
| 2,036
|
py
|
Python
|
src/BloomFilter/BloomFilter.py
|
shapovalovdev/AlgorythmsAndDataStructures
|
34d5f38c089e0ba902813607f08847fbdc7361ab
|
[
"Apache-2.0"
] | null | null | null |
src/BloomFilter/BloomFilter.py
|
shapovalovdev/AlgorythmsAndDataStructures
|
34d5f38c089e0ba902813607f08847fbdc7361ab
|
[
"Apache-2.0"
] | null | null | null |
src/BloomFilter/BloomFilter.py
|
shapovalovdev/AlgorythmsAndDataStructures
|
34d5f38c089e0ba902813607f08847fbdc7361ab
|
[
"Apache-2.0"
] | null | null | null |
#import hashlib
#from random import randint
#
# def hash2(self, str1):
#
# result=0
# b_str1=str.encode(str1)
# h=hashlib.sha1(b_str1).hexdigest()
# for c in str1:
# result += ord(c)
# return result % self.filter_len
# if __name__ == '__main__':
# dataset=["0123456789", "1234567890", "sdfsdfsdf", "sdf2143124", "hophey", "abirvaolg", "8901234567", "2356sdfqix,ed", "9012345678"]
# dataset2=["012345678932", "12345623e47890", "sdfdsfq1sdfsdf", "sdf2gs2143124", "qwerhophey", "atgxcvbirvaolg", "8sdgaw901234567", "321452356sdfqix,ed", "5124e39012345678"]
# BLOOM_TEST=BloomFilter(32)
# for data in dataset:
# BLOOM_TEST.add(data)
# for data in dataset2:
# if BLOOM_TEST.is_value(data):
# print(f'It seems {data} is here')
# else:
# print(f'No {data} by the name of bloom filter ')
# for data in dataset:
# if BLOOM_TEST.is_value(data):
# print(f'It seems {data} is here')
# else:
# print(f'No {data} by the name of bloom filter ')
# print( BLOOM_TEST.bloom_array)
| 32.83871
| 178
| 0.589391
|
#import hashlib
#from random import randint
class BloomFilter:
def __init__(self, f_len):
self.filter_len = f_len
        # create a bit array of length f_len ...
self.bloom_array=self.filter_len * [0]
def hash1(self, str1):
result=1
rand_int=17
for c in str1:
result = result*rand_int + ord(c)
return result % self.filter_len
#
# def hash2(self, str1):
#
# result=0
# b_str1=str.encode(str1)
# h=hashlib.sha1(b_str1).hexdigest()
# for c in str1:
# result += ord(c)
# return result % self.filter_len
def hash2(self, str1):
result=1
rand_int=223
for c in str1:
result = result*rand_int + ord(c)
return result % self.filter_len
def add(self, str1):
self.bloom_array[self.hash1(str1)] = 1
self.bloom_array[self.hash2(str1)] = 1
def is_value(self, str1):
        # check whether the string str1 is present in the filter
if not self.bloom_array[self.hash1(str1)] or not self.bloom_array[self.hash2(str1)]:
return False
else:
return True
# if __name__ == '__main__':
# dataset=["0123456789", "1234567890", "sdfsdfsdf", "sdf2143124", "hophey", "abirvaolg", "8901234567", "2356sdfqix,ed", "9012345678"]
# dataset2=["012345678932", "12345623e47890", "sdfdsfq1sdfsdf", "sdf2gs2143124", "qwerhophey", "atgxcvbirvaolg", "8sdgaw901234567", "321452356sdfqix,ed", "5124e39012345678"]
# BLOOM_TEST=BloomFilter(32)
# for data in dataset:
# BLOOM_TEST.add(data)
# for data in dataset2:
# if BLOOM_TEST.is_value(data):
# print(f'It seems {data} is here')
# else:
# print(f'No {data} by the name of bloom filter ')
# for data in dataset:
# if BLOOM_TEST.is_value(data):
# print(f'It seems {data} is here')
# else:
# print(f'No {data} by the name of bloom filter ')
# print( BLOOM_TEST.bloom_array)
| 793
| -3
| 157
|
a5c93ce3ff85b6ac11e58c307fe15570a8d84a8e
| 2,659
|
py
|
Python
|
target.py
|
anarthal/omnibuild
|
466ce4391d016b0890894b984fa1da57edd8f136
|
[
"MIT"
] | null | null | null |
target.py
|
anarthal/omnibuild
|
466ce4391d016b0890894b984fa1da57edd8f136
|
[
"MIT"
] | null | null | null |
target.py
|
anarthal/omnibuild
|
466ce4391d016b0890894b984fa1da57edd8f136
|
[
"MIT"
] | null | null | null |
import os.path
| 35.453333
| 92
| 0.649116
|
import os.path
class Target(object):
def __init__(self, name):
self.name = name
self.depends = []
def __str__(self):
return '<Target {}>'.format(self.name)
def is_up_to_date(self, cache):
raise NotImplementedError()
def build(self):
raise NotImplementedError()
def update_cache(self, cache):
pass
class ObjectFile(Target):
def __init__(self, src, output, compiler):
Target.__init__(self, output)
self.src = src
self.output = output
self.compiler = compiler
if not os.path.isfile(src):
raise FileNotFoundError('Source file {} does not exist'.format(src))
self.includes = self.compiler.get_includes(self.src)
def is_up_to_date(self, cache):
if cache.file_has_changed(self.src):
return False
for inc in self.includes:
if cache.file_has_changed(inc):
return False
return not cache.file_has_changed(self.output)
def build(self):
self.compiler.compile(self.src, self.output)
def update_cache(self, cache):
to_store = [self.src] + self.includes + [self.output]
for fname in to_store:
cache.file_store(fname)
class CppTarget(Target):
def __init__(self, name, sources, output_dir, compiler):
Target.__init__(self, os.path.join(output_dir, 'targets', name))
self.sources = sources
self.output_dir = output_dir
self.compiler = compiler
self.objects = [self.make_object_file(src, output_dir, compiler) for src in sources]
self.libs = []
self.depends = list(self.objects)
def get_path(self):
return self.name
def is_up_to_date(self, cache):
return not cache.file_has_changed(self.get_path())
def update_cache(self, cache):
cache.file_store(self.get_path())
def link_libraries(self, libs):
self.libs += libs
self.depends += libs
def _get_link_inputs(self):
return [obj.output for obj in self.objects] + [lib.name for lib in self.libs]
@staticmethod
def make_object_file(src, output_dir, compiler):
return ObjectFile(src, os.path.join(output_dir, src + '.o'), compiler)
class Executable(CppTarget):
def build(self):
self.compiler.link_executable(self._get_link_inputs(), self.get_path())
class DynamicLibrary(CppTarget):
def __init__(self, name, sources, output_dir, compiler):
CppTarget.__init__(self, name + '.so', sources, output_dir, compiler)
def build(self):
self.compiler.link_dynamic_library(self._get_link_inputs(), self.get_path())
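# Usage sketch (illustrative; 'compiler' stands for any object providing the
# get_includes/compile/link_executable interface assumed by the classes above,
# and 'cache' for the change-tracking object consulted by is_up_to_date):
# exe = Executable('app', ['main.cpp', 'util.cpp'], 'build', compiler)
# for obj in exe.depends:
#     if not obj.is_up_to_date(cache):
#         obj.build()
# exe.build()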
| 1,989
| 227
| 427
|
6f4a2eae77a08e70580164954af371a9d61703ba
| 5,170
|
py
|
Python
|
ivolution/FacemovieThread.py
|
jlengrand/Ivolution
|
2753d7120b11fb94a5ce84bfe4a134b5a437a5c7
|
[
"BSD-3-Clause"
] | 4
|
2015-01-22T06:32:15.000Z
|
2020-01-30T05:53:48.000Z
|
ivolution/FacemovieThread.py
|
jlengrand/Ivolution
|
2753d7120b11fb94a5ce84bfe4a134b5a437a5c7
|
[
"BSD-3-Clause"
] | null | null | null |
ivolution/FacemovieThread.py
|
jlengrand/Ivolution
|
2753d7120b11fb94a5ce84bfe4a134b5a437a5c7
|
[
"BSD-3-Clause"
] | 7
|
2015-04-23T12:34:19.000Z
|
2021-08-01T05:58:56.000Z
|
"""
.. module:: Facemovie
:platform: Unix, Windows
    :synopsis: Main class of the application. Contains the core image processing functions. Plays the role of a controller for the application, as it supports the communication layer.
.. moduleauthor:: Julien Lengrand-Lambert <jlengrand@gmail.com>
"""
import threading
import logging
import Facemovie_lib
from util.Notifier import Observer
from util.Notifier import Observable
class FacemovieThread(threading.Thread, Observable, Observer):
'''
Creates a Thread version of Facemovie using the facemovie_lib.
This class can then be run anywhere, from a GUI, script, ...
'''
def __init__(self, face_params):
"""
Initializes all parameters of the application. Input and output folders
are defined, together with the classifier profile.
:param face_params: A faceparams object that contains all needed information to run the Facemovie.
:type face_params: FaceParams
"""
threading.Thread.__init__(self)
Observable.__init__(self)
Observer.__init__(self, "Application")
self.stop_process = False
self.face_params = face_params
self.facemovie = Facemovie_lib.FaceMovie(self.face_params)
self.facemovie.subscribe(self) # Subscribing to facemovie reports
self.subscribe(self.facemovie) # Used to send request to stop
self.my_logger = logging.getLogger('IvolutionFile.Thread')
#self.console_logger = logging.getLogger('ConsoleLog')
def update(self, message):
"""
        Triggered by IvolutionWindow.
Uses the Observer pattern to inform the user about the progress of the GUI.
"""
if len(message) == 1: # system commands
if message[0] == "STOP":
#self.console_logger.debug("Facemovie is going to stop")
self.my_logger.debug("Facemovie is going to stop")
self.stop_process = True
self.notify(["Lib", ["STOP"]])
else:
#self.console_logger.debug("Unrecognized system command")
self.my_logger.debug("Unrecognized system command")
##self.console_logger.debug(message)
self.my_logger.debug(message)
elif len(message) == 2: # notifications
##self.console_logger.debug(message)
self.my_logger.debug(message)
if message[0] == "FILEADD":
self.notify(["Interface", [message[0], message[1], 0]])
else:
# notify gui about small updates
self.notify(["Interface", ["STATUS", message[0], message[1]]])
# checking for fatal error
if message[0] == "Error":
#self.console_logger.debug("Fatal Error detected")
self.my_logger.debug("Fatal Error detected")
self.stop_process = True
self.notify(["Lib", ["STOP"]])
elif len(message) == 3: # notifications
if message[0] == "FILEDONE":
self.notify(["Interface", message])
else:
#self.console_logger.debug("Unrecognized command")
self.my_logger.debug("Unrecognized command")
#self.console_logger.debug(message)
self.my_logger.debug(message)
| 40.077519
| 181
| 0.609284
|
"""
.. module:: Facemovie
:platform: Unix, Windows
    :synopsis: Main class of the application. Contains the core image processing functions. Plays the role of a controller for the application, as it supports the communication layer.
.. moduleauthor:: Julien Lengrand-Lambert <jlengrand@gmail.com>
"""
import threading
import logging
import Facemovie_lib
from util.Notifier import Observer
from util.Notifier import Observable
class FacemovieThread(threading.Thread, Observable, Observer):
'''
Creates a Thread version of Facemovie using the facemovie_lib.
This class can then be run anywhere, from a GUI, script, ...
'''
def __init__(self, face_params):
"""
Initializes all parameters of the application. Input and output folders
are defined, together with the classifier profile.
:param face_params: A faceparams object that contains all needed information to run the Facemovie.
:type face_params: FaceParams
"""
threading.Thread.__init__(self)
Observable.__init__(self)
Observer.__init__(self, "Application")
self.stop_process = False
self.face_params = face_params
self.facemovie = Facemovie_lib.FaceMovie(self.face_params)
self.facemovie.subscribe(self) # Subscribing to facemovie reports
self.subscribe(self.facemovie) # Used to send request to stop
self.my_logger = logging.getLogger('IvolutionFile.Thread')
#self.console_logger = logging.getLogger('ConsoleLog')
def update(self, message):
"""
        Triggered by IvolutionWindow.
Uses the Observer pattern to inform the user about the progress of the GUI.
"""
if len(message) == 1: # system commands
if message[0] == "STOP":
#self.console_logger.debug("Facemovie is going to stop")
self.my_logger.debug("Facemovie is going to stop")
self.stop_process = True
self.notify(["Lib", ["STOP"]])
else:
#self.console_logger.debug("Unrecognized system command")
self.my_logger.debug("Unrecognized system command")
##self.console_logger.debug(message)
self.my_logger.debug(message)
elif len(message) == 2: # notifications
##self.console_logger.debug(message)
self.my_logger.debug(message)
if message[0] == "FILEADD":
self.notify(["Interface", [message[0], message[1], 0]])
else:
# notify gui about small updates
self.notify(["Interface", ["STATUS", message[0], message[1]]])
# checking for fatal error
if message[0] == "Error":
#self.console_logger.debug("Fatal Error detected")
self.my_logger.debug("Fatal Error detected")
self.stop_process = True
self.notify(["Lib", ["STOP"]])
elif len(message) == 3: # notifications
if message[0] == "FILEDONE":
self.notify(["Interface", message])
else:
#self.console_logger.debug("Unrecognized command")
self.my_logger.debug("Unrecognized command")
#self.console_logger.debug(message)
self.my_logger.debug(message)
def run(self):
if not self.stop_process:
self.my_logger.debug("Listing pictures")
self.notify(["Interface", ["PROGRESS", "Listing pictures", 0.0]])
num_guys = self.facemovie.list_guys()
# FIXME: Later to be done in Lib
if num_guys < 0:
self.notify(["Interface", ["STATUS", "Source folder not found", 0.0]])
self.stop_process = True
elif num_guys == 0:
self.notify(["Interface", ["STATUS", "No image found in source folder", 0.0]])
self.stop_process = True
if not self.stop_process:
self.my_logger.debug("Detecting Faces")
self.notify(["Interface", ["PROGRESS", "Detecting Faces", 0.2]])
self.facemovie.prepare_faces() # I want to search for the faces, and characteristics of the images
if not self.stop_process:
self.my_logger.debug("Calculating video requirements")
self.notify(["Interface", ["PROGRESS", "Calculating video requirements", 0.6]])
self.facemovie.find_final_dimensions() # finds output size for desired mode.
if not self.stop_process:
self.my_logger.debug("Generating movie")
self.notify(["Interface", ["PROGRESS", "Generating movie", 0.8]])
self.facemovie.save_movie()
self.my_logger.debug("Movie saved")
self.notify(["Interface", ["PROGRESS", "Movie saved, Finished!", 1.0]])
# updating status to avoid remanent messages
self.notify(["Interface", ["STATUS", " ", 1.0]])
if not self.stop_process:
self.my_logger.debug("Thread terminated")
if self.stop_process:
self.notify(["Interface", ["PROGRESS", "Process cancelled!", 1.0]])
| 1,796
| 0
| 27
|
0c88166c936c8776b4331a148fd68ba27d214ba1
| 1,395
|
py
|
Python
|
Q024_implement_queue_class_in_python.py
|
latika18/learning
|
a57c9aacc0157bf7c318f46c1e7c4971d1d55aea
|
[
"Unlicense"
] | null | null | null |
Q024_implement_queue_class_in_python.py
|
latika18/learning
|
a57c9aacc0157bf7c318f46c1e7c4971d1d55aea
|
[
"Unlicense"
] | null | null | null |
Q024_implement_queue_class_in_python.py
|
latika18/learning
|
a57c9aacc0157bf7c318f46c1e7c4971d1d55aea
|
[
"Unlicense"
] | null | null | null |
#Question 24
#Implement a queue class in Python: It should support 3 APIs:
#queue.top(): prints current element at front of queue
#queue.pop(): takes out an element from front of queue
#queue.add(): adds a new element at end of stack
queue_1 = Queue()
queue_1.add(12)
queue_1.add(11)
queue_1.add(55)
queue_1.add(66)
queue_1.add(56)
queue_1.add(43)
queue_1.add(33)
queue_1.add(88)
queue_1.add(56)
queue_1.add(34)
print queue_1
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
| 21.136364
| 61
| 0.653047
|
#Question 24
#Implement a queue class in Python: It should support 3 APIs:
#queue.top(): prints current element at front of queue
#queue.pop(): takes out an element from front of queue
#queue.add(): adds a new element at end of stack
class Queue:
def __init__(self):
"""initialise a Queue class"""
self.items = []
def top(self):
"""returns the current element at front of queue"""
if self.items:
return self.items[0]
else:
raise Exception("Empty Queue")
def pop(self):
"""takes out an element from front of queue"""
if self.items:
self.items.pop(0)
else :
raise Exception("Empty Queue")
def add(self , item):
"""adds a new element at the end of queue"""
self.items.append(item)
queue_1 = Queue()
queue_1.add(12)
queue_1.add(11)
queue_1.add(55)
queue_1.add(66)
queue_1.add(56)
queue_1.add(43)
queue_1.add(33)
queue_1.add(88)
queue_1.add(56)
queue_1.add(34)
print queue_1
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
| 0
| 571
| 22
|
9961f18b23d3984a91e993cdbb9cc0a5a87b1478
| 1,860
|
py
|
Python
|
app/services/searchers/rule.py
|
ninoseki/uzen
|
93726f22f43902e17b22dd36142dac05171d0d84
|
[
"MIT"
] | 76
|
2020-02-27T06:36:27.000Z
|
2022-03-10T20:18:03.000Z
|
app/services/searchers/rule.py
|
ninoseki/uzen
|
93726f22f43902e17b22dd36142dac05171d0d84
|
[
"MIT"
] | 33
|
2020-03-13T02:04:14.000Z
|
2022-03-04T02:06:11.000Z
|
app/services/searchers/rule.py
|
ninoseki/uzen
|
93726f22f43902e17b22dd36142dac05171d0d84
|
[
"MIT"
] | 6
|
2020-03-17T16:42:25.000Z
|
2021-04-27T06:35:46.000Z
|
from typing import Any, Dict, List, Optional, cast
from uuid import UUID
from tortoise.query_utils import Q
from app import models, schemas
from app.services.searchers import AbstractSearcher
| 31
| 89
| 0.612366
|
from typing import Any, Dict, List, Optional, cast
from uuid import UUID
from tortoise.query_utils import Q
from app import models, schemas
from app.services.searchers import AbstractSearcher
class RuleSearcher(AbstractSearcher):
@classmethod
async def search(
cls,
filters: Dict[str, Any],
size: Optional[int] = None,
offset: Optional[int] = None,
id_only: bool = False,
) -> schemas.RulesSearchResults:
"""Search rules.
Arguments:
filters {dict} -- Filters for rule search
Keyword Arguments:
size {[int]} -- Number of results returned (default: {None})
offset {[int]} -- Offset of the first result for pagination (default: {None})
id_only {bool} -- Whether to return only a list of ids (default: {False})
Returns:
SearchResults -- A list of rules and total count
"""
# build queirs from filters
queries = []
name = filters.get("name")
if name is not None:
queries.append(Q(name__contains=name))
target = filters.get("target")
if target is not None:
queries.append(Q(target=target))
source = filters.get("source")
if source is not None:
queries.append(Q(source__contains=source))
query = Q(*queries)
# Run search
instance = cls(model=models.Rule, query=query)
results = await instance._search(size=size, offset=offset, id_only=id_only)
if id_only:
return schemas.RulesSearchResults(
results=cast(List[UUID], results.results), total=results.total
)
rules = [rule.to_model() for rule in cast(List[models.Rule], results.results)]
return schemas.RulesSearchResults(results=rules, total=results.total)
| 0
| 1,642
| 23
|
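A minimal, hypothetical call site for the searcher above, assuming an initialised Tortoise ORM and that the app's models and schemas are importable; the filter keys shown (name, source) are exactly the ones search() inspects, and any other keys are simply ignored:

# Hypothetical usage sketch; RuleSearcher.search is a coroutine, so it must be awaited.
async def find_rules():
    results = await RuleSearcher.search(
        filters={"name": "phish", "source": "rule"},
        size=10,
        offset=0,
    )
    return results.total, results.results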
7a93655ebac4268d033069e0c8e6ff264d96d3fb
| 45
|
py
|
Python
|
src/widget/user1/__init__.py
|
megemini/DataCastle2017
|
261134f760d8c1bbfc3e65e1362b7710e601947d
|
[
"MIT"
] | null | null | null |
src/widget/user1/__init__.py
|
megemini/DataCastle2017
|
261134f760d8c1bbfc3e65e1362b7710e601947d
|
[
"MIT"
] | null | null | null |
src/widget/user1/__init__.py
|
megemini/DataCastle2017
|
261134f760d8c1bbfc3e65e1362b7710e601947d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Just for test
"""
| 11.25
| 23
| 0.466667
|
# -*- coding: utf-8 -*-
"""
Just for test
"""
| 0
| 0
| 0
|
2366a81d98fc425ba641105e960fcc11a70e1e25
| 993
|
py
|
Python
|
plot/Plot_thermal_conductiviy_V1.py
|
eastsheng/Thermal-conductivity
|
8a26be22c58b3b3b6723c57c65f4bba93556f9e8
|
[
"MIT"
] | 7
|
2020-06-10T05:38:17.000Z
|
2022-03-11T10:33:57.000Z
|
plot/Plot_thermal_conductiviy_V1.py
|
eastsheng/Thermal-conductivity
|
8a26be22c58b3b3b6723c57c65f4bba93556f9e8
|
[
"MIT"
] | null | null | null |
plot/Plot_thermal_conductiviy_V1.py
|
eastsheng/Thermal-conductivity
|
8a26be22c58b3b3b6723c57c65f4bba93556f9e8
|
[
"MIT"
] | 2
|
2020-04-24T09:36:25.000Z
|
2022-03-11T10:33:58.000Z
|
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
# tcfile = './Thermal_conductivity_Se.txt'
tcfile = './Thermal_conductivity_S.txt'
plt_tc(tcfile)
| 24.825
| 96
| 0.678751
|
import numpy as np
import matplotlib.pyplot as plt
def plt_tc(tcfile):
tc = np.loadtxt(tcfile)
# print(tc)
x = tc[:,0]/32.06
y = tc[:,1:4]
y_mean = np.mean(y,axis=1)
y_std = np.std(y,axis=1)
# print(y_mean,y_std)
plt.rc('font',family='Times New Roman',size=26)
fig, ax = plt.subplots(figsize=(8,6))
fig.subplots_adjust(bottom=0.2,left=0.2)
s1 = ax.errorbar(x,y_mean,yerr=y_std,capsize=10,capthick=4,
fmt='bo:',mfc='w',mec='b',markersize=16,mew=2)
ax.legend(handles=[s1],labels=['$\mathregular{MoS_2}$/$\mathregular{MoS^{m}}_\mathregular{2}$']
,loc='best', fontsize=26)
ax.set_xlabel('Mass ratio (R)',fontsize=26,fontweight='bold')
ax.set_ylabel('Thermal conductivity (W/m-K)',fontsize=26,fontweight='bold')
ax.set_xticks([0,1,2,3,4,5,6])
ax.set_yticks([0,5,10,15,20,25,30])
plt.savefig(tcfile+'_.tiff',dpi=300)
plt.show()
return
if __name__ == '__main__':
# tcfile = './Thermal_conductivity_Se.txt'
tcfile = './Thermal_conductivity_S.txt'
plt_tc(tcfile)
| 785
| 0
| 23
|
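plt_tc expects a whitespace-delimited text file with at least four columns per row: a mass value in column 0 (divided by 32.06, which matches the atomic mass of sulfur) and three conductivity runs in columns 1-3 that are averaged into error bars. A small synthetic example, assuming matplotlib (plus Pillow for the .tiff output) is installed and plt_tc is in scope; the numbers are fabricated purely to exercise the function:

import numpy as np

demo = np.array([
    [32.06, 25.0, 26.0, 24.5],   # mass, run 1, run 2, run 3
    [64.12, 18.0, 17.5, 18.3],
    [96.18, 12.0, 12.4, 11.8],
])
np.savetxt("Thermal_conductivity_demo.txt", demo)
plt_tc("Thermal_conductivity_demo.txt")   # also writes Thermal_conductivity_demo.txt_.tiff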
68a08329945e4e078a86db5c9188a879ac68c385
| 4,743
|
py
|
Python
|
scripts/scatter_plots.py
|
tupleblog/bkkdreams-datathon
|
54214356d42cecdc758803d958375bd7ee7dc169
|
[
"MIT"
] | 1
|
2020-09-13T16:52:03.000Z
|
2020-09-13T16:52:03.000Z
|
scripts/scatter_plots.py
|
tupleblog/bkkdreams-datathon
|
54214356d42cecdc758803d958375bd7ee7dc169
|
[
"MIT"
] | null | null | null |
scripts/scatter_plots.py
|
tupleblog/bkkdreams-datathon
|
54214356d42cecdc758803d958375bd7ee7dc169
|
[
"MIT"
] | null | null | null |
"""
Scatter plot between
"""
import pandas as pd
import numpy as np
from numpy.random import random
from math import pi
from bokeh.io import output_notebook
output_notebook()
from bokeh.io import show, output_file
from bokeh.palettes import RdYlGn6
from bokeh.models import (
BasicTicker,
ColorBar,
LinearColorMapper,
PrintfTickFormatter,
ColumnDataSource,
HoverTool,
Span,
)
from bokeh.plotting import figure, save, show, output_file
from bokeh.palettes import BuGn, Blues8, Oranges256
def plot_vs_population(districts_budget_df):
"""
From district budget to scatter plots vs total population
"""
for q in districts_budget_df.budget_type.unique():
df = districts_budget_df.query("budget_type == '{}'".format(q))
df["num_total"] = df["num_male"] + df["num_female"]
df = df.groupby(["dname", "num_total"])["budget"].sum().reset_index()
source = ColumnDataSource(
data=dict(
x=df["num_total"] / 10000, y=df["budget"] / 1000000, desc=df["dname"]
)
)
p = figure(title="", tools="hover,box_zoom,reset")
vline = Span(
location=df.num_total.mean() / 10000,
dimension="height",
line_color="gold",
line_width=1.5,
)
hline = Span(
location=df["budget"].mean() / 1000000,
dimension="width",
line_color="gold",
line_width=1.5,
)
p.circle(
"x", "y", source=source, fill_alpha=0.2, size=10,
)
p.xaxis.axis_label = "จำนวนผู้อยู่อาศัย (หมื่นคน)"
p.yaxis.axis_label = f"งบประมาณ{q} (ล้านบาท)"
p.xaxis.axis_label_text_font_size = "15pt"
p.yaxis.axis_label_text_font_size = "15pt"
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
hover = HoverTool(
tooltips=[
("เขต", "@desc"),
(f"งบ{q}", "@y ล้านบาท"),
("จำนวนผู้อาศัย", "@x หมื่นคน"),
]
)
p.add_tools(hover)
p.renderers.extend([vline, hline])
output_file(f"plots/scatter-{q_map[q]}-budget.html", mode="inline")
save(p)
def plot_vs_area(districts_budget_df):
"""
From district budget to scatter plots vs area size
"""
for q in districts_budget_df.budget_type.unique():
df = districts_budget_df.query("budget_type == '{}'".format(q))
df = df.groupby(["dname", "AREA"])["budget"].sum().reset_index()
source = ColumnDataSource(
data=dict(
x=df["AREA"] / 1000000, y=df["budget"] / 1000000, desc=df["dname"]
)
)
p = figure(title="", tools="hover,box_zoom,reset")
vline = Span(
location=df.AREA.mean() / 1000000,
dimension="height",
line_color="gold",
line_width=1.5,
)
hline = Span(
location=df["budget"].mean() / 1000000,
dimension="width",
line_color="gold",
line_width=1.5,
)
p.circle(
"x", "y", source=source, fill_alpha=0.2, size=10,
)
p.xaxis.axis_label = "ขนาดพื้นที่ (ตร.กม.)"
p.yaxis.axis_label = f"งบประมาณ{q} (ล้านบาท)"
p.xaxis.axis_label_text_font_size = "15pt"
p.yaxis.axis_label_text_font_size = "15pt"
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
hover = HoverTool(
tooltips=[
("เขต", "@desc"),
(f"งบ{q}", "@y ล้านบาท"),
("ขนาดพื้นที่", "@x ตร.กม."),
]
)
p.add_tools(hover)
p.renderers.extend([vline, hline])
output_file(f"plots/scatter-{q_map[q]}-budget-area.html", mode="inline")
save(p)
if __name__ == "__main__":
districts_budget_df = pd.read_csv("data/districts_budget.csv")[
["dname", "ประเภทแผนงาน", "งบแผนงาน", "AREA", "num_male", "num_female"]
]
districts_budget_df["num_total"] = (
districts_budget_df.num_male + districts_budget_df.num_female
)
districts_budget_df.rename(
columns={"ประเภทแผนงาน": "budget_type", "งบแผนงาน": "budget"}, inplace=True
)
q_map = {
"ทั่วไป/บริหาร/อื่นๆ": "gen",
"การคลัง": "treasury",
"เทศกิจ/รักษาความสะอาด": "clean",
"โยธา/ก่อสร้าง/จราจร": "civil",
"น้ำท่วม/ทางเท้า": "pedes",
"สิ่งแวดล้อม": "env",
"พัฒนาชุมชน/อาชีพ": "enh",
"อนามัย/สาธารณะสุข": "health",
"การศึกษา": "edu",
}
plot_vs_population(districts_budget_df)
plot_vs_area(districts_budget_df)
| 31.62
| 85
| 0.560826
|
"""
Scatter plot between
"""
import pandas as pd
import numpy as np
from numpy.random import random
from math import pi
from bokeh.io import output_notebook
output_notebook()
from bokeh.io import show, output_file
from bokeh.palettes import RdYlGn6
from bokeh.models import (
BasicTicker,
ColorBar,
LinearColorMapper,
PrintfTickFormatter,
ColumnDataSource,
HoverTool,
Span,
)
from bokeh.plotting import figure, save, show, output_file
from bokeh.palettes import BuGn, Blues8, Oranges256
def plot_vs_population(districts_budget_df):
"""
From district budget to scatter plots vs total population
"""
for q in districts_budget_df.budget_type.unique():
df = districts_budget_df.query("budget_type == '{}'".format(q))
df["num_total"] = df["num_male"] + df["num_female"]
df = df.groupby(["dname", "num_total"])["budget"].sum().reset_index()
source = ColumnDataSource(
data=dict(
x=df["num_total"] / 10000, y=df["budget"] / 1000000, desc=df["dname"]
)
)
p = figure(title="", tools="hover,box_zoom,reset")
vline = Span(
location=df.num_total.mean() / 10000,
dimension="height",
line_color="gold",
line_width=1.5,
)
hline = Span(
location=df["budget"].mean() / 1000000,
dimension="width",
line_color="gold",
line_width=1.5,
)
p.circle(
"x", "y", source=source, fill_alpha=0.2, size=10,
)
p.xaxis.axis_label = "จำนวนผู้อยู่อาศัย (หมื่นคน)"
p.yaxis.axis_label = f"งบประมาณ{q} (ล้านบาท)"
p.xaxis.axis_label_text_font_size = "15pt"
p.yaxis.axis_label_text_font_size = "15pt"
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
hover = HoverTool(
tooltips=[
("เขต", "@desc"),
(f"งบ{q}", "@y ล้านบาท"),
("จำนวนผู้อาศัย", "@x หมื่นคน"),
]
)
p.add_tools(hover)
p.renderers.extend([vline, hline])
output_file(f"plots/scatter-{q_map[q]}-budget.html", mode="inline")
save(p)
def plot_vs_area(districts_budget_df):
"""
From district budget to scatter plots vs area size
"""
for q in districts_budget_df.budget_type.unique():
df = districts_budget_df.query("budget_type == '{}'".format(q))
df = df.groupby(["dname", "AREA"])["budget"].sum().reset_index()
source = ColumnDataSource(
data=dict(
x=df["AREA"] / 1000000, y=df["budget"] / 1000000, desc=df["dname"]
)
)
p = figure(title="", tools="hover,box_zoom,reset")
vline = Span(
location=df.AREA.mean() / 1000000,
dimension="height",
line_color="gold",
line_width=1.5,
)
hline = Span(
location=df["budget"].mean() / 1000000,
dimension="width",
line_color="gold",
line_width=1.5,
)
p.circle(
"x", "y", source=source, fill_alpha=0.2, size=10,
)
p.xaxis.axis_label = "ขนาดพื้นที่ (ตร.กม.)"
p.yaxis.axis_label = f"งบประมาณ{q} (ล้านบาท)"
p.xaxis.axis_label_text_font_size = "15pt"
p.yaxis.axis_label_text_font_size = "15pt"
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
hover = HoverTool(
tooltips=[
("เขต", "@desc"),
(f"งบ{q}", "@y ล้านบาท"),
("ขนาดพื้นที่", "@x ตร.กม."),
]
)
p.add_tools(hover)
p.renderers.extend([vline, hline])
output_file(f"plots/scatter-{q_map[q]}-budget-area.html", mode="inline")
save(p)
if __name__ == "__main__":
districts_budget_df = pd.read_csv("data/districts_budget.csv")[
["dname", "ประเภทแผนงาน", "งบแผนงาน", "AREA", "num_male", "num_female"]
]
districts_budget_df["num_total"] = (
districts_budget_df.num_male + districts_budget_df.num_female
)
districts_budget_df.rename(
columns={"ประเภทแผนงาน": "budget_type", "งบแผนงาน": "budget"}, inplace=True
)
q_map = {
"ทั่วไป/บริหาร/อื่นๆ": "gen",
"การคลัง": "treasury",
"เทศกิจ/รักษาความสะอาด": "clean",
"โยธา/ก่อสร้าง/จราจร": "civil",
"น้ำท่วม/ทางเท้า": "pedes",
"สิ่งแวดล้อม": "env",
"พัฒนาชุมชน/อาชีพ": "enh",
"อนามัย/สาธารณะสุข": "health",
"การศึกษา": "edu",
}
plot_vs_population(districts_budget_df)
plot_vs_area(districts_budget_df)
| 0
| 0
| 0
|
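Both helpers read the module-level q_map inside output_file, so they only run cleanly from within this script. The sketch below shows the columns data/districts_budget.csv must provide after the rename in __main__ (dname, budget_type, budget, AREA, num_male, num_female); the rows and the q_map entry are fabricated, and the plots/ directory must already exist:

import pandas as pd

q_map = {"การศึกษา": "edu"}   # subset of the mapping defined in __main__
demo_df = pd.DataFrame({
    "dname": ["เขต A", "เขต B"],
    "budget_type": ["การศึกษา", "การศึกษา"],
    "budget": [12_000_000, 8_500_000],
    "AREA": [15_000_000, 22_000_000],
    "num_male": [40_000, 55_000],
    "num_female": [42_000, 57_000],
})
plot_vs_population(demo_df)   # writes plots/scatter-edu-budget.html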
c175aa18424a015e81f4404dc8122dd28b20d6bf
| 302
|
py
|
Python
|
QISKIT/TDA/Qconfig_20_qubit.py
|
rsarkar-github/Quantum-Computing-Projects
|
966c0465f98dca0091f09826e12eb57277faf3c0
|
[
"MIT"
] | null | null | null |
QISKIT/TDA/Qconfig_20_qubit.py
|
rsarkar-github/Quantum-Computing-Projects
|
966c0465f98dca0091f09826e12eb57277faf3c0
|
[
"MIT"
] | null | null | null |
QISKIT/TDA/Qconfig_20_qubit.py
|
rsarkar-github/Quantum-Computing-Projects
|
966c0465f98dca0091f09826e12eb57277faf3c0
|
[
"MIT"
] | null | null | null |
APItoken = 'ffb1bf6df27099919ca9ab63da88b1929016a7f7468d477f65241f61e1f457ab4' \
'f53c50ead0371ce632b283b5dc803fae33b34b3601053d2bde24f4ebc921b1b'
config = {
'url': 'https://q-console-api.mybluemix.net/api',
'hub': 'ibmq',
'group': 'qc-ware',
'project': 'default'
}
| 30.2
| 80
| 0.688742
|
APItoken = 'ffb1bf6df27099919ca9ab63da88b1929016a7f7468d477f65241f61e1f457ab4' \
'f53c50ead0371ce632b283b5dc803fae33b34b3601053d2bde24f4ebc921b1b'
config = {
'url': 'https://q-console-api.mybluemix.net/api',
'hub': 'ibmq',
'group': 'qc-ware',
'project': 'default'
}
| 0
| 0
| 0
|
de5c1f120fe34ee35b979c4f5b009fc460b748c4
| 1,906
|
py
|
Python
|
os_faults/ansible/modules/iptables.py
|
Marie-Donnie/os-faults
|
2f9eb760d240b9a03b7df5682b5b24cf35daacd0
|
[
"Apache-2.0"
] | null | null | null |
os_faults/ansible/modules/iptables.py
|
Marie-Donnie/os-faults
|
2f9eb760d240b9a03b7df5682b5b24cf35daacd0
|
[
"Apache-2.0"
] | null | null | null |
os_faults/ansible/modules/iptables.py
|
Marie-Donnie/os-faults
|
2f9eb760d240b9a03b7df5682b5b24cf35daacd0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()
| 38.897959
| 75
| 0.629066
|
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.module_utils.basic import * # noqa
def main():
module = AnsibleModule(
argument_spec=dict(
service=dict(required=True, type='str'),
action=dict(required=True, choices=['block', 'unblock']),
port=dict(required=True, type='int'),
protocol=dict(required=True, choices=['tcp', 'udp']),
))
service = module.params['service']
action = module.params['action']
port = module.params['port']
protocol = module.params['protocol']
comment = '{}_temporary_DROP'.format(service)
if action == 'block':
cmd = ('bash -c "iptables -I INPUT 1 -p {protocol} --dport {port} '
'-j DROP -m comment --comment "{comment}""'.format(
comment=comment, port=port, protocol=protocol))
else:
cmd = ('bash -c "rule=`iptables -L INPUT -n --line-numbers | '
'grep "{comment}" | cut -d \' \' -f1`; for arg in $rule;'
' do iptables -D INPUT -p {protocol} --dport {port} '
'-j DROP -m comment --comment "{comment}"; done"'.format(
comment=comment, port=port, protocol=protocol))
rc, stdout, stderr = module.run_command(cmd, check_rc=True)
module.exit_json(cmd=cmd, rc=rc, stderr=stderr, stdout=stdout)
if __name__ == '__main__':
main()
| 1,228
| 0
| 23
|
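For a concrete sense of what the module executes, this standalone snippet reproduces the 'block' branch of main() for hypothetical values (the service name, port and protocol are made up; the nested quoting is exactly as in the module):

service, port, protocol = "keystone", 5000, "tcp"
comment = '{}_temporary_DROP'.format(service)
cmd = ('bash -c "iptables -I INPUT 1 -p {protocol} --dport {port} '
       '-j DROP -m comment --comment "{comment}""'.format(
           comment=comment, port=port, protocol=protocol))
print(cmd)   # the command Ansible would run on the target host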
91aa923768fc23db2b0a5d788c50eb978a3701bc
| 15,766
|
py
|
Python
|
python/pyspark/sql/tests/test_session.py
|
wangyeweikuer/spark
|
731aa2cdf8a78835621fbf3de2d3492b27711d1a
|
[
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2022-03-25T06:40:43.000Z
|
2022-03-25T06:40:43.000Z
|
python/pyspark/sql/tests/test_session.py
|
nyingping/spark
|
ca7200b0008dc6101a252020e6c34ef7b72d81d6
|
[
"Apache-2.0"
] | 6
|
2018-06-14T11:15:27.000Z
|
2019-01-27T12:11:23.000Z
|
python/pyspark/sql/tests/test_session.py
|
nyingping/spark
|
ca7200b0008dc6101a252020e6c34ef7b72d81d6
|
[
"Apache-2.0"
] | 1
|
2022-03-09T08:50:07.000Z
|
2022-03-09T08:50:07.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import unittest
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession, SQLContext, Row
from pyspark.sql.functions import col
from pyspark.testing.sqlutils import ReusedSQLTestCase
from pyspark.testing.utils import PySparkTestCase
# We can't include this test into SQLTests because we will stop class's SparkContext and cause
# other tests failed.
# This test is separate because it's closely related with session's start and stop.
# See SPARK-23228.
if __name__ == "__main__":
from pyspark.sql.tests.test_session import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 40.425641
| 99
| 0.639921
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import unittest
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession, SQLContext, Row
from pyspark.sql.functions import col
from pyspark.testing.sqlutils import ReusedSQLTestCase
from pyspark.testing.utils import PySparkTestCase
class SparkSessionTests(ReusedSQLTestCase):
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
class SparkSessionTests1(ReusedSQLTestCase):
# We can't include this test into SQLTests because we will stop class's SparkContext and cause
# other tests failed.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext("local[4]", self.sc.appName)
spark = SparkSession.builder.getOrCreate()
try:
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
finally:
spark.stop()
sc.stop()
class SparkSessionTests2(PySparkTestCase):
# This test is separate because it's closely related with session's start and stop.
# See SPARK-23228.
def test_set_jvm_default_session(self):
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
finally:
spark.stop()
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isEmpty())
def test_jvm_default_session_already_set(self):
# Here, we assume there is the default session already set in JVM.
jsession = self.sc._jvm.SparkSession(self.sc._jsc.sc())
self.sc._jvm.SparkSession.setDefaultSession(jsession)
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
            # The session should be the same as the existing one.
self.assertTrue(jsession.equals(spark._jvm.SparkSession.getDefaultSession().get()))
finally:
spark.stop()
class SparkSessionTests3(unittest.TestCase):
def test_active_session(self):
spark = SparkSession.builder.master("local").getOrCreate()
try:
activeSession = SparkSession.getActiveSession()
df = activeSession.createDataFrame([(1, "Alice")], ["age", "name"])
self.assertEqual(df.collect(), [Row(age=1, name="Alice")])
finally:
spark.stop()
def test_get_active_session_when_no_active_session(self):
active = SparkSession.getActiveSession()
self.assertEqual(active, None)
spark = SparkSession.builder.master("local").getOrCreate()
active = SparkSession.getActiveSession()
self.assertEqual(active, spark)
spark.stop()
active = SparkSession.getActiveSession()
self.assertEqual(active, None)
def test_spark_session(self):
spark = SparkSession.builder.master("local").config("some-config", "v2").getOrCreate()
try:
self.assertEqual(spark.conf.get("some-config"), "v2")
self.assertEqual(spark.sparkContext._conf.get("some-config"), "v2")
self.assertEqual(spark.version, spark.sparkContext.version)
spark.sql("CREATE DATABASE test_db")
spark.catalog.setCurrentDatabase("test_db")
self.assertEqual(spark.catalog.currentDatabase(), "test_db")
spark.sql("CREATE TABLE table1 (name STRING, age INT) USING parquet")
self.assertEqual(spark.table("table1").columns, ["name", "age"])
self.assertEqual(spark.range(3).count(), 3)
# SPARK-37516: Only plain column references work as variable in SQL.
self.assertEqual(
spark.sql("select {c} from range(1)", c=col("id")).first(), spark.range(1).first()
)
with self.assertRaisesRegex(ValueError, "Column"):
spark.sql("select {c} from range(10)", c=col("id") + 1)
finally:
spark.sql("DROP DATABASE test_db CASCADE")
spark.stop()
def test_global_default_session(self):
spark = SparkSession.builder.master("local").getOrCreate()
try:
self.assertEqual(SparkSession.builder.getOrCreate(), spark)
finally:
spark.stop()
def test_default_and_active_session(self):
spark = SparkSession.builder.master("local").getOrCreate()
activeSession = spark._jvm.SparkSession.getActiveSession()
defaultSession = spark._jvm.SparkSession.getDefaultSession()
try:
self.assertEqual(activeSession, defaultSession)
finally:
spark.stop()
def test_config_option_propagated_to_existing_session(self):
session1 = SparkSession.builder.master("local").config("spark-config1", "a").getOrCreate()
self.assertEqual(session1.conf.get("spark-config1"), "a")
session2 = SparkSession.builder.config("spark-config1", "b").getOrCreate()
try:
self.assertEqual(session1, session2)
self.assertEqual(session1.conf.get("spark-config1"), "b")
finally:
session1.stop()
def test_new_session(self):
session = SparkSession.builder.master("local").getOrCreate()
newSession = session.newSession()
try:
self.assertNotEqual(session, newSession)
finally:
session.stop()
newSession.stop()
def test_create_new_session_if_old_session_stopped(self):
session = SparkSession.builder.master("local").getOrCreate()
session.stop()
newSession = SparkSession.builder.master("local").getOrCreate()
try:
self.assertNotEqual(session, newSession)
finally:
newSession.stop()
def test_active_session_with_None_and_not_None_context(self):
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
sc = None
session = None
try:
sc = SparkContext._active_spark_context
self.assertEqual(sc, None)
activeSession = SparkSession.getActiveSession()
self.assertEqual(activeSession, None)
sparkConf = SparkConf()
sc = SparkContext.getOrCreate(sparkConf)
activeSession = sc._jvm.SparkSession.getActiveSession()
self.assertFalse(activeSession.isDefined())
session = SparkSession(sc)
activeSession = sc._jvm.SparkSession.getActiveSession()
self.assertTrue(activeSession.isDefined())
activeSession2 = SparkSession.getActiveSession()
self.assertNotEqual(activeSession2, None)
finally:
if session is not None:
session.stop()
if sc is not None:
sc.stop()
class SparkSessionTests4(ReusedSQLTestCase):
def test_get_active_session_after_create_dataframe(self):
session2 = None
try:
activeSession1 = SparkSession.getActiveSession()
session1 = self.spark
self.assertEqual(session1, activeSession1)
session2 = self.spark.newSession()
activeSession2 = SparkSession.getActiveSession()
self.assertEqual(session1, activeSession2)
self.assertNotEqual(session2, activeSession2)
session2.createDataFrame([(1, "Alice")], ["age", "name"])
activeSession3 = SparkSession.getActiveSession()
self.assertEqual(session2, activeSession3)
session1.createDataFrame([(1, "Alice")], ["age", "name"])
activeSession4 = SparkSession.getActiveSession()
self.assertEqual(session1, activeSession4)
finally:
if session2 is not None:
session2.stop()
class SparkSessionTests5(unittest.TestCase):
def setUp(self):
# These tests require restarting the Spark context so we set up a new one for each test
# rather than at the class level.
self.sc = SparkContext("local[4]", self.__class__.__name__, conf=SparkConf())
self.spark = SparkSession(self.sc)
def tearDown(self):
self.sc.stop()
self.spark.stop()
def test_sqlcontext_with_stopped_sparksession(self):
# SPARK-30856: test that SQLContext.getOrCreate() returns a usable instance after
# the SparkSession is restarted.
sql_context = SQLContext.getOrCreate(self.spark.sparkContext)
self.spark.stop()
spark = SparkSession.builder.master("local[4]").appName(self.sc.appName).getOrCreate()
new_sql_context = SQLContext.getOrCreate(spark.sparkContext)
self.assertIsNot(new_sql_context, sql_context)
self.assertIs(SQLContext.getOrCreate(spark.sparkContext).sparkSession, spark)
try:
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
finally:
spark.stop()
self.assertIsNone(SQLContext._instantiatedContext)
def test_sqlcontext_with_stopped_sparkcontext(self):
# SPARK-30856: test initialization via SparkSession when only the SparkContext is stopped
self.sc.stop()
spark = SparkSession.builder.master("local[4]").appName(self.sc.appName).getOrCreate()
self.sc = spark.sparkContext
self.assertIs(SQLContext.getOrCreate(self.sc).sparkSession, spark)
def test_get_sqlcontext_with_stopped_sparkcontext(self):
# SPARK-30856: test initialization via SQLContext.getOrCreate() when only the SparkContext
# is stopped
self.sc.stop()
self.sc = SparkContext("local[4]", self.sc.appName)
self.assertIs(SQLContext.getOrCreate(self.sc)._sc, self.sc)
class SparkSessionBuilderTests(unittest.TestCase):
def test_create_spark_context_first_then_spark_session(self):
sc = None
session = None
try:
conf = SparkConf().set("key1", "value1")
sc = SparkContext("local[4]", "SessionBuilderTests", conf=conf)
session = SparkSession.builder.config("key2", "value2").getOrCreate()
self.assertEqual(session.conf.get("key1"), "value1")
self.assertEqual(session.conf.get("key2"), "value2")
self.assertEqual(session.sparkContext, sc)
self.assertFalse(sc.getConf().contains("key2"))
self.assertEqual(sc.getConf().get("key1"), "value1")
finally:
if session is not None:
session.stop()
if sc is not None:
sc.stop()
def test_another_spark_session(self):
session1 = None
session2 = None
try:
session1 = SparkSession.builder.config("key1", "value1").getOrCreate()
session2 = SparkSession.builder.config(
"spark.sql.codegen.comments", "true"
).getOrCreate()
self.assertEqual(session1.conf.get("key1"), "value1")
self.assertEqual(session2.conf.get("key1"), "value1")
self.assertEqual(session1.conf.get("spark.sql.codegen.comments"), "false")
self.assertEqual(session2.conf.get("spark.sql.codegen.comments"), "false")
self.assertEqual(session1.sparkContext, session2.sparkContext)
self.assertEqual(session1.sparkContext.getConf().get("key1"), "value1")
self.assertFalse(session1.sparkContext.getConf().contains("key2"))
finally:
if session1 is not None:
session1.stop()
if session2 is not None:
session2.stop()
def test_create_spark_context_with_initial_session_options(self):
sc = None
session = None
try:
conf = SparkConf().set("key1", "value1")
sc = SparkContext("local[4]", "SessionBuilderTests", conf=conf)
session = (
SparkSession.builder.config("spark.sql.codegen.comments", "true")
.enableHiveSupport()
.getOrCreate()
)
self.assertEqual(session._jsparkSession.sharedState().conf().get("key1"), "value1")
self.assertEqual(
session._jsparkSession.sharedState().conf().get("spark.sql.codegen.comments"),
"true",
)
self.assertEqual(
session._jsparkSession.sharedState().conf().get("spark.sql.catalogImplementation"),
"hive",
)
self.assertEqual(session.sparkContext, sc)
finally:
if session is not None:
session.stop()
if sc is not None:
sc.stop()
class SparkExtensionsTest(unittest.TestCase):
# These tests are separate because it uses 'spark.sql.extensions' which is
# static and immutable. This can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"SparkSessionExtensionSuite.class"
)
if not glob.glob(os.path.join(SPARK_HOME, filename_pattern)):
raise unittest.SkipTest(
"'org.apache.spark.sql.SparkSessionExtensionSuite' is not "
"available. Will skip the related tests."
)
# Note that 'spark.sql.extensions' is a static immutable configuration.
cls.spark = (
SparkSession.builder.master("local[4]")
.appName(cls.__name__)
.config("spark.sql.extensions", "org.apache.spark.sql.MyExtensions")
.getOrCreate()
)
@classmethod
def tearDownClass(cls):
cls.spark.stop()
def test_use_custom_class_for_extensions(self):
self.assertTrue(
self.spark._jsparkSession.sessionState()
.planner()
.strategies()
.contains(
self.spark._jvm.org.apache.spark.sql.MySparkStrategy(self.spark._jsparkSession)
),
"MySparkStrategy not found in active planner strategies",
)
self.assertTrue(
self.spark._jsparkSession.sessionState()
.analyzer()
.extendedResolutionRules()
.contains(self.spark._jvm.org.apache.spark.sql.MyRule(self.spark._jsparkSession)),
"MyRule not found in extended resolution rules",
)
if __name__ == "__main__":
from pyspark.sql.tests.test_session import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 12,883
| 469
| 771
|
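The pattern these tests exercise over and over is builder-based session creation followed by an explicit stop. A minimal standalone sketch of that lifecycle, assuming a local PySpark installation:

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").appName("demo").getOrCreate()
try:
    df = spark.createDataFrame([(1, "Alice")], ["age", "name"])
    assert df.collect()[0].name == "Alice"
finally:
    spark.stop()   # stop so the next session starts from a clean state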
f10f0867880f642b6d0b6d7c51bd8255be411723
| 14,187
|
py
|
Python
|
model/run_experiments.py
|
irenetrampoline/clustering-interval-censored
|
f6ab06a6cf3098ffe006d1b95d1b4f1d158b0bc4
|
[
"MIT"
] | 1
|
2022-02-03T08:47:45.000Z
|
2022-02-03T08:47:45.000Z
|
model/run_experiments.py
|
irenetrampoline/clustering-interval-censored
|
f6ab06a6cf3098ffe006d1b95d1b4f1d158b0bc4
|
[
"MIT"
] | null | null | null |
model/run_experiments.py
|
irenetrampoline/clustering-interval-censored
|
f6ab06a6cf3098ffe006d1b95d1b4f1d158b0bc4
|
[
"MIT"
] | null | null | null |
import argparse
import numpy as np
import os
import sys
sys.path.append('../data')
sys.path.append('../plot')
import torch
from load import sigmoid, quadratic, chf, parkinsons, load_data_format
from data_utils import parse_data, change_missing
from plot_utils import plot_subtypes, plot_latent
from models import Sublign
if __name__=='__main__':
main()
| 48.585616
| 229
| 0.584408
|
import argparse
import numpy as np
import os
import sys
sys.path.append('../data')
sys.path.append('../plot')
import torch
from load import sigmoid, quadratic, chf, parkinsons, load_data_format
from data_utils import parse_data, change_missing
from plot_utils import plot_subtypes, plot_latent
from models import Sublign
def get_hyperparameters(data_format_num):
# if data_format_num < 3:
# anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 0.001, 0., 10, 20, 50, 'l1', 0.01
if data_format_num == 3:
# failing on hpsearch
anneal, C, b_vae, dh, ds, drnn, reg_type, lr = False, 0.01, 0.0, 100, 20, 200, 'l2', 0.001
# if data_format_num == 5 or data_format_num == 3:
# anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 0.001, 0.01, 20, 20, 100, 'l2', 0.01
if data_format_num == 1:
# best by hpsearch: (True, 0.001, 0.0, 200, 5, 200, 'l2', 0.001)
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = True, 0.001, 0.0, 5, 200, 200, 'l2', 0.001
if data_format_num == 3:
# anneal, b_vae, C, ds, dh, drnn, reg_type, lr = True, 0.0, 0.0, 100, 20, 200, 'l2', 0.01
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = True, 1.0, 0.01, 100, 5, 200, 'l2', 0.01 # cheat
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 0.01, 0.0, 100, 20, 200, 'l2', 0.001 # cheat 2
if data_format_num == 4:
# anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 0.01, 0.0, 100, 20, 200, 'l2', 0.01 # cheat
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 1.0, 0.01, 200, 20, 200, 'l2', 0.1
if data_format_num == 5:
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 0.01, 0.0, 200, 20, 200, 'l2', 0.01
if data_format_num == 6:
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = True, 0.01, 0.0, 200, 20, 200, 'l2', 0.01
if data_format_num == 7:
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 0.01, 0.0, 100, 20, 200, 'l2', 0.001
if data_format_num == 8:
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = True, 0.01, 0.01, 100, 20, 200, 'l2', 0.01
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 1., 0., 10, 20, 50, 'l1', 1e-2
# best from prev : False, 0.001, 0.0, 10, 20, 50, 'l1', 0.1
# anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 0.001, 0.0, 10, 20, 50, 'l1', 0.1
return anneal, b_vae, C, ds, dh, drnn, reg_type, lr
def get_hyperparameters_ppmi():
b_vae, C, ds, dh, drnn, reg_type, lr = 0.01, 0., 10, 10, 20, 'l1', 0.1
return b_vae, C, ds, dh, drnn, reg_type, lr
def get_hyperparameters_chf(version=0):
# original, results in paper are from this version
if version == 0:
ds, dh, drnn, reg_type, C, b_vae, lr, epochs, learn_time = 10, 20, 50,'l1', 0.0, 0.001, 0.01, 1000, False
elif version == 1:
ds, dh, drnn, reg_type, C, b_vae, lr, epochs, learn_time = 10,200,200,'l1', 0.0, 0.001, 0.01, 1000, True
elif version == 2:
ds, dh, drnn, reg_type, C, b_vae, lr, epochs, learn_time = 10,200,200,'l1', 0.1, 0.001, 0.01, 1000, True
elif version == 3:
ds, dh, drnn, reg_type, C, b_vae, lr, epochs, learn_time = 10,200,200,'l1', 0.0, 0.001, 0.1, 1000, True
elif version == 4:
ds, dh, drnn, reg_type, C, b_vae, lr, epochs, learn_time = 10,200,200,'l1', 0.0, 0.01, 0.1, 1000, True
return ds, dh, drnn, reg_type, C, b_vae, lr, epochs, learn_time
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', action='store', type=int, default=800, help="Number of epochs")
parser.add_argument('--trials', action='store', type=int, default=1, help="Number of trials")
parser.add_argument('--model_name', action='store', type=str, default='SubLign', help="Model name for Latex table making")
parser.add_argument('--lr', action='store', type=float, default=None, help="Learning rate manual override")
parser.add_argument('--b_vae', action='store', type=float, default=None, help="b-VAE val override")
parser.add_argument('--C', action='store', type=float, default=None, help="C override")
# datasets
parser.add_argument('--data_num', action='store', type=int, help="Data Format Number")
parser.add_argument('--chf', action='store_true', help="Use CHF dataset")
parser.add_argument('--ppmi', action='store_true', help="Use PPMI dataset")
# delta setup
parser.add_argument('--max_delta', action='store', type=float, default=5., help="Maximum possible delta")
parser.add_argument('--no_time', action='store_true', help="Learn time at all")
# debugging
parser.add_argument('--verbose', action='store_true', help="Plot everything")
parser.add_argument('--cuda', action='store_true', help="Use GPU")
parser.add_argument('--missing', action='store', type=float, default=0., help="What percent of data to make missing")
parser.add_argument('--plot_debug', action='store_true', help="Make animated gif about alignment / clusterings over epochs")
parser.add_argument('--epoch_debug', action='store_true', help="Save pickle about epoch differences over training")
parser.add_argument('--aggressive', action='store', type=int, help="Learn time at all")
parser.add_argument('--version', action='store', type=int, help="Choose hyp settings", default=0)
# other experiments
args = parser.parse_args()
trial_results = np.zeros((args.trials, 4))
data_format_num = args.data_num
if args.cuda:
device = 'cuda'
else:
device = 'cpu'
print('device', device)
print('data %d' % data_format_num)
for trial_num in range(args.trials):
# datasets
if data_format_num is not None:
max_visits = 4
num_output_dims = 3 if data_format_num < 3 else 1
use_sigmoid = data_format_num < 3
anneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
if args.lr is not None:
print('Running with lr=%.3f' % args.lr)
lr = args.lr
if args.C is not None:
print('Running with C=%.3f' % args.C)
C = args.C
if args.b_vae is not None:
print('Running with b_vae=%.3f' % args.b_vae)
b_vae = args.b_vae
data = load_data_format(data_format_num, trial_num, cache=True)
shuffle = False
elif args.chf:
ds, dh, drnn, reg_type, C, b_vae, lr, epochs, learn_time = get_hyperparameters_chf(version=args.version)
data = chf()
max_visits = 38
shuffle = True
num_output_dims = data.shape[1] - 4
elif args.ppmi:
b_vae, C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters_ppmi()
if args.lr is not None:
print('Running with lr=%.3f' % args.lr)
lr = args.lr
if args.C is not None:
print('Running with C=%.3f' % args.C)
C = args.C
if args.b_vae is not None:
print('Running with b_vae=%.3f' % args.b_vae)
b_vae = args.b_vae
data = parkinsons()
max_visits = 17
shuffle = True
num_output_dims = data.shape[1] - 4
train_data_loader, train_data_dict, _, _, test_data_loader, test_data_dict, valid_pid, test_pid, unique_pid = parse_data(data.values, max_visits=max_visits, test_per=0.2, valid_per=0.2, shuffle=shuffle, device=device)
if args.missing > 0.:
train_data_loader, train_data_dict = change_missing(train_data_dict, args.missing)
data_loader, collect_dict, unique_pid = parse_data(data.values, max_visits=max_visits, device=device)
"""
    best params found through hyperparameter tuning (cross_validation/hpsearch.py)
# sigmoid: C (0.01), dim_h (20), ds (10 mid), dim_rnn (50 mid), reg_type (l1), lr (0.1)
# quad: C (0.1), dim_h (50), ds (10), dim_rnn (100), reg_type (l1), lr (0.1)
ppmi: (0.0, 10, 10, 50, 'l1', 0.1)
"""
# dim_stochastic, dim_hidden, dim_rnn, C, dim_biomarkers=3, reg_type = 'l2',
if data_format_num is not None:
model = Sublign(d_s, d_h, d_rnn, b_vae=b_vae, dim_biomarkers=num_output_dims, sigmoid=use_sigmoid, reg_type=reg_type, auto_delta=False, max_delta=args.max_delta, learn_time=(not args.no_time), device=device)
if device == 'cuda':
device_torch = torch.device('cuda')
model.to(device_torch)
model.fit(train_data_loader, test_data_loader, args.epochs, lr, verbose=args.verbose, fname='runs/data%d_trial%d.pt' % (data_format_num, trial_num), eval_freq=25, anneal=anneal)
elif args.chf:
args.verbose = False
model = Sublign(ds, dh, drnn, dim_biomarkers=num_output_dims, sigmoid=True, reg_type=reg_type, C=C, auto_delta=False, max_delta=args.max_delta, learn_time=(not args.no_time and learn_time), device=device, b_vae=b_vae)
if device == 'cuda':
device_torch = torch.device('cuda')
model.to(device_torch)
model.fit(data_loader, data_loader, args.epochs, lr, verbose=args.verbose,fname='runs/chf_v%d_%d.pt' % (args.version, args.epochs),eval_freq=25)
X = torch.tensor(collect_dict['Y_collect']).to(model.device)
Y = torch.tensor(collect_dict['obs_t_collect']).to(model.device)
M = torch.tensor(collect_dict['mask_collect']).to(model.device)
(nelbo, nll, kl), norm_reg = model.forward(Y, None, X, M, None)
nelbo, nll, kl, norm_reg = nelbo.item(), nll.item(), kl.item(), norm_reg.item()
subtypes = model.get_subtypes_datadict(collect_dict, K=3)
labels = model.get_labels(collect_dict)
deltas = model.get_deltas(collect_dict)
if args.cuda:
deltas = deltas.cpu().detach().numpy()
else:
deltas = deltas.detach().numpy()
import pickle
results = {
'labels':labels,
'deltas': deltas,
'subtypes': subtypes,
'nelbo': nelbo,
'nll': nll,
'kl': kl,
'norm_reg': norm_reg
}
pickle.dump(results, open('../clinical_runs/chf_v%d_%d.pk' % (args.version, args.epochs), 'wb'))
return
elif args.ppmi:
model = Sublign(d_s, d_h, d_rnn, b_vae=b_vae, C=C, dim_biomarkers=num_output_dims, sigmoid=True, reg_type=reg_type, auto_delta=True, max_delta=args.max_delta, learn_time=(not args.no_time))
model.fit(train_data_loader, test_data_loader, args.epochs, lr=lr, verbose=args.verbose, fname='runs/ppmi.pt', eval_freq=25)
results = model.score(train_data_dict, test_data_dict, K=2)
test_ari = results['ari']
print('PPMI Test ARI: %.3f' % test_ari)
# results = model.score(train_data_dict, test_data_dict, K=2)
# test_ari = results['ari']
# print('PPMI Test ARI: %.3f' % test_ari)
subtypes = model.get_subtypes_datadict(collect_dict)
labels = model.get_labels(collect_dict)
deltas = model.get_deltas(collect_dict)
import pickle
if args.cuda:
subtypes = subtypes.cpu().detach().numpy()
labels = labels.cpu().detach().numpy()
deltas = deltas.cpu().detach().numpy()
else:
subtypes = subtypes.cpu().detach().numpy()
labels = labels.cpu().detach().numpy()
deltas = deltas.cpu().detach().numpy()
pickle.dump((labels, deltas, subtypes), open('../clinical_runs/ppmi_icml.pk', 'wb'))
return
# subtypes = model.get_subtypes(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'], K=2)
train_results = model.score(train_data_dict, train_data_dict)
test_results = model.score(train_data_dict, test_data_dict)
train_mse = train_results['mse']
train_ari = train_results['ari']
train_swaps = train_results['swaps']
train_pear = train_results['pear']
mse = test_results['mse']
ari = test_results['ari']
swaps = test_results['swaps']
pear = test_results['pear']
# nelbo, nll, kl = model.get_loss(Y, S, X, M, anneal=1.)
# nelbo, nll, kl = nelbo.mean().detach().numpy(), nll.mean().detach().numpy(), kl.mean().detach().numpy()
# if args.verbose:
# plot_subtypes(subtypes, args.sigmoid, train_data_dict)
# plot_latent(model, test_data_dict)
trial_results[trial_num] = [mse, ari, swaps, pear]
if args.no_time:
args.model_name = 'SubNoLign'
if args.trials == 1:
print('Train: %.3f, %.3f, %.3f, %.3f' % (train_mse, train_ari, train_swaps, train_pear))
print('Test : %.3f, %.3f, %.3f, %.3f' % (mse, ari, swaps, pear))
# print('NELBO: %.3f, NLL: %.3f, KL: %.3f' % (nelbo, nll, kl))
else:
line_str = list()
for i,j in zip(trial_results.mean(axis=0), trial_results.std(axis=0)):
line_str.append('%.3f $\\pm$ %.3f' % (i,j))
print(' & '.join([args.model_name] + line_str) + '\\\\')
if args.data_num:
trials_fname = '%s_data%d_trials%d.txt' % (args.model_name, args.data_num, args.trials)
else:
trials_fname = '%s_ppmi_trials%d.txt' % (args.model_name, args.trials)
if not os.path.exists(trials_fname):
f = open(trials_fname, 'w')
else:
f = open(trials_fname, 'a')
f.write(' & '.join([args.model_name] + line_str) + '\\\\' + '\n')
f.close()
if __name__=='__main__':
main()
| 13,728
| 0
| 99
|
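The get_hyperparameters helpers just return fixed tuples keyed on the dataset number, and main() unpacks them positionally. A short sketch, assuming this module and its imports are importable (the values below are read straight from the data_format_num == 1 branch):

anneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(1)
print(anneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr)
# True 0.001 0.0 5 200 200 l2 0.001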
a53c8a2a5ad5aded053ad7c5fd27b412fc60a466
| 2,573
|
py
|
Python
|
parsers/pdbparser.py
|
rigdenlab/conkit-web
|
bf50d28a73f43b9eb0e0c397ec1d0fd32547fdf1
|
[
"BSD-3-Clause"
] | 1
|
2020-04-16T16:52:53.000Z
|
2020-04-16T16:52:53.000Z
|
parsers/pdbparser.py
|
rigdenlab/conplot
|
9b3129d9e1b7ed93da63c6fd31f9b50e63f2d4d9
|
[
"BSD-3-Clause"
] | 47
|
2020-05-11T13:59:11.000Z
|
2022-01-21T09:37:18.000Z
|
parsers/pdbparser.py
|
rigdenlab/conkit-web
|
bf50d28a73f43b9eb0e0c397ec1d0fd32547fdf1
|
[
"BSD-3-Clause"
] | 5
|
2020-04-24T11:19:21.000Z
|
2020-05-06T08:01:36.000Z
|
from Bio.PDB import PDBParser as BioPDBParser
import io
import itertools
from operator import itemgetter
from utils.exceptions import InvalidFormat
VALID_AMINOACIDS = {"A", "R", "N", "D", "C", "C", "Q", "E", "G", "H", "I", "L", "K", "M", "M", "F", "P", "O", "S", "U",
"T", "W", "Y", "V", "B", "Z", "X", "X", "J"}
def get_chain_contacts(chain):
"""Credits to Felix Simkovic; code taken from GitHub rigdenlab/conkit/conkit/io/pdb.py"""
contacts = []
residue_range = list(range(1, len(chain) + 1))
assert len(residue_range) == len(chain)
iterator = itertools.product(list(zip(residue_range, chain)), list(zip(residue_range, chain)))
for (resseq1_alt, residue1), (resseq2_alt, residue2) in iterator:
seq_distance = int(residue1.id[1]) - int(residue2.id[1])
if seq_distance <= 4:
continue
for atom1, atom2 in itertools.product(residue1, residue2):
xyz_distance = atom1 - atom2
if xyz_distance > 20:
d_bin = 9
elif xyz_distance <= 4:
d_bin = 0
else:
d_bin = int(round((xyz_distance - 4) / 2, 0))
if xyz_distance < 8:
contact = (int(residue1.id[1]), int(residue2.id[1]), round(1.0 - (xyz_distance / 100), 6), d_bin, 1)
else:
contact = (int(residue1.id[1]), int(residue2.id[1]), 0, d_bin, 1)
contacts.append(contact)
return contacts
def remove_atoms(chain):
"""Credits to Felix Simkovic; code taken from GitHub rigdenlab/conkit/conkit/io/pdb.py"""
for residue in chain.copy():
if residue.id[0].strip() and residue.resname not in VALID_AMINOACIDS:
chain.detach_child(residue.id)
continue
for atom in residue.copy():
# if atom.is_disordered():
# chain[residue.id].detach_child(atom.id)
if residue.resname == "GLY" and atom.id == "CA":
continue
elif atom.id != "CB":
chain[residue.id].detach_child(atom.id)
| 38.402985
| 119
| 0.585309
|
from Bio.PDB import PDBParser as BioPDBParser
import io
import itertools
from operator import itemgetter
from utils.exceptions import InvalidFormat
VALID_AMINOACIDS = {"A", "R", "N", "D", "C", "C", "Q", "E", "G", "H", "I", "L", "K", "M", "M", "F", "P", "O", "S", "U",
"T", "W", "Y", "V", "B", "Z", "X", "X", "J"}
def get_chain_contacts(chain):
"""Credits to Felix Simkovic; code taken from GitHub rigdenlab/conkit/conkit/io/pdb.py"""
contacts = []
residue_range = list(range(1, len(chain) + 1))
assert len(residue_range) == len(chain)
iterator = itertools.product(list(zip(residue_range, chain)), list(zip(residue_range, chain)))
for (resseq1_alt, residue1), (resseq2_alt, residue2) in iterator:
seq_distance = int(residue1.id[1]) - int(residue2.id[1])
if seq_distance <= 4:
continue
for atom1, atom2 in itertools.product(residue1, residue2):
xyz_distance = atom1 - atom2
if xyz_distance > 20:
d_bin = 9
elif xyz_distance <= 4:
d_bin = 0
else:
d_bin = int(round((xyz_distance - 4) / 2, 0))
if xyz_distance < 8:
contact = (int(residue1.id[1]), int(residue2.id[1]), round(1.0 - (xyz_distance / 100), 6), d_bin, 1)
else:
contact = (int(residue1.id[1]), int(residue2.id[1]), 0, d_bin, 1)
contacts.append(contact)
return contacts
def remove_atoms(chain):
"""Credits to Felix Simkovic; code taken from GitHub rigdenlab/conkit/conkit/io/pdb.py"""
for residue in chain.copy():
if residue.id[0].strip() and residue.resname not in VALID_AMINOACIDS:
chain.detach_child(residue.id)
continue
for atom in residue.copy():
# if atom.is_disordered():
# chain[residue.id].detach_child(atom.id)
if residue.resname == "GLY" and atom.id == "CA":
continue
elif atom.id != "CB":
chain[residue.id].detach_child(atom.id)
def PDBParser(input, input_format=None):
try:
parser = BioPDBParser().get_structure('pdb', io.StringIO(input))
chain = list(parser.get_chains())[0]
remove_atoms(chain)
contacts = get_chain_contacts(chain)
except:
raise InvalidFormat('Unable to parse contacts')
if not contacts:
raise InvalidFormat('Unable to parse contacts')
output = ["PDB"]
output += sorted(contacts, key=itemgetter(2), reverse=True)
return output
| 469
| 0
| 23
|
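A minimal call into the parser above, assuming a PDB file on disk (example.pdb is a hypothetical path) and the project's utils package importable. The first element of the returned list is the literal header 'PDB'; the rest are (res1, res2, score, distance_bin, 1) tuples sorted by score:

with open("example.pdb") as handle:
    contacts = PDBParser(handle.read())

print(contacts[0])                 # 'PDB'
for res1, res2, score, d_bin, _ in contacts[1:6]:
    print(res1, res2, score, d_bin)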
415c056f05afba92871ed1b11cf1af7a2b45bdd6
| 1,753
|
py
|
Python
|
salmon/search/discogs.py
|
Junkbite/smoked-salmon
|
c7ee36dc9bba00707d8529af34b69e8c529d3615
|
[
"Apache-2.0"
] | 42
|
2020-03-02T11:42:17.000Z
|
2022-03-02T13:51:05.000Z
|
salmon/search/discogs.py
|
Junkbite/smoked-salmon
|
c7ee36dc9bba00707d8529af34b69e8c529d3615
|
[
"Apache-2.0"
] | 20
|
2020-03-02T11:46:43.000Z
|
2022-01-26T23:33:37.000Z
|
salmon/search/discogs.py
|
Junkbite/smoked-salmon
|
c7ee36dc9bba00707d8529af34b69e8c529d3615
|
[
"Apache-2.0"
] | 16
|
2020-03-01T11:29:55.000Z
|
2022-01-24T18:10:35.000Z
|
import re
from salmon.search.base import IdentData, SearchMixin
from salmon.sources import DiscogsBase
SOURCES = {
"Vinyl": "Vinyl",
"File": "WEB",
"CD": "CD",
}
def sanitize_artist_name(name):
"""
    Remove parenthetical number disambiguation bullshit from artist names,
as well as the asterisk stuff.
"""
name = re.sub(r" \(\d+\)$", "", name)
return re.sub(r"\*+$", "", name)
def parse_source(formats):
"""
Take the list of format strings provided by Discogs and iterate over them
to find a possible source for the release.
"""
for format_s, source in SOURCES.items():
if any(format_s in f for f in formats):
return source
| 30.224138
| 79
| 0.553908
|
import re
from salmon.search.base import IdentData, SearchMixin
from salmon.sources import DiscogsBase
SOURCES = {
"Vinyl": "Vinyl",
"File": "WEB",
"CD": "CD",
}
class Searcher(DiscogsBase, SearchMixin):
async def search_releases(self, searchstr, limit):
releases = {}
resp = await self.get_json(
"/database/search",
params={"q": searchstr, "type": "release", "perpage": 50},
)
for rls in resp["results"]:
artists, title = rls["title"].split(" - ", 1)
year = rls["year"] if "year" in rls else None
source = parse_source(rls["format"])
ed_title = ", ".join(set(rls["format"]))
edition = f"{year} {source}"
if rls["label"] and rls["label"][0] != "Not On Label":
edition += f" {rls['label'][0]} {rls['catno']}"
else:
edition += " Not On Label"
releases[rls["id"]] = (
IdentData(artists, title, year, None, source),
self.format_result(artists, title, edition, ed_title=ed_title),
)
if len(releases) == limit:
break
return "Discogs", releases
def sanitize_artist_name(name):
"""
    Remove parenthetical number disambiguation bullshit from artist names,
as well as the asterisk stuff.
"""
name = re.sub(r" \(\d+\)$", "", name)
return re.sub(r"\*+$", "", name)
def parse_source(formats):
"""
Take the list of format strings provided by Discogs and iterate over them
to find a possible source for the release.
"""
for format_s, source in SOURCES.items():
if any(format_s in f for f in formats):
return source
| 978
| 20
| 49
|
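The two module-level helpers are pure functions, so they can be sanity-checked in isolation; the inputs below are made-up examples of the Discogs naming quirks they handle:

assert sanitize_artist_name("Prince (2)") == "Prince"
assert sanitize_artist_name("Madonna*") == "Madonna"
assert parse_source(["CD", "Album"]) == "CD"
assert parse_source(['12"', "Vinyl", "45 RPM"]) == "Vinyl"
assert parse_source(["Cassette"]) is None   # unknown formats fall through to None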
8e0d50b482773beb3ff48ba3c18ff76723f48d7c
| 590
|
py
|
Python
|
scripts/relay.py
|
MDooley47/jam-house
|
f67c98e2dc3edd32fa26f7f95df03a27b5e0b3ff
|
[
"Apache-2.0"
] | null | null | null |
scripts/relay.py
|
MDooley47/jam-house
|
f67c98e2dc3edd32fa26f7f95df03a27b5e0b3ff
|
[
"Apache-2.0"
] | null | null | null |
scripts/relay.py
|
MDooley47/jam-house
|
f67c98e2dc3edd32fa26f7f95df03a27b5e0b3ff
|
[
"Apache-2.0"
] | null | null | null |
import RPi.GPIO as GPIO
| 16.857143
| 37
| 0.625424
|
import RPi.GPIO as GPIO
class Relay:
def __init__(self, pin):
self.pin = int(pin)
GPIO.setup(self.pin, GPIO.OUT)
def close(self):
GPIO.output(self.pin, GPIO.LOW)
return
def open(self):
GPIO.output(self.pin, GPIO.HIGH)
return
def status(self):
return bool(GPIO.input(self.pin))
def isClose(self):
return not self.status()
def isOpen(self):
return self.status()
def toggle(self):
if self.isOpen():
self.close()
elif self.isClose():
self.open()
return
def cleanup(self):
GPIO.cleanup(self.pin)
return
| 353
| -9
| 222
|
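A usage sketch for the Relay wrapper, assuming it runs on a Raspberry Pi with RPi.GPIO available and that the pin number matches whatever the relay board is actually wired to; GPIO.setmode must be called once before the constructor calls GPIO.setup:

import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)     # BCM numbering is an assumption; BOARD also works
relay = Relay(17)          # hypothetical pin
relay.close()              # drives the pin LOW
print(relay.isClose())     # True
relay.toggle()             # back to open (HIGH)
relay.cleanup()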
d4a0efc7601cb3d5e6ec66f5f5af3b78a9158768
| 1,309
|
py
|
Python
|
threaded_messages/listeners.py
|
MattBlack85/django-threaded-messages
|
da86dea6dd854f9ab37201d3953f9d028faa85e9
|
[
"MIT"
] | null | null | null |
threaded_messages/listeners.py
|
MattBlack85/django-threaded-messages
|
da86dea6dd854f9ab37201d3953f9d028faa85e9
|
[
"MIT"
] | null | null | null |
threaded_messages/listeners.py
|
MattBlack85/django-threaded-messages
|
da86dea6dd854f9ab37201d3953f9d028faa85e9
|
[
"MIT"
] | 1
|
2021-01-06T14:41:13.000Z
|
2021-01-06T14:41:13.000Z
|
import logging
from django.utils.html import strip_tags
from . import settings as sendgrid_settings
from .signals import message_composed
logger = logging.getLogger('threaded_messages')
if sendgrid_settings.THREADED_MESSAGES_USE_SENDGRID:
from sendgrid_parse_api.signals import email_received
else:
email_received = None
| 30.44186
| 98
| 0.663102
|
import logging
from django.utils.html import strip_tags
from . import settings as sendgrid_settings
from .signals import message_composed
logger = logging.getLogger('threaded_messages')
if sendgrid_settings.THREADED_MESSAGES_USE_SENDGRID:
from sendgrid_parse_api.signals import email_received
else:
email_received = None
def signal_received_email(sender, sma, app_id, html, text, from_field, **kwargs):
from .utils import reply_to_thread, strip_mail
logger.debug("Sendgrid signal receive: %s, %s, %s, %s, %s, %s" % (sender, sma, app_id,
html, repr(text), from_field))
if app_id == sendgrid_settings.THREADED_MESSAGES_ID:
body = ''
if text:
body = text
if not body:
body = html
if body:
body = strip_tags(body)
body = strip_mail(body)
thread = sma.content_object
reply_to_thread(thread, sma.user, body)
def start_listening():
if email_received:
logger.debug("Sendgrid start listening")
email_received.connect(signal_received_email, dispatch_uid="thm_reply")
from .utils import invalidate_count_cache
message_composed.connect(invalidate_count_cache, dispatch_uid="thm_composed")
| 928
| 0
| 46
|
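start_listening() only wires up signal handlers, so it has to be called once at startup. One possible (hypothetical) wiring, assuming the package is installed as a Django app named threaded_messages, is an AppConfig.ready hook:

from django.apps import AppConfig

class ThreadedMessagesConfig(AppConfig):
    name = "threaded_messages"

    def ready(self):
        from .listeners import start_listening
        start_listening()   # connects the Sendgrid reply and cache-invalidation handlers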
4e5e61e419f37f8dd598086847f0c15a320b4ff7
| 15,814
|
py
|
Python
|
fzutils/free_api_utils.py
|
superonesfazai/fzutils
|
a8aafaacd94af0001af2ab139f0aa8cbcb8b5eda
|
[
"MIT"
] | 11
|
2018-08-04T08:14:27.000Z
|
2021-09-03T09:00:33.000Z
|
fzutils/free_api_utils.py
|
superonesfazai/fzutils
|
a8aafaacd94af0001af2ab139f0aa8cbcb8b5eda
|
[
"MIT"
] | null | null | null |
fzutils/free_api_utils.py
|
superonesfazai/fzutils
|
a8aafaacd94af0001af2ab139f0aa8cbcb8b5eda
|
[
"MIT"
] | 8
|
2018-08-04T08:16:17.000Z
|
2019-05-05T09:17:35.000Z
|
# coding:utf-8
'''
@author = super_fazai
@File : free_api_utils.py
@connect : superonesfazai@gmail.com
'''
"""
Wrappers around a handful of free web APIs
"""
from pprint import pprint
import re
# from fzutils.ip_pools import tri_ip_pool
# from fzutils.spider.fz_requests import Requests
# from fzutils.common_utils import json_2_dict
# from fzutils.internet_utils import (
# get_base_headers,)
from .ip_pools import tri_ip_pool
from .spider.fz_requests import Requests
from .common_utils import json_2_dict
from .internet_utils import (
get_base_headers,)
__all__ = [
    'get_jd_one_goods_price_info', # get the price of a single JD.com product
    'get_express_info', # get parcel tracking info
    'get_phone_num_info', # get info about a phone number
    'get_baidu_baike_info', # get Baidu Baike (encyclopedia) info for a keyword
    # map
    'get_bd_map_shop_info_list_by_keyword_and_area_name', # search shops by keyword and area (Baidu Maps keyword-search API) [at most the first 400 results in testing]
    'get_gd_map_shop_info_list_by_keyword_and_area_name', # search shops by keyword and area (AMap keyword-search API)
    'get_gd_input_prompt_info', # get input suggestions for a keyword and city name (AMap API)
    'get_gd_reverse_geocode_info', # get reverse geocoding info for an address string (AMap API)
    'get_gd_map_shop_info_list_by_lng_and_lat_and_keyword', # search nearby shops primarily by longitude/latitude, with a keyword as an extra filter (AMap keyword-search API)
    'get_gd_map_shop_info_list_by_gd_id', # get the shop info list for a gd_id (usually the first hit) [found inaccurate in testing; id lookups often return unrelated merchants]
]
def get_jd_one_goods_price_info(goods_id) -> list:
'''
    Get the price info of a single JD.com product.
    :param goods_id: product id
:return:
'''
base_url = 'http://p.3.cn/prices/mgets'
params = (
('skuIds', 'J_' + goods_id),
)
body = Requests.get_url_body(
url=base_url,
use_proxy=False,
params=params)
return json_2_dict(body, default_res=[])
def get_express_info(express_type, express_id) -> dict:
'''
    Get parcel tracking info.
    express_type: note: pass the value from the mapping below
{
'申通': 'shentong',
'ems': 'ems',
'顺丰': 'shunfeng',
'圆通': 'yuantong',
'中通': 'zhongtong',
'韵达': 'yunda',
'天天': 'tiantian',
'汇通': 'huitongkuaidi',
'全峰': 'quanfengkuaidi',
'德邦': 'debangwuliu',
'宅急送': 'zhaijisong',
...
}
    :param express_type: courier company code (see mapping above)
    :param express_id: tracking number
:return:
'''
base_url = 'http://www.kuaidi100.com/query'
params = (
('type', express_type),
('postid', express_id),
)
body = Requests.get_url_body(
url=base_url,
use_proxy=False,
params=params,)
return json_2_dict(body)
def get_phone_num_info(phone_num) -> dict:
'''
    Get carrier/region info for a phone number.
    :param phone_num: phone number
:return:
'''
url = 'https://tcc.taobao.com/cc/json/mobile_tel_segment.htm'
params = (
('tel', str(phone_num)),
)
body = Requests.get_url_body(
url=url,
params=params,
use_proxy=False)
try:
res = re.compile('__GetZoneResult_ = (.*)').findall(body)[0]
return json_2_dict(res)
except IndexError:
return {}
def get_baidu_baike_info(keyword, bk_length=1000) -> dict:
'''
    Get Baidu Baike (encyclopedia) info for a keyword.
:param keyword:
:return:
'''
url = 'http://baike.baidu.com/api/openapi/BaikeLemmaCardApi'
params = (
('scope', '103'),
('format', 'json'),
('appid', '379020'),
('bk_key', str(keyword)),
('bk_length', str(bk_length)),
)
body = Requests.get_url_body(
url=url,
params=params,
use_proxy=False)
return json_2_dict(body)
def get_bd_map_shop_info_list_by_keyword_and_area_name(ak:str,
keyword:str,
area_name:str,
page_num:int,
page_size:int=20,
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
logger=None,) -> list:
"""
    Search shops by keyword and area (Baidu Maps keyword-search API) [at most the first 400 results in testing]
    :param ak: the ak (API key) issued for Baidu Maps
    :param keyword: eg: '鞋子' (shoes)
    :param area_name: eg: '杭州' (Hangzhou); the area to search, usually a province, city or district
    :param page_num: starts at 1, max 20
    :param page_size: fixed
:param ip_pool_type:
:param num_retries:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('query', str(keyword)),
('region', str(area_name)),
('output', 'json'),
('ak', str(ak)),
('page_num', str(page_num)),
('page_size', str(page_size)),
)
url = 'http://api.map.baidu.com/place/v2/search'
body = Requests.get_url_body(
url=url,
headers=headers,
params=params,
use_proxy=use_proxy,
ip_pool_type=ip_pool_type,
num_retries=num_retries,
timeout=timeout,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('results', [])
# pprint(data)
return data
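# Usage sketch (added for illustration; 'your_bd_ak' is a placeholder for a real Baidu Maps ak):
#   shops = get_bd_map_shop_info_list_by_keyword_and_area_name(
#       ak='your_bd_ak', keyword='鞋子', area_name='杭州', page_num=1, use_proxy=False)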
def get_gd_map_shop_info_list_by_keyword_and_area_name(gd_key:str,
keyword:str,
area_name:str,
page_num: int,
page_size: int=20,
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
children=0,
extensions='all',
poi_type='',
logger=None,) -> list:
"""
    Search shops by keyword and area (Amap keyword-search API)
    :param gd_key: the API key requested from Amap
    :param keyword: keyword, eg: '鞋子'
    :param area_name: eg: '杭州'; the area to search, a city name
    :param page_num: max page number is 100
    :param page_size: default '20'
    :param use_proxy:
    :param ip_pool_type:
    :param num_retries:
    :param timeout:
    :param children: show child POIs hierarchically, 0 or 1
    :param extensions: controls how much detail the result contains
    :param poi_type: POI type to query, eg: '061205'; may be left empty!
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('key', str(gd_key)),
('keywords', str(keyword)),
('types', str(poi_type)),
('city', str(area_name)),
('citylimit', 'true'),
('children', str(children)),
('offset', str(page_size)),
('page', str(page_num)),
('extensions', str(extensions)),
)
url = 'http://restapi.amap.com/v3/place/text'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('pois', [])
# pprint(data)
return data
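# Usage sketch (added for illustration; 'your_gd_key' is a placeholder for a real Amap key):
#   shops = get_gd_map_shop_info_list_by_keyword_and_area_name(
#       gd_key='your_gd_key', keyword='鞋子', area_name='杭州', page_num=1, use_proxy=False)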
def get_gd_input_prompt_info(gd_key:str,
keyword,
city_name:str,
poi_type='',
lng:float=0.,
lat:float=0.,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
use_proxy=True,
logger=None,) -> list:
"""
    Get input suggestions for a keyword and city name (Amap API)
    :param gd_key: the API key requested from Amap
:param keyword: eg: '美食'
:param city_name: eg: '杭州'
:param poi_type: eg: '050301'
:param lng:
:param lat:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
    # eg: '116.481488,39.990464' (longitude,latitude)
location = ','.join([str(lng), str(lat)]) if lng != 0. or lat != 0. else ''
params = (
('key', str(gd_key)),
('keywords', str(keyword)),
('type', poi_type),
('location', location),
('city', str(city_name)),
('datatype', 'all'),
)
url= 'https://restapi.amap.com/v3/assistant/inputtips'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
logger=logger,).get('tips', [])
# pprint(data)
return data
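# Usage sketch (added for illustration; 'your_gd_key' is a placeholder for a real Amap key):
#   tips = get_gd_input_prompt_info(gd_key='your_gd_key', keyword='美食', city_name='杭州', use_proxy=False)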
def get_gd_reverse_geocode_info(gd_key:str,
address:str,
city_name:str,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
use_proxy=True,
logger=None,) -> list:
"""
    Reverse-geocode an address string (Amap API)
:param gd_key:
:param address: eg: '方恒国际中心A座'
:param city_name: eg: '北京'
:param ip_pool_type:
:param num_retries:
:param timeout:
:param use_proxy:
:param logger:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('key', str(gd_key)),
('address', str(address)),
('city', str(city_name)),
)
url= 'https://restapi.amap.com/v3/geocode/geo'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
logger=logger,).get('geocodes', [])
# pprint(data)
return data
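# Usage sketch (added for illustration; 'your_gd_key' is a placeholder for a real Amap key):
#   geocodes = get_gd_reverse_geocode_info(
#       gd_key='your_gd_key', address='方恒国际中心A座', city_name='北京', use_proxy=False)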
def get_gd_map_shop_info_list_by_lng_and_lat_and_keyword(gd_key:str,
lng:float,
lat:float,
keyword:str='',
radius:int=1000,
page_num:int=1,
page_size:int=20,
poi_type='',
extensions='all',
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
logger=None,) -> list:
"""
    Search nearby shops by longitude/latitude (primary criteria) plus keyword (extra filter) (Amap keyword-search API)
    :param gd_key: the API key requested from Amap
    :param lng: longitude
    :param lat: latitude
    :param keyword: keyword, eg: '鞋子'; defaults to empty!
    :param radius: radius (if the known coordinates already pin down one shop, set radius=100 to sharpen the returned match!!)
    :param page_num: max page number is 100
    :param page_size: default '20'
    :param poi_type: POI type to query, eg: '061205'; may be left empty!
    :param extensions: controls how much detail the result contains
:param use_proxy:
:param ip_pool_type:
:param num_retries:
:param timeout:
:param logger:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('key', str(gd_key)),
('location', ','.join([str(lng), str(lat)])),
('keywords', str(keyword)),
('types', str(poi_type)),
('radius', str(radius)),
('offset', str(page_size)),
('page', str(page_num)),
('extensions', str(extensions)),
)
url = 'https://restapi.amap.com/v3/place/around'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('pois', [])
# pprint(data)
return data
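# Usage sketch (added for illustration; 'your_gd_key' is a placeholder, the coordinates are example values):
#   pois = get_gd_map_shop_info_list_by_lng_and_lat_and_keyword(
#       gd_key='your_gd_key', lng=120.153576, lat=30.287459, keyword='鞋子', radius=500, use_proxy=False)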
def get_gd_map_shop_info_list_by_gd_id(gd_key:str,
gd_id:str,
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
logger=None,) -> list:
"""
    Get the shop info list for a given gd_id (usually the first hit) [testing showed this to be inaccurate: querying by id often returns unrelated merchants]
    :param gd_key: the API key requested from Amap
:param gd_id: eg: 'B0FFIR6P0B'
:param use_proxy:
:param ip_pool_type:
:param num_retries:
:param timeout:
:param logger:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('id', gd_id),
('output', ''),
('key', gd_key),
)
url = 'https://restapi.amap.com/v3/place/detail'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('pois', [])
# pprint(data)
return data
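# Usage sketch (added for illustration; 'your_gd_key' is a placeholder, the id reuses the docstring example):
#   pois = get_gd_map_shop_info_list_by_gd_id(gd_key='your_gd_key', gd_id='B0FFIR6P0B', use_proxy=False)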
| 33.014614
| 139
| 0.496775
|
# coding:utf-8
'''
@author = super_fazai
@File : free_api_utils.py
@connect : superonesfazai@gmail.com
'''
"""
Wrappers around a few free public API endpoints.
"""
from pprint import pprint
import re
# from fzutils.ip_pools import tri_ip_pool
# from fzutils.spider.fz_requests import Requests
# from fzutils.common_utils import json_2_dict
# from fzutils.internet_utils import (
# get_base_headers,)
from .ip_pools import tri_ip_pool
from .spider.fz_requests import Requests
from .common_utils import json_2_dict
from .internet_utils import (
get_base_headers,)
__all__ = [
    'get_jd_one_goods_price_info',                              # get the price of a single JD.com item
    'get_express_info',                                         # get parcel tracking info
    'get_phone_num_info',                                       # get info about a phone number
    'get_baidu_baike_info',                                     # get the Baidu Baike entry for a keyword
    # map
    'get_bd_map_shop_info_list_by_keyword_and_area_name',       # search shops by keyword and area (Baidu Maps keyword-search API) [at most the first 400 results in testing]
    'get_gd_map_shop_info_list_by_keyword_and_area_name',       # search shops by keyword and area (Amap keyword-search API)
    'get_gd_input_prompt_info',                                 # get input suggestions for a keyword and city name (Amap API)
    'get_gd_reverse_geocode_info',                              # reverse-geocode an address string (Amap API)
    'get_gd_map_shop_info_list_by_lng_and_lat_and_keyword',     # search nearby shops by longitude/latitude (primary criteria) plus keyword (extra filter) (Amap keyword-search API)
    'get_gd_map_shop_info_list_by_gd_id',                       # get the shop info list for a given gd_id (usually the first hit) [testing showed this to be inaccurate: unrelated merchants are often returned]
]
def get_jd_one_goods_price_info(goods_id) -> list:
'''
    Get the price of a single JD.com item
    :param goods_id: item id
:return:
'''
base_url = 'http://p.3.cn/prices/mgets'
params = (
('skuIds', 'J_' + goods_id),
)
body = Requests.get_url_body(
url=base_url,
use_proxy=False,
params=params)
return json_2_dict(body, default_res=[])
def get_express_info(express_type, express_id) -> dict:
'''
    Get parcel tracking info
    express_type: note: pass the value from the mapping below
{
'申通': 'shentong',
'ems': 'ems',
'顺丰': 'shunfeng',
'圆通': 'yuantong',
'中通': 'zhongtong',
'韵达': 'yunda',
'天天': 'tiantian',
'汇通': 'huitongkuaidi',
'全峰': 'quanfengkuaidi',
'德邦': 'debangwuliu',
'宅急送': 'zhaijisong',
...
}
    :param express_type: courier company name
    :param express_id: tracking number
:return:
'''
base_url = 'http://www.kuaidi100.com/query'
params = (
('type', express_type),
('postid', express_id),
)
body = Requests.get_url_body(
url=base_url,
use_proxy=False,
params=params,)
return json_2_dict(body)
def get_phone_num_info(phone_num) -> dict:
'''
    Get info about a phone number
    :param phone_num: phone number
:return:
'''
url = 'https://tcc.taobao.com/cc/json/mobile_tel_segment.htm'
params = (
('tel', str(phone_num)),
)
body = Requests.get_url_body(
url=url,
params=params,
use_proxy=False)
try:
res = re.compile('__GetZoneResult_ = (.*)').findall(body)[0]
return json_2_dict(res)
except IndexError:
return {}
def get_baidu_baike_info(keyword, bk_length=1000) -> dict:
'''
    Get the Baidu Baike entry for a keyword
:param keyword:
:return:
'''
url = 'http://baike.baidu.com/api/openapi/BaikeLemmaCardApi'
params = (
('scope', '103'),
('format', 'json'),
('appid', '379020'),
('bk_key', str(keyword)),
('bk_length', str(bk_length)),
)
body = Requests.get_url_body(
url=url,
params=params,
use_proxy=False)
return json_2_dict(body)
def get_bd_map_shop_info_list_by_keyword_and_area_name(ak:str,
keyword:str,
area_name:str,
page_num:int,
page_size:int=20,
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
logger=None,) -> list:
"""
    Search shops by keyword and area (Baidu Maps keyword-search API) [at most the first 400 results in testing]
    :param ak: the ak requested from Baidu Maps
    :param keyword: eg: '鞋子'
    :param area_name: eg: '杭州'; the area to search, usually a province, city or district
    :param page_num: starts at 1, max 20
    :param page_size: fixed
:param ip_pool_type:
:param num_retries:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('query', str(keyword)),
('region', str(area_name)),
('output', 'json'),
('ak', str(ak)),
('page_num', str(page_num)),
('page_size', str(page_size)),
)
url = 'http://api.map.baidu.com/place/v2/search'
body = Requests.get_url_body(
url=url,
headers=headers,
params=params,
use_proxy=use_proxy,
ip_pool_type=ip_pool_type,
num_retries=num_retries,
timeout=timeout,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('results', [])
# pprint(data)
return data
def get_gd_map_shop_info_list_by_keyword_and_area_name(gd_key:str,
keyword:str,
area_name:str,
page_num: int,
page_size: int=20,
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
children=0,
extensions='all',
poi_type='',
logger=None,) -> list:
"""
    Search shops by keyword and area (Amap keyword-search API)
    :param gd_key: the API key requested from Amap
    :param keyword: keyword, eg: '鞋子'
    :param area_name: eg: '杭州'; the area to search, a city name
    :param page_num: max page number is 100
    :param page_size: default '20'
    :param use_proxy:
    :param ip_pool_type:
    :param num_retries:
    :param timeout:
    :param children: show child POIs hierarchically, 0 or 1
    :param extensions: controls how much detail the result contains
    :param poi_type: POI type to query, eg: '061205'; may be left empty!
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('key', str(gd_key)),
('keywords', str(keyword)),
('types', str(poi_type)),
('city', str(area_name)),
('citylimit', 'true'),
('children', str(children)),
('offset', str(page_size)),
('page', str(page_num)),
('extensions', str(extensions)),
)
url = 'http://restapi.amap.com/v3/place/text'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('pois', [])
# pprint(data)
return data
def get_gd_input_prompt_info(gd_key:str,
keyword,
city_name:str,
poi_type='',
lng:float=0.,
lat:float=0.,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
use_proxy=True,
logger=None,) -> list:
"""
    Get input suggestions for a keyword and city name (Amap API)
    :param gd_key: the API key requested from Amap
:param keyword: eg: '美食'
:param city_name: eg: '杭州'
:param poi_type: eg: '050301'
:param lng:
:param lat:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
    # eg: '116.481488,39.990464' (longitude,latitude)
location = ','.join([str(lng), str(lat)]) if lng != 0. or lat != 0. else ''
params = (
('key', str(gd_key)),
('keywords', str(keyword)),
('type', poi_type),
('location', location),
('city', str(city_name)),
('datatype', 'all'),
)
url= 'https://restapi.amap.com/v3/assistant/inputtips'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
logger=logger,).get('tips', [])
# pprint(data)
return data
def get_gd_reverse_geocode_info(gd_key:str,
address:str,
city_name:str,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
use_proxy=True,
logger=None,) -> list:
"""
    Reverse-geocode an address string (Amap API)
:param gd_key:
:param address: eg: '方恒国际中心A座'
:param city_name: eg: '北京'
:param ip_pool_type:
:param num_retries:
:param timeout:
:param use_proxy:
:param logger:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('key', str(gd_key)),
('address', str(address)),
('city', str(city_name)),
)
url= 'https://restapi.amap.com/v3/geocode/geo'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
logger=logger,).get('geocodes', [])
# pprint(data)
return data
def get_gd_map_shop_info_list_by_lng_and_lat_and_keyword(gd_key:str,
lng:float,
lat:float,
keyword:str='',
radius:int=1000,
page_num:int=1,
page_size:int=20,
poi_type='',
extensions='all',
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
logger=None,) -> list:
"""
    Search nearby shops by longitude/latitude (primary criteria) plus keyword (extra filter) (Amap keyword-search API)
    :param gd_key: the API key requested from Amap
    :param lng: longitude
    :param lat: latitude
    :param keyword: keyword, eg: '鞋子'; defaults to empty!
    :param radius: radius (if the known coordinates already pin down one shop, set radius=100 to sharpen the returned match!!)
    :param page_num: max page number is 100
    :param page_size: default '20'
    :param poi_type: POI type to query, eg: '061205'; may be left empty!
    :param extensions: controls how much detail the result contains
:param use_proxy:
:param ip_pool_type:
:param num_retries:
:param timeout:
:param logger:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('key', str(gd_key)),
('location', ','.join([str(lng), str(lat)])),
('keywords', str(keyword)),
('types', str(poi_type)),
('radius', str(radius)),
('offset', str(page_size)),
('page', str(page_num)),
('extensions', str(extensions)),
)
url = 'https://restapi.amap.com/v3/place/around'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('pois', [])
# pprint(data)
return data
def get_gd_map_shop_info_list_by_gd_id(gd_key:str,
gd_id:str,
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
logger=None,) -> list:
"""
    Get the shop info list for a given gd_id (usually the first hit) [testing showed this to be inaccurate: querying by id often returns unrelated merchants]
    :param gd_key: the API key requested from Amap
:param gd_id: eg: 'B0FFIR6P0B'
:param use_proxy:
:param ip_pool_type:
:param num_retries:
:param timeout:
:param logger:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('id', gd_id),
('output', ''),
('key', gd_key),
)
url = 'https://restapi.amap.com/v3/place/detail'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('pois', [])
# pprint(data)
return data
| 0
| 0
| 0
|
b24c57ae9801978655a2dd8c90c7f52a6c81983c
| 377
|
py
|
Python
|
tests/test_axpy_weather.py
|
AxxAxx/axpy_weather
|
2714397968b55b63b784ce08a2df0ade08aa2008
|
[
"MIT"
] | null | null | null |
tests/test_axpy_weather.py
|
AxxAxx/axpy_weather
|
2714397968b55b63b784ce08a2df0ade08aa2008
|
[
"MIT"
] | null | null | null |
tests/test_axpy_weather.py
|
AxxAxx/axpy_weather
|
2714397968b55b63b784ce08a2df0ade08aa2008
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_axpy_weather
----------------------------------
Tests for `axpy_weather` module.
"""
import sys
import unittest
from axpy_weather import axpy_weather
| 13
| 42
| 0.599469
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_axpy_weather
----------------------------------
Tests for `axpy_weather` module.
"""
import sys
import unittest
from axpy_weather import axpy_weather
class TestAxpy_weather(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_000_something(self):
pass
| 40
| 21
| 104
|
dad37465db8abb220a6642fa8e0c3fe096021b1a
| 446
|
py
|
Python
|
setup.py
|
GeorgianaElena/jupyterhub-configurator
|
f356175732d487c520415b84368b3368397d8b60
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
GeorgianaElena/jupyterhub-configurator
|
f356175732d487c520415b84368b3368397d8b60
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
GeorgianaElena/jupyterhub-configurator
|
f356175732d487c520415b84368b3368397d8b60
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name="jupyterhub-configurator",
version="1.0",
packages=find_packages(),
license="3-BSD",
author="yuvipanda",
author_email="yuvipanda@gmail.com",
install_requires=["tornado", "aiohttp", "jupyterhub", "deepmerge", "pluggy"],
include_package_data=True,
entry_points={
"jupyterhub_configurator": ["z2jh = jupyterhub_configurator.schemas.z2jh"]
},
)
| 27.875
| 82
| 0.686099
|
from setuptools import setup, find_packages
setup(
name="jupyterhub-configurator",
version="1.0",
packages=find_packages(),
license="3-BSD",
author="yuvipanda",
author_email="yuvipanda@gmail.com",
install_requires=["tornado", "aiohttp", "jupyterhub", "deepmerge", "pluggy"],
include_package_data=True,
entry_points={
"jupyterhub_configurator": ["z2jh = jupyterhub_configurator.schemas.z2jh"]
},
)
| 0
| 0
| 0
|
85bc4000ac9a7feae9aa1e58301dae7af7b354a8
| 2,767
|
py
|
Python
|
src/austin_heller_repo/component_manager.py
|
AustinHellerRepo/ComponentManager
|
bd347d87cb0c19acf07419ba8e8c30d4fa6f6027
|
[
"MIT"
] | null | null | null |
src/austin_heller_repo/component_manager.py
|
AustinHellerRepo/ComponentManager
|
bd347d87cb0c19acf07419ba8e8c30d4fa6f6027
|
[
"MIT"
] | null | null | null |
src/austin_heller_repo/component_manager.py
|
AustinHellerRepo/ComponentManager
|
bd347d87cb0c19acf07419ba8e8c30d4fa6f6027
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import List, Tuple, Dict
try:
import urequests as requests
except ImportError:
import requests
try:
import ujson as json
except ImportError:
import json
| 28.234694
| 152
| 0.729671
|
from __future__ import annotations
from typing import List, Tuple, Dict
try:
import urequests as requests
except ImportError:
import requests
try:
import ujson as json
except ImportError:
import json
class MethodTypeEnum():
Get = 0
Post = 1
class ApiInterface():
def __init__(self, *, api_base_url: str):
self.__api_base_url = api_base_url
def _get_json_result_from_url(self, *, method_type, url: str, arguments_json_object: dict) -> dict:
print("Trying to " + str(method_type) + " to \"" + url + "\"...")
if method_type == MethodTypeEnum.Get:
_response = requests.get(url, json=arguments_json_object)
elif method_type == MethodTypeEnum.Post:
_response = requests.post(url, json=arguments_json_object)
else:
raise NotImplementedError()
if _response.status_code != 200:
raise Exception("Unexpected status code: " + str(_response.status_code) + ": " + str(_response.reason) + ". Error: \"" + str(_response.text) + "\".")
else:
_json_response = _response.json()
if "is_successful" not in _json_response:
raise Exception("Unexpected missing key \"is_successful\": " + str(_json_response))
elif "response" not in _json_response:
raise Exception("Unexpected missing key \"response\": " + str(_json_response))
elif "error" not in _json_response:
raise Exception("Unexpected missing key \"error\": " + str(_json_response))
else:
_is_successful = _json_response["is_successful"]
_response_value = _json_response["response"]
_error = _json_response["error"]
if not _is_successful:
raise Exception("Error from messaging system: \"" + str(_error) + "\".")
else:
return _response_value
def _get_formatted_url(self, *, url_part: str) -> str:
return self.__api_base_url + url_part
class ComponentManagerApiInterface(ApiInterface):
def __init__(self, *, component_manager_api_base_url: str):
super().__init__(
api_base_url=component_manager_api_base_url
)
def get_health(self) -> Dict:
return self._get_json_result_from_url(
method_type=MethodTypeEnum.Get,
url=self._get_formatted_url(
url_part="/v1/test/health"
),
arguments_json_object={}
)
def get_docker_api_specification(self) -> Dict:
return self._get_json_result_from_url(
method_type=MethodTypeEnum.Post,
url=self._get_formatted_url(
url_part="/v1/api/get_docker_api_specification"
),
arguments_json_object={}
)
def get_component_specification_by_component_uuid(self, *, component_uuid: str) -> Dict:
return self._get_json_result_from_url(
method_type=MethodTypeEnum.Post,
url=self._get_formatted_url(
url_part="/v1/api/get_component_specification_by_component_uuid"
),
arguments_json_object={
"component_uuid": component_uuid
}
)
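# Usage sketch (added for illustration; the base URL is a placeholder value):
#   api = ComponentManagerApiInterface(component_manager_api_base_url='http://localhost:8080')
#   print(api.get_health())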
| 2,272
| 49
| 237
|
067659e95365ddba0dd9591d5eb66a3a527b4438
| 100
|
py
|
Python
|
python/hackerrank/conditional code/which one is greater/task.py
|
3keepmovingforward3/ENGR1102
|
b4f38a70560fc695d70706279047b1dec9f5c7f4
|
[
"MIT"
] | null | null | null |
python/hackerrank/conditional code/which one is greater/task.py
|
3keepmovingforward3/ENGR1102
|
b4f38a70560fc695d70706279047b1dec9f5c7f4
|
[
"MIT"
] | null | null | null |
python/hackerrank/conditional code/which one is greater/task.py
|
3keepmovingforward3/ENGR1102
|
b4f38a70560fc695d70706279047b1dec9f5c7f4
|
[
"MIT"
] | null | null | null |
# Start your code below (tip: Make sure to indent your code)
| 20
| 64
| 0.71
|
def greater_if_else(num1, num2):
    # Start your code below (tip: Make sure to indent your code)
    # Minimal completion so the stub runs: return the greater number.
    return num1 if num1 > num2 else num2
| 11
| 0
| 23
|
ca8820199ef0c7948e24a842fd58013d23375baa
| 2,451
|
py
|
Python
|
pmst/tests/test_component.py
|
talonchandler/pmst
|
c7d4d00a9a377726f8996cb416970037af92c40a
|
[
"MIT"
] | null | null | null |
pmst/tests/test_component.py
|
talonchandler/pmst
|
c7d4d00a9a377726f8996cb416970037af92c40a
|
[
"MIT"
] | null | null | null |
pmst/tests/test_component.py
|
talonchandler/pmst
|
c7d4d00a9a377726f8996cb416970037af92c40a
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("../../")
from unittest import TestCase
from pmst.geometry import Point, Ray
from pmst.component import Lens
from pmst.microscope import Microscope
import pmst.source
import numpy as np
# self.assertTrue(self.s.ray_list.get_ray(1) == Ray(Point(0, .5, 1), Point(0, .5, 2)))
# self.assertTrue(self.s.ray_list.get_ray(2) == Ray(Point(.1, .1, 1), Point(.1, .1, 2)))
# Plane source converges
| 35.014286
| 95
| 0.571195
|
import sys
sys.path.append("../../")
from unittest import TestCase
from pmst.geometry import Point, Ray
from pmst.component import Lens
from pmst.microscope import Microscope
import pmst.source
import numpy as np
class TestPixel(TestCase):
def setUp(self):
origin = np.array((0, 0, 0))
normal = np.array((0, 0))
dimensions = np.array((.1, .2))
class TestLensIsoSourceAtFocal(TestCase):
def setUp(self):
self.r0 = Ray(Point(0, 0, 0), Point(0, 0, 1))
self.r1 = Ray(Point(0, 0, 0), Point(0, 0.1, 1))
self.r1 = Ray(Point(0, 0, 0), Point(0, 0.5, 1)) # numerical error here
self.r2 = Ray(Point(0, 0, 0), Point(0.1, 0.1, 1))
self.ray_list = [self.r0, self.r1, self.r2]
self.s = pmst.source.RayListSource(self.ray_list)
self.s.generate_rays()
self.m = Microscope(source=self.s)
self.l = Lens(Point(0, 0, 1), n=1.5, normal=Point(0, 0, 2), f=1,
radius=1.0)
self.m.add_component(self.l)
self.m.simulate()
def test_Lens(self):
print('Focal', self.s.ray_list)
self.assertTrue(self.s.n_rays == 3)
self.assertTrue(self.s.ray_list.get_ray(0) == self.r0)
# self.assertTrue(self.s.ray_list.get_ray(1) == Ray(Point(0, .5, 1), Point(0, .5, 2)))
# self.assertTrue(self.s.ray_list.get_ray(2) == Ray(Point(.1, .1, 1), Point(.1, .1, 2)))
# Plane source converges
class TestLensPlaneSourceAtFocal(TestCase):
def setUp(self):
self.r0 = Ray(Point(0, 0, 0), Point(0, 0, 1))
self.r1 = Ray(Point(0, .5, 0), Point(0, .5, 1))
self.r2 = Ray(Point(.1, .1, 0), Point(.1, .1, 1))
self.ray_list = [self.r0, self.r1, self.r2]
self.s = pmst.source.RayListSource(self.ray_list)
self.s.generate_rays()
self.m = Microscope(source=self.s)
self.l = Lens(Point(0, 0, 1), n=1.5, normal=Point(0, 0, 2), f=1,
radius=1.0)
self.m.add_component(self.l)
self.m.simulate()
def test_Lens(self):
print('Plane', self.s.ray_list)
self.assertTrue(self.s.n_rays == 3)
self.assertTrue(self.s.ray_list.get_ray(0) == Ray(Point(0, 0, 1), Point(0, 0, 2)))
self.assertTrue(self.s.ray_list.get_ray(1) == Ray(Point(0, .5, 1), Point(0, 0, 2)))
self.assertTrue(self.s.ray_list.get_ray(2) == Ray(Point(.1, .1, 1), Point(0, 0, 2)))
| 1,759
| 47
| 203
|
19c0a3b65b67a59f869878709be52202016b86ff
| 3,021
|
py
|
Python
|
src/bmeg/utils.py
|
bmeg/bmeg-etl
|
3efa28a7775d6defd77457838e92817a2fbc9e99
|
[
"MIT"
] | 1
|
2022-03-08T22:06:35.000Z
|
2022-03-08T22:06:35.000Z
|
src/bmeg/utils.py
|
bmeg/bmeg-etl
|
3efa28a7775d6defd77457838e92817a2fbc9e99
|
[
"MIT"
] | 191
|
2018-07-09T20:49:34.000Z
|
2021-02-09T18:44:28.000Z
|
src/bmeg/utils.py
|
bmeg/bmeg-etl
|
3efa28a7775d6defd77457838e92817a2fbc9e99
|
[
"MIT"
] | null | null | null |
import os
import inspect
import typing
import threading
from contextlib import suppress
from functools import wraps
def enforce_types(callable):
"""
From:
https://stackoverflow.com/questions/50563546/validating-detailed-types-in-python-dataclasses
"""
spec = inspect.getfullargspec(callable)
if inspect.isclass(callable):
callable.__init__ = decorate(callable.__init__)
return callable
return decorate(callable)
| 27.216216
| 96
| 0.586892
|
import os
import inspect
import typing
import threading
from contextlib import suppress
from functools import wraps
def ensure_directory(*args):
path = os.path.join(*args)
if os.path.isfile(path):
raise Exception(
"Emitter output directory %s is a regular file", path)
if not os.path.exists(path):
os.makedirs(path)
def enforce_types(callable):
"""
From:
https://stackoverflow.com/questions/50563546/validating-detailed-types-in-python-dataclasses
"""
spec = inspect.getfullargspec(callable)
def check_types(*args, **kwargs):
parameters = dict(zip(spec.args, args))
parameters.update(kwargs)
# allow thread to control if check skipped
try:
if threading.local().skip_check_types:
return
except AttributeError:
pass
for name, value in parameters.items():
# Assume un-annotated parameters can be any type
with suppress(KeyError):
type_hint = spec.annotations[name]
if isinstance(type_hint, typing._SpecialForm):
# No check for typing.Any, typing.Union, typing.ClassVar
# (without parameters)
continue
try:
actual_type = type_hint.__origin__
except AttributeError:
actual_type = type_hint
if isinstance(actual_type, typing._SpecialForm):
# case of typing.Union[…] or typing.ClassVar[…]
actual_type = type_hint.__args__
if not isinstance(value, actual_type):
raise TypeError(
"Unexpected type for '{}' (expected {} but found {})".
format(name, type_hint, type(value))
)
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
check_types(*args, **kwargs)
return func(*args, **kwargs)
return wrapper
if inspect.isclass(callable):
callable.__init__ = decorate(callable.__init__)
return callable
return decorate(callable)
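# Usage sketch (added for illustration): enforce_types can decorate a function or a class and
# raises TypeError when an annotated argument receives a value of the wrong type.
#   @enforce_types
#   def scale(value: int, factor: float) -> float:
#       return value * factor
#   scale(2, 1.5)    # ok
#   scale("2", 1.5)  # raises TypeError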
def set_gid(obj, gid):
object.__setattr__(obj, "gid", gid)
def get_tcga_individual_barcode(id):
parts = id.split("-")
return "-".join(parts[0:3])
def get_tcga_sample_barcode(id):
parts = id.split("-")
return "-".join(parts[0:4])
def get_tcga_portion_barcode(id):
parts = id.split("-")
parts[5] = parts[5][:-1]
return "-".join(parts[0:5])
def get_tcga_analyte_barcode(id):
parts = id.split("-")
return "-".join(parts[0:5])
def get_tcga_aliquot_barcode(id):
parts = id.split("-")
return "-".join(parts[0:7])
def tcga_barcode_is_tumor(id):
    parts = id.split("-")
    # the sample-type code must be converted to int for the numeric comparison below
    sample_number = int(parts[4][:-1])
    return sample_number < 10
def tcga_barcode_is_normal(id):
    parts = id.split("-")
    sample_number = int(parts[4][:-1])
    return sample_number >= 10
| 2,295
| 0
| 261
|
35a9845cf13f68f6956c51a6bcfbdd68d916ace4
| 568
|
py
|
Python
|
voting/apps/survey/signals.py
|
nurikou/voting_back
|
de54218f01095f5090d490cabf32a86b1e608925
|
[
"MIT"
] | null | null | null |
voting/apps/survey/signals.py
|
nurikou/voting_back
|
de54218f01095f5090d490cabf32a86b1e608925
|
[
"MIT"
] | null | null | null |
voting/apps/survey/signals.py
|
nurikou/voting_back
|
de54218f01095f5090d490cabf32a86b1e608925
|
[
"MIT"
] | null | null | null |
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Vote
from .serializers import VoteSerializer
from asgiref.sync import async_to_sync
import channels.layers
@receiver(post_save, sender=Vote)
| 31.555556
| 78
| 0.741197
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Vote
from .serializers import VoteSerializer
from asgiref.sync import async_to_sync
import channels.layers
@receiver(post_save, sender=Vote)
def send_notification(sender, instance, created, **kwargs):
if created:
notification = VoteSerializer(instance=instance).data
layer = channels.layers.get_channel_layer()
async_to_sync(layer.group_send)(
"notification", {"type": "notification", "message": notification},
)
| 298
| 0
| 22
|
aadac3c14b4d6bf52fd38741ccf5cbd8ff170fdc
| 5,352
|
py
|
Python
|
check_mariadb_slaves.py
|
flickerfly/check_mariadb_slaves
|
b9917c5a097a9806d19caee83c5644afab924366
|
[
"MIT"
] | null | null | null |
check_mariadb_slaves.py
|
flickerfly/check_mariadb_slaves
|
b9917c5a097a9806d19caee83c5644afab924366
|
[
"MIT"
] | null | null | null |
check_mariadb_slaves.py
|
flickerfly/check_mariadb_slaves
|
b9917c5a097a9806d19caee83c5644afab924366
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""MariaDB slave status checker"""
import sys
import argparse
import MySQLdb
class NagiosPlugin(object):
"""Nagios Plugin base class"""
class SlaveStatusCheck(NagiosPlugin):
"""Class to help us run slave status queries against MariaDB"""
REPLICATION_LAG_MODE = 'replication_lag'
SLAVESQL_MODE = 'slave_sql'
SLAVEIO_MODE = 'slave_io'
MODES = (REPLICATION_LAG_MODE,
SLAVESQL_MODE,
SLAVEIO_MODE)
def run_check(self):
"""Execute the check against the given mode"""
check_fn = getattr(self, self.mode)
check_fn()
def replication_lag(self):
"""Check replication lag thresholds"""
lag = self._slave_status.get('Seconds_Behind_Master')
if lag is None:
self.unknown_state("No replication lag reported")
if not self.warning or not self.critical:
self.unknown_state("Warning and critical thresholds undefined")
lag = int(lag)
warning = int(self.warning)
critical = int(self.critical)
lag_performance_msg = "log={0}s;{1};{2};0".format(lag,warning,critical)
        lag_display_msg = "Slave is {0} seconds behind master".format(lag)
lag_msg = "{0} | {1}".format(lag_display_msg,lag_performance_msg)
if lag >= warning and lag < critical:
self.warning_state(lag_msg)
elif lag >= critical:
self.critical_state(lag_msg)
self.ok_state(lag_msg)
def slave_sql(self):
"""Check that Slave_SQL_Running = Yes"""
if self._slave_status.get('Slave_SQL_Running') == "No":
msg = "Slave sql is not running. Last error: {0}".format(
self._slave_status.get('Last_SQL_Error'))
self.critical_state(msg)
self.ok_state("Slave sql is running")
def slave_io(self):
"""Check that Slave_IO_Running = Yes"""
if self._slave_status.get('Slave_IO_Running') == "No":
msg = "Slave io is not running. Last error: {0}".format(
self._slave_status.get('Last_IO_Error'))
self.critical_state(msg)
self.ok_state("Slave io is running")
def get_slave_status(self):
"""Run the query!"""
try:
sql = 'SHOW SLAVE "{0}" STATUS'.format(self.connection_name)
conn = None
conn = MySQLdb.Connection(
self.hostname,
self.username,
self.password)
curs = conn.cursor(MySQLdb.cursors.DictCursor)
curs.execute(sql)
conn.commit()
self._slave_status = curs.fetchall()[0]
if self.verbose:
print self._slave_status
except MySQLdb.Error, exc:
msg = "{0}: {1}".format(exc.args[0], exc.args[1])
self.unknown_state(msg)
finally:
if conn:
conn.close()
def main(args=None):
"""starter method"""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description='MariaDB slave status checker')
parser.add_argument('--hostname', default='localhost', type=str,
help="MariaDB hostname")
parser.add_argument('--username', type=str, help="MariaDB username")
parser.add_argument('--password', type=str, help="MariaDB password")
parser.add_argument('--connection', required=True, type=str,
help="MariaDB slave connection name")
parser.add_argument('--mode', type=str, required=True,
choices=SlaveStatusCheck.MODES,
help="slave state to check")
parser.add_argument('-w', '--warning', type=int, default=None,
help="warning limit")
parser.add_argument('-c', '--critical', type=int, default=None,
help="critical limit")
parser.add_argument('--verbose', action='store_true', default=False,
help="enable verbose mode")
args = parser.parse_args(args)
ssc = SlaveStatusCheck(args.hostname, args.username, args.password,
args.connection, args.mode, args.verbose,
args.warning, args.critical)
ssc.get_slave_status()
ssc.run_check()
if __name__ == '__main__':
main() # pragma: no cover
| 33.873418
| 80
| 0.596599
|
#!/usr/bin/env python
"""MariaDB slave status checker"""
import sys
import argparse
import MySQLdb
class NagiosPlugin(object):
"""Nagios Plugin base class"""
def __init__(self, warning, critical, *args, **kwargs):
self.warning = warning
self.critical = critical
def run_check(self):
raise NotImplementedError
def ok_state(self, msg):
print "OK - {0}".format(msg)
sys.exit(0)
def warning_state(self, msg):
print "WARNING - {0}".format(msg)
sys.exit(1)
def critical_state(self, msg):
print "CRITICAL - {0}".format(msg)
sys.exit(2)
def unknown_state(self, msg):
        print "UNKNOWN - {0}".format(msg)
sys.exit(3)
class SlaveStatusCheck(NagiosPlugin):
"""Class to help us run slave status queries against MariaDB"""
REPLICATION_LAG_MODE = 'replication_lag'
SLAVESQL_MODE = 'slave_sql'
SLAVEIO_MODE = 'slave_io'
MODES = (REPLICATION_LAG_MODE,
SLAVESQL_MODE,
SLAVEIO_MODE)
def __init__(self, hostname, username, password, connection_name,
mode, verbose=False, warning=None, critical=None):
super(SlaveStatusCheck, self).__init__(warning, critical)
self.hostname = hostname
self.username = username
self.password = password
self.connection_name = connection_name
self.verbose = verbose
self.mode = mode
self._slave_status = {}
def run_check(self):
"""Execute the check against the given mode"""
check_fn = getattr(self, self.mode)
check_fn()
def replication_lag(self):
"""Check replication lag thresholds"""
lag = self._slave_status.get('Seconds_Behind_Master')
if lag is None:
self.unknown_state("No replication lag reported")
if not self.warning or not self.critical:
self.unknown_state("Warning and critical thresholds undefined")
lag = int(lag)
warning = int(self.warning)
critical = int(self.critical)
lag_performance_msg = "log={0}s;{1};{2};0".format(lag,warning,critical)
        lag_display_msg = "Slave is {0} seconds behind master".format(lag)
lag_msg = "{0} | {1}".format(lag_display_msg,lag_performance_msg)
if lag >= warning and lag < critical:
self.warning_state(lag_msg)
elif lag >= critical:
self.critical_state(lag_msg)
self.ok_state(lag_msg)
def slave_sql(self):
"""Check that Slave_SQL_Running = Yes"""
if self._slave_status.get('Slave_SQL_Running') == "No":
msg = "Slave sql is not running. Last error: {0}".format(
self._slave_status.get('Last_SQL_Error'))
self.critical_state(msg)
self.ok_state("Slave sql is running")
def slave_io(self):
"""Check that Slave_IO_Running = Yes"""
if self._slave_status.get('Slave_IO_Running') == "No":
msg = "Slave io is not running. Last error: {0}".format(
self._slave_status.get('Last_IO_Error'))
self.critical_state(msg)
self.ok_state("Slave io is running")
def get_slave_status(self):
"""Run the query!"""
try:
sql = 'SHOW SLAVE "{0}" STATUS'.format(self.connection_name)
conn = None
conn = MySQLdb.Connection(
self.hostname,
self.username,
self.password)
curs = conn.cursor(MySQLdb.cursors.DictCursor)
curs.execute(sql)
conn.commit()
self._slave_status = curs.fetchall()[0]
if self.verbose:
print self._slave_status
except MySQLdb.Error, exc:
msg = "{0}: {1}".format(exc.args[0], exc.args[1])
self.unknown_state(msg)
finally:
if conn:
conn.close()
def main(args=None):
"""starter method"""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description='MariaDB slave status checker')
parser.add_argument('--hostname', default='localhost', type=str,
help="MariaDB hostname")
parser.add_argument('--username', type=str, help="MariaDB username")
parser.add_argument('--password', type=str, help="MariaDB password")
parser.add_argument('--connection', required=True, type=str,
help="MariaDB slave connection name")
parser.add_argument('--mode', type=str, required=True,
choices=SlaveStatusCheck.MODES,
help="slave state to check")
parser.add_argument('-w', '--warning', type=int, default=None,
help="warning limit")
parser.add_argument('-c', '--critical', type=int, default=None,
help="critical limit")
parser.add_argument('--verbose', action='store_true', default=False,
help="enable verbose mode")
args = parser.parse_args(args)
ssc = SlaveStatusCheck(args.hostname, args.username, args.password,
args.connection, args.mode, args.verbose,
args.warning, args.critical)
ssc.get_slave_status()
ssc.run_check()
if __name__ == '__main__':
main() # pragma: no cover
| 817
| 0
| 189
|
10099580e70302fbeaf41e9a358a3baf413b5d47
| 6,025
|
py
|
Python
|
src/prg_state_ctrl.py
|
ccarr66/handwriting_gui
|
c5d8a8925ee37d874ee794a241e50974a7b9d921
|
[
"MIT"
] | null | null | null |
src/prg_state_ctrl.py
|
ccarr66/handwriting_gui
|
c5d8a8925ee37d874ee794a241e50974a7b9d921
|
[
"MIT"
] | null | null | null |
src/prg_state_ctrl.py
|
ccarr66/handwriting_gui
|
c5d8a8925ee37d874ee794a241e50974a7b9d921
|
[
"MIT"
] | 1
|
2020-11-17T21:31:55.000Z
|
2020-11-17T21:31:55.000Z
|
import sys, os, platform
import ocr_image_analyzer as OCR
try:
import PIL.Image
import PIL.ImageTk
except ModuleNotFoundError:
print('Required libraries not found, please install PIL')
if __name__ == "__main__":
raise Exception('Cannot be called as main script')
debug = True
#******************************************** Program state independent logic
filepathSlash = '\\' if isWindowsOS() else '/'
#******************************************** Object that contains program state
| 33.848315
| 137
| 0.601494
|
import sys, os, platform
import ocr_image_analyzer as OCR
try:
import PIL.Image
import PIL.ImageTk
except ModuleNotFoundError:
print('Required libraries not found, please install PIL')
if __name__ == "__main__":
raise Exception('Cannot be called as main script')
debug = True
#******************************************** Program state independent logic
def isWindowsOS():
    return platform.system() == "Windows"
filepathSlash = '\\' if isWindowsOS() else '/'
def scaleImage(imgObj, width, height):
imgWidth, imgHeight = imgObj.size
smallestOutDim = min(width, height)
largestInDim = max(imgObj.size)
imageScale = smallestOutDim/largestInDim
newWidth = (int)(imageScale * imgWidth)
newHeight = (int)(imageScale * imgHeight)
imgObj = imgObj.resize((newWidth,newHeight), PIL.Image.ANTIALIAS)
offsetX = (int)(abs(width - newWidth)/2)
offsetY = (int)(abs(height - newHeight)/2)
background = PIL.Image.new('RGBA', (width, height), (255, 0, 0, 0))
foreground = imgObj.convert('RGBA')
background.paste(foreground, (offsetX, offsetY), foreground)
return background
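# Usage sketch (added for illustration; 'example.png' is a placeholder path):
#   thumb = scaleImage(PIL.Image.open('example.png'), 400, 550)  # 400x550 RGBA image, letterboxed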
#******************************************** Object that contains program state
class PrgStateCtrl:
_execPath = os.path.dirname(os.path.realpath(__file__))
_resSubFolderPath = "\\res" if isWindowsOS() else "/res"
_imgSubFolderPath = "\\images" if isWindowsOS() else "/images"
_modelName = ""
_modelPath = _execPath + _resSubFolderPath + filepathSlash
_imgName = ""
_imgPath = _execPath + _resSubFolderPath + _imgSubFolderPath
_outputFileName = "results"
_outputFilePath = _execPath
_cachedImage = PIL.Image.new('RGBA', (400, 550), (0,0,0,0))
_modelSet = False
_imageLoaded = False
_outputIsValid = False
_currentOutputImageIdx = 0
_currentOutputImages = []
_currentOutputImageLabels = []
_currentOutputText = ""
def __init__(self):
if(debug):
self._imgName = "perry3.png"
self._modelName = "handwriting_v1.model"
self.SetModel(self._modelName)
self.LoadImage(self._imgName)
def GetImagePath(self):
return self._execPath + self._resSubFolderPath + self._imgSubFolderPath + filepathSlash
def GetImageFullPath(self):
return self._execPath + self._resSubFolderPath + self._imgSubFolderPath + filepathSlash + self._imgName
def GetModelPath(self):
return self._modelPath
def GetModelFullPath(self):
return self._modelPath + self._modelName
def GetCachedImage(self):
return (self._cachedImage, "Cached Image")
def GetOutputPath(self):
return self._outputFilePath
def GetOutputName(self):
return self._outputFileName
def isValidImg(self, name):
try:
PIL.Image.open(self.GetImagePath() + name)
return True
except:
return False
def SetModel(self, modelName):
if OCR.modelIsValid(self.GetModelPath() + modelName):
self._modelName = modelName
self._modelSet = True
return True
else:
return False
def LoadImage(self, imageName):
if self.isValidImg(imageName):
self._imgName = imageName
self._cachedImage = scaleImage(PIL.Image.open(self.GetImagePath() + self._imgName), 400, 550)
self._imageLoaded = True
self._outputIsValid = False;
self._currentOutputImages.clear()
self._currentOutputImageLabels.clear()
self._currentOutputImageIdx = 0
return True
else:
return False
def PerformOCR(self):
self._currentOutputImages.clear()
self._currentOutputImageLabels.clear()
if self._modelSet and self._imageLoaded:
try:
self._currentOutputImageIdx = 0
self._currentOutputImages.append(self._cachedImage)
self._currentOutputImageLabels.append("Original")
text, images, imageLabels = OCR.analyzeImage(self.GetImageFullPath())
self._currentOutputText = ""
for c in text:
self._currentOutputText += c
for img in images:
img_pil = PIL.Image.fromarray(img)
scaledImg = scaleImage(img_pil,400,550)
self._currentOutputImages.append(scaledImg)
for label in imageLabels:
self._currentOutputImageLabels.append(label)
self._outputIsValid = True
except:
self._outputIsValid = False
def GetOutputText(self):
if self._outputIsValid:
return self._currentOutputText
else:
return ""
def GetNextOutputImage(self):
if self._outputIsValid:
if self._currentOutputImageIdx == len(self._currentOutputImages) - 1:
self._currentOutputImageIdx = 0
else:
self._currentOutputImageIdx += 1
return (self._currentOutputImages[self._currentOutputImageIdx], self._currentOutputImageLabels[self._currentOutputImageIdx])
else:
return (self._cachedImage, "Cached Image")
def GetPrevOutputImage(self):
if self._outputIsValid:
if self._currentOutputImageIdx == 0:
self._currentOutputImageIdx = len(self._currentOutputImages) - 1
else:
self._currentOutputImageIdx -= 1
return (self._currentOutputImages[self._currentOutputImageIdx], self._currentOutputImageLabels[self._currentOutputImageIdx])
else:
return (self._cachedImage, "Cached Image")
| 4,261
| 1,155
| 74
|
54692a530049e8154ae7b77f87421496fbec60bd
| 1,013
|
py
|
Python
|
02_assignment/toolbox/Toolbox_Python02450/Scripts/ex10_2_1.py
|
LukaAvbreht/ML_projects
|
8b36acdeb017ce8a57959c609b96111968852d5f
|
[
"MIT"
] | null | null | null |
02_assignment/toolbox/Toolbox_Python02450/Scripts/ex10_2_1.py
|
LukaAvbreht/ML_projects
|
8b36acdeb017ce8a57959c609b96111968852d5f
|
[
"MIT"
] | null | null | null |
02_assignment/toolbox/Toolbox_Python02450/Scripts/ex10_2_1.py
|
LukaAvbreht/ML_projects
|
8b36acdeb017ce8a57959c609b96111968852d5f
|
[
"MIT"
] | null | null | null |
# exercise 10.2.1
from matplotlib.pyplot import figure, show
from scipy.io import loadmat
from toolbox_02450 import clusterplot
from scipy.cluster.hierarchy import linkage, fcluster, dendrogram
# Load Matlab data file and extract variables of interest
mat_data = loadmat('../Data/synth1.mat')
X = mat_data['X']
y = mat_data['y'].squeeze()
attributeNames = [name[0] for name in mat_data['attributeNames'].squeeze()]
classNames = [name[0][0] for name in mat_data['classNames']]
N, M = X.shape
C = len(classNames)
# Perform hierarchical/agglomerative clustering on data matrix
Method = 'single'
Metric = 'euclidean'
Z = linkage(X, method=Method, metric=Metric)
# Compute and display clusters by thresholding the dendrogram
Maxclust = 4
cls = fcluster(Z, criterion='maxclust', t=Maxclust)
figure(1)
clusterplot(X, cls.reshape(cls.shape[0],1), y=y)
# Display dendrogram
max_display_levels=6
figure(2,figsize=(10,4))
dendrogram(Z, truncate_mode='level', p=max_display_levels)
show()
print('Ran Exercise 10.2.1')
| 28.138889
| 75
| 0.755183
|
# exercise 10.2.1
from matplotlib.pyplot import figure, show
from scipy.io import loadmat
from toolbox_02450 import clusterplot
from scipy.cluster.hierarchy import linkage, fcluster, dendrogram
# Load Matlab data file and extract variables of interest
mat_data = loadmat('../Data/synth1.mat')
X = mat_data['X']
y = mat_data['y'].squeeze()
attributeNames = [name[0] for name in mat_data['attributeNames'].squeeze()]
classNames = [name[0][0] for name in mat_data['classNames']]
N, M = X.shape
C = len(classNames)
# Perform hierarchical/agglomerative clustering on data matrix
Method = 'single'
Metric = 'euclidean'
Z = linkage(X, method=Method, metric=Metric)
# Compute and display clusters by thresholding the dendrogram
Maxclust = 4
cls = fcluster(Z, criterion='maxclust', t=Maxclust)
figure(1)
clusterplot(X, cls.reshape(cls.shape[0],1), y=y)
# Display dendrogram
max_display_levels=6
figure(2,figsize=(10,4))
dendrogram(Z, truncate_mode='level', p=max_display_levels)
show()
print('Ran Exercise 10.2.1')
| 0
| 0
| 0
|
67bd1f4cc17ab520aed02257323fdafae66bc88e
| 2,002
|
py
|
Python
|
modules/network/PrefabGateOne.py
|
Jumpscale/rsal9
|
e7ff7638ca53dafe872ce3030a379e8b65cb4831
|
[
"Apache-2.0"
] | 1
|
2017-06-07T08:11:57.000Z
|
2017-06-07T08:11:57.000Z
|
modules/network/PrefabGateOne.py
|
Jumpscale/rsal9
|
e7ff7638ca53dafe872ce3030a379e8b65cb4831
|
[
"Apache-2.0"
] | 106
|
2017-05-10T18:16:31.000Z
|
2019-09-18T15:09:07.000Z
|
modules/network/PrefabGateOne.py
|
Jumpscale/rsal9
|
e7ff7638ca53dafe872ce3030a379e8b65cb4831
|
[
"Apache-2.0"
] | 5
|
2018-01-26T16:11:52.000Z
|
2018-08-22T15:12:52.000Z
|
from js9 import j
app = j.tools.prefab._getBaseAppClass()
| 26
| 131
| 0.584915
|
from js9 import j
app = j.tools.prefab._getBaseAppClass()
class PrefabGateOne(app):
NAME = "gateone"
def build(self, reset=False):
"""
Build Gateone
:param reset: reset build if already built before
:return:
"""
if self.doneCheck("build", reset):
return
self.prefab.tools.git.pullRepo("https://github.com/liftoff/GateOne", branch="master")
self.doneSet('build')
def install(self, reset=False):
"""
Installs gateone
@param reset: boolean: forces the install operation.
"""
if reset is False and self.isInstalled():
return
cmd = """
cd /opt/code/github/liftoff/GateOne
apt-get install build-essential python3-dev python3-setuptools python3-pip -y
pip3 install tornado==4.5.3
python3 setup.py install
cp /usr/local/bin/gateone $BINDIR/gateone
ln -s /usr/bin/python3 /usr/bin/python
"""
self.prefab.core.run(cmd)
self.prefab.system.ssh.keygen(name="id_rsa")
self.doneSet('install')
def start(self, name="main", address="localhost", port=10443):
"""
Starts gateone.
@param name: str: instance name.
@param address: str: bind address.
@param port: int: port number.
"""
cmd = "eval `ssh-agent -s` ssh-add /root/.ssh/id_rsa && gateone --address={} --port={} --disable_ssl".format(address, port)
pm = self.prefab.system.processmanager.get()
pm.ensure(name='gateone_{}'.format(name), cmd=cmd)
def stop(self, name='main'):
"""
Stops gateone
"""
pm = self.prefab.system.processmanager.get()
pm.stop(name='gateone_{}'.format(name))
def restart(self, name="main"):
"""
Restart GateOne instance by name.
"""
self.stop(name)
self.start(name)
def reset(self):
"""
helper method to clean what this module generates.
"""
pass
| 0
| 1,919
| 23
|
d6055403ada75fdb58112230e49db04f73faeaa8
| 7,213
|
py
|
Python
|
scripts/slave/recipe_modules/skia/resources/trigger_wait_ct_task.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/slave/recipe_modules/skia/resources/trigger_wait_ct_task.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/slave/recipe_modules/skia/resources/trigger_wait_ct_task.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | 1
|
2020-07-23T11:05:06.000Z
|
2020-07-23T11:05:06.000Z
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Python utility that triggers and waits for tasks to complete on CTFE."""
import base64
import hashlib
import json
import math
import optparse
import requests
import sys
import time
CTFE_HOST = "https://ct.skia.org"
CTFE_QUEUE = CTFE_HOST + '/queue/'
CHROMIUM_PERF_TASK_POST_URI = CTFE_HOST + "/_/webhook_add_chromium_perf_task"
GET_CHROMIUM_PERF_RUN_STATUS_URI = CTFE_HOST + "/get_chromium_perf_run_status"
CHROMIUM_PERF_RUNS_HISTORY = CTFE_HOST + "/chromium_perf_runs/"
GCE_WEBHOOK_SALT_METADATA_URI = (
"http://metadata/computeMetadata/v1/project/attributes/"
"webhook_request_salt")
CTFE_CONNECTION_RETRIES = 5
CONNECTION_WAIT_BASE = 5
POLLING_FREQUENCY_SECS = 30 # 30 seconds.
TRYBOT_DEADLINE_SECS = 24 * 60 * 60 # 24 hours.
def retry():
"""A retry decorator with exponential backoff."""
return decorator
@retry()
@retry()
def _CreateTaskJSON(options):
"""Creates a JSON representation of the requested task."""
task_params = {}
task_params["username"] = options.requester
task_params["benchmark"] = options.benchmark
task_params["platform"] = "Linux"
task_params["page_sets"] = "10k"
task_params["repeat_runs"] = "3"
task_params["run_in_parallel"] = str(options.parallel)
task_params["benchmark_args"] = "--output-format=csv-pivot-table"
task_params["browser_args_nopatch"] = (
"--disable-setuid-sandbox --enable-threaded-compositing "
"--enable-impl-side-painting")
task_params["browser_args_withpatch"] = (
"--disable-setuid-sandbox --enable-threaded-compositing "
"--enable-impl-side-painting")
trybot_params = {}
trybot_params["issue"] = options.issue
trybot_params["patchset"] = options.patchset
trybot_params["task"] = task_params
return json.dumps(trybot_params)
def _GetWebhookSaltFromMetadata():
"""Gets webhook_request_salt from GCE's metadata server."""
headers = {"Metadata-Flavor": "Google"}
resp = requests.get(GCE_WEBHOOK_SALT_METADATA_URI, headers=headers)
if resp.status_code != 200:
raise CtTrybotException(
'Return code from %s was %s' % (GCE_WEBHOOK_SALT_METADATA_URI,
resp.status_code))
return base64.standard_b64decode(resp.text)
def _TriggerTask(options):
"""Triggers the requested task on CTFE and returns the new task's ID."""
task = _CreateTaskJSON(options)
m = hashlib.sha512()
m.update(task)
m.update('notverysecret' if options.local else _GetWebhookSaltFromMetadata())
encoded = base64.standard_b64encode(m.digest())
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept": "application/json",
"X-Webhook-Auth-Hash": encoded}
resp = _AddTaskToCTFE(task, headers)
if resp.status_code != 200:
raise CtTrybotException(
'Return code from %s was %s' % (CHROMIUM_PERF_TASK_POST_URI,
resp.status_code))
try:
ret = json.loads(resp.text)
except ValueError, e:
raise CtTrybotException(
'Did not get a JSON response from %s: %s' % (
CHROMIUM_PERF_TASK_POST_URI, e))
return ret["taskID"]
if '__main__' == __name__:
option_parser = optparse.OptionParser()
option_parser.add_option(
'', '--issue',
help='The Rietveld CL number to get the patch from.')
option_parser.add_option(
'', '--patchset',
help='The Rietveld CL patchset to use.')
option_parser.add_option(
'', '--requester',
help='Email address of the user who requested this run.')
option_parser.add_option(
'', '--benchmark',
help='The CT benchmark to run on the patch.')
option_parser.add_option(
'', '--parallel', default=False, action='store_true',
help='Whether to run this benchmark in parallel.')
option_parser.add_option(
'', '--local', default=False, action='store_true',
help='Uses a dummy metadata salt if this flag is true else it tries to '
'get the salt from GCE metadata.')
options, unused_args = option_parser.parse_args()
if (not options.issue or not options.patchset or not options.requester
or not options.benchmark):
option_parser.error('Must specify issue, patchset, requester and benchmark')
sys.exit(TriggerAndWait(options))
| 33.087156
| 80
| 0.674338
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Python utility that triggers and waits for tasks to complete on CTFE."""
import base64
import hashlib
import json
import math
import optparse
import requests
import sys
import time
CTFE_HOST = "https://ct.skia.org"
CTFE_QUEUE = CTFE_HOST + '/queue/'
CHROMIUM_PERF_TASK_POST_URI = CTFE_HOST + "/_/webhook_add_chromium_perf_task"
GET_CHROMIUM_PERF_RUN_STATUS_URI = CTFE_HOST + "/get_chromium_perf_run_status"
CHROMIUM_PERF_RUNS_HISTORY = CTFE_HOST + "/chromium_perf_runs/"
GCE_WEBHOOK_SALT_METADATA_URI = (
"http://metadata/computeMetadata/v1/project/attributes/"
"webhook_request_salt")
CTFE_CONNECTION_RETRIES = 5
CONNECTION_WAIT_BASE = 5
POLLING_FREQUENCY_SECS = 30 # 30 seconds.
TRYBOT_DEADLINE_SECS = 24 * 60 * 60 # 24 hours.
class CtTrybotException(Exception):
pass
def retry():
"""A retry decorator with exponential backoff."""
def decorator(func):
def wrapper(*args, **kwargs):
tries = CTFE_CONNECTION_RETRIES
delay = CONNECTION_WAIT_BASE
while tries > 0:
try:
ret = func(*args, **kwargs)
return ret
except:
print >> sys.stderr, 'Failed to connect to CTFE.'
tries -= 1
if tries == 0:
raise
print 'Retry in %d seconds.' % delay
time.sleep(delay)
delay *= 2
return wrapper
return decorator
@retry()
def _AddTaskToCTFE(task, headers):
return requests.post(CHROMIUM_PERF_TASK_POST_URI, task, headers=headers)
@retry()
def _GetTaskStatusFromCTFE(get_url):
return requests.get(get_url)
def _CreateTaskJSON(options):
"""Creates a JSON representation of the requested task."""
task_params = {}
task_params["username"] = options.requester
task_params["benchmark"] = options.benchmark
task_params["platform"] = "Linux"
task_params["page_sets"] = "10k"
task_params["repeat_runs"] = "3"
task_params["run_in_parallel"] = str(options.parallel)
task_params["benchmark_args"] = "--output-format=csv-pivot-table"
task_params["browser_args_nopatch"] = (
"--disable-setuid-sandbox --enable-threaded-compositing "
"--enable-impl-side-painting")
task_params["browser_args_withpatch"] = (
"--disable-setuid-sandbox --enable-threaded-compositing "
"--enable-impl-side-painting")
trybot_params = {}
trybot_params["issue"] = options.issue
trybot_params["patchset"] = options.patchset
trybot_params["task"] = task_params
return json.dumps(trybot_params)
def _GetWebhookSaltFromMetadata():
"""Gets webhook_request_salt from GCE's metadata server."""
headers = {"Metadata-Flavor": "Google"}
resp = requests.get(GCE_WEBHOOK_SALT_METADATA_URI, headers=headers)
if resp.status_code != 200:
raise CtTrybotException(
'Return code from %s was %s' % (GCE_WEBHOOK_SALT_METADATA_URI,
resp.status_code))
return base64.standard_b64decode(resp.text)
def _TriggerTask(options):
"""Triggers the requested task on CTFE and returns the new task's ID."""
task = _CreateTaskJSON(options)
m = hashlib.sha512()
m.update(task)
m.update('notverysecret' if options.local else _GetWebhookSaltFromMetadata())
encoded = base64.standard_b64encode(m.digest())
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept": "application/json",
"X-Webhook-Auth-Hash": encoded}
resp = _AddTaskToCTFE(task, headers)
if resp.status_code != 200:
raise CtTrybotException(
'Return code from %s was %s' % (CHROMIUM_PERF_TASK_POST_URI,
resp.status_code))
try:
ret = json.loads(resp.text)
except ValueError, e:
raise CtTrybotException(
'Did not get a JSON response from %s: %s' % (
CHROMIUM_PERF_TASK_POST_URI, e))
return ret["taskID"]
def TriggerAndWait(options):
task_id = _TriggerTask(options)
print
  print 'Task %s has been successfully scheduled on CTFE (%s).' % (
task_id, CHROMIUM_PERF_RUNS_HISTORY)
print 'You will get an email once the task has been picked up by the server.'
print
print
# Now poll CTFE till the task completes or till deadline is hit.
time_started_polling = time.time()
while True:
if (time.time() - time_started_polling) > TRYBOT_DEADLINE_SECS:
raise CtTrybotException(
'Task did not complete in the deadline of %s seconds.' % (
TRYBOT_DEADLINE_SECS))
# Get the status of the task the trybot added.
get_url = '%s?task_id=%s' % (GET_CHROMIUM_PERF_RUN_STATUS_URI, task_id)
resp = _GetTaskStatusFromCTFE(get_url)
if resp.status_code != 200:
raise CtTrybotException(
'Return code from %s was %s' % (GET_CHROMIUM_PERF_RUN_STATUS_URI,
resp.status_code))
try:
ret = json.loads(resp.text)
except ValueError, e:
raise CtTrybotException(
'Did not get a JSON response from %s: %s' % (get_url, e))
# Assert that the status is for the task we asked for.
assert int(ret["taskID"]) == int(task_id)
status = ret["status"]
if status == "Completed":
results = ret["resultsLink"]
print
print 'Your run was successfully completed.'
if results:
print 'The output of your run is available here: %s' % results
print
print '@@@STEP_LINK@%s@%s@@@' % ('CT Perf Results', results)
print
return 0
elif status == "Completed with failures":
print
raise CtTrybotException(
'Your run was completed with failures. Please check your email for '
'links to logs of the run.')
print ('The current status of the task %s is "%s". You can view the size '
'of the queue here: %s' % (task_id, status, CTFE_QUEUE))
print 'Checking again after %s seconds' % POLLING_FREQUENCY_SECS
print
time.sleep(POLLING_FREQUENCY_SECS)
if '__main__' == __name__:
option_parser = optparse.OptionParser()
option_parser.add_option(
'', '--issue',
help='The Rietveld CL number to get the patch from.')
option_parser.add_option(
'', '--patchset',
help='The Rietveld CL patchset to use.')
option_parser.add_option(
'', '--requester',
help='Email address of the user who requested this run.')
option_parser.add_option(
'', '--benchmark',
help='The CT benchmark to run on the patch.')
option_parser.add_option(
'', '--parallel', default=False, action='store_true',
help='Whether to run this benchmark in parallel.')
option_parser.add_option(
'', '--local', default=False, action='store_true',
help='Uses a dummy metadata salt if this flag is true else it tries to '
'get the salt from GCE metadata.')
options, unused_args = option_parser.parse_args()
if (not options.issue or not options.patchset or not options.requester
or not options.benchmark):
option_parser.error('Must specify issue, patchset, requester and benchmark')
sys.exit(TriggerAndWait(options))
| 2,643
| 21
| 114
|
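# Illustrative sketch (assumptions noted in the comments; not taken from the record above).
# The ct_trybot script authenticates each CTFE request by hashing the JSON task body
# together with a shared salt and sending the digest in the X-Webhook-Auth-Hash header.
# The Python 3 snippet below reproduces that scheme in isolation; "task_json" and "salt"
# are stand-in names, not values from the record.
import base64
import hashlib

def webhook_auth_hash(task_json: bytes, salt: bytes) -> str:
    # sha512 over the request body followed by the salt, then base64,
    # mirroring _TriggerTask() in the script above.
    digest = hashlib.sha512(task_json + salt).digest()
    return base64.standard_b64encode(digest).decode("ascii")

# Example header: {"X-Webhook-Auth-Hash": webhook_auth_hash(b"{}", b"notverysecret")}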
98cde3ab65273da077819776204d130012f60c8c
| 189
|
py
|
Python
|
qvapay/v1/errors.py
|
jorgeajimenezl/qvapay-python
|
e3fa24ae1be858c65f6816af3d74dabba572e7f0
|
[
"MIT"
] | 15
|
2021-08-28T12:45:30.000Z
|
2022-02-09T23:41:43.000Z
|
qvapay/v1/errors.py
|
jorgeajimenezl/qvapay-python
|
e3fa24ae1be858c65f6816af3d74dabba572e7f0
|
[
"MIT"
] | 11
|
2021-08-30T20:30:37.000Z
|
2021-10-31T18:05:41.000Z
|
qvapay/v1/errors.py
|
jorgeajimenezl/qvapay-python
|
e3fa24ae1be858c65f6816af3d74dabba572e7f0
|
[
"MIT"
] | 6
|
2021-08-28T22:22:08.000Z
|
2022-03-07T19:53:09.000Z
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
| 21
| 55
| 0.783069
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class QvaPayError(Exception):
status_code: int
status_message: Optional[str] = field(default=None)
| 0
| 85
| 22
|
3da55060c919b50297a4552fec6032bcf0f5ca39
| 1,605
|
py
|
Python
|
flask_appify/ext/appengine.py
|
RealKinetic/flask-appify
|
3bfc649ab96fca92abeb03bd37b976f3de7dbbc3
|
[
"Apache-2.0"
] | null | null | null |
flask_appify/ext/appengine.py
|
RealKinetic/flask-appify
|
3bfc649ab96fca92abeb03bd37b976f3de7dbbc3
|
[
"Apache-2.0"
] | null | null | null |
flask_appify/ext/appengine.py
|
RealKinetic/flask-appify
|
3bfc649ab96fca92abeb03bd37b976f3de7dbbc3
|
[
"Apache-2.0"
] | null | null | null |
import functools
from flask_appify.util import request_wants_json
try:
from google.appengine.api import datastore_errors
except ImportError:
# not on the AppEngine platform
datastore_errors = None
__all__ = [
'init_app',
]
html_response = {
503: """<!doctype html><html><head><title>503 Service
Unavailable</title></head><body><h1>Service Unavailable</h1><p>Service is
temporarily unavailable. Please try again later.</p></body></html>"""
}
json_response = {
503: """{"status":"error","code":503,"message":"Service is
temporarily unavailable. Please try again later."}"""
}
def handle_temp_error(app, err):
"""
This is a Flask `errorhandler` handling `datastore_errors.InternalError`.
According to `https://cloud.google.com/appengine/docs/standard/python/
datastore/exceptions` this exception does not necessarily mean that the
underlying operation failed.
:param app: The flask app that received the error.
:param err: The exception that was raised.
"""
response = app.make_response(
json_response[503] if request_wants_json() else html_response[503]
)
response.status_code = 503
return response
| 26.75
| 77
| 0.699688
|
import functools
from flask_appify.util import request_wants_json
try:
from google.appengine.api import datastore_errors
except ImportError:
# not on the AppEngine platform
datastore_errors = None
__all__ = [
'init_app',
]
html_response = {
503: """<!doctype html><html><head><title>503 Service
Unavailable</title></head><body><h1>Service Unavailable</h1><p>Service is
temporarily unavailable. Please try again later.</p></body></html>"""
}
json_response = {
503: """{"status":"error","code":503,"message":"Service is
temporarily unavailable. Please try again later."}"""
}
def handle_temp_error(app, err):
"""
This is a Flask `errorhandler` handling `datastore_errors.InternalError`.
According to `https://cloud.google.com/appengine/docs/standard/python/
datastore/exceptions` this exception does not necessarily mean that the
underlying operation failed.
:param app: The flask app that received the error.
:param err: The exception that was raised.
"""
response = app.make_response(
json_response[503] if request_wants_json() else html_response[503]
)
response.status_code = 503
return response
def init_app(app):
if datastore_errors:
app.errorhandler(datastore_errors.InternalError)(
functools.partial(handle_temp_error, app)
)
app.errorhandler(datastore_errors.Timeout)(
functools.partial(handle_temp_error, app)
)
app.errorhandler(datastore_errors.TransactionFailedError)(
functools.partial(handle_temp_error, app)
)
| 391
| 0
| 23
|
e70169b20bf89f034aa2f15f946cacbc5c045ef2
| 1,146
|
py
|
Python
|
my_receipts/apps/receipts/tests/test_parsers.py
|
Tvrsch/my_receipts
|
0a905d2366cee3b62f2dd083af1622afeb648bef
|
[
"MIT"
] | null | null | null |
my_receipts/apps/receipts/tests/test_parsers.py
|
Tvrsch/my_receipts
|
0a905d2366cee3b62f2dd083af1622afeb648bef
|
[
"MIT"
] | 11
|
2021-05-10T15:40:38.000Z
|
2022-02-28T21:09:37.000Z
|
my_receipts/apps/receipts/tests/test_parsers.py
|
Tvrsch/my_receipts
|
0a905d2366cee3b62f2dd083af1622afeb648bef
|
[
"MIT"
] | 2
|
2021-09-18T18:49:58.000Z
|
2021-11-17T12:29:13.000Z
|
from datetime import datetime
from pathlib import Path
import pytest
from my_receipts.apps.receipts.parsers import TaxcomParser
CURRENT_DIR = Path(__file__).resolve(strict=True).parent
pytestmark = pytest.mark.django_db
| 30.157895
| 82
| 0.711169
|
from datetime import datetime
from pathlib import Path
import pytest
from my_receipts.apps.receipts.parsers import TaxcomParser
CURRENT_DIR = Path(__file__).resolve(strict=True).parent
pytestmark = pytest.mark.django_db
def test_taxcom(requests_mock):
with open(CURRENT_DIR / "taxcom_page.html", encoding="utf8") as fp:
html = fp.read()
requests_mock.get(TaxcomParser.base_url, text=html)
parser = TaxcomParser("00000000-0000-0000-0000-000000000000")
assert parser.get_html() == html
assert parser.shop_name == 'ООО "Лента"'
assert parser.shop_itn == 7814148471
assert parser.shop_address == "630083, Новосибирск, ул.Большевистская, д.52/1"
assert parser.terminal_number == 29
assert parser.shift == 204
assert parser.cashier == "Оператор"
assert parser.receipt_number == 59
assert parser.receipt_created_dt == datetime(2021, 2, 17, 12, 13)
assert parser.receipt_sum == 1196.00
assert len(parser.items) == 16
item = parser.items[2]
assert item.name == "Пакет ЛЕНТА майка 9кг"
assert item.price == 3.49
assert item.amount == 1.0
assert item.sum == 3.49
| 959
| 0
| 23
|
3f3179753f11234dd6a4245ab4afb96624ed7961
| 341
|
py
|
Python
|
django_fastapi/api/migrations/0005_rename_id_login_id2.py
|
ehddn5252/FastAPI_Django
|
a179aedb62c28d1700578882e681002a61576060
|
[
"MIT"
] | null | null | null |
django_fastapi/api/migrations/0005_rename_id_login_id2.py
|
ehddn5252/FastAPI_Django
|
a179aedb62c28d1700578882e681002a61576060
|
[
"MIT"
] | null | null | null |
django_fastapi/api/migrations/0005_rename_id_login_id2.py
|
ehddn5252/FastAPI_Django
|
a179aedb62c28d1700578882e681002a61576060
|
[
"MIT"
] | 1
|
2021-11-26T08:22:57.000Z
|
2021-11-26T08:22:57.000Z
|
# Generated by Django 3.2.9 on 2021-11-18 17:50
from django.db import migrations
| 17.947368
| 47
| 0.557185
|
# Generated by Django 3.2.9 on 2021-11-18 17:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0004_login_id'),
]
operations = [
migrations.RenameField(
model_name='login',
old_name='id',
new_name='id2',
),
]
| 0
| 235
| 23
|
0d241de29929eca30aa84aa2c82ddd0f71d7d6f2
| 1,049
|
py
|
Python
|
projects/ide/sublime/src/Bolt/api/command/bolt_listener.py
|
boltjs/bolt
|
c2666c876b34b1a61486a432eef3141ca8d1e411
|
[
"BSD-3-Clause"
] | 11
|
2015-09-29T19:19:34.000Z
|
2020-11-20T09:14:46.000Z
|
projects/ide/sublime/src/Bolt/api/command/bolt_listener.py
|
boltjs/bolt
|
c2666c876b34b1a61486a432eef3141ca8d1e411
|
[
"BSD-3-Clause"
] | null | null | null |
projects/ide/sublime/src/Bolt/api/command/bolt_listener.py
|
boltjs/bolt
|
c2666c876b34b1a61486a432eef3141ca8d1e411
|
[
"BSD-3-Clause"
] | null | null | null |
import sublime
import sublime_plugin
from structs.thread_handler import *
from api.inspect import highlighting
from lookup import file_type as lookup_file_type
| 26.225
| 83
| 0.633937
|
import sublime
import sublime_plugin
from structs.thread_handler import *
from api.inspect import highlighting
from lookup import file_type as lookup_file_type
class BoltListener(sublime_plugin.EventListener):
def __init__(self):
self.thread = None
def init(t):
self.thread = t
def failure(exc):
self.thread = None
def success(result):
self.thread = None
self.handler = ThreadHandler(init, success, failure)
def _update_highlight(self, view):
def run():
if lookup_file_type.is_bolt_module(view) and highlighting.is_enabled():
if (self.thread == None):
highlighting.run(view, self.handler)
else:
highlighting.clear(view)
return run
def on_modified(self, view):
rate = highlighting.get_rate()
sublime.set_timeout(self._update_highlight(view), rate)
def on_activated(self, view):
sublime.set_timeout(self._update_highlight(view), 0)
| 728
| 28
| 131
|
d9a539681a7a683b2742f4354908b361aaacc82b
| 18
|
py
|
Python
|
tmc-langs/tests/data/some_course/PythonExercise/src/__init__.py
|
Robustic/tmc-langs-rust
|
fd7d689a5f898a728787123966b8a5d8eb0f0c5b
|
[
"Apache-2.0",
"MIT"
] | 7
|
2021-11-16T06:01:41.000Z
|
2022-03-30T21:09:14.000Z
|
tmc-langs/tests/data/some_course/PythonExercise/src/__init__.py
|
Robustic/tmc-langs-rust
|
fd7d689a5f898a728787123966b8a5d8eb0f0c5b
|
[
"Apache-2.0",
"MIT"
] | 110
|
2020-05-04T13:44:28.000Z
|
2022-03-09T12:21:40.000Z
|
tmc-langs/tests/data/some_course/PythonExercise/src/__init__.py
|
Robustic/tmc-langs-rust
|
fd7d689a5f898a728787123966b8a5d8eb0f0c5b
|
[
"Apache-2.0",
"MIT"
] | 9
|
2020-05-05T03:05:53.000Z
|
2021-04-29T13:13:52.000Z
|
from src import *
| 9
| 17
| 0.722222
|
from src import *
| 0
| 0
| 0
|
70cb51c5cefbf126f84ed0c92fb87ae557ed0c84
| 2,072
|
py
|
Python
|
grocery_store/grocery_store/grocery_auth/views.py
|
DeanDupalov/my_project
|
3346f940a246441c46188c20d1ddfbd13a883928
|
[
"MIT"
] | null | null | null |
grocery_store/grocery_store/grocery_auth/views.py
|
DeanDupalov/my_project
|
3346f940a246441c46188c20d1ddfbd13a883928
|
[
"MIT"
] | null | null | null |
grocery_store/grocery_store/grocery_auth/views.py
|
DeanDupalov/my_project
|
3346f940a246441c46188c20d1ddfbd13a883928
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.shortcuts import render, redirect
from django.views.generic import TemplateView
from grocery_store.grocery_auth.forms import SignInForm, SignUpForm
from grocery_store.product.models import Category
from grocery_store.profiles.forms import ProfileForm, ProfileAddressForm
@login_required
| 28
| 72
| 0.644305
|
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.shortcuts import render, redirect
from django.views.generic import TemplateView
from grocery_store.grocery_auth.forms import SignInForm, SignUpForm
from grocery_store.product.models import Category
from grocery_store.profiles.forms import ProfileForm, ProfileAddressForm
class RegisterView(TemplateView):
template_name = 'grocery/auth/sign_up.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['form'] = SignUpForm()
context['profile_form'] = ProfileForm()
context['categories'] = Category.objects.all()
return context
@transaction.atomic
def post(self, request):
form = SignUpForm(request.POST)
profile_form = ProfileForm(request.POST)
if form.is_valid() and profile_form.is_valid():
user = form.save()
profile = profile_form.save(commit=False)
profile.user = user
profile.save()
address = ProfileAddressForm().save(commit=False)
address.profile = profile
address.save()
login(request, user)
return redirect('landing page')
context = {
'categories': Category.objects.all(),
'form': SignUpForm(),
'profile_form': ProfileForm()
}
return render(request, 'grocery/auth/sign_up.html', context)
def sign_in(request):
if request.method == 'POST':
form = SignInForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
return redirect('landing page')
else:
form = SignInForm()
context = {
'categories': Category.objects.all(),
'form': form,
}
return render(request, 'grocery/auth/sign_in.html', context)
@login_required
def sign_out(request):
logout(request)
return redirect('landing page')
| 1,422
| 138
| 68
|
037795ac1abedf72121969ab98a1ef082062b78c
| 6,589
|
py
|
Python
|
data/Luminometrics/htbayes.py
|
ericmjl/protein-systematic-characterization
|
3ac44d672380490d8e602aa024e40009fdf306b0
|
[
"MIT"
] | null | null | null |
data/Luminometrics/htbayes.py
|
ericmjl/protein-systematic-characterization
|
3ac44d672380490d8e602aa024e40009fdf306b0
|
[
"MIT"
] | 44
|
2016-08-31T14:58:13.000Z
|
2017-04-07T19:01:56.000Z
|
data/Luminometrics/htbayes.py
|
ericmjl/protein-systematic-characterization
|
3ac44d672380490d8e602aa024e40009fdf306b0
|
[
"MIT"
] | 1
|
2016-08-31T14:33:35.000Z
|
2016-08-31T14:33:35.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 30 17:56:58 2016
@author: Vivian Zhong
"""
import click
import pymc3 as pm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import logging
@click.command()
@click.option('--filename', default='data.csv',
help='File name of the data in CSV format.')
@click.option('--output_col', default='output',
help='Name of column that contains data.')
@click.option('--sample_col', default='sample_name',
help='Name of column that contains sample names.')
@click.option('--baseline_name', default='control',
help='Name of positive control in sample names column.')
@click.option('--n_steps', default=300000,
help='Number of iterations for ADVI.')
class BEST(object):
"""BEST Model, based on Kruschke (2013).
Parameters
----------
data : pandas DataFrame
A pandas dataframe which has the following data:
- Each row is one replicate measurement.
- There is a column that records the treatment name.
- There is a column that records the measured value for that replicate.
sample_col : str
The name of the column containing sample names.
output_col : str
The name of the column containing values to estimate.
baseline_name : str
The name of the "control" or "baseline".
Output
------
model : PyMC3 model
Returns the BEST model containing
"""
def _convert_to_indices(self):
"""
Adds the "indices" column to self.data (DataFrame). This is necessary
for the simplified model specification in the "fit" function below.
"""
sample_names = dict()
for i, name in enumerate(
list(np.unique(self.data[self.sample_col].values))):
logging.info('Sample name {0} has the index {1}'.format(name, i))
sample_names[name] = i
self.data['indices'] = self.data[self.sample_col].apply(
lambda x: sample_names[x])
def fit(self, n_steps=50000):
"""
Creates a Bayesian Estimation model for replicate measurements of
treatment(s) vs. control.
Parameters
----------
n_steps : int
The number of steps to run ADVI.
"""
sample_names = set(self.data[self.sample_col].values)
# mean_test = self.data.groupby('indices').mean()[self.output_col].values
# sd_test = self.data.groupby('indices').std()[self.output_col].values
# print(mean_test, sd_test)
with pm.Model() as model:
# Hyperpriors
# upper = pm.Exponential('upper', lam=0.05)
nu = pm.Exponential('nu_minus_one', 1/29.) + 1
# "fold", which is the estimated fold change.
fold = pm.Flat('fold', shape=len(sample_names))
# Assume that data have heteroskedastic (i.e. variable) error but
# are drawn from the same HalfCauchy distribution.
sigma = pm.HalfCauchy('sigma', beta=1, shape=len(sample_names))
# Model prediction
mu = fold[self.data['indices']]
sig = sigma[self.data['indices']]
# Data likelihood
like = pm.StudentT('like', nu=nu, mu=mu, sd=sig**-2,
observed=self.data[self.output_col])
# Sample from posterior
            v_params = pm.variational.advi(n=n_steps)
            self.params = v_params  # keep the ADVI fit so plot_elbo() can read elbo_vals
start = pm.variational.sample_vp(v_params, 1)[0]
cov = np.power(model.dict_to_array(v_params.stds), 2)
step = pm.NUTS(scaling=cov, is_cov=True)
logging.info('Starting MCMC sampling')
trace = pm.sample(step=step, start=start, draws=2000)
self.trace = trace
self.model = model
def plot_posterior(self, rotate_xticks=False):
"""
Plots a swarm plot of the data overlaid on top of the 95% HPD and IQR
of the posterior distribution.
"""
# Make summary plot #
fig = plt.figure()
ax = fig.add_subplot(111)
# 1. Get the lower error and upper errorbars for 95% HPD and IQR.
lower, lower_q, upper_q, upper = np.percentile(self.trace['fold'][500:],
[2.5, 25, 75, 97.5],
axis=0)
summary_stats = pd.DataFrame()
summary_stats['mean'] = self.trace['fold'].mean(axis=0)
err_low = summary_stats['mean'] - lower
err_high = upper - summary_stats['mean']
iqr_low = summary_stats['mean'] - lower_q
iqr_high = upper_q - summary_stats['mean']
# 2. Plot the swarmplot and errorbars.
summary_stats['mean'].plot(ls='', ax=ax,
yerr=[err_low, err_high])
summary_stats['mean'].plot(ls='', ax=ax,
yerr=[iqr_low, iqr_high],
elinewidth=4, color='red')
sns.swarmplot(data=self.data, x=self.sample_col, y=self.output_col,
ax=ax, alpha=0.5)
if rotate_xticks:
logging.info('rotating xticks')
plt.xticks(rotation='vertical')
plt.ylabel(self.output_col)
return fig, ax
def plot_elbo(self):
"""
Plots the ELBO values to help check for convergence.
"""
fig = plt.figure()
plt.plot(-np.log10(-self.params.elbo_vals))
return fig
if __name__ == '__main__':
main()
| 35.616216
| 81
| 0.591137
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 30 17:56:58 2016
@author: Vivian Zhong
"""
import click
import pymc3 as pm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import logging
@click.command()
@click.option('--filename', default='data.csv',
help='File name of the data in CSV format.')
@click.option('--output_col', default='output',
help='Name of column that contains data.')
@click.option('--sample_col', default='sample_name',
help='Name of column that contains sample names.')
@click.option('--baseline_name', default='control',
help='Name of positive control in sample names column.')
@click.option('--n_steps', default=300000,
help='Number of iterations for ADVI.')
def main(filename, sample_col, baseline_name, n_steps, output_col):
data = load_data(filename)
# data, sample_names = convert_to_indices(data, sample_col)
# data = data.sort_values(by='indices')
# model = build_model(sample_names, data, baseline_name, output_col)
# trace = run_model(model, n_steps)
# plot_diagrams(trace, filename, baseline_name, output_col,
# data, sample_col)
b = BEST(data, sample_col, output_col, baseline_name)
b.fit()
b.plot_posterior()
def load_data(filename):
data = pd.read_csv(filename)
return data
class BEST(object):
"""BEST Model, based on Kruschke (2013).
Parameters
----------
data : pandas DataFrame
A pandas dataframe which has the following data:
- Each row is one replicate measurement.
- There is a column that records the treatment name.
- There is a column that records the measured value for that replicate.
sample_col : str
The name of the column containing sample names.
output_col : str
The name of the column containing values to estimate.
baseline_name : str
The name of the "control" or "baseline".
Output
------
model : PyMC3 model
Returns the BEST model containing
"""
def __init__(self, data, sample_col, output_col, baseline_name):
super(BEST, self).__init__()
self.data = data.sort_values(by=sample_col)
self.sample_col = sample_col
self.output_col = output_col
self.baseline_name = baseline_name
self.trace = None
self._convert_to_indices()
def _convert_to_indices(self):
"""
Adds the "indices" column to self.data (DataFrame). This is necessary
for the simplified model specification in the "fit" function below.
"""
sample_names = dict()
for i, name in enumerate(
list(np.unique(self.data[self.sample_col].values))):
logging.info('Sample name {0} has the index {1}'.format(name, i))
sample_names[name] = i
self.data['indices'] = self.data[self.sample_col].apply(
lambda x: sample_names[x])
def fit(self, n_steps=50000):
"""
Creates a Bayesian Estimation model for replicate measurements of
treatment(s) vs. control.
Parameters
----------
n_steps : int
The number of steps to run ADVI.
"""
sample_names = set(self.data[self.sample_col].values)
# mean_test = self.data.groupby('indices').mean()[self.output_col].values
# sd_test = self.data.groupby('indices').std()[self.output_col].values
# print(mean_test, sd_test)
with pm.Model() as model:
# Hyperpriors
# upper = pm.Exponential('upper', lam=0.05)
nu = pm.Exponential('nu_minus_one', 1/29.) + 1
# "fold", which is the estimated fold change.
fold = pm.Flat('fold', shape=len(sample_names))
# Assume that data have heteroskedastic (i.e. variable) error but
# are drawn from the same HalfCauchy distribution.
sigma = pm.HalfCauchy('sigma', beta=1, shape=len(sample_names))
# Model prediction
mu = fold[self.data['indices']]
sig = sigma[self.data['indices']]
# Data likelihood
like = pm.StudentT('like', nu=nu, mu=mu, sd=sig**-2,
observed=self.data[self.output_col])
# Sample from posterior
            v_params = pm.variational.advi(n=n_steps)
            self.params = v_params  # keep the ADVI fit so plot_elbo() can read elbo_vals
start = pm.variational.sample_vp(v_params, 1)[0]
cov = np.power(model.dict_to_array(v_params.stds), 2)
step = pm.NUTS(scaling=cov, is_cov=True)
logging.info('Starting MCMC sampling')
trace = pm.sample(step=step, start=start, draws=2000)
self.trace = trace
self.model = model
def plot_posterior(self, rotate_xticks=False):
"""
Plots a swarm plot of the data overlaid on top of the 95% HPD and IQR
of the posterior distribution.
"""
# Make summary plot #
fig = plt.figure()
ax = fig.add_subplot(111)
# 1. Get the lower error and upper errorbars for 95% HPD and IQR.
lower, lower_q, upper_q, upper = np.percentile(self.trace['fold'][500:],
[2.5, 25, 75, 97.5],
axis=0)
summary_stats = pd.DataFrame()
summary_stats['mean'] = self.trace['fold'].mean(axis=0)
err_low = summary_stats['mean'] - lower
err_high = upper - summary_stats['mean']
iqr_low = summary_stats['mean'] - lower_q
iqr_high = upper_q - summary_stats['mean']
# 2. Plot the swarmplot and errorbars.
summary_stats['mean'].plot(ls='', ax=ax,
yerr=[err_low, err_high])
summary_stats['mean'].plot(ls='', ax=ax,
yerr=[iqr_low, iqr_high],
elinewidth=4, color='red')
sns.swarmplot(data=self.data, x=self.sample_col, y=self.output_col,
ax=ax, alpha=0.5)
if rotate_xticks:
logging.info('rotating xticks')
plt.xticks(rotation='vertical')
plt.ylabel(self.output_col)
return fig, ax
def plot_elbo(self):
"""
Plots the ELBO values to help check for convergence.
"""
fig = plt.figure()
plt.plot(-np.log10(-self.params.elbo_vals))
return fig
def summary_stats(self):
return pm.summary_df(self.trace)
if __name__ == '__main__':
main()
| 900
| 0
| 98
|
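# Illustrative sketch (assumptions noted in the comments; not taken from the record above).
# Minimal usage of the BEST class from htbayes.py. The synthetic frame below runs on its
# own; the commented lines assume BEST is importable from that module and that pymc3 is
# installed, so they are left as a sketch rather than executed here.
import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
data = pd.DataFrame({
    "sample_name": ["control"] * 8 + ["treated"] * 8,
    "output": np.concatenate([rng.normal(1.0, 0.1, 8), rng.normal(1.5, 0.1, 8)]),
})

# best = BEST(data, sample_col="sample_name", output_col="output",
#             baseline_name="control")
# best.fit(n_steps=50000)          # ADVI for initialisation, then NUTS sampling
# fig, ax = best.plot_posterior()  # 95% HPD and IQR drawn over a swarm plot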
6192fb7a57daf01ebdb8110a9977c021054793ab
| 150
|
py
|
Python
|
python/armstrong.py
|
lukasjoc/random
|
5be080b424f02491fb219634902fc0cc192aff6c
|
[
"0BSD"
] | 1
|
2020-11-09T19:32:43.000Z
|
2020-11-09T19:32:43.000Z
|
python/armstrong.py
|
lukasjoc/random
|
5be080b424f02491fb219634902fc0cc192aff6c
|
[
"0BSD"
] | null | null | null |
python/armstrong.py
|
lukasjoc/random
|
5be080b424f02491fb219634902fc0cc192aff6c
|
[
"0BSD"
] | null | null | null |
#!/usr/local/bin/python3
if __name__ == "__main__":
arm(2)
| 13.636364
| 28
| 0.586667
|
#!/usr/local/bin/python3
def arm(number: int) -> bool:
    # Armstrong check: the sum of each digit raised to the power of the digit
    # count equals the number itself (e.g. 153 == 1**3 + 5**3 + 3**3).
    digits = str(number)
    for d in digits:
        print(d)
    return sum(int(d) ** len(digits) for d in digits) == number
if __name__ == "__main__":
arm(2)
| 62
| 0
| 23
|
1772f57bf5430988d5b8a92657ecfe7e16777a86
| 4,324
|
py
|
Python
|
main_2.py
|
NathanMaton/stargan_single_image
|
b46837d1dd618bdd68ea4813d2a051df736e7399
|
[
"MIT"
] | null | null | null |
main_2.py
|
NathanMaton/stargan_single_image
|
b46837d1dd618bdd68ea4813d2a051df736e7399
|
[
"MIT"
] | 8
|
2020-11-13T18:52:25.000Z
|
2022-02-10T01:59:43.000Z
|
main_2.py
|
NathanMaton/stargan_single_image
|
b46837d1dd618bdd68ea4813d2a051df736e7399
|
[
"MIT"
] | null | null | null |
'''
TO-DO:
GET BETTER RESULTS
CLEAN UP THIS CODE TO GET THE SINGLE PROCESSING EASIER TO USE
REDO AS A PR TO ORIGINAL REPO
'''
import os
import argparse
from solver import Solver
from data_loader import get_loader
from torch.backends import cudnn
# celeba_loader = get_loader(config.celeba_image_dir, config.attr_path, config.selected_attrs,
# config.celeba_crop_size, config.image_size, config.batch_size,
# 'CelebA', config.mode, config.num_workers)
if __name__ == '__main__':
config = Alt_config()
celeba_loader = get_loader(config.celeba_image_dir, config.attr_path, config.selected_attrs,
config.celeba_crop_size, config.image_size, config.batch_size,
'CelebA', config.mode, config.num_workers)
solver = Solver(celeba_loader, None, config)
solver.test_single()
main(config)
| 30.027778
| 100
| 0.624191
|
'''
TO-DO:
GET BETTER RESULTS
CLEAN UP THIS CODE TO GET THE SINGLE PROCESSING EASIER TO USE
REDO AS A PR TO ORIGINAL REPO
'''
import os
import argparse
from solver import Solver
from data_loader import get_loader
from torch.backends import cudnn
# celeba_loader = get_loader(config.celeba_image_dir, config.attr_path, config.selected_attrs,
# config.celeba_crop_size, config.image_size, config.batch_size,
# 'CelebA', config.mode, config.num_workers)
class Alt_config():
def __init__(self):
# self.dataset = 'CelebA'
# self.celeba_image_dir
# self.attr_path
# self.selected_attrs
# self.celeba_crop_size
# self.image_size
# self.batch_size
# self.mode
# self.num_workers
# Model configuration.
self.c_dim=5
self.c2_dim=8
self.celeba_crop_size=178
self.rafd_crop_size=256
self.image_size=128
self.g_conv_dim=64
self.d_conv_dim=64
self.g_repeat_num=6
self.d_repeat_num=6
self.lambda_cls=1
self.lambda_rec=10
self.lambda_gp=10
# Training configuration.
self.dataset='CelebA'
self.batch_size=16
self.num_iters=200000
self.num_iters_decay=100000
self.g_lr=0.0001
self.d_lr=0.0001
self.n_critic=5
self.beta1=0.5
self.beta2=0.999
self.resume_iters=None
self.selected_attrs=['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Male', 'Young']
# Test configuration.
self.test_iters=200000
# Miscellaneous.
self.num_workers=1
self.mode='test'
self.use_tensorboard=True
# Directories.
self.celeba_image_dir='data/celeb_test_custom/images'
self.attr_path='data/celeb_test_custom/list_attr_celeba.txt'
self.rafd_image_dir='data/RaFD/train'
self.log_dir='stargan/logs'
self.model_save_dir='stargan_celeba_256/models'
self.sample_dir='stargan/samples'
self.result_dir='stargan/test_results'
# Step size.
self.log_step=10
self.sample_step=1000
self.model_save_step=10000
self.lr_update_step=1000
def str2bool(v):
return v.lower() in ('true')
def main(config):
# For fast training.
cudnn.benchmark = True
# Create directories if not exist.
if not os.path.exists(config.log_dir):
os.makedirs(config.log_dir)
if not os.path.exists(config.model_save_dir):
os.makedirs(config.model_save_dir)
if not os.path.exists(config.sample_dir):
os.makedirs(config.sample_dir)
if not os.path.exists(config.result_dir):
os.makedirs(config.result_dir)
# Data loader.
celeba_loader = None
rafd_loader = None
if config.dataset in ['CelebA', 'Both']:
celeba_loader = get_loader(config.celeba_image_dir, config.attr_path, config.selected_attrs,
config.celeba_crop_size, config.image_size, config.batch_size,
'CelebA', config.mode, config.num_workers)
if config.dataset in ['RaFD', 'Both']:
rafd_loader = get_loader(config.rafd_image_dir, None, None,
config.rafd_crop_size, config.image_size, config.batch_size,
'RaFD', config.mode, config.num_workers)
# Solver for training and testing StarGAN.
solver = Solver(celeba_loader, rafd_loader, config)
if config.mode == 'train':
if config.dataset in ['CelebA', 'RaFD']:
solver.train()
elif config.dataset in ['Both']:
solver.train_multi()
elif config.mode == 'test':
if config.dataset in ['CelebA', 'RaFD']:
solver.test()
elif config.dataset in ['Both']:
solver.test_multi()
if __name__ == '__main__':
config = Alt_config()
celeba_loader = get_loader(config.celeba_image_dir, config.attr_path, config.selected_attrs,
config.celeba_crop_size, config.image_size, config.batch_size,
'CelebA', config.mode, config.num_workers)
solver = Solver(celeba_loader, None, config)
solver.test_single()
main(config)
| 3,304
| -2
| 95
|
1ef202fcfbe1e539d7edcd862acda06dba044d6d
| 1,600
|
py
|
Python
|
openchem/layers/gcn.py
|
jmhayesesq/Open-Chem
|
e612d5cd471079c64e61ceda946c3dc7cf095bd8
|
[
"MIT"
] | 466
|
2018-08-10T04:58:42.000Z
|
2022-03-29T07:04:39.000Z
|
openchem/layers/gcn.py
|
jmhayesesq/Open-Chem
|
e612d5cd471079c64e61ceda946c3dc7cf095bd8
|
[
"MIT"
] | 15
|
2019-08-06T08:23:11.000Z
|
2022-01-23T22:39:52.000Z
|
openchem/layers/gcn.py
|
jmhayesesq/Open-Chem
|
e612d5cd471079c64e61ceda946c3dc7cf095bd8
|
[
"MIT"
] | 91
|
2018-08-19T00:37:50.000Z
|
2022-02-27T11:15:31.000Z
|
# modified from https://github.com/tkipf/pygcn/blob/master/pygcn/layers.py
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class GraphConvolution(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
| 33.333333
| 77
| 0.625625
|
# modified from https://github.com/tkipf/pygcn/blob/master/pygcn/layers.py
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class GraphConvolution(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
self.bn = nn.BatchNorm1d(out_features)
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x, adj):
support = torch.bmm(adj, x)
result = torch.mm(support.view(-1, self.in_features), self.weight)
output = result.view(-1, adj.data.shape[1], self.out_features)
if self.bias is not None:
output = output + self.bias
output = output.transpose(1, 2).contiguous()
output = self.bn(output)
output = output.transpose(1, 2)
return output
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
| 1,208
| 0
| 107
|
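# Illustrative sketch (assumptions noted in the comments; not taken from the record above).
# Shape conventions for the GraphConvolution layer in gcn.py: node features x of shape
# (batch, nodes, in_features), adjacency adj of shape (batch, nodes, nodes), and an output
# of shape (batch, nodes, out_features). The tensors below are runnable stand-ins; the
# layer call is commented because it assumes the class is importable from that module.
import torch

batch, nodes, in_features, out_features = 4, 10, 16, 32
x = torch.randn(batch, nodes, in_features)
adj = torch.eye(nodes).expand(batch, nodes, nodes)  # identity adjacency as a placeholder

# layer = GraphConvolution(in_features, out_features)
# out = layer(x, adj)
# assert out.shape == (batch, nodes, out_features)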
f4ff0fcd3bec384fcf5b2cfb85a770e9589fe089
| 80
|
py
|
Python
|
py/authdata-example.py
|
flopp/safari
|
d7c8fca838cecbba738a547a8852bf40abed0409
|
[
"MIT"
] | 1
|
2018-12-09T19:37:05.000Z
|
2018-12-09T19:37:05.000Z
|
py/authdata-example.py
|
flopp/safari
|
d7c8fca838cecbba738a547a8852bf40abed0409
|
[
"MIT"
] | 9
|
2015-02-14T20:41:08.000Z
|
2015-07-25T10:32:35.000Z
|
py/authdata-example.py
|
flopp/safari
|
d7c8fca838cecbba738a547a8852bf40abed0409
|
[
"MIT"
] | null | null | null |
OC_OKAPI_KEY = "xxx"
OC_USERNAME = "xxx"
OC_PASSWORD = "xxx"
OC_QUERYID = "xxx"
| 16
| 20
| 0.7
|
OC_OKAPI_KEY = "xxx"
OC_USERNAME = "xxx"
OC_PASSWORD = "xxx"
OC_QUERYID = "xxx"
| 0
| 0
| 0
|
2bcd8e707519ec5796351e2ec38632750a8b4827
| 2,164
|
py
|
Python
|
tests/system/data_sources/test_teradata.py
|
ajw0100/professional-services-data-validator
|
b1dc82adf92adf19702f5ef41590c62c7c128c74
|
[
"Apache-2.0"
] | 167
|
2021-05-27T19:43:43.000Z
|
2022-03-16T02:42:30.000Z
|
tests/system/data_sources/test_teradata.py
|
ajw0100/professional-services-data-validator
|
b1dc82adf92adf19702f5ef41590c62c7c128c74
|
[
"Apache-2.0"
] | 110
|
2021-05-27T14:49:09.000Z
|
2022-03-31T11:10:41.000Z
|
tests/system/data_sources/test_teradata.py
|
ajw0100/professional-services-data-validator
|
b1dc82adf92adf19702f5ef41590c62c7c128c74
|
[
"Apache-2.0"
] | 32
|
2021-06-23T22:00:59.000Z
|
2022-03-30T03:32:20.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from data_validation import data_validation, consts
TERADATA_PASSWORD = os.getenv("TERADATA_PASSWORD")
TERADATA_HOST = os.getenv("TERADATA_HOST")
PROJECT_ID = os.getenv("PROJECT_ID")
conn = {
"source_type": "Teradata",
"host": TERADATA_HOST,
"user_name": "udf",
"password": TERADATA_PASSWORD,
"port": 1025,
}
TERADATA_CONFIG = {
# Specific Connection Config
consts.CONFIG_SOURCE_CONN: conn,
consts.CONFIG_TARGET_CONN: conn,
# Validation Type
consts.CONFIG_TYPE: "Column",
# Configuration Required Depending on Validator Type
consts.CONFIG_SCHEMA_NAME: "Sys_Calendar",
consts.CONFIG_TABLE_NAME: "CALENDAR",
consts.CONFIG_AGGREGATES: [
{
consts.CONFIG_TYPE: "count",
consts.CONFIG_SOURCE_COLUMN: "year_of_calendar",
consts.CONFIG_TARGET_COLUMN: "year_of_calendar",
consts.CONFIG_FIELD_ALIAS: "count",
},
],
consts.CONFIG_FORMAT: "table",
consts.CONFIG_FILTERS: [
{
consts.CONFIG_TYPE: consts.FILTER_TYPE_EQUALS,
consts.CONFIG_FILTER_SOURCE_COLUMN: "year_of_calendar",
consts.CONFIG_FILTER_SOURCE_VALUE: 2010,
consts.CONFIG_FILTER_TARGET_COLUMN: "year_of_calendar",
consts.CONFIG_FILTER_TARGET_VALUE: 2010,
},
],
}
| 32.298507
| 77
| 0.700555
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from data_validation import data_validation, consts
TERADATA_PASSWORD = os.getenv("TERADATA_PASSWORD")
TERADATA_HOST = os.getenv("TERADATA_HOST")
PROJECT_ID = os.getenv("PROJECT_ID")
conn = {
"source_type": "Teradata",
"host": TERADATA_HOST,
"user_name": "udf",
"password": TERADATA_PASSWORD,
"port": 1025,
}
TERADATA_CONFIG = {
# Specific Connection Config
consts.CONFIG_SOURCE_CONN: conn,
consts.CONFIG_TARGET_CONN: conn,
# Validation Type
consts.CONFIG_TYPE: "Column",
# Configuration Required Depending on Validator Type
consts.CONFIG_SCHEMA_NAME: "Sys_Calendar",
consts.CONFIG_TABLE_NAME: "CALENDAR",
consts.CONFIG_AGGREGATES: [
{
consts.CONFIG_TYPE: "count",
consts.CONFIG_SOURCE_COLUMN: "year_of_calendar",
consts.CONFIG_TARGET_COLUMN: "year_of_calendar",
consts.CONFIG_FIELD_ALIAS: "count",
},
],
consts.CONFIG_FORMAT: "table",
consts.CONFIG_FILTERS: [
{
consts.CONFIG_TYPE: consts.FILTER_TYPE_EQUALS,
consts.CONFIG_FILTER_SOURCE_COLUMN: "year_of_calendar",
consts.CONFIG_FILTER_SOURCE_VALUE: 2010,
consts.CONFIG_FILTER_TARGET_COLUMN: "year_of_calendar",
consts.CONFIG_FILTER_TARGET_VALUE: 2010,
},
],
}
def test_count_validator():
validator = data_validation.DataValidation(TERADATA_CONFIG, verbose=True)
df = validator.execute()
assert int(df["source_agg_value"][0]) > 0
assert df["source_agg_value"][0] == df["target_agg_value"][0]
| 225
| 0
| 23
|
b770869444f6fb03f601c5c9ff6f8b90df966db3
| 2,065
|
py
|
Python
|
tf/mnist_test.py
|
skolchin/gbr
|
3ec4b72e0352d36f38f5cd5815b69fac0b7a3e9c
|
[
"MIT"
] | 29
|
2019-10-10T22:51:55.000Z
|
2022-03-09T05:57:59.000Z
|
tf/mnist_test.py
|
skolchin/gbr
|
3ec4b72e0352d36f38f5cd5815b69fac0b7a3e9c
|
[
"MIT"
] | 1
|
2020-12-07T06:51:50.000Z
|
2020-12-08T16:59:20.000Z
|
tf/mnist_test.py
|
skolchin/gbr
|
3ec4b72e0352d36f38f5cd5815b69fac0b7a3e9c
|
[
"MIT"
] | 11
|
2020-12-09T01:44:38.000Z
|
2022-03-20T17:40:02.000Z
|
#-------------------------------------------------------------------------------
# Name: MNIST TensorFlow example
# Purpose: Experiments with TensorFlow
#
# Author: kol
#
# Created: 09.01.2020
# Copyright: (c) kol 2020
#-------------------------------------------------------------------------------
import tensorflow as tf
import matplotlib.pyplot as plt
from pathlib import Path
import numpy as np
from random import randrange
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
if Path('./mnist.m').exists():
print("Loading pre-trained model")
model = tf.keras.models.load_model('mnist.m')
else:
print("Training the model")
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
model.save('./mnist.m')
model.evaluate(x_test, y_test, verbose=2)
predictions = model.predict(x_test)
max_count = 10
num_rows = 5
fig = plt.figure(figsize=(8,4))
for i in range(max_count):
n = randrange(0, predictions.shape[0]-1)
    fig.add_subplot(num_rows, max_count // num_rows, i+1)
plot_image(predictions[n], y_test[n], x_test[n])
if i >= max_count-1:
break
plt.tight_layout()
plt.show()
| 27.533333
| 80
| 0.582567
|
#-------------------------------------------------------------------------------
# Name: MNIST TensorFlow example
# Purpose: Experiments with TensorFlow
#
# Author: kol
#
# Created: 09.01.2020
# Copyright: (c) kol 2020
#-------------------------------------------------------------------------------
import tensorflow as tf
import matplotlib.pyplot as plt
from pathlib import Path
import numpy as np
from random import randrange
def plot_image(predictions, true_label, img):
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(predicted_label,
100*np.max(predictions),
true_label),
color = color)
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
if Path('./mnist.m').exists():
print("Loading pre-trained model")
model = tf.keras.models.load_model('mnist.m')
else:
print("Training the model")
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
model.save('./mnist.m')
model.evaluate(x_test, y_test, verbose=2)
predictions = model.predict(x_test)
max_count = 10
num_rows = 5
fig = plt.figure(figsize=(8,4))
for i in range(max_count):
n = randrange(0, predictions.shape[0]-1)
    fig.add_subplot(num_rows, max_count // num_rows, i+1)
plot_image(predictions[n], y_test[n], x_test[n])
if i >= max_count-1:
break
plt.tight_layout()
plt.show()
| 446
| 0
| 23
|
e40e9d7f20ea238e67e6d5c2573d672e7aa05b32
| 3,321
|
py
|
Python
|
ivy_mech_tests/test_orientation/orientation_data.py
|
unifyai/mech
|
d678c8732ee5aba4a92fb37b96519cd06553c0c6
|
[
"Apache-2.0"
] | 1
|
2021-10-11T17:58:41.000Z
|
2021-10-11T17:58:41.000Z
|
ivy_mech_tests/test_orientation/orientation_data.py
|
ivy-dl/mech
|
f3ce5f7b38fe2c453a066f58019ff84dcee517a6
|
[
"Apache-2.0"
] | null | null | null |
ivy_mech_tests/test_orientation/orientation_data.py
|
ivy-dl/mech
|
f3ce5f7b38fe2c453a066f58019ff84dcee517a6
|
[
"Apache-2.0"
] | null | null | null |
"""
test data for orientation functions
"""
# global
import numpy as np
| 40.5
| 109
| 0.563686
|
"""
test data for orientation functions
"""
# global
import numpy as np
class OrientationTestData:
def __init__(self):
# axis
axis = np.array([[1., 2., 3.]])
self.axis = axis / np.linalg.norm(axis)
self.batched_axis = np.expand_dims(self.axis, 0)
# angle
self.angle = np.array([[np.pi / 3]])
self.batched_angle = np.expand_dims(self.angle, 0)
# rotation vector
self.rotation_vector = self.axis * self.angle
self.batched_rotation_vector = np.expand_dims(self.rotation_vector, 0)
# axis angle
self.axis_angle = np.concatenate((self.axis, self.angle), -1)
self.batched_axis_angle = np.expand_dims(self.axis_angle, 0)
# polar axis angle
theta = np.arccos(self.axis[:, 2:3])
phi = np.arctan2(self.axis[:, 1:2], self.axis[:, 0:1])
self.polar_axis_angle = np.concatenate((theta, phi, self.angle), -1)
self.batched_polar_axis_angle = np.expand_dims(self.polar_axis_angle, 0)
# quaternion
n = np.cos(self.angle / 2)
e1 = np.sin(self.angle / 2) * self.axis[:, 0:1]
e2 = np.sin(self.angle / 2) * self.axis[:, 1:2]
e3 = np.sin(self.angle / 2) * self.axis[:, 2:3]
self.quaternion = np.concatenate((e1, e2, e3, n), -1)
self.batched_quaternion = np.expand_dims(self.quaternion, 0)
# rotation matrix
a = np.expand_dims(self.quaternion[:, 3:4], -1)
b = np.expand_dims(self.quaternion[:, 0:1], -1)
c = np.expand_dims(self.quaternion[:, 1:2], -1)
d = np.expand_dims(self.quaternion[:, 2:3], -1)
top_left = a ** 2 + b ** 2 - c ** 2 - d ** 2
top_middle = 2 * b * c - 2 * a * d
top_right = 2 * b * d + 2 * a * c
middle_left = 2 * b * c + 2 * a * d
middle_middle = a ** 2 - b ** 2 + c ** 2 - d ** 2
middle_right = 2 * c * d - 2 * a * b
bottom_left = 2 * b * d - 2 * a * c
bottom_middle = 2 * c * d + 2 * a * b
bottom_right = a ** 2 - b ** 2 - c ** 2 + d ** 2
top_row = np.concatenate((top_left, top_middle, top_right), -1)
middle_row = np.concatenate((middle_left, middle_middle, middle_right), -1)
bottom_row = np.concatenate((bottom_left, bottom_middle, bottom_right), -1)
self.rotation_matrix = np.concatenate((top_row, middle_row, bottom_row), -2)
self.batched_rotation_matrix = np.expand_dims(self.rotation_matrix, 0)
# euler
x_angle = np.arctan2(self.rotation_matrix[:, 1:2, 2:3], self.rotation_matrix[:, 2:3, 2:3])
c2 = np.sqrt(self.rotation_matrix[:, 0:1, 0:1] ** 2 + self.rotation_matrix[:, 0:1, 1:2] ** 2)
y_angle = np.arctan2(-self.rotation_matrix[:, 0:1, 2:3], c2)
s1 = np.sin(x_angle)
c1 = np.cos(x_angle)
z_angle = np.arctan2(s1 * self.rotation_matrix[:, 2:3, 0:1] - c1 *
self.rotation_matrix[:, 1:2, 0:1],
c1 * self.rotation_matrix[:, 1:2, 1:2] - s1 * self.rotation_matrix[:, 2:3, 1:2])
x_angle = -x_angle[:, :, 0]
y_angle = -y_angle[:, :, 0]
z_angle = -z_angle[:, :, 0]
self.euler_angles = np.concatenate((z_angle, y_angle, x_angle), 1)
self.batched_euler_angles = np.expand_dims(self.euler_angles, 0)
| 3,192
| 5
| 50
|
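# Illustrative sketch (assumptions noted in the comments; not taken from the record above).
# OrientationTestData builds a quaternion from an axis-angle pair as
# q = (sin(theta/2) * axis, cos(theta/2)). A quick numpy check that this construction
# yields a unit quaternion for the same axis and angle used in the record:
import numpy as np

axis = np.array([1.0, 2.0, 3.0])
axis = axis / np.linalg.norm(axis)
angle = np.pi / 3

quaternion = np.concatenate([np.sin(angle / 2) * axis, [np.cos(angle / 2)]])
assert np.isclose(np.linalg.norm(quaternion), 1.0)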
4ac0c80fe1d17851b9d6ffc3e6a8d25148170948
| 137
|
py
|
Python
|
titan/api_pkg/formtype/resources.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
titan/api_pkg/formtype/resources.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
titan/api_pkg/formtype/resources.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from moonleap import Resource
@dataclass
| 13.7
| 33
| 0.766423
|
from dataclasses import dataclass
from moonleap import Resource
@dataclass
class FormType(Resource):
name: str
type_name: str
| 0
| 37
| 22
|
3374968f5ffa11f472f968bd304b1ae949911373
| 1,805
|
py
|
Python
|
drf_integrations/utils.py
|
yoyowallet/drf-integrations-framework
|
7cf5cd28e5aff80c9b1a34b461294f4bd3108fa9
|
[
"MIT"
] | 1
|
2020-07-09T11:39:19.000Z
|
2020-07-09T11:39:19.000Z
|
drf_integrations/utils.py
|
yoyowallet/drf-integrations-framework
|
7cf5cd28e5aff80c9b1a34b461294f4bd3108fa9
|
[
"MIT"
] | 5
|
2020-07-08T11:00:26.000Z
|
2021-01-13T09:33:09.000Z
|
drf_integrations/utils.py
|
yoyowallet/drf-integrations-framework
|
7cf5cd28e5aff80c9b1a34b461294f4bd3108fa9
|
[
"MIT"
] | 2
|
2021-08-12T12:23:54.000Z
|
2021-09-20T06:45:38.000Z
|
from typing import Any, Iterable, Iterator, List, Optional, Union
AnyString = Union[str, Iterable[Any]]
def split_string(string: Optional[AnyString], separator: str = ",") -> List[str]:
"""
Breaks given *string* by the specified *separator*.
If *string* is a non-``str`` iterable, then return a list if it is not already.
>>> split_string('A, B, C') # Str
['A', 'B', 'C']
>>> split_string(['A', 'B', 'C']) # List, a non-str iterable
['A', 'B', 'C']
>>> split_string(('A', 'B', 'C')) # Tuple, a non-str iterable
['A', 'B', 'C']
"""
return list(iter_split_string(string=string, separator=separator))
def iter_split_string(string: Optional[AnyString], separator: str = ",") -> Iterator[str]:
"""Generator version of :func:`split_string`."""
if string is None:
return
elif isinstance(string, str):
parts = str(string).split(separator)
for part in parts:
part = part.strip()
if part:
yield part
elif isinstance(string, Iterable):
# NOTE: Text is also an Iterable, so this should always be after the Text check.
for part in string:
part = str(part).strip()
if part:
yield part
else:
raise TypeError("Cannot split string of {!r}".format(type(string)))
def is_instance_of_all(obj, classes: Iterable[type]) -> bool:
"""
Returns ``True`` if the ``obj`` argument is an instance of all of the
classes in the ``classes`` argument.
:raises TypeError: If any element of classes is not a type.
"""
if any(not isinstance(classinfo, type) for classinfo in classes):
raise TypeError("classes must contain types")
return all(isinstance(obj, classinfo) for classinfo in classes)
| 31.12069
| 90
| 0.60831
|
from typing import Any, Iterable, Iterator, List, Optional, Union
AnyString = Union[str, Iterable[Any]]
def split_string(string: Optional[AnyString], separator: str = ",") -> List[str]:
"""
Breaks given *string* by the specified *separator*.
If *string* is a non-``str`` iterable, then return a list if it is not already.
>>> split_string('A, B, C') # Str
['A', 'B', 'C']
>>> split_string(['A', 'B', 'C']) # List, a non-str iterable
['A', 'B', 'C']
>>> split_string(('A', 'B', 'C')) # Tuple, a non-str iterable
['A', 'B', 'C']
"""
return list(iter_split_string(string=string, separator=separator))
def iter_split_string(string: Optional[AnyString], separator: str = ",") -> Iterator[str]:
"""Generator version of :func:`split_string`."""
if string is None:
return
elif isinstance(string, str):
parts = str(string).split(separator)
for part in parts:
part = part.strip()
if part:
yield part
elif isinstance(string, Iterable):
# NOTE: Text is also an Iterable, so this should always be after the Text check.
for part in string:
part = str(part).strip()
if part:
yield part
else:
raise TypeError("Cannot split string of {!r}".format(type(string)))
def is_instance_of_all(obj, classes: Iterable[type]) -> bool:
"""
Returns ``True`` if the ``obj`` argument is an instance of all of the
classes in the ``classes`` argument.
:raises TypeError: If any element of classes is not a type.
"""
if any(not isinstance(classinfo, type) for classinfo in classes):
raise TypeError("classes must contain types")
return all(isinstance(obj, classinfo) for classinfo in classes)
| 0
| 0
| 0
|
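# Illustrative sketch (assumptions noted in the comments; not taken from the record above).
# is_instance_of_all() from drf_integrations/utils.py returns True only when the object is
# an instance of every class supplied. Tiny illustration; the calls are commented because
# they assume the function is importable from that module.
class Readable: pass
class Writable: pass
class File(Readable, Writable): pass

# is_instance_of_all(File(), [Readable, Writable])    # -> True
# is_instance_of_all(object(), [Readable, Writable])  # -> False
# is_instance_of_all(File(), ["not-a-type"])          # raises TypeError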
861242ea84f9b5b122ec9897c57092bfc93096a8
| 22,227
|
py
|
Python
|
pymtl/tools/translation/verilator_cffi.py
|
belang/pymtl
|
4a96738724b007cbd684753aed0ac3de5b5dbebb
|
[
"BSD-3-Clause"
] | 206
|
2015-01-05T21:53:56.000Z
|
2022-03-14T08:04:49.000Z
|
pymtl/tools/translation/verilator_cffi.py
|
belang/pymtl
|
4a96738724b007cbd684753aed0ac3de5b5dbebb
|
[
"BSD-3-Clause"
] | 84
|
2015-01-25T19:57:33.000Z
|
2021-05-11T15:46:56.000Z
|
pymtl/tools/translation/verilator_cffi.py
|
belang/pymtl
|
4a96738724b007cbd684753aed0ac3de5b5dbebb
|
[
"BSD-3-Clause"
] | 99
|
2015-02-17T17:43:44.000Z
|
2022-02-14T17:58:18.000Z
|
#=======================================================================
# verilator_cffi.py
#=======================================================================
from __future__ import print_function
import os
import shutil
import verilog_structural
from ...tools.simulation.vcd import get_vcd_timescale
from subprocess import check_output, STDOUT, CalledProcessError
from ...model.signals import InPort, OutPort
from ...model.PortBundle import PortBundle
from exceptions import VerilatorCompileError
#-----------------------------------------------------------------------
# verilog_to_pymtl
#-----------------------------------------------------------------------
# Create a PyMTL compatible interface for Verilog HDL.
#-----------------------------------------------------------------------
# verilate_model
#-----------------------------------------------------------------------
# Convert Verilog HDL into a C++ simulator using Verilator.
# http://www.veripool.org/wiki/verilator
#-----------------------------------------------------------------------
# create_c_wrapper
#-----------------------------------------------------------------------
# Generate a C wrapper file for Verilated C++.
#-----------------------------------------------------------------------
# create_shared_lib
#-----------------------------------------------------------------------
# Compile the cpp wrapper into a shared library.
#
# Verilator suggests:
#
# For best performance, run Verilator with the "-O3 --x-assign=fast
# --noassert" flags. The -O3 flag will require longer compile times, and
# --x-assign=fast may increase the risk of reset bugs in trade for
# performance; see the above documentation for these flags.
#
# Minor Verilog code changes can also give big wins. You should not have
# any UNOPTFLAT warnings from Verilator. Fixing these warnings can
# result in huge improvements; one user fixed their one UNOPTFLAT
# warning by making a simple change to a clock latch used to gate clocks
# and gained a 60% performance improvement.
#
# Beyond that, the performance of a Verilated model depends mostly on
# your C++ compiler and size of your CPU's caches.
#
# By default, the lib/verilated.mk file has optimization
# turned off. This is for the benefit of new users, as it improves
# compile times at the cost of runtimes. To add optimization as the
# default, set one of three variables, OPT, OPT_FAST, or OPT_SLOW
# lib/verilated.mk. Or, use the -CFLAGS and/or -LDFLAGS option on the
# verilator command line to pass the flags directly to the compiler or
# linker. Or, just for one run, pass them on the command line to make:
#
# make OPT_FAST="-O2" -f Vour.mk Vour__ALL.a
# OPT_FAST specifies optimizations for those programs that are part of
# the fast path, mostly code that is executed every cycle. OPT_SLOW
# specifies optimizations for slow-path files (plus tracing), which
# execute only rarely, yet take a long time to compile with optimization
# on. OPT specifies overall optimization and affects all compiles,
# including those OPT_FAST and OPT_SLOW affect. For best results, use
# OPT="-O2", and link with "-static". Nearly the same results can be had
# with much better compile times with OPT_FAST="-O1 -fstrict-aliasing".
# Higher optimization such as "-O3" may help, but gcc compile times may
# be excessive under O3 on even medium sized designs. Alternatively,
# some larger designs report better performance using "-Os".
#
# http://www.veripool.org/projects/verilator/wiki/Manual-verilator
# I have added a new feature which compiles all of the standard Verilator
# code into a static library and then simply links this in. This reduces
# compile times.
#-----------------------------------------------------------------------
# create_verilator_py_wrapper
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# get_indices
#-----------------------------------------------------------------------
# Utility function for determining assignment of wide ports
#-----------------------------------------------------------------------
# set_input_stmt
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# set_output_stmt
#-----------------------------------------------------------------------
# TODO: no way to distinguish between combinational and sequential
# outputs, so we set outputs both ways...
# This seems broken, but I can't think of a better way.
#-----------------------------------------------------------------------
# verilator_mangle
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# pymtl_wrapper_from_ports
#-----------------------------------------------------------------------
| 33.728376
| 88
| 0.587394
|
#=======================================================================
# verilator_cffi.py
#=======================================================================
from __future__ import print_function
import os
import shutil
import verilog_structural
from ...tools.simulation.vcd import get_vcd_timescale
from subprocess import check_output, STDOUT, CalledProcessError
from ...model.signals import InPort, OutPort
from ...model.PortBundle import PortBundle
from exceptions import VerilatorCompileError
#-----------------------------------------------------------------------
# verilog_to_pymtl
#-----------------------------------------------------------------------
# Create a PyMTL compatible interface for Verilog HDL.
def verilog_to_pymtl( model, verilog_file, c_wrapper_file,
lib_file, py_wrapper_file, vcd_en, lint, verilator_xinit ):
model_name = model.class_name
try:
vlinetrace = model.vlinetrace
except AttributeError:
vlinetrace = False
# Verilate the model # TODO: clean this up
verilate_model( verilog_file, model_name, vcd_en, lint )
# Add names to ports of module
for port in model.get_ports():
port.verilog_name = verilog_structural.mangle_name( port.name )
port.verilator_name = verilator_mangle( port.verilog_name )
# Create C++ Wrapper
cdefs = create_c_wrapper( model, c_wrapper_file, vcd_en, vlinetrace, verilator_xinit )
# Create Shared C Library
create_shared_lib( model_name, c_wrapper_file, lib_file,
vcd_en, vlinetrace )
# Create PyMTL wrapper for CFFI interface to Verilated model
create_verilator_py_wrapper( model, py_wrapper_file, lib_file,
cdefs, vlinetrace )
#-----------------------------------------------------------------------
# verilate_model
#-----------------------------------------------------------------------
# Convert Verilog HDL into a C++ simulator using Verilator.
# http://www.veripool.org/wiki/verilator
def verilate_model( filename, model_name, vcd_en, lint ):
# verilator commandline template
compile_cmd = ( 'verilator -cc {source} -top-module {model_name} '
'--Mdir {obj_dir} -O3 {flags}' )
# verilator commandline options
source = filename
obj_dir = 'obj_dir_' + model_name
flags = ' '.join([
'-Wno-lint' if not lint else '',
'-Wno-UNOPTFLAT',
'--unroll-count 1000000',
'--unroll-stmts 1000000',
'--assert',
'--trace' if vcd_en else '',
])
  # remove the obj_dir because of staleness issues
if os.path.exists( obj_dir ):
shutil.rmtree( obj_dir )
# create the verilator compile command
compile_cmd = compile_cmd.format( **vars() )
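  # Illustrative only: for a hypothetical model "Adder" verilated with lint
  # warnings and VCD tracing disabled, the resulting command looks like:
  #   verilator -cc Adder.v -top-module Adder --Mdir obj_dir_Adder -O3 \
  #     -Wno-lint -Wno-UNOPTFLAT --unroll-count 1000000 --unroll-stmts 1000000 --assert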
# try compilation
try:
# print( compile_cmd )
result = check_output( compile_cmd, stderr=STDOUT, shell=True )
# print( result )
# handle verilator failure
except CalledProcessError as e:
# error_msg = """
# Module did not Verilate!
#
# Command:
# {command}
#
# Error:
# {error}
# """
# We remove the final "Error: Command Failed" line to make the output
# more succinct.
split_output = e.output.splitlines()
error = '\n'.join(split_output[:-1])
if not split_output[-1].startswith("%Error: Command Failed"):
error += "\n"+split_output[-1]
error_msg = """
See "Errors and Warnings" section in the manual located here
http://www.veripool.org/projects/verilator/wiki/Manual-verilator
for more details on various Verilator warnings and error messages.
{error}"""
raise VerilatorCompileError( error_msg.format(
command = e.cmd,
error = error
))
#-----------------------------------------------------------------------
# create_c_wrapper
#-----------------------------------------------------------------------
# Generate a C wrapper file for Verilated C++.
def create_c_wrapper( model, c_wrapper_file, vcd_en, vlinetrace, verilator_xinit ):
template_dir = os.path.dirname( os.path.abspath( __file__ ) )
template_filename = template_dir + os.path.sep + 'verilator_wrapper.templ.c'
ports = model.get_ports()
# Utility function for creating port declarations
def port_to_decl( port ):
code = '{data_type} * {verilator_name};'
verilator_name = port.verilator_name
bitwidth = port.nbits
if bitwidth <= 8: data_type = 'unsigned char'
elif bitwidth <= 16: data_type = 'unsigned short'
elif bitwidth <= 32: data_type = 'unsigned int'
elif bitwidth <= 64: data_type = 'unsigned long'
else: data_type = 'unsigned int'
return code.format( **locals() )
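  # e.g. (illustrative) a hypothetical 16-bit port whose verilator_name is
  # "in__024msg" would be declared as:
  #   unsigned short * in__024msg;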
# Utility function for creating port initializations
def port_to_init( port ):
code = 'm->{verilator_name} = {dereference}model->{verilator_name};'
verilator_name = port.verilator_name
bitwidth = port.nbits
dereference = '&' if bitwidth <= 64 else ''
return code.format( **locals() )
# Create port declaration, initialization, and extern statements
indent_zero = '\n'
indent_two = '\n '
indent_four = '\n '
indent_six = '\n '
port_externs = indent_two .join( [ port_to_decl( x ) for x in ports ] )
port_decls = indent_zero.join( [ port_to_decl( x ) for x in ports ] )
port_inits = indent_two .join( [ port_to_init( x ) for x in ports ] )
# Convert verilator_xinit to number
if ( verilator_xinit == "zeros" ) : verilator_xinit_num = 0
elif ( verilator_xinit == "ones" ) : verilator_xinit_num = 1
elif ( verilator_xinit == "rand" ) : verilator_xinit_num = 2
  else : raise ValueError( "invalid verilator_xinit value: " + verilator_xinit )
# Generate the source code using the template
with open( template_filename , 'r' ) as template, \
open( c_wrapper_file, 'w' ) as output:
c_src = template.read()
c_src = c_src.format( model_name = model.class_name,
port_externs = port_externs,
port_decls = port_decls,
port_inits = port_inits,
# What was this for? -cbatten
# vcd_prefix = vcd_file[:-4],
vcd_timescale = get_vcd_timescale( model ),
dump_vcd = '1' if vcd_en else '0',
vlinetrace = '1' if vlinetrace else '0',
verilator_xinit_num = verilator_xinit_num,
)
output.write( c_src )
return port_decls.replace( indent_zero, indent_six )
#-----------------------------------------------------------------------
# create_shared_lib
#-----------------------------------------------------------------------
# Compile the cpp wrapper into a shared library.
#
# Verilator suggests:
#
# For best performance, run Verilator with the "-O3 --x-assign=fast
# --noassert" flags. The -O3 flag will require longer compile times, and
# --x-assign=fast may increase the risk of reset bugs in trade for
# performance; see the above documentation for these flags.
#
# Minor Verilog code changes can also give big wins. You should not have
# any UNOPTFLAT warnings from Verilator. Fixing these warnings can
# result in huge improvements; one user fixed their one UNOPTFLAT
# warning by making a simple change to a clock latch used to gate clocks
# and gained a 60% performance improvement.
#
# Beyond that, the performance of a Verilated model depends mostly on
# your C++ compiler and size of your CPU's caches.
#
# By default, the lib/verilated.mk file has optimization
# turned off. This is for the benefit of new users, as it improves
# compile times at the cost of runtimes. To add optimization as the
# default, set one of three variables, OPT, OPT_FAST, or OPT_SLOW
# in lib/verilated.mk. Or, use the -CFLAGS and/or -LDFLAGS option on the
# verilator command line to pass the flags directly to the compiler or
# linker. Or, just for one run, pass them on the command line to make:
#
# make OPT_FAST="-O2" -f Vour.mk Vour__ALL.a
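#
# An illustrative equivalent at verilation time (example flag values only):
#
#    verilator -cc our.v -CFLAGS "-O1 -fstrict-aliasing" -LDFLAGS "-static"
#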
# OPT_FAST specifies optimizations for those programs that are part of
# the fast path, mostly code that is executed every cycle. OPT_SLOW
# specifies optimizations for slow-path files (plus tracing), which
# execute only rarely, yet take a long time to compile with optimization
# on. OPT specifies overall optimization and affects all compiles,
# including those OPT_FAST and OPT_SLOW affect. For best results, use
# OPT="-O2", and link with "-static". Nearly the same results can be had
# with much better compile times with OPT_FAST="-O1 -fstrict-aliasing".
# Higher optimization such as "-O3" may help, but gcc compile times may
# be excessive under O3 on even medium sized designs. Alternatively,
# some larger designs report better performance using "-Os".
#
# http://www.veripool.org/projects/verilator/wiki/Manual-verilator
# I have added a new feature which compiles all of the standard Verilator
# code into a static library and then simply links this in. This reduces
# compile times.
def try_cmd( name, cmd ):
# print( "cmd: ", cmd )
try:
result = check_output( cmd.split() , stderr=STDOUT )
# handle gcc/llvm failure
except CalledProcessError as e:
error_msg = """
{name} error!
Command:
{command}
Error:
{error}
"""
raise Exception( error_msg.format(
name = name,
command = ' '.join( e.cmd ),
error = e.output
))
def compile( flags, include_dirs, output_file, input_files ):
compile_cmd = 'g++ {flags} {idirs} -o {ofile} {ifiles}'
compile_cmd = compile_cmd.format(
flags = flags,
idirs = ' '.join( [ '-I'+s for s in include_dirs ] ),
ofile = output_file,
ifiles = ' '.join( input_files ),
)
try_cmd( "Compilation", compile_cmd )
def make_lib( output_file, input_files ):
# First run ar command
ar_cmd = 'ar rcv {ofile} {ifiles}'
ar_cmd = ar_cmd.format(
ofile = output_file,
ifiles = ' '.join( input_files ),
)
try_cmd( "Make library", ar_cmd )
# Then run ranlib command
ranlib_cmd = 'ranlib {lib}'
ranlib_cmd = ranlib_cmd.format(
lib = output_file,
)
try_cmd( "Make library", ranlib_cmd )
def create_shared_lib( model_name, c_wrapper_file, lib_file,
vcd_en, vlinetrace ):
# We need to find out where the verilator include directories are
# globally installed. We first check the PYMTL_VERILATOR_INCLUDE_DIR
# environment variable, and if that does not exist then we fall back on
# using pkg-config.
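  # e.g. (illustrative) either of the following makes the headers findable:
  #   export PYMTL_VERILATOR_INCLUDE_DIR=/usr/local/share/verilator/include
  #   pkg-config --variable=includedir verilator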
verilator_include_dir = os.environ.get('PYMTL_VERILATOR_INCLUDE_DIR')
if verilator_include_dir is None:
cmd = ['pkg-config', '--variable=includedir', 'verilator']
try:
verilator_include_dir = check_output( cmd, stderr=STDOUT ).strip()
except OSError as e:
error_msg = """
Error trying to find verilator include directories. The
PYMTL_VERILATOR_INCLUDE_DIR environment variable was not set,
so we attempted to use pkg-config to find where verilator was
installed, but it looks like we had trouble finding or executing
pkg-config itself. Try running the following command on your own
to debug the issue.
Command:
{command}
Error:
[Errno {errno}] {strerror}
"""
raise VerilatorCompileError( error_msg.format(
command = ' '.join( cmd ),
errno = e.errno,
strerror = e.strerror,
))
except CalledProcessError as e:
error_msg = """
Error trying to find verilator include directories. The
PYMTL_VERILATOR_INCLUDE_DIR environment variable was not set,
so we attempted to use pkg-config to find where verilator was
installed, but it looks like pkg-config had trouble finding
the verilator.pc file installed by verilator. Is a recent
version of verilator installed? Older versions of verilator
did not have pkg-config support. Try running the following
command on your own to debug the issue.
Command:
{command}
Error:
{error}
"""
raise VerilatorCompileError( error_msg.format(
command = ' '.join( e.cmd ),
error = e.output,
))
include_dirs = [
verilator_include_dir,
verilator_include_dir+"/vltstd",
]
# Compile standard Verilator code if libverilator.a does not exist.
# Originally, I was also including verilated_dpi.cpp in this library,
# but for some reason that screws up line tracing. Somehow there is
# some kind of global state or something that is shared across the
# shared libraries or something. I was able to fix it by recompiling
# verilated_dpi if linetracing is enabled. Actually, the line tracing
  # doesn't work if you use this approach, so we are back
  # to always recompiling everything every time for now.
# if not os.path.exists( "libverilator.a" ):
#
# compile(
# flags = "-O3 -c",
# include_dirs = include_dirs,
# output_file = "verilator.o",
# input_files = [ verilator_include_dir+"/verilated.cpp" ]
# )
#
# compile(
# flags = "-O3 -c",
# include_dirs = include_dirs,
# output_file = "verilator_vcd_c.o",
# input_files = [ verilator_include_dir+"/verilated_vcd_c.cpp" ]
# )
#
# # compile(
# # flags = "-O3 -c",
# # include_dirs = include_dirs,
# # output_file = "verilator_dpi.o",
# # input_files = [ verilator_include_dir+"/verilated_dpi.cpp" ]
# # )
#
# make_lib(
# output_file = "libverilator.a",
# # input_files = [ "verilator.o", "verilator_vcd_c.o", "verilator_dpi.o" ]
# input_files = [ "verilator.o", "verilator_vcd_c.o" ]
# )
obj_dir_prefix = "obj_dir_{m}/V{m}".format( m=model_name )
# We need to find a list of all the generated classes. We look in the
# Verilator makefile for that.
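  # The fragment we are looking for looks roughly like this (illustrative):
  #   VM_CLASSES_FAST += \
  #       VAdder \
  # i.e. one generated class per continuation line, terminated by a blank line.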
cpp_sources_list = []
with open( obj_dir_prefix+"_classes.mk" ) as mkfile:
found = False
for line in mkfile:
if line.startswith("VM_CLASSES_FAST += "):
found = True
elif found:
if line.strip() == "":
found = False
else:
filename = line.strip()[:-2]
cpp_file = "obj_dir_{m}/{f}.cpp".format( m=model_name, f=filename )
cpp_sources_list.append( cpp_file )
# Compile this module
cpp_sources_list += [
obj_dir_prefix+"__Syms.cpp",
verilator_include_dir+"/verilated.cpp",
verilator_include_dir+"/verilated_dpi.cpp",
c_wrapper_file,
]
if vcd_en:
cpp_sources_list += [
verilator_include_dir+"/verilated_vcd_c.cpp",
obj_dir_prefix+"__Trace.cpp",
obj_dir_prefix+"__Trace__Slow.cpp",
]
compile(
# flags = "-O1 -fstrict-aliasing -fPIC -shared -L. -lverilator",
flags = "-O0 -fPIC -shared",
include_dirs = include_dirs,
output_file = lib_file,
input_files = cpp_sources_list,
)
#-----------------------------------------------------------------------
# create_verilator_py_wrapper
#-----------------------------------------------------------------------
def create_verilator_py_wrapper( model, wrapper_filename, lib_file,
cdefs, vlinetrace ):
template_dir = os.path.dirname( os.path.abspath( __file__ ) )
template_filename = template_dir + os.path.sep + 'verilator_wrapper.templ.py'
port_defs = []
set_inputs = []
set_comb = []
set_next = []
from cpp_helpers import recurse_port_hierarchy
for x in model.get_ports( preserve_hierarchy=True ):
recurse_port_hierarchy( x, port_defs )
for port in model.get_inports():
if port.name == 'clk': continue
input_ = set_input_stmt( port )
set_inputs.extend( input_ )
for port in model.get_outports():
comb, next_ = set_output_stmt( port )
set_comb.extend( comb )
set_next.extend( next_ )
# pretty printing
indent_four = '\n '
indent_six = '\n '
# create source
with open( template_filename , 'r' ) as template, \
open( wrapper_filename, 'w' ) as output:
py_src = template.read()
py_src = py_src.format(
model_name = model.class_name,
port_decls = cdefs,
lib_file = lib_file,
port_defs = indent_four.join( port_defs ),
set_inputs = indent_six .join( set_inputs ),
set_comb = indent_six .join( set_comb ),
set_next = indent_six .join( set_next ),
vlinetrace = '1' if vlinetrace else '0',
)
#py_src += 'XTraceEverOn()' # TODO: add for tracing?
output.write( py_src )
#print( py_src )
#-----------------------------------------------------------------------
# get_indices
#-----------------------------------------------------------------------
# Utility function for determining assignment of wide ports
def get_indices( port ):
num_assigns = 1 if port.nbits <= 64 else (port.nbits-1)/32 + 1
if num_assigns == 1:
return [(0, "")]
return [ ( i, '[{}:{}]'.format( i*32, min( i*32+32, port.nbits) ) )
for i in range(num_assigns) ]
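# e.g. (illustrative) a 96-bit port is split into three 32-bit assignments:
#   get_indices( port ) == [ (0, '[0:32]'), (1, '[32:64]'), (2, '[64:96]') ]
# while ports of 64 bits or fewer yield the single assignment [ (0, '') ].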
#-----------------------------------------------------------------------
# set_input_stmt
#-----------------------------------------------------------------------
def set_input_stmt( port ):
inputs = []
for idx, offset in get_indices( port ):
inputs.append( 's._m.{v_name}[{idx}] = s.{py_name}{offset}' \
.format( v_name = port.verilator_name,
py_name = port.name,
idx = idx,
offset = offset )
)
return inputs
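# e.g. (illustrative) a 32-bit input port named "in_" yields the single
# statement 's._m.in_[0] = s.in_', while wider ports additionally get
# '[0:32]', '[32:64]', ... slices on the PyMTL side.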
#-----------------------------------------------------------------------
# set_output_stmt
#-----------------------------------------------------------------------
# TODO: no way to distinguish between combinational and sequential
# outputs, so we set outputs both ways...
# This seems broken, but I can't think of a better way.
def set_output_stmt( port ):
comb, next_ = [], []
for idx, offset in get_indices( port ):
assign = 's.{py_name}{offset}.{sigtype} = s._m.{v_name}[{idx}]' \
.format( v_name = port.verilator_name,
py_name = port.name,
idx = idx,
offset = offset,
sigtype = '{sigtype}' )
comb .append( assign.format( sigtype = 'value' ) )
next_.append( assign.format( sigtype = 'next' ) )
return comb, next_
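# e.g. (illustrative) a 32-bit output port named "out" yields the pair
# 's.out.value = s._m.out[0]' and 's.out.next = s._m.out[0]'.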
#-----------------------------------------------------------------------
# verilator_mangle
#-----------------------------------------------------------------------
def verilator_mangle( signal_name ):
return signal_name.replace( '__', '___05F' ).replace( '$', '__024' )
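# e.g. verilator_mangle( 'minion__req$msg' ) returns 'minion___05Freq__024msg'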
#-----------------------------------------------------------------------
# pymtl_wrapper_from_ports
#-----------------------------------------------------------------------
def pymtl_wrapper_from_ports( in_ports, out_ports, model_name, filename_w,
vobj_name, xobj_name, cdefs ):
# Declare the interface ports for the wrapper class.
port_defs = []
for ports, port_type in [( in_ports, 'InPort' ), ( out_ports, 'OutPort' )]:
# Replace all references to _M_ with .
ports = [ ( name.replace('_M_', '.'), nbits ) for name, nbits in ports ]
lists = []
for port_name, bitwidth in ports:
# Port List
if '$' in port_name:
pfx = port_name.split('$')[0]
if pfx not in lists:
lists.append(pfx)
port_defs.append( 's.{} = []'.format( pfx ) )
port_defs.append( 's.{port_list}.append( {port_type}( {bitwidth} ) )' \
.format( port_list = pfx,
port_type = port_type,
bitwidth = bitwidth )
)
else:
port_defs.append( 's.{port_name} = {port_type}( {bitwidth} )' \
.format( port_name = port_name,
port_type = port_type,
bitwidth = bitwidth )
)
# Assigning input ports
set_inputs = []
for v_name, bitwidth in in_ports:
py_name = v_name.replace('_M_', '.')
if '$' in py_name:
name, idx = py_name.split('$')
py_name = '{name}[{idx}]'.format( name = name, idx = int(idx) )
v_name = verilator_mangle( v_name )
set_inputs.append( 's._model.{v_name}[0] = s.{py_name}' \
.format( v_name = v_name, py_name = py_name )
)
# Assigning combinational output ports
set_comb = []
for v_name, bitwidth in out_ports:
py_name = v_name.replace('_M_', '.')
if '$' in py_name:
name, idx = py_name.split('$')
py_name = '{name}[{idx}]'.format( name = name, idx = int(idx) )
v_name = verilator_mangle( v_name )
set_comb.append( 's.{py_name}.value = s._model.{v_name}[0]' \
.format( v_name = v_name, py_name = py_name )
)
# TODO: no way to distinguish between combinational and sequential
# outputs, so we set outputs both ways...
# This seems broken, but I can't think of a better way.
# Assigning sequential output ports
set_next = []
for v_name, bitwidth in out_ports:
py_name = v_name.replace('_M_', '.')
if '$' in py_name:
name, idx = py_name.split('$')
py_name = '{name}[{idx}]'.format( name = name, idx = int(idx) )
v_name = verilator_mangle( v_name )
set_next.append( 's.{py_name}.next = s._model.{v_name}[0]' \
.format( v_name = v_name, py_name = py_name )
)
  template_dir      = os.path.dirname( os.path.abspath( __file__ ) )
  template_filename = template_dir + os.path.sep + 'verilator_wrapper.templ.py'
  with open( template_filename , 'r' ) as template, \
       open( filename_w, 'w' ) as output:
py_src = template.read()
py_src = py_src.format(
model_name = model_name,
port_decls = cdefs,
port_defs = '\n ' .join( port_defs ),
set_inputs = '\n '.join( set_inputs ),
set_comb = '\n '.join( set_comb ),
set_next = '\n '.join( set_next ),
)
# TODO: needed for tracing?
#w += 'XTraceEverOn()'
output.write( py_src )
#print( py_src )
| 16,982
| 0
| 294
|
ecf2ad1014ba580179cdf2f557a5ed584afa8446
| 3,595
|
py
|
Python
|
vmaig_blog/urls.py
|
wangsure/czxs
|
e45e2ffde1597cb5a41d5fb46818805a1fa0d848
|
[
"BSD-3-Clause"
] | null | null | null |
vmaig_blog/urls.py
|
wangsure/czxs
|
e45e2ffde1597cb5a41d5fb46818805a1fa0d848
|
[
"BSD-3-Clause"
] | null | null | null |
vmaig_blog/urls.py
|
wangsure/czxs
|
e45e2ffde1597cb5a41d5fb46818805a1fa0d848
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import include, url
from django.contrib.sitemaps.views import sitemap
from django.contrib.sitemaps import FlatPageSitemap, GenericSitemap, Sitemap
from django.core.urlresolvers import reverse
from django.contrib import admin
from blog.models import Article, News, Category, Column
from demoproject import views
import suit
import xadmin
xadmin.autodiscover()
from xadmin.plugins import xversion
xversion.register_models()
sitemaps = {
'article-is-top': GenericSitemap(
{
'queryset': Article.objects.filter(
status=0, is_top=True
).all(),
'date_field': 'pub_time'
},
priority=1.0,
changefreq='daily'
),
'article-is-not-top': GenericSitemap(
{
'queryset': Article.objects.filter(status=0).all(),
'date_field': 'pub_time'
},
priority=0.8,
changefreq='daily'
),
'news': GenericSitemap(
{
'queryset': News.objects.all(),
            'date_field': 'pub_time'
},
priority=0.6,
changefreq='daily'
),
'category': GenericSitemap(
{
'queryset': Category.objects.all()
},
priority=0.9,
changefreq='daily'
),
'column': GenericSitemap(
{
'queryset': Column.objects.all()
},
priority=0.9,
changefreq='daily'
),
'static': StaticViewSitemap
}
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'', include('blog.urls')),
url(r'', include('vmaig_comments.urls')),
url(r'', include('vmaig_auth.urls')),
url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
name='django.contrib.sitemaps.views.sitemap'),
url(r'^piechart/', views.demo_piechart, name='demo_piechart'),
url(r'^linechart/', views.demo_linechart, name='demo_linechart'),
url(r'^linechart_without_date/', views.demo_linechart_without_date, name='demo_linechart_without_date'),
url(r'^linewithfocuschart/', views.demo_linewithfocuschart, name='demo_linewithfocuschart'),
url(r'^multibarchart/', views.demo_multibarchart, name='demo_multibarchart'),
url(r'^stackedareachart/', views.demo_stackedareachart, name='demo_stackedareachart'),
url(r'^multibarhorizontalchart/', views.demo_multibarhorizontalchart, name='demo_multibarhorizontalchart'),
url(r'^lineplusbarchart/', views.demo_lineplusbarchart, name='demo_lineplusbarchart'),
url(r'^cumulativelinechart/', views.demo_cumulativelinechart, name='demo_cumulativelinechart'),
url(r'^discretebarchart/', views.demo_discretebarchart, name='demo_discretebarchart'),
url(r'^discretebarchart_with_date/', views.demo_discretebarchart_with_date, name='demo_discretebarchart_date'),
url(r'^scatterchart/', views.demo_scatterchart, name='demo_scatterchart'),
url(r'^linechart_with_ampm/', views.demo_linechart_with_ampm, name='demo_linechart_with_ampm'),
url(r'^charthome$', views.home, name='charthome'),
url(r'^xadmin/', include(xadmin.site.urls)),
]
| 37.447917
| 115
| 0.606954
|
from django.conf.urls import include, url
from django.contrib.sitemaps.views import sitemap
from django.contrib.sitemaps import FlatPageSitemap, GenericSitemap, Sitemap
from django.core.urlresolvers import reverse
from django.contrib import admin
from blog.models import Article, News, Category, Column
from demoproject import views
import suit
import xadmin
xadmin.autodiscover()
from xadmin.plugins import xversion
xversion.register_models()
class StaticViewSitemap(Sitemap):
priority = 1.0
changefreq = 'daily'
def items(self):
return ['index-view', 'news-view']
def location(self, item):
return reverse(item)
sitemaps = {
'article-is-top': GenericSitemap(
{
'queryset': Article.objects.filter(
status=0, is_top=True
).all(),
'date_field': 'pub_time'
},
priority=1.0,
changefreq='daily'
),
'article-is-not-top': GenericSitemap(
{
'queryset': Article.objects.filter(status=0).all(),
'date_field': 'pub_time'
},
priority=0.8,
changefreq='daily'
),
'news': GenericSitemap(
{
'queryset': News.objects.all(),
'data_field': 'pub_time'
},
priority=0.6,
changefreq='daily'
),
'category': GenericSitemap(
{
'queryset': Category.objects.all()
},
priority=0.9,
changefreq='daily'
),
'column': GenericSitemap(
{
'queryset': Column.objects.all()
},
priority=0.9,
changefreq='daily'
),
'static': StaticViewSitemap
}
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'', include('blog.urls')),
url(r'', include('vmaig_comments.urls')),
url(r'', include('vmaig_auth.urls')),
url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
name='django.contrib.sitemaps.views.sitemap'),
url(r'^piechart/', views.demo_piechart, name='demo_piechart'),
url(r'^linechart/', views.demo_linechart, name='demo_linechart'),
url(r'^linechart_without_date/', views.demo_linechart_without_date, name='demo_linechart_without_date'),
url(r'^linewithfocuschart/', views.demo_linewithfocuschart, name='demo_linewithfocuschart'),
url(r'^multibarchart/', views.demo_multibarchart, name='demo_multibarchart'),
url(r'^stackedareachart/', views.demo_stackedareachart, name='demo_stackedareachart'),
url(r'^multibarhorizontalchart/', views.demo_multibarhorizontalchart, name='demo_multibarhorizontalchart'),
url(r'^lineplusbarchart/', views.demo_lineplusbarchart, name='demo_lineplusbarchart'),
url(r'^cumulativelinechart/', views.demo_cumulativelinechart, name='demo_cumulativelinechart'),
url(r'^discretebarchart/', views.demo_discretebarchart, name='demo_discretebarchart'),
url(r'^discretebarchart_with_date/', views.demo_discretebarchart_with_date, name='demo_discretebarchart_date'),
url(r'^scatterchart/', views.demo_scatterchart, name='demo_scatterchart'),
url(r'^linechart_with_ampm/', views.demo_linechart_with_ampm, name='demo_linechart_with_ampm'),
url(r'^charthome$', views.home, name='charthome'),
url(r'^xadmin/', include(xadmin.site.urls)),
]
| 71
| 110
| 22
|
63789201d86443209329b9776585583fa421912b
| 2,010
|
py
|
Python
|
luigi/contrib/sparkey.py
|
shouldsee/luigi
|
54a347361ae1031f06105eaf30ff88f5ef65b00c
|
[
"Apache-2.0"
] | 5
|
2015-02-26T18:52:56.000Z
|
2017-07-07T05:47:18.000Z
|
luigi/contrib/sparkey.py
|
shouldsee/luigi
|
54a347361ae1031f06105eaf30ff88f5ef65b00c
|
[
"Apache-2.0"
] | 9
|
2017-03-22T23:38:48.000Z
|
2019-01-28T21:13:06.000Z
|
luigi/contrib/sparkey.py
|
shouldsee/luigi
|
54a347361ae1031f06105eaf30ff88f5ef65b00c
|
[
"Apache-2.0"
] | 9
|
2015-01-26T14:47:57.000Z
|
2020-07-07T17:01:25.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import luigi
class SparkeyExportTask(luigi.Task):
"""
A luigi task that writes to a local sparkey log file.
Subclasses should implement the requires and output methods. The output
must be a luigi.LocalTarget.
The resulting sparkey log file will contain one entry for every line in
the input, mapping from the first value to a tab-separated list of the
rest of the line.
To generate a simple key-value index, yield "key", "value" pairs from the input(s) to this task.
"""
# the separator used to split input lines
separator = '\t'
| 31.40625
| 100
| 0.685075
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import luigi
class SparkeyExportTask(luigi.Task):
"""
A luigi task that writes to a local sparkey log file.
Subclasses should implement the requires and output methods. The output
must be a luigi.LocalTarget.
The resulting sparkey log file will contain one entry for every line in
the input, mapping from the first value to a tab-separated list of the
rest of the line.
To generate a simple key-value index, yield "key", "value" pairs from the input(s) to this task.
"""
# the separator used to split input lines
separator = '\t'
def __init__(self, *args, **kwargs):
super(SparkeyExportTask, self).__init__(*args, **kwargs)
def run(self):
self._write_sparkey_file()
def _write_sparkey_file(self):
import sparkey
infile = self.input()
outfile = self.output()
if not isinstance(outfile, luigi.LocalTarget):
raise TypeError("output must be a LocalTarget")
# write job output to temporary sparkey file
temp_output = luigi.LocalTarget(is_tmp=True)
w = sparkey.LogWriter(temp_output.path)
for line in infile.open('r'):
k, v = line.strip().split(self.separator, 1)
w[k] = v
w.close()
# move finished sparkey file to final destination
temp_output.move(outfile.path)
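# Illustrative subclass sketch (the upstream task and output path below are
# hypothetical, not part of this module):
#
#   class WordIndexExport(SparkeyExportTask):
#       def requires(self):
#           return WordCountTask()                  # assumed upstream task
#       def output(self):
#           return luigi.LocalTarget('word_index.spl')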
| 705
| 0
| 81
|
ff451b4287664bcebaa07ec7328aecbf9cc1c8c2
| 2,639
|
py
|
Python
|
epytope/Data/pssms/smm/mat/A_23_01_11.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/smm/mat/A_23_01_11.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/smm/mat/A_23_01_11.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
A_23_01_11 = {0: {'A': 0.076, 'C': 0.036, 'E': 0.046, 'D': -0.002, 'G': 0.043, 'F': -0.067, 'I': 0.002, 'H': 0.011, 'K': 0.104, 'M': -0.205, 'L': -0.209, 'N': -0.039, 'Q': 0.186, 'P': 0.065, 'S': 0.074, 'R': -0.03, 'T': 0.087, 'W': 0.0, 'V': -0.11, 'Y': -0.068}, 1: {'A': 0.014, 'C': 0.002, 'E': 0.031, 'D': -0.004, 'G': 0.0, 'F': -0.074, 'I': 0.001, 'H': 0.0, 'K': 0.0, 'M': -0.014, 'L': 0.07, 'N': 0.0, 'Q': 0.0, 'P': -0.078, 'S': -0.005, 'R': 0.016, 'T': 0.007, 'W': 0.0, 'V': 0.059, 'Y': -0.025}, 2: {'A': -0.338, 'C': 0.178, 'E': 0.018, 'D': 0.311, 'G': 0.124, 'F': -0.229, 'I': 0.137, 'H': -0.023, 'K': 0.196, 'M': 0.154, 'L': -0.043, 'N': -0.241, 'Q': 0.259, 'P': 0.392, 'S': -0.575, 'R': 0.058, 'T': -0.008, 'W': -0.276, 'V': 0.025, 'Y': -0.12}, 3: {'A': 0.0, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': -0.0, 'I': 0.0, 'H': -0.0, 'K': -0.0, 'M': 0.0, 'L': -0.0, 'N': 0.0, 'Q': -0.0, 'P': -0.0, 'S': 0.0, 'R': -0.0, 'T': -0.0, 'W': -0.0, 'V': 0.0, 'Y': 0.0}, 4: {'A': 0.082, 'C': 0.0, 'E': 0.032, 'D': -0.128, 'G': -0.03, 'F': -0.132, 'I': -0.005, 'H': 0.036, 'K': -0.01, 'M': 0.131, 'L': -0.311, 'N': 0.064, 'Q': 0.008, 'P': 0.114, 'S': 0.029, 'R': -0.046, 'T': 0.06, 'W': 0.0, 'V': 0.106, 'Y': 0.0}, 5: {'A': -0.124, 'C': 0.112, 'E': 0.071, 'D': -0.001, 'G': 0.024, 'F': -0.01, 'I': 0.127, 'H': 0.05, 'K': -0.081, 'M': 0.0, 'L': -0.126, 'N': -0.003, 'Q': 0.065, 'P': 0.128, 'S': -0.208, 'R': 0.061, 'T': -0.106, 'W': 0.107, 'V': -0.134, 'Y': 0.046}, 6: {'A': 0.127, 'C': 0.032, 'E': 0.007, 'D': 0.097, 'G': 0.051, 'F': 0.015, 'I': 0.058, 'H': -0.182, 'K': -0.08, 'M': 0.0, 'L': -0.126, 'N': 0.0, 'Q': -0.066, 'P': 0.081, 'S': -0.056, 'R': -0.078, 'T': 0.082, 'W': 0.0, 'V': 0.088, 'Y': -0.051}, 7: {'A': -0.0, 'C': 0.0, 'E': -0.0, 'D': 0.0, 'G': 0.0, 'F': -0.0, 'I': -0.0, 'H': 0.0, 'K': -0.0, 'M': -0.0, 'L': -0.0, 'N': 0.0, 'Q': 0.0, 'P': -0.0, 'S': 0.0, 'R': 0.0, 'T': -0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': -0.0, 'C': 0.0, 'E': -0.0, 'D': 0.0, 'G': -0.0, 'F': -0.0, 'I': -0.0, 'H': 0.0, 'K': -0.0, 'M': -0.0, 'L': 0.0, 'N': 0.0, 'Q': -0.0, 'P': -0.0, 'S': -0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': -0.0, 'Y': 0.0}, 9: {'A': 0.014, 'C': 0.0, 'E': 0.02, 'D': -0.027, 'G': 0.038, 'F': 0.013, 'I': 0.018, 'H': 0.0, 'K': 0.021, 'M': 0.027, 'L': -0.029, 'N': 0.027, 'Q': -0.022, 'P': -0.062, 'S': 0.003, 'R': -0.035, 'T': 0.004, 'W': 0.0, 'V': 0.014, 'Y': -0.025}, 10: {'A': 0.184, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': -0.852, 'I': -0.072, 'H': 0.0, 'K': 0.274, 'M': -0.026, 'L': -0.286, 'N': 0.0, 'Q': 0.179, 'P': -0.188, 'S': 0.149, 'R': 0.541, 'T': 0.0, 'W': -0.056, 'V': 0.16, 'Y': -0.007}, -1: {'con': 4.16437}}
| 2,639
| 2,639
| 0.360743
|
A_23_01_11 = {0: {'A': 0.076, 'C': 0.036, 'E': 0.046, 'D': -0.002, 'G': 0.043, 'F': -0.067, 'I': 0.002, 'H': 0.011, 'K': 0.104, 'M': -0.205, 'L': -0.209, 'N': -0.039, 'Q': 0.186, 'P': 0.065, 'S': 0.074, 'R': -0.03, 'T': 0.087, 'W': 0.0, 'V': -0.11, 'Y': -0.068}, 1: {'A': 0.014, 'C': 0.002, 'E': 0.031, 'D': -0.004, 'G': 0.0, 'F': -0.074, 'I': 0.001, 'H': 0.0, 'K': 0.0, 'M': -0.014, 'L': 0.07, 'N': 0.0, 'Q': 0.0, 'P': -0.078, 'S': -0.005, 'R': 0.016, 'T': 0.007, 'W': 0.0, 'V': 0.059, 'Y': -0.025}, 2: {'A': -0.338, 'C': 0.178, 'E': 0.018, 'D': 0.311, 'G': 0.124, 'F': -0.229, 'I': 0.137, 'H': -0.023, 'K': 0.196, 'M': 0.154, 'L': -0.043, 'N': -0.241, 'Q': 0.259, 'P': 0.392, 'S': -0.575, 'R': 0.058, 'T': -0.008, 'W': -0.276, 'V': 0.025, 'Y': -0.12}, 3: {'A': 0.0, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': -0.0, 'I': 0.0, 'H': -0.0, 'K': -0.0, 'M': 0.0, 'L': -0.0, 'N': 0.0, 'Q': -0.0, 'P': -0.0, 'S': 0.0, 'R': -0.0, 'T': -0.0, 'W': -0.0, 'V': 0.0, 'Y': 0.0}, 4: {'A': 0.082, 'C': 0.0, 'E': 0.032, 'D': -0.128, 'G': -0.03, 'F': -0.132, 'I': -0.005, 'H': 0.036, 'K': -0.01, 'M': 0.131, 'L': -0.311, 'N': 0.064, 'Q': 0.008, 'P': 0.114, 'S': 0.029, 'R': -0.046, 'T': 0.06, 'W': 0.0, 'V': 0.106, 'Y': 0.0}, 5: {'A': -0.124, 'C': 0.112, 'E': 0.071, 'D': -0.001, 'G': 0.024, 'F': -0.01, 'I': 0.127, 'H': 0.05, 'K': -0.081, 'M': 0.0, 'L': -0.126, 'N': -0.003, 'Q': 0.065, 'P': 0.128, 'S': -0.208, 'R': 0.061, 'T': -0.106, 'W': 0.107, 'V': -0.134, 'Y': 0.046}, 6: {'A': 0.127, 'C': 0.032, 'E': 0.007, 'D': 0.097, 'G': 0.051, 'F': 0.015, 'I': 0.058, 'H': -0.182, 'K': -0.08, 'M': 0.0, 'L': -0.126, 'N': 0.0, 'Q': -0.066, 'P': 0.081, 'S': -0.056, 'R': -0.078, 'T': 0.082, 'W': 0.0, 'V': 0.088, 'Y': -0.051}, 7: {'A': -0.0, 'C': 0.0, 'E': -0.0, 'D': 0.0, 'G': 0.0, 'F': -0.0, 'I': -0.0, 'H': 0.0, 'K': -0.0, 'M': -0.0, 'L': -0.0, 'N': 0.0, 'Q': 0.0, 'P': -0.0, 'S': 0.0, 'R': 0.0, 'T': -0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': -0.0, 'C': 0.0, 'E': -0.0, 'D': 0.0, 'G': -0.0, 'F': -0.0, 'I': -0.0, 'H': 0.0, 'K': -0.0, 'M': -0.0, 'L': 0.0, 'N': 0.0, 'Q': -0.0, 'P': -0.0, 'S': -0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': -0.0, 'Y': 0.0}, 9: {'A': 0.014, 'C': 0.0, 'E': 0.02, 'D': -0.027, 'G': 0.038, 'F': 0.013, 'I': 0.018, 'H': 0.0, 'K': 0.021, 'M': 0.027, 'L': -0.029, 'N': 0.027, 'Q': -0.022, 'P': -0.062, 'S': 0.003, 'R': -0.035, 'T': 0.004, 'W': 0.0, 'V': 0.014, 'Y': -0.025}, 10: {'A': 0.184, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': -0.852, 'I': -0.072, 'H': 0.0, 'K': 0.274, 'M': -0.026, 'L': -0.286, 'N': 0.0, 'Q': 0.179, 'P': -0.188, 'S': 0.149, 'R': 0.541, 'T': 0.0, 'W': -0.056, 'V': 0.16, 'Y': -0.007}, -1: {'con': 4.16437}}
| 0
| 0
| 0
|
8805847efb84b9182830f2db109cdf8df06405a3
| 727
|
py
|
Python
|
echome/identity.py
|
mgtrrz/echome-python-sdk
|
c169510cb801ff3144cc746b96726cb2b535f9fd
|
[
"MIT"
] | 1
|
2022-01-31T19:35:14.000Z
|
2022-01-31T19:35:14.000Z
|
echome/identity.py
|
mgtrrz/echome-python-sdk
|
c169510cb801ff3144cc746b96726cb2b535f9fd
|
[
"MIT"
] | 1
|
2021-09-26T01:40:30.000Z
|
2021-09-26T01:40:30.000Z
|
echome/identity.py
|
mgtrrz/echome-python-sdk
|
c169510cb801ff3144cc746b96726cb2b535f9fd
|
[
"MIT"
] | null | null | null |
import logging
from .resource import BaseResource
logger = logging.getLogger(__name__)
| 29.08
| 61
| 0.661623
|
import logging
from .resource import BaseResource
logger = logging.getLogger(__name__)
class Identity(BaseResource):
namespace = "identity/user"
def describe_all_users(self):
return self.request_url("/describe/all")
def describe_user(self, user_id:str):
return self.request_url(f"/describe/{user_id}")
def describe_caller(self):
return self.request_url(f"/describe/caller")
def create_user(self, **kwargs):
if "Tags" in kwargs:
kwargs.update(self.unpack_tags(kwargs["Tags"]))
return self.request_url(f"/create", "post", **kwargs)
def delete_user(self, user_id:str):
return self.request_url(f"/delete/{user_id}", "post")
| 425
| 191
| 23
|
c5ef3ffd9fd787999fffa6497ba9c887822e5121
| 949
|
py
|
Python
|
core/forms.py
|
programecompedro/django-testes
|
12af91a8d175779be86c87fd12d07df0e2de214c
|
[
"Unlicense"
] | null | null | null |
core/forms.py
|
programecompedro/django-testes
|
12af91a8d175779be86c87fd12d07df0e2de214c
|
[
"Unlicense"
] | null | null | null |
core/forms.py
|
programecompedro/django-testes
|
12af91a8d175779be86c87fd12d07df0e2de214c
|
[
"Unlicense"
] | null | null | null |
from django import forms
from django.core.mail.message import EmailMessage
from django.forms.widgets import Textarea
| 36.5
| 90
| 0.646997
|
from django import forms
from django.core.mail.message import EmailMessage
from django.forms.widgets import Textarea
class ContatoForm(forms.Form):
nome = forms.CharField(label="Nome", max_length=100)
email = forms.CharField(label="E-mail", max_length=100)
assunto = forms.CharField(label="Assunto", max_length=100)
mensagem = forms.CharField(label="Mensagem", widget=forms.Textarea())
def send_email(self):
nome = self.cleaned_data['nome']
email = self.cleaned_data['email']
assunto = self.cleaned_data['assunto']
mensagem = self.cleaned_data['mensagem']
conteudo = f'Nome:{nome}\nEmail:{email}\nAssunto:{assunto}\nMensagem:{mensagem}\n'
mail = EmailMessage(
subject=assunto,
body=conteudo,
from_email='contato@fusion.com.br',
to=['contato@fusion.com.br',],
headers={'Reply-To': email}
)
mail.send()
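# Illustrative usage from a Django view handling a POST (sketch, not part of
# this module):
#
#   form = ContatoForm(request.POST)
#   if form.is_valid():
#       form.send_email()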
| 520
| 290
| 23
|