Dataset schema (one row per source file):
- hexsha: string (length 40)
- size: int64 (3 to 1.03M)
- ext: string (10 classes)
- lang: string (1 class)
- max_stars_repo_path: string (length 3 to 972)
- max_stars_repo_name: string (length 6 to 130)
- max_stars_repo_head_hexsha: string (length 40 to 78)
- max_stars_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k, nullable)
- max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
- max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
- max_issues_repo_path: string (length 3 to 972)
- max_issues_repo_name: string (length 6 to 130)
- max_issues_repo_head_hexsha: string (length 40 to 78)
- max_issues_repo_licenses: list (length 1 to 10)
- max_issues_count: int64 (1 to 116k, nullable)
- max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
- max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
- max_forks_repo_path: string (length 3 to 972)
- max_forks_repo_name: string (length 6 to 130)
- max_forks_repo_head_hexsha: string (length 40 to 78)
- max_forks_repo_licenses: list (length 1 to 10)
- max_forks_count: int64 (1 to 105k, nullable)
- max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
- max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
- content: string (length 3 to 1.03M)
- avg_line_length: float64 (1.13 to 941k)
- max_line_length: int64 (2 to 941k)
- alphanum_fraction: float64 (0 to 1)
hexsha: f663e1748423f55b43e840350ce70790d4be28ce | size: 4,094 | ext: py | lang: Python
max_stars: composer/datasets/hparams.py @ anisehsani/composer (42599682d50409b4a4eb7c91fad85d67418cee13) | licenses ["Apache-2.0"] | count: null | events: null to null
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
# Copyright 2021 MosaicML. All Rights Reserved.
from __future__ import annotations
import abc
import dataclasses
import textwrap
from typing import Optional, Union
try:
import custom_inherit
except ImportError:
# if custom_inherit is not installed, then the docstrings will be incomplete. That's fine.
metaclass = abc.ABCMeta
else:
metaclass = custom_inherit.DocInheritMeta(style="google_with_merge", abstract_base_class=True)
import yahp as hp
from composer.core.types import DataLoader, DataSpec, MemoryFormat
from composer.datasets.dataloader import DataloaderHparams
__all__ = ["SyntheticHparamsMixin", "DatasetHparams"]
@dataclasses.dataclass
class SyntheticHparamsMixin(hp.Hparams, abc.ABC):
"""Synthetic dataset parameter mixin for :class:`DatasetHparams`.
Parameters:
use_synthetic (bool, optional): Whether to use synthetic data. (Default: ``False``)
synthetic_num_unique_samples (int, optional): The number of unique samples to allocate memory for.
Ignored if :attr:`use_synthetic` is False. (Default: ``100``)
        synthetic_device (str, optional): The device to store the sample pool.
Set to ``cuda`` to store samples on the GPU and eliminate PCI-e bandwidth with the dataloader.
Set to ``cpu`` to move data between host memory and the device on every batch.
Ignored if :attr:`use_synthetic` is False. (Default: ``cpu``)
synthetic_memory_format: The :class:`MemoryFormat` to use.
Ignored if :attr:`use_synthetic` is False. (Default: ``CONTIGUOUS_FORMAT``)
"""
use_synthetic: bool = hp.optional("Whether to use synthetic data. Defaults to False.", default=False)
synthetic_num_unique_samples: int = hp.optional("The number of unique samples to allocate memory for.", default=100)
    synthetic_device: str = hp.optional("Device to store the sample pool. Should be `cuda` or `cpu`. Defaults to `cpu`.",
default="cpu")
synthetic_memory_format: MemoryFormat = hp.optional("Memory format. Defaults to contiguous format.",
default=MemoryFormat.CONTIGUOUS_FORMAT)
@dataclasses.dataclass
class DatasetHparams(hp.Hparams, abc.ABC, metaclass=metaclass):
"""Abstract base class for hyperparameters to initialize a dataset.
Parameters:
datadir (str): The path to the data directory.
is_train (bool): Whether to load the training data (the default) or validation data.
drop_last (bool):
If the number of samples is not divisible by the batch size, whether
to drop the last batch (the default) or pad the last batch with zeros.
shuffle (bool): Whether to shuffle the dataset. Defaults to True.
"""
is_train: bool = hp.optional("Whether to load the training data (the default) or validation data.", default=True)
drop_last: bool = hp.optional(textwrap.dedent("""\
If the number of samples is not divisible by the batch size,
whether to drop the last batch (the default) or pad the last batch with zeros."""),
default=True)
shuffle: bool = hp.optional("Whether to shuffle the dataset for each epoch. Defaults to True.", default=True)
datadir: Optional[str] = hp.optional("The path to the data directory", default=None)
@abc.abstractmethod
def initialize_object(self, batch_size: int, dataloader_hparams: DataloaderHparams) -> Union[DataLoader, DataSpec]:
"""Creates a :class:`DataLoader` or :class:`DataloaderSpec` for this dataset.
Parameters:
batch_size (int): The size of the batch the dataloader should yield. This batch size is
device-specific and already incorporates the world size.
dataloader_hparams (DataloaderHparams): The dataset-independent hparams for the dataloader
Returns:
Dataloader or DataSpec: The dataloader, or if the dataloader yields batches of custom types,
a :class:`DataSpec`.
"""
pass
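# --- Illustrative sketch (not part of the original composer file) ---
# A minimal, hypothetical subclass showing how ``initialize_object`` is usually
# implemented: build a dataset, then delegate dataloader construction to
# ``dataloader_hparams.initialize_object``. The class name, the random tensor
# dataset, and the exact ``DataloaderHparams.initialize_object`` keyword
# arguments used below are assumptions for illustration only.
@dataclasses.dataclass
class _ExampleRandomDatasetHparams(DatasetHparams, SyntheticHparamsMixin):
    """Hyperparameters for a small random classification dataset (example only)."""
    num_classes: int = hp.optional("Number of classes.", default=10)
    def initialize_object(self, batch_size: int, dataloader_hparams: DataloaderHparams) -> DataLoader:
        import torch
        from torch.utils.data import TensorDataset
        # Use the synthetic sample-pool size when synthetic data is requested.
        num_samples = self.synthetic_num_unique_samples if self.use_synthetic else 1024
        x = torch.randn(num_samples, 3, 32, 32)
        y = torch.randint(0, self.num_classes, (num_samples,))
        dataset = TensorDataset(x, y)
        return dataloader_hparams.initialize_object(
            dataset,
            batch_size=batch_size,
            sampler=None,
            drop_last=self.drop_last,
        )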
avg_line_length: 47.604651 | max_line_length: 120 | alphanum_fraction: 0.691744

hexsha: cd042c8069db7ff3aca8db5c00df60c90c45c5db | size: 1,169 | ext: py | lang: Python
max_stars: hd_mysql/models.py @ Rmond/OperMge (926f00107614ed55b26ff3beac178fe955de856a) | licenses ["Apache-2.0"] | count: 1 | events: 2017-08-18T07:03:34.000Z to 2017-08-18T07:03:34.000Z
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django_celery_beat.models import PeriodicTask
# Create your models here.
class Custom_Schedule(models.Model):
sd_name=models.CharField(max_length=32)
sd_num=models.CharField(max_length=4)
class Schedule_Info(models.Model):
mysql_bk_name = models.CharField(max_length=32)
hostip = models.CharField(max_length=16)
bk_database = models.CharField(max_length=32)
bk_table = models.CharField(max_length=32,default="")
ct_shd = models.ForeignKey(Custom_Schedule)
hour_minute = models.CharField(max_length=8)
period_tk = models.ForeignKey(PeriodicTask)
class Schedule_Res(models.Model):
mysql_bk_name = models.CharField(max_length=32)
star_time = models.DateTimeField()
stop_time = models.DateTimeField()
result=models.TextField()
status=models.CharField(max_length=8)
class Sql_Info(models.Model):
sql_name = models.CharField(max_length=32)
sql_handle = models.TextField()
arg_count = models.IntegerField(null=True)
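# --- Illustrative sketch (not part of the original file) ---
# How these models relate: each Schedule_Info row points at a Custom_Schedule
# (the backup frequency) and at a django_celery_beat PeriodicTask (the actual
# scheduled job). The helper below is hypothetical and just walks those
# foreign keys for one host.
def _pending_backups_for_host(hostip):
    rows = Schedule_Info.objects.filter(hostip=hostip).select_related('ct_shd', 'period_tk')
    return [(r.mysql_bk_name, r.ct_shd.sd_name, r.period_tk.name) for r in rows]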
avg_line_length: 36.53125 | max_line_length: 58 | alphanum_fraction: 0.734816

hexsha: a3c02a7dbd3daef420456501718ee9a0e9c666a2 | size: 797 | ext: py | lang: Python
max_stars: eb-virt/bin/rst2odt.py @ YutingPang/one_note (9dfc7061f07819cbac96e87080d767705d7dfe0c) | licenses ["MIT"] | count: null | events: null to null
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
#!/Users/yutingpang/git/eb-flask/eb-virt/bin/python
# $Id: rst2odt.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
A front end to the Docutils Publisher, producing OpenOffice documents.
"""
import sys
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline_to_binary, default_description
from docutils.writers.odf_odt import Writer, Reader
description = ('Generates OpenDocument/OpenOffice/ODF documents from '
'standalone reStructuredText sources. ' + default_description)
writer = Writer()
reader = Reader()
output = publish_cmdline_to_binary(reader=reader, writer=writer,
description=description)
avg_line_length: 25.709677 | max_line_length: 78 | alphanum_fraction: 0.752823

hexsha: 97dd62916635bf378ab55b121c374c6aac3e081d | size: 14,181 | ext: py | lang: Python
max_stars: tensorflow_probability/python/experimental/mcmc/diagonal_mass_matrix_adaptation.py @ chrism0dwk/probability (ab260f15cae94c6802c2f2769fb448ad213b79cd) | licenses ["Apache-2.0"] | count: null | events: null to null
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""DiagonalMassMatrixAdaptation TransitionKernel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import independent
from tensorflow_probability.python.distributions import joint_distribution_sequential as jds
from tensorflow_probability.python.experimental.distributions import mvn_precision_factor_linop as mvn_pfl
from tensorflow_probability.python.experimental.stats import sample_stats
from tensorflow_probability.python.internal import auto_composite_tensor
from tensorflow_probability.python.internal import broadcast_util as bu
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import unnest
from tensorflow_probability.python.mcmc import kernel as kernel_base
from tensorflow_probability.python.mcmc.internal import util as mcmc_util
__all__ = [
'DiagonalMassMatrixAdaptation',
]
# Add auto-composite tensors to the global namespace to avoid creating new
# classes inside functions.
_CompositeJointDistributionSequential = auto_composite_tensor.auto_composite_tensor(
jds.JointDistributionSequential, omit_kwargs=('name',))
_CompositeLinearOperatorDiag = auto_composite_tensor.auto_composite_tensor(
tf.linalg.LinearOperatorDiag, omit_kwargs=('name',))
_CompositeMultivariateNormalPrecisionFactorLinearOperator = auto_composite_tensor.auto_composite_tensor(
mvn_pfl.MultivariateNormalPrecisionFactorLinearOperator,
omit_kwargs=('name',))
_CompositeIndependent = auto_composite_tensor.auto_composite_tensor(
independent.Independent, omit_kwargs=('name',))
def hmc_like_momentum_distribution_setter_fn(kernel_results, new_distribution):
"""Setter for `momentum_distribution` so it can be adapted."""
# Note that unnest.replace_innermost has a special path for going into
# `accepted_results` preferentially, so this will set
# `accepted_results.momentum_distribution`.
return unnest.replace_innermost(
kernel_results, momentum_distribution=new_distribution)
class DiagonalMassMatrixAdaptationResults(
mcmc_util.PrettyNamedTupleMixin,
collections.namedtuple('DiagonalMassMatrixAdaptationResults', [
'inner_results',
'running_variance',
])):
"""Results of the DiagonalMassMatrixAdaptation TransitionKernel.
Attributes:
inner_results: Results of the inner kernel.
running_variance: (List of) instance(s) of
`tfp.experimental.stats.RunningVariance`, used to set
the diagonal covariance of the momentum distribution.
"""
__slots__ = ()
class DiagonalMassMatrixAdaptation(kernel_base.TransitionKernel):
"""Adapts the inner kernel's `momentum_distribution` to estimated variance.
This kernel uses an online variance estimate to adjust a diagonal covariance
matrix for each of the state parts. More specifically, the
`momentum_distribution` of the innermost kernel is set to a diagonal
multivariate normal distribution whose variance is the *inverse* of the
online estimate. The inverse of the covariance of the momentum is often called
the "mass matrix" in the context of Hamiltonian Monte Carlo.
This preconditioning scheme works well when the covariance is diagonally
dominant, and may give reasonable results even when the number of draws is
less than the dimension. In particular, it should generally do a better job
than no preconditioning, which implicitly uses an identity mass matrix.
Note that this kernel does not implement a calibrated sampler; rather, it is
intended to be used as one step of an iterative adaptation process. It
should not be used when drawing actual samples.
"""
def __init__(
self,
inner_kernel,
initial_running_variance,
momentum_distribution_setter_fn=hmc_like_momentum_distribution_setter_fn,
validate_args=False,
name=None):
"""Creates the diagonal mass matrix adaptation kernel.
Users must provide an `initial_running_variance`, either from a previous
`DiagonalMassMatrixAdaptation`, or some other source. See
`RunningCovariance.from_stats` for a convenient way to construct these.
Args:
inner_kernel: `TransitionKernel`-like object.
initial_running_variance:
`tfp.experimental.stats.RunningVariance`-like object, or list of them,
for a batch of momentum distributions. These use `update` on the state
        to maintain an estimate of the variance, and so must have
        a structure compatible with the state space.
momentum_distribution_setter_fn: A callable with the signature
`(kernel_results, new_momentum_distribution) -> new_kernel_results`
where `kernel_results` are the results of the `inner_kernel`,
`new_momentum_distribution` is a `CompositeTensor` or a nested
collection of `CompositeTensor`s, and `new_kernel_results` are a
possibly-modified copy of `kernel_results`. The default,
`hmc_like_momentum_distribution_setter_fn`, presumes HMC-style
`kernel_results`, and sets the `momentum_distribution` only under the
`accepted_results` field.
validate_args: Python `bool`. When `True` kernel parameters are checked
for validity. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class. Default:
'diagonal_mass_matrix_adaptation'.
"""
inner_kernel = mcmc_util.enable_store_parameters_in_results(inner_kernel)
self._parameters = dict(
inner_kernel=inner_kernel,
initial_running_variance=initial_running_variance,
momentum_distribution_setter_fn=momentum_distribution_setter_fn,
name=name,
)
@property
def inner_kernel(self):
return self._parameters['inner_kernel']
@property
def name(self):
return self._parameters['name']
@property
def initial_running_variance(self):
return self._parameters['initial_running_variance']
def momentum_distribution_setter_fn(self, kernel_results,
new_momentum_distribution):
return self._parameters['momentum_distribution_setter_fn'](
kernel_results, new_momentum_distribution)
@property
def parameters(self):
"""Return `dict` of ``__init__`` arguments and their values."""
return self._parameters
def one_step(self, current_state, previous_kernel_results, seed=None):
with tf.name_scope(
mcmc_util.make_name(self.name, 'diagonal_mass_matrix_adaptation',
'one_step')):
variance_parts = previous_kernel_results.running_variance
diags = [variance_part.variance() for variance_part in variance_parts]
# Set the momentum.
batch_ndims = ps.rank(unnest.get_innermost(previous_kernel_results,
'target_log_prob'))
state_parts = tf.nest.flatten(current_state)
new_momentum_distribution = _make_momentum_distribution(diags,
state_parts,
batch_ndims)
inner_results = self.momentum_distribution_setter_fn(
previous_kernel_results.inner_results, new_momentum_distribution)
# Step the inner kernel.
inner_kwargs = {} if seed is None else dict(seed=seed)
new_state, new_inner_results = self.inner_kernel.one_step(
current_state, inner_results, **inner_kwargs)
new_state_parts = tf.nest.flatten(new_state)
new_variance_parts = []
for variance_part, diag, state_part in zip(variance_parts, diags,
new_state_parts):
# Compute new variance for each variance part, accounting for partial
# batching of the variance calculation across chains (ie, some, all, or
# none of the chains may share the estimated mass matrix).
#
# For example, say
#
# state_part has shape [2, 3, 4] + [5, 6] (batch + event)
# variance_part has shape [4] + [5, 6]
# log_prob has shape [2, 3, 4]
#
# i.e., we have a batch of chains of shape [2, 3, 4], and 4 mass
# matrices, each being shared across a [2, 3]-batch of chains. Note this
# division is inferred from the shapes of the state part, the log_prob,
# and the user-provided initial running variances.
#
# Until RunningVariance supports rank > 1 chunking, we need to flatten
# the states that go into updating the variance estimates. In the above
# example, `state_part` will be reshaped to `[6, 4, 5, 6]`, and
# fed to `RunningVariance.update(state_part, axis=0)`, recording
# 6 new observations in the running variance calculation.
# `RunningVariance.variance()` will then be of shape `[4, 5, 6]`, and
# the resulting momentum distribution will have batch shape of
# `[2, 3, 4]` and event_shape of `[5, 6]`, matching the state_part.
state_rank = ps.rank(state_part)
variance_rank = ps.rank(diag)
num_reduce_dims = state_rank - variance_rank
state_part_shape = ps.shape(state_part)
# This reshape adds a 1 when reduce_dims==0, and collapses all the lead
# dimensions to a single one otherwise.
reshaped_state = ps.reshape(
state_part,
ps.concat(
[[ps.reduce_prod(state_part_shape[:num_reduce_dims])],
state_part_shape[num_reduce_dims:]], axis=0))
# The `axis=0` here removes the leading dimension we got from the
# reshape above, so the new_variance_parts have the correct shape again.
new_variance_parts.append(variance_part.update(reshaped_state,
axis=0))
new_kernel_results = previous_kernel_results._replace(
inner_results=new_inner_results,
running_variance=new_variance_parts)
return new_state, new_kernel_results
def bootstrap_results(self, init_state):
with tf.name_scope(
mcmc_util.make_name(self.name, 'diagonal_mass_matrix_adaptation',
'bootstrap_results')):
if isinstance(self.initial_running_variance,
sample_stats.RunningVariance):
variance_parts = [self.initial_running_variance]
else:
variance_parts = list(self.initial_running_variance)
diags = [variance_part.variance() for variance_part in variance_parts]
# Step inner results.
inner_results = self.inner_kernel.bootstrap_results(init_state)
# Set the momentum.
batch_ndims = ps.rank(unnest.get_innermost(inner_results,
'target_log_prob'))
init_state_parts = tf.nest.flatten(init_state)
momentum_distribution = _make_momentum_distribution(
diags, init_state_parts, batch_ndims)
inner_results = self.momentum_distribution_setter_fn(
inner_results, momentum_distribution)
proposed = unnest.get_innermost(inner_results, 'proposed_results',
default=None)
if proposed is not None:
proposed = proposed._replace(
momentum_distribution=momentum_distribution)
inner_results = unnest.replace_innermost(inner_results,
proposed_results=proposed)
return DiagonalMassMatrixAdaptationResults(
inner_results=inner_results,
running_variance=variance_parts)
@property
def is_calibrated(self):
return False
def _make_momentum_distribution(running_variance_parts, state_parts,
batch_ndims):
"""Construct a momentum distribution from the running variance.
This uses a running variance to construct a momentum distribution with the
correct batch_shape and event_shape.
Args:
running_variance_parts: List of `Tensor`, outputs of
`tfp.experimental.stats.RunningVariance.variance()`.
state_parts: List of `Tensor`.
batch_ndims: Scalar, for leading batch dimensions.
Returns:
`tfd.Distribution` where `.sample` has the same structure as `state_parts`,
and `.log_prob` of the sample will have the rank of `batch_ndims`
"""
distributions = []
for variance_part, state_part in zip(running_variance_parts, state_parts):
running_variance_rank = ps.rank(variance_part)
state_rank = ps.rank(state_part)
# Pad dimensions and tile by multiplying by tf.ones to add a batch shape
ones = tf.ones(ps.shape(state_part)[:-(state_rank - running_variance_rank)],
dtype=variance_part.dtype)
ones = bu.left_justified_expand_dims_like(ones, state_part)
variance_tiled = variance_part * ones
reinterpreted_batch_ndims = state_rank - batch_ndims - 1
distributions.append(
_CompositeIndependent(
_CompositeMultivariateNormalPrecisionFactorLinearOperator(
precision_factor=_CompositeLinearOperatorDiag(
tf.math.sqrt(variance_tiled)),
precision=_CompositeLinearOperatorDiag(variance_tiled)),
reinterpreted_batch_ndims=reinterpreted_batch_ndims))
return _CompositeJointDistributionSequential(distributions)
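# --- Illustrative sketch (not part of the original TFP source) ---
# Minimal wiring of the kernel above, assuming the public TFP API: the inner
# kernel must expose `momentum_distribution` in its results, which
# `tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo` does. The step
# size, leapfrog steps and unit initial variance below are placeholder choices.
def _example_adaptive_kernel(target_log_prob_fn, dims=3):
  import tensorflow_probability as tfp
  inner = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
      target_log_prob_fn=target_log_prob_fn,
      step_size=0.1,
      num_leapfrog_steps=3)
  # Seed the adaptation with a unit diagonal variance estimate.
  init_var = sample_stats.RunningVariance.from_stats(
      num_samples=10., mean=tf.zeros(dims), variance=tf.ones(dims))
  return DiagonalMassMatrixAdaptation(
      inner_kernel=inner, initial_running_variance=init_var)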
avg_line_length: 45.745161 | max_line_length: 106 | alphanum_fraction: 0.711163

hexsha: f26909a4f4df3e44ce2a197c7a36706f1a9635ca | size: 7,448 | ext: py | lang: Python
max_stars: python/libraries/mypalletizer.py @ elephantrobotics/myblockly-PI-mind- (d93b6c3b57abbe7cfb606f1a12bc4ddbcc5ee4dd) | licenses ["MIT"] | count: 1 | events: 2022-01-15T17:45:03.000Z to 2022-01-15T17:45:03.000Z
max_issues: python/libraries/mypalletizer.py @ elephantrobotics/myblockly_plus (1db311950e54b9a753bfecf2902e996dcb6f5e06) | licenses ["MIT"] | count: null | events: null to null
max_forks: same path/repo/head/licenses as max_issues | count: null | events: null to null
content:
# coding=utf-8
import logging
import math
import time
from .log import setup_logging
from .generate import MyCobotCommandGenerator
from .common import ProtocolCode, read, write
class MyPalletizedataException(Exception):
pass
MIN_ID = 0
MAX_ID = 5
# In fact, most joints cannot reach plus or minus 180 degrees.
# There may be a value greater than 180 when reading the angle,
# and the maximum and minimum values are expanded for compatibility.
MIN_ANGLE = -170.0
MAX_ANGLE = 170.0
def calibration_parameters(**kwargs):
if kwargs.get("id", None) is not None and not MIN_ID <= kwargs["id"] <= MAX_ID:
raise MyPalletizedataException(
"The id not right, should be {0} ~ {1}, but received {2}.".format(
MIN_ID, MAX_ID, kwargs["id"]
)
)
if (
kwargs.get("degree", None) is not None
and not MIN_ANGLE <= kwargs["degree"] <= MAX_ANGLE
):
raise MyPalletizedataException(
"degree value not right, should be {0} ~ {1}, but received {2}".format(
MIN_ANGLE, MAX_ANGLE, kwargs["degree"]
)
)
if kwargs.get("degrees", None) is not None:
degrees = kwargs["degrees"]
if not isinstance(degrees, list):
raise MyPalletizedataException("`degrees` must be a list.")
if len(degrees) != 4:
raise MyPalletizedataException(
"The length of `degrees` must be 4.")
for idx, angle in enumerate(degrees):
if not MIN_ANGLE <= angle <= MAX_ANGLE:
raise MyPalletizedataException(
"Has invalid degree value, error on index {0}. Degree should be {1} ~ {2}.".format(
idx, MIN_ANGLE, MAX_ANGLE
)
)
if kwargs.get("coords", None) is not None:
coords = kwargs["coords"]
if not isinstance(coords, list):
raise MyPalletizedataException("`coords` must be a list.")
if len(coords) != 4:
raise MyPalletizedataException(
"The length of `coords` must be 4.")
if kwargs.get("speed", None) is not None and not 0 <= kwargs["speed"] <= 100:
raise MyPalletizedataException(
"speed value not right, should be 0 ~ 100, the error speed is %s"
% kwargs["speed"]
)
if kwargs.get("rgb", None) is not None:
rgb_str = ["r", "g", "b"]
for i, v in enumerate(kwargs["rgb"]):
if not (0 <= v <= 255):
raise MyPalletizedataException(
"The RGB value needs be 0 ~ 255, but the %s is %s" % (
rgb_str[i], v)
)
class MyPalletizer(MyCobotCommandGenerator):
def __init__(self, port, baudrate="115200", timeout=0.1, debug=False):
"""
Args:
port : port string
baudrate : baud rate string, default '115200'
timeout : default 0.1
debug : whether to show debug info
"""
super(MyPalletizer, self).__init__(debug)
self.debug = debug
setup_logging(self.debug)
self.log = logging.getLogger(__name__)
self.calibration_parameters = calibration_parameters
import serial
self._serial_port = serial.Serial(port, baudrate, timeout=timeout)
_write = write
_read = read
def _mesg(self, genre, *args, **kwargs):
"""
Args:
genre: command type (Command)
*args: other data.
It is converted to octal by default.
If the data needs to be encapsulated into hexadecimal,
the array is used to include them. (Data cannot be nested)
**kwargs: support `has_reply`
has_reply: Whether there is a return value to accept.
"""
real_command, has_reply = super(MyPalletizer, self)._mesg(
genre, *args, **kwargs
)
self._write(self._flatten(real_command))
if has_reply:
data = self._read()
res = self._process_received(data, genre)
if genre in [
ProtocolCode.IS_POWER_ON,
ProtocolCode.IS_CONTROLLER_CONNECTED,
ProtocolCode.IS_PAUSED,
ProtocolCode.IS_IN_POSITION,
ProtocolCode.IS_MOVING,
ProtocolCode.IS_SERVO_ENABLE,
ProtocolCode.IS_ALL_SERVO_ENABLE,
ProtocolCode.GET_SERVO_DATA,
ProtocolCode.GET_DIGITAL_INPUT,
ProtocolCode.GET_GRIPPER_VALUE,
ProtocolCode.IS_GRIPPER_MOVING,
ProtocolCode.GET_SPEED,
ProtocolCode.GET_ENCODER,
ProtocolCode.GET_BASIC_INPUT,
]:
return self._process_single(res)
elif genre in [ProtocolCode.GET_ANGLES]:
return [self._int2angle(angle) for angle in res]
elif genre in [ProtocolCode.GET_COORDS]:
if res:
r = []
for idx in range(3):
r.append(self._int2coord(res[idx]))
r.append(self._int2angle(res[3]))
return r
else:
return res
elif genre in [
ProtocolCode.GET_JOINT_MIN_ANGLE,
ProtocolCode.GET_JOINT_MAX_ANGLE,
]:
return self._int2angle(res[0]) if res else 0
else:
return res
return None
def get_radians(self):
"""Get all angle return a list
Return:
data_list (list[radian...]):
"""
angles = self._mesg(ProtocolCode.GET_ANGLES, has_reply=True)
return [round(angle * (math.pi / 180), 3) for angle in angles]
def send_radians(self, radians, speed):
"""Send all angles
Args:
radians (list): example [0, 0, 0, 0, 0, 0]
speed (int): 0 ~ 100
"""
calibration_parameters(len6=radians, speed=speed)
degrees = [self._angle2int(radian * (180 / math.pi))
for radian in radians]
return self._mesg(ProtocolCode.SEND_ANGLES, degrees, speed)
def sync_send_angles(self, degrees, speed, timeout=7):
t = time.time()
self.send_angles(degrees, speed)
while time.time() - t < timeout:
f = self.is_moving()
if not f:
break
time.sleep(0.1)
return self
def sync_send_coords(self, coords, speed, mode, timeout=7):
t = time.time()
self.send_coords(coords, speed, mode)
while time.time() - t < timeout:
if not self.is_moving():
break
time.sleep(0.1)
return self
# Basic for raspberry pi.
def gpio_init(self):
"""Init GPIO module.
Raspberry Pi version need this.
"""
import RPi.GPIO as GPIO # type: ignore
GPIO.setmode(GPIO.BCM)
self.gpio = GPIO
def gpio_output(self, pin, v):
"""Set GPIO output value.
Args:
pin: port number(int).
            v: Output value(int), 1 - GPIO.HIGH, 0 - GPIO.LOW
"""
self.gpio.setup(pin, self.gpio.OUT)
        self.gpio.output(pin, v)
# Other
def wait(self, t):
time.sleep(t)
return self
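# --- Illustrative sketch (not part of the original pymycobot file) ---
# Typical interactive use of the class above. The serial port name is an
# assumption and depends on the host machine; MyPalletizer drives 4 joints.
def _example_usage(port="/dev/ttyUSB0"):
    """Sketch only: connect, read joint radians, then home all 4 joints."""
    arm = MyPalletizer(port, baudrate="115200", debug=True)
    print("current radians:", arm.get_radians())
    arm.sync_send_angles([0, 0, 0, 0], 50)  # move all joints to zero at 50% speed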
avg_line_length: 33.102222 | max_line_length: 103 | alphanum_fraction: 0.549544

hexsha: 29e8c7f346ad6a9684312a00b1f8a214437b97c4 | size: 2,970 | ext: py | lang: Python
max_stars: py_uci/base.py @ AlexandreAbraham/py_uci (cc66a43711b66a93fdd903cc1cfb1b885e6bff12) | licenses ["MIT"] | count: 6 | events: 2019-04-17T14:15:44.000Z to 2021-05-25T14:24:24.000Z
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: 2 | events: 2019-10-27T10:05:16.000Z to 2020-10-28T09:16:28.000Z
content:
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 21 14:55:27 2019
@author: nsde
"""
#%%
from bs4 import BeautifulSoup
import requests
import os
import numpy as np
import pandas as pd
from .dataset_table import T
from .utility import get_dir, download_file, check_if_file_exist, convert_to_numeric
#%%
class Dataset(object):
def __init__(self, debug=False):
# Set class properties based on data table
self.name, self.size, self.features, self.task, self.weblink = \
T.unpack(self.__class__.__name__)
self.loc = get_dir(__file__) + '/../downloaded_datasets/' + \
self.weblink.split('/')[-2].replace('-','_')
# Initialize some structures
self.files = [ ]
# Download files
self._downloader()
# Read files and format dataframe
if not check_if_file_exist(self.loc + '/processed_' + self.name + '.pkl'):
self._create_dataframe()
if not debug: self._save_dataframe()
else:
if not debug: self._load_dataframe()
def _downloader(self):
# Create directory for files
if not os.path.exists(self.loc):
os.makedirs(self.loc)
# Scrape through webpage
r = requests.get(self.weblink)
data = r.text
soup = BeautifulSoup(data,'html5lib')
# Download all files
for i, link in enumerate(soup.find_all('a')):
if i >= 1: # first is always link to parent directory
filepage = self.weblink + link.get('href')
filename = download_file(filepage, self.loc)
self.files.append(filename)
def _save_dataframe(self):
self.dataframe.to_pickle(self.loc + '/processed_' + self.name + '.pkl')
def _load_dataframe(self):
self.dataframe = pd.read_pickle(self.loc + '/processed_' + self.name + '.pkl')
def _update_files_list(self):
self.files = [ ]
for f in os.listdir(self.loc):
self.files.append(self.loc + '/' + f)
@property
def N(self):
return self.data.shape[0]
@property
def d(self):
return self.data.shape[1]
@property
def data(self):
try:
return self.dataframe.values[:,:-1].astype('float32')
except:
            raise ValueError('Could not convert the dataframe automatically. '
                             'Need to do this yourself')
@property
def target(self):
try:
return convert_to_numeric(self.dataframe.values[:,-1])
except:
            raise ValueError('Could not convert the dataframe automatically. '
                             'Need to do this yourself')
@property
def attribute_names(self):
return list(self.dataframe)
    def _create_dataframe(self):
raise NotImplementedError
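# --- Illustrative sketch (not part of the original py_uci file) ---
# A hypothetical concrete dataset: subclasses only implement `_create_dataframe`,
# turning the downloaded files in `self.files` into `self.dataframe` (features
# first, target as the last column). Instantiating it would also require a
# matching entry in the dataset table `T`; the single-CSV layout is assumed.
class _ExampleCsvDataset(Dataset):
    def _create_dataframe(self):
        # Assume the first downloaded file is a plain CSV with a header row.
        self.dataframe = pd.read_csv(self.files[0])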
avg_line_length: 29.7 | max_line_length: 86 | alphanum_fraction: 0.569024

hexsha: a8e2607c3b909a26cf45664b4c4b5ef5661ce2a3 | size: 994 | ext: py | lang: Python
max_stars: profiles_project/urls.py @ basakmatvei/profiles-rest-api (38b60e0779bdc1296e3ba275505beff5868c39be) | licenses ["MIT"] | count: null | events: null to null
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
"""profiles_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('profiles_api.urls'))
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
avg_line_length: 39.76 | max_line_length: 81 | alphanum_fraction: 0.698189

hexsha: 07b75d3e1e65b0aa0a38ddb289289420e19a6f1a | size: 17,048 | ext: py | lang: Python
max_stars: hata/ext/command_utils/pagination.py @ WizzyBots/hata (f6991afc0bebf7dad932888a536f4d010f8663c7) | licenses ["0BSD"] | count: null | events: null to null
max_issues: same path/repo/head/licenses | count: 1 | events: 2022-02-08T16:54:39.000Z to 2022-02-08T16:54:39.000Z
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
__all__ = ('Pagination',)
from scarletio import CancelledError, copy_docs
from ...discord import Channel
from ...discord.core import BUILTIN_EMOJIS
from ...discord.exceptions import DiscordException, ERROR_CODES
from ...discord.interaction import InteractionEvent
from ...discord.message import Message
from .bases import (
GUI_STATE_CANCELLED, GUI_STATE_CANCELLING, GUI_STATE_READY, GUI_STATE_SWITCHING_PAGE, GUI_STATE_VALUE_TO_NAME,
PaginationBase
)
from .utils import Timeouter
class Pagination(PaginationBase):
"""
    A builtin option to display paginated messages, allowing the users to move between the pages with arrow emojis.
    The class allows modifications and closing its representations for every user. Also works in private channels.
    Picks up on reaction additions and on reaction deletions as well, and removes the added reactions if it has
    permission, which might be missing, like in DM-s.
Attributes
----------
_canceller : `None`, `function`
The function called when the ``Pagination`` is cancelled or when it expires. This is a onetime use and after
it was used, is set as `None`.
_task_flag : `int`
A flag to store the state of the ``Pagination``.
Possible values:
+---------------------------+-------+-----------------------------------------------------------------------+
| Respective name | Value | Description |
+===========================+=======+=======================================================================+
| GUI_STATE_READY | 0 | The Pagination does nothing, is ready to be used. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_SWITCHING_PAGE | 1 | The Pagination is currently changing it's page. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_CANCELLING | 2 | The pagination is currently changing it's page, but it was cancelled |
| | | meanwhile. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_CANCELLED | 3 | The pagination is, or is being cancelled right now. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_SWITCHING_CTX | 4 | The Pagination is switching context. Not used by the default class, |
| | | but expected. |
+---------------------------+-------+-----------------------------------------------------------------------+
_timeouter : `None`, ``Timeouter``
Executes the timing out feature on the ``Pagination``.
channel : ``Channel``
The channel where the ``Pagination`` is executed.
    client : ``Client``
The client who executes the ``Pagination``.
message : `None`, ``Message``
The message on what the ``Pagination`` is executed.
check : `None`, `callable`
A callable what decides whether the ``Pagination`` should process a received reaction event. Defaults to
`None`.
Should accept the following parameters:
+-----------+---------------------------------------------------+
| Name | Type |
+===========+===================================================+
| event | ``ReactionAddEvent``, ``ReactionDeleteEvent`` |
+-----------+---------------------------------------------------+
> ``ReactionDeleteEvent`` is only given, when the client has no `manage_messages` permission.
Should return the following values:
+-------------------+-----------+
| Name | Type |
+===================+===========+
| should_process | `bool` |
+-------------------+-----------+
page_index : `int`
The current page's index.
pages : `indexable`
An indexable container, what stores the displayable contents.
timeout : `float`
The timeout of the ``Pagination`` in seconds.
Class Attributes
----------------
LEFT2 : ``Emoji`` = `BUILTIN_EMOJIS['track_previous']`
The emoji used to move to the first page.
LEFT : ``Emoji`` = `BUILTIN_EMOJIS['arrow_backward']`
The emoji used to move to the previous page.
RIGHT : ``Emoji`` = `BUILTIN_EMOJIS['arrow_forward']`
The emoji used to move on the next page.
RIGHT2 : ``Emoji`` = `BUILTIN_EMOJIS['track_next']`
The emoji used to move on the last page.
CANCEL : ``Emoji`` = `BUILTIN_EMOJIS['x']`
The emoji used to cancel the ``Pagination``.
EMOJIS : `tuple` (`Emoji`, `Emoji`, `Emoji`, `Emoji`, `Emoji`) = `(LEFT2, LEFT, RIGHT, RIGHT2, CANCEL,)`
The emojis to add on the respective message in order.
"""
LEFT2 = BUILTIN_EMOJIS['track_previous']
LEFT = BUILTIN_EMOJIS['arrow_backward']
RIGHT = BUILTIN_EMOJIS['arrow_forward']
RIGHT2 = BUILTIN_EMOJIS['track_next']
CANCEL = BUILTIN_EMOJIS['x']
EMOJIS = (LEFT2, LEFT, RIGHT, RIGHT2, CANCEL,)
__slots__ = ('check', 'page_index', 'pages', 'timeout',)
async def __new__(cls, client, channel, pages, *, timeout=240., message=None, check=None):
"""
Creates a new pagination with the given parameters.
This method is a coroutine.
Parameters
----------
client : ``Client``
The client who will execute the ``Pagination``.
channel : ``Channel``, ``Message``, ``InteractionEvent``
The channel where the ``Pagination`` will be executed. Pass it as a ``Message`` to send a reply.
If given as ``InteractionEvent``, then will acknowledge it and create a new message with it as well.
Although will not acknowledge it if `message` is given.
pages : `indexable-container`
An indexable container, what stores the displayable pages.
timeout : `float` = `240.0`, Optional (Keyword only)
The timeout of the ``Pagination`` in seconds.
message : `None`, ``Message`` = `None`, Optional (Keyword only)
The message on what the ``Pagination`` will be executed. If not given a new message will be created.
check : `None`, `callable` = `None`, Optional (Keyword only)
A callable what decides whether the ``Pagination`` should process a received reaction event.
Should accept the following parameters:
+-----------+---------------------------------------------------+
| Name | Type |
+===========+===================================================+
| event | ``ReactionAddEvent``, ``ReactionDeleteEvent`` |
+-----------+---------------------------------------------------+
Note, that ``ReactionDeleteEvent`` is only given, when the client has no `manage_messages` permission.
Should return the following values:
+-------------------+-----------+
| Name | Type |
+===================+===========+
| should_process | `bool` |
+-------------------+-----------+
Returns
-------
self : `None`, ``Pagination``
If `pages` is an empty container, returns `None`.
Raises
------
TypeError
`channel`'s type is incorrect.
"""
if not pages:
return None
if isinstance(channel, Channel):
target_channel = channel
received_interaction = False
elif isinstance(channel, Message):
target_channel = channel.channel
received_interaction = False
elif isinstance(channel, InteractionEvent):
target_channel = channel.channel
received_interaction = True
else:
raise TypeError(
f'`channel` can be `{Channel.__name__}`, `{Message.__name__}`, `{InteractionEvent.__name__}`, '
f'got {channel.__class__.__name__}; {channel!r}.'
)
self = object.__new__(cls)
self.check = check
self.client = client
self.channel = target_channel
self.pages = pages
self.page_index = 0
self._canceller = cls._canceller_function
self._task_flag = GUI_STATE_READY
self.message = message
self.timeout = timeout
self._timeouter = None
try:
if message is None:
if received_interaction:
if not channel.is_acknowledged():
await client.interaction_response_message_create(channel)
message = await client.interaction_followup_message_create(channel, pages[0])
else:
message = await client.message_create(channel, pages[0])
self.message = message
else:
await client.message_edit(message, pages[0])
except BaseException as err:
self.cancel(err)
if isinstance(err, GeneratorExit):
raise
if isinstance(err, ConnectionError):
return self
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_message, # message deleted
ERROR_CODES.unknown_channel, # message's channel deleted
ERROR_CODES.missing_access, # client removed
ERROR_CODES.missing_permissions, # permissions changed meanwhile
ERROR_CODES.cannot_message_user, # user has dm-s disallowed
):
return self
raise
if not target_channel.cached_permissions_for(client).can_add_reactions:
await self.cancel(PermissionError())
return self
try:
if len(self.pages)>1:
for emoji in self.EMOJIS:
await client.reaction_add(message, emoji)
else:
await client.reaction_add(message, self.CANCEL)
except BaseException as err:
self.cancel(err)
if isinstance(err, GeneratorExit):
raise
if isinstance(err, ConnectionError):
return self
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_message, # message deleted
ERROR_CODES.unknown_channel, # message's channel deleted
ERROR_CODES.max_reactions, # reached reaction 20, some1 is trolling us.
ERROR_CODES.missing_access, # client removed
ERROR_CODES.missing_permissions, # permissions changed meanwhile
):
return self
raise
self._timeouter = Timeouter(self, timeout=timeout)
client.events.reaction_add.append(message, self)
client.events.reaction_delete.append(message, self)
return self
@copy_docs(PaginationBase.__call__)
async def __call__(self, client, event):
if event.user.is_bot:
return
if (event.emoji not in self.EMOJIS):
return
if (event.delete_reaction_with(client) == event.DELETE_REACTION_NOT_ADDED):
return
check = self.check
if (check is not None):
try:
should_continue = check(event)
except BaseException as err:
await client.events.error(client, f'{self!r}.__call__', err)
return
if not should_continue:
return
emoji = event.emoji
task_flag = self._task_flag
if task_flag != GUI_STATE_READY:
if task_flag == GUI_STATE_SWITCHING_PAGE:
if emoji is self.CANCEL:
self._task_flag = GUI_STATE_CANCELLING
return
# ignore GUI_STATE_CANCELLED and GUI_STATE_SWITCHING_CTX
return
while True:
if emoji is self.LEFT:
page_index = self.page_index - 1
break
if emoji is self.RIGHT:
page_index = self.page_index + 1
break
if emoji is self.CANCEL:
self._task_flag = GUI_STATE_CANCELLED
self.cancel()
try:
await client.message_delete(self.message)
except BaseException as err:
self.cancel(err)
if isinstance(err, GeneratorExit):
raise
if isinstance(err, ConnectionError):
# no internet
return
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_channel, # message's channel deleted
ERROR_CODES.missing_access, # client removed
):
return
await client.events.error(client, f'{self!r}.__call__', err)
return
else:
self.cancel()
return
if emoji is self.LEFT2:
page_index = 0
break
if emoji is self.RIGHT2:
page_index = len(self.pages) - 1
break
return
if page_index < 0:
page_index = 0
elif page_index >= len(self.pages):
page_index = len(self.pages) - 1
if self.page_index == page_index:
return
self.page_index = page_index
self._task_flag = GUI_STATE_SWITCHING_PAGE
try:
await client.message_edit(self.message, self.pages[page_index])
except BaseException as err:
self.cancel(err)
if isinstance(err, GeneratorExit):
raise
if isinstance(err, ConnectionError):
# no internet
return
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_message, # message deleted
ERROR_CODES.unknown_channel, # channel deleted
ERROR_CODES.missing_access, # client removed
):
return
# We definitely do not want to silence `ERROR_CODES.invalid_form_body`
await client.events.error(client, f'{self!r}.__call__', err)
return
if self._task_flag == GUI_STATE_CANCELLING:
self.cancel(CancelledError())
return
self._task_flag = GUI_STATE_READY
timeouter = self._timeouter
if (timeouter is not None):
timeouter.set_timeout(self.timeout)
@copy_docs(PaginationBase.__repr__)
def __repr__(self):
repr_parts = [
'<', self.__class__.__name__,
' client=', repr(self.client),
', channel=', repr(self.channel),
', state='
]
task_flag = self._task_flag
repr_parts.append(repr(task_flag))
repr_parts.append(' (')
task_flag_name = GUI_STATE_VALUE_TO_NAME[task_flag]
repr_parts.append(task_flag_name)
repr_parts.append(')')
# Third party things go here
repr_parts.append(', pages=')
repr_parts.append(repr(len(self.pages)))
repr_parts.append(', page_index=')
repr_parts.append(repr(self.page_index))
repr_parts.append('>')
return ''.join(repr_parts)
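# --- Illustrative sketch (not part of the original hata file) ---
# Typical use from a command handler: pages are usually ``Embed``-compatible
# objects. The embed contents below are placeholders; ``Pagination`` returns
# `None` when `pages` is empty, and reactions drive the page switching.
async def _example_send_help(client, channel):
    from ...discord import Embed
    pages = [
        Embed('Help', 'page 1 of 2'),
        Embed('Help', 'page 2 of 2'),
    ]
    return await Pagination(client, channel, pages, timeout=120.)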
avg_line_length: 40.590476 | max_line_length: 117 | alphanum_fraction: 0.486039

hexsha: b23c886b3ae022746f64c7c35f1a1fd94d837fe4 | size: 13,942 | ext: py | lang: Python
max_stars: env/lib/python3.6/site-packages/scipy/sparse/dia.py @ anthowen/duplify (846d01c1b21230937fdf0281b0cf8c0b08a8c24e) | licenses ["MIT"] | count: 69 | events: 2020-03-31T06:40:17.000Z to 2022-02-25T11:48:18.000Z
max_issues: venv/lib/python3.7/site-packages/scipy/sparse/dia.py @ John1001Song/Big-Data-Robo-Adviser (9444dce96954c546333d5aecc92a06c3bfd19aa5) | licenses ["MIT"] | count: 12 | events: 2018-12-06T22:06:49.000Z to 2022-02-25T17:40:44.000Z
max_forks: same path/repo/head/licenses as max_issues | count: 28 | events: 2019-03-22T01:07:13.000Z to 2022-02-21T16:38:27.000Z
content:
"""Sparse DIAgonal format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['dia_matrix', 'isspmatrix_dia']
import numpy as np
from .base import isspmatrix, _formats, spmatrix
from .data import _data_matrix
from .sputils import (isshape, upcast_char, getdtype, get_index_dtype,
get_sum_dtype, validateaxis, check_shape)
from ._sparsetools import dia_matvec
class dia_matrix(_data_matrix):
"""Sparse matrix with DIAgonal storage
This can be instantiated in several ways:
dia_matrix(D)
with a dense matrix
dia_matrix(S)
with another sparse matrix S (equivalent to S.todia())
dia_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N),
dtype is optional, defaulting to dtype='d'.
dia_matrix((data, offsets), shape=(M, N))
where the ``data[k,:]`` stores the diagonal entries for
diagonal ``offsets[k]`` (See example below)
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
DIA format data array of the matrix
offsets
DIA format offset array of the matrix
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import dia_matrix
>>> dia_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0)
>>> offsets = np.array([0, -1, 2])
>>> dia_matrix((data, offsets), shape=(4, 4)).toarray()
array([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
"""
format = 'dia'
def __init__(self, arg1, shape=None, dtype=None, copy=False):
_data_matrix.__init__(self)
if isspmatrix_dia(arg1):
if copy:
arg1 = arg1.copy()
self.data = arg1.data
self.offsets = arg1.offsets
self._shape = check_shape(arg1.shape)
elif isspmatrix(arg1):
if isspmatrix_dia(arg1) and copy:
A = arg1.copy()
else:
A = arg1.todia()
self.data = A.data
self.offsets = A.offsets
self._shape = check_shape(A.shape)
elif isinstance(arg1, tuple):
if isshape(arg1):
# It's a tuple of matrix dimensions (M, N)
# create empty matrix
self._shape = check_shape(arg1)
self.data = np.zeros((0,0), getdtype(dtype, default=float))
idx_dtype = get_index_dtype(maxval=max(self.shape))
self.offsets = np.zeros((0), dtype=idx_dtype)
else:
try:
# Try interpreting it as (data, offsets)
data, offsets = arg1
except:
raise ValueError('unrecognized form for dia_matrix constructor')
else:
if shape is None:
raise ValueError('expected a shape argument')
self.data = np.atleast_2d(np.array(arg1[0], dtype=dtype, copy=copy))
self.offsets = np.atleast_1d(np.array(arg1[1],
dtype=get_index_dtype(maxval=max(shape)),
copy=copy))
self._shape = check_shape(shape)
else:
#must be dense, convert to COO first, then to DIA
try:
arg1 = np.asarray(arg1)
except:
raise ValueError("unrecognized form for"
" %s_matrix constructor" % self.format)
from .coo import coo_matrix
A = coo_matrix(arg1, dtype=dtype, shape=shape).todia()
self.data = A.data
self.offsets = A.offsets
self._shape = check_shape(A.shape)
if dtype is not None:
self.data = self.data.astype(dtype)
#check format
if self.offsets.ndim != 1:
raise ValueError('offsets array must have rank 1')
if self.data.ndim != 2:
raise ValueError('data array must have rank 2')
if self.data.shape[0] != len(self.offsets):
raise ValueError('number of diagonals (%d) '
'does not match the number of offsets (%d)'
% (self.data.shape[0], len(self.offsets)))
if len(np.unique(self.offsets)) != len(self.offsets):
raise ValueError('offset array contains duplicate values')
def __repr__(self):
format = _formats[self.getformat()][1]
return "<%dx%d sparse matrix of type '%s'\n" \
"\twith %d stored elements (%d diagonals) in %s format>" % \
(self.shape + (self.dtype.type, self.nnz, self.data.shape[0],
format))
def _data_mask(self):
"""Returns a mask of the same shape as self.data, where
mask[i,j] is True when data[i,j] corresponds to a stored element."""
num_rows, num_cols = self.shape
offset_inds = np.arange(self.data.shape[1])
row = offset_inds - self.offsets[:,None]
mask = (row >= 0)
mask &= (row < num_rows)
mask &= (offset_inds < num_cols)
return mask
def count_nonzero(self):
mask = self._data_mask()
return np.count_nonzero(self.data[mask])
def getnnz(self, axis=None):
if axis is not None:
raise NotImplementedError("getnnz over an axis is not implemented "
"for DIA format")
M,N = self.shape
nnz = 0
for k in self.offsets:
if k > 0:
nnz += min(M,N-k)
else:
nnz += min(M+k,N)
return int(nnz)
getnnz.__doc__ = spmatrix.getnnz.__doc__
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
def sum(self, axis=None, dtype=None, out=None):
validateaxis(axis)
if axis is not None and axis < 0:
axis += 2
res_dtype = get_sum_dtype(self.dtype)
num_rows, num_cols = self.shape
ret = None
if axis == 0:
mask = self._data_mask()
x = (self.data * mask).sum(axis=0)
if x.shape[0] == num_cols:
res = x
else:
res = np.zeros(num_cols, dtype=x.dtype)
res[:x.shape[0]] = x
ret = np.matrix(res, dtype=res_dtype)
else:
row_sums = np.zeros(num_rows, dtype=res_dtype)
one = np.ones(num_cols, dtype=res_dtype)
dia_matvec(num_rows, num_cols, len(self.offsets),
self.data.shape[1], self.offsets, self.data, one, row_sums)
row_sums = np.matrix(row_sums)
if axis is None:
return row_sums.sum(dtype=dtype, out=out)
if axis is not None:
row_sums = row_sums.T
ret = np.matrix(row_sums.sum(axis=axis))
if out is not None and out.shape != ret.shape:
raise ValueError("dimensions do not match")
return ret.sum(axis=(), dtype=dtype, out=out)
sum.__doc__ = spmatrix.sum.__doc__
def _mul_vector(self, other):
x = other
y = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
x.dtype.char))
L = self.data.shape[1]
M,N = self.shape
dia_matvec(M,N, len(self.offsets), L, self.offsets, self.data, x.ravel(), y.ravel())
return y
def _mul_multimatrix(self, other):
return np.hstack([self._mul_vector(col).reshape(-1,1) for col in other.T])
def _setdiag(self, values, k=0):
M, N = self.shape
if values.ndim == 0:
# broadcast
values_n = np.inf
else:
values_n = len(values)
if k < 0:
n = min(M + k, N, values_n)
min_index = 0
max_index = n
else:
n = min(M, N - k, values_n)
min_index = k
max_index = k + n
if values.ndim != 0:
# allow also longer sequences
values = values[:n]
if k in self.offsets:
self.data[self.offsets == k, min_index:max_index] = values
else:
self.offsets = np.append(self.offsets, self.offsets.dtype.type(k))
m = max(max_index, self.data.shape[1])
data = np.zeros((self.data.shape[0]+1, m), dtype=self.data.dtype)
data[:-1,:self.data.shape[1]] = self.data
data[-1, min_index:max_index] = values
self.data = data
def todia(self, copy=False):
if copy:
return self.copy()
else:
return self
todia.__doc__ = spmatrix.todia.__doc__
def transpose(self, axes=None, copy=False):
if axes is not None:
raise ValueError(("Sparse matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation."))
num_rows, num_cols = self.shape
max_dim = max(self.shape)
# flip diagonal offsets
offsets = -self.offsets
# re-align the data matrix
r = np.arange(len(offsets), dtype=np.intc)[:, None]
c = np.arange(num_rows, dtype=np.intc) - (offsets % max_dim)[:, None]
pad_amount = max(0, max_dim-self.data.shape[1])
data = np.hstack((self.data, np.zeros((self.data.shape[0], pad_amount),
dtype=self.data.dtype)))
data = data[r, c]
return dia_matrix((data, offsets), shape=(
num_cols, num_rows), copy=copy)
transpose.__doc__ = spmatrix.transpose.__doc__
def diagonal(self, k=0):
rows, cols = self.shape
if k <= -rows or k >= cols:
raise ValueError("k exceeds matrix dimensions")
idx, = np.where(self.offsets == k)
first_col, last_col = max(0, k), min(rows + k, cols)
if idx.size == 0:
return np.zeros(last_col - first_col, dtype=self.data.dtype)
return self.data[idx[0], first_col:last_col]
diagonal.__doc__ = spmatrix.diagonal.__doc__
def tocsc(self, copy=False):
from .csc import csc_matrix
if self.nnz == 0:
return csc_matrix(self.shape, dtype=self.dtype)
num_rows, num_cols = self.shape
num_offsets, offset_len = self.data.shape
offset_inds = np.arange(offset_len)
row = offset_inds - self.offsets[:,None]
mask = (row >= 0)
mask &= (row < num_rows)
mask &= (offset_inds < num_cols)
mask &= (self.data != 0)
idx_dtype = get_index_dtype(maxval=max(self.shape))
indptr = np.zeros(num_cols + 1, dtype=idx_dtype)
indptr[1:offset_len+1] = np.cumsum(mask.sum(axis=0))
indptr[offset_len+1:] = indptr[offset_len]
indices = row.T[mask.T].astype(idx_dtype, copy=False)
data = self.data.T[mask.T]
return csc_matrix((data, indices, indptr), shape=self.shape,
dtype=self.dtype)
tocsc.__doc__ = spmatrix.tocsc.__doc__
def tocoo(self, copy=False):
num_rows, num_cols = self.shape
num_offsets, offset_len = self.data.shape
offset_inds = np.arange(offset_len)
row = offset_inds - self.offsets[:,None]
mask = (row >= 0)
mask &= (row < num_rows)
mask &= (offset_inds < num_cols)
mask &= (self.data != 0)
row = row[mask]
col = np.tile(offset_inds, num_offsets)[mask.ravel()]
data = self.data[mask]
from .coo import coo_matrix
A = coo_matrix((data,(row,col)), shape=self.shape, dtype=self.dtype)
A.has_canonical_format = True
return A
tocoo.__doc__ = spmatrix.tocoo.__doc__
# needed by _data_matrix
def _with_data(self, data, copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays are copied.
"""
if copy:
return dia_matrix((data, self.offsets.copy()), shape=self.shape)
else:
return dia_matrix((data,self.offsets), shape=self.shape)
def resize(self, *shape):
shape = check_shape(shape)
M, N = shape
# we do not need to handle the case of expanding N
self.data = self.data[:, :N]
if (M > self.shape[0] and
np.any(self.offsets + self.shape[0] < self.data.shape[1])):
# explicitly clear values that were previously hidden
mask = (self.offsets[:, None] + self.shape[0] <=
np.arange(self.data.shape[1]))
self.data[mask] = 0
self._shape = shape
resize.__doc__ = spmatrix.resize.__doc__
def isspmatrix_dia(x):
"""Is x of dia_matrix type?
Parameters
----------
x
object to check for being a dia matrix
Returns
-------
bool
True if x is a dia matrix, False otherwise
Examples
--------
>>> from scipy.sparse import dia_matrix, isspmatrix_dia
>>> isspmatrix_dia(dia_matrix([[5]]))
True
>>> from scipy.sparse import dia_matrix, csr_matrix, isspmatrix_dia
>>> isspmatrix_dia(csr_matrix([[5]]))
False
"""
return isinstance(x, dia_matrix)
avg_line_length: 33.11639 | max_line_length: 99 | alphanum_fraction: 0.548056

hexsha: ce7162142f86944cb15789ea1a26058839c19347 | size: 1,538 | ext: py | lang: Python
max_stars: scripts/hoi4/terrain_map.py @ SaucyPigeon/pyradox (a500a5628f57e056fa019ba1e114118abe6dc205) | licenses ["MIT"] | count: null | events: null to null
max_issues: same path/repo/head/licenses | count: null | events: null to null
max_forks: same path/repo/head/licenses | count: null | events: null to null
content:
import hoi4
import csv
import os
import re
import collections
import pyradox
definition_csv = os.path.join(pyradox.get_game_directory('HoI4'), 'map', 'definition.csv')
terrains = pyradox.txt.parse_file(os.path.join(pyradox.get_game_directory('HoI4'), 'common', 'terrain', '00_terrain.txt'), verbose=False)['categories']
color_override = {
'desert' : (255, 63, 0), # more red to avoid confusion with plains
}
symbol_override = {
'desert' : '⛭',
'hills' : '△',
'mountain' : '▲',
'ocean' : '~',
'lakes' : '',
'marsh' : '⚶',
'forest' : '♧',
'jungle' : '♣',
'plains' : '',
'urban' : '⚑',
'unknown' : '',
}
colormap = {}
textmap = {}
with open(definition_csv) as definition_file:
csv_reader = csv.reader(definition_file, delimiter = ';')
for row in csv_reader:
province_id = int(row[0])
terrain_key = row[6]
if terrain_key in color_override:
colormap[province_id] = color_override[terrain_key]
else:
colormap[province_id] = tuple(c for c in terrains[terrain_key]['color'])
textmap[province_id] = symbol_override[terrain_key]
province_map = pyradox.worldmap.ProvinceMap(game = 'HoI4')
out = province_map.generate_image(colormap, default_land_color=(255, 255, 255))
province_map.overlay_text(out, textmap, fontfile = "unifont-8.0.01.ttf", fontsize = 16, antialias = False, default_offset = (4, -2))
pyradox.image.save_using_palette(out, 'out/terrain_map.png')
| 30.76
| 152
| 0.628739
|
8765f78dabe0408b16f456e8ee232e81fbcf6478
| 517
|
py
|
Python
|
invenio_rdm_records/proxies.py
|
wgresshoff/invenio-rdm-records
|
91945829884ea4e46b05be26c97f11ffd045bcec
|
[
"MIT"
] | null | null | null |
invenio_rdm_records/proxies.py
|
wgresshoff/invenio-rdm-records
|
91945829884ea4e46b05be26c97f11ffd045bcec
|
[
"MIT"
] | 1
|
2020-10-28T16:32:43.000Z
|
2021-04-27T11:46:28.000Z
|
invenio_rdm_records/proxies.py
|
wgresshoff/invenio-rdm-records
|
91945829884ea4e46b05be26c97f11ffd045bcec
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
# Copyright (C) 2019 Northwestern University.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Helper proxy to the state object."""
from flask import current_app
from werkzeug.local import LocalProxy
current_rdm_records = LocalProxy(
lambda: current_app.extensions['invenio-rdm-records']
)
"""Helper proxy to get the current App ILS extension."""
| 28.722222
| 77
| 0.742747
|
8eff23305225acfbc7c3093e2ea2192bad05d4b3
| 1,309
|
py
|
Python
|
tests/test_dialog.py
|
Sab0tag3d/pyppeteer
|
5edabb3e25d72f4d1a90f0ed77f1981b2479c8ae
|
[
"MIT"
] | 3,747
|
2017-08-31T12:31:42.000Z
|
2022-03-31T07:31:16.000Z
|
tests/test_dialog.py
|
Sab0tag3d/pyppeteer
|
5edabb3e25d72f4d1a90f0ed77f1981b2479c8ae
|
[
"MIT"
] | 284
|
2017-09-03T19:02:13.000Z
|
2020-05-06T03:34:36.000Z
|
tests/test_dialog.py
|
Sab0tag3d/pyppeteer
|
5edabb3e25d72f4d1a90f0ed77f1981b2479c8ae
|
[
"MIT"
] | 487
|
2017-09-03T16:22:40.000Z
|
2022-03-22T13:23:05.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
from syncer import sync
from .base import BaseTestCase
class TestDialog(BaseTestCase):
@sync
async def test_alert(self):
def dialog_test(dialog):
self.assertEqual(dialog.type, 'alert')
self.assertEqual(dialog.defaultValue, '')
self.assertEqual(dialog.message, 'yo')
asyncio.ensure_future(dialog.accept())
self.page.on('dialog', dialog_test)
await self.page.evaluate('() => alert("yo")')
@sync
async def test_prompt(self):
def dialog_test(dialog):
self.assertEqual(dialog.type, 'prompt')
self.assertEqual(dialog.defaultValue, 'yes.')
self.assertEqual(dialog.message, 'question?')
asyncio.ensure_future(dialog.accept('answer!'))
self.page.on('dialog', dialog_test)
answer = await self.page.evaluate('() => prompt("question?", "yes.")')
self.assertEqual(answer, 'answer!')
@sync
async def test_prompt_dismiss(self):
def dismiss_test(dialog, *args):
asyncio.ensure_future(dialog.dismiss())
self.page.on('dialog', dismiss_test)
result = await self.page.evaluate('() => prompt("question?", "yes.")')
self.assertIsNone(result)
| 32.725
| 78
| 0.619557
|
8b25424f5a5d9ddc44a1124c576b67341fd77d16
| 846
|
py
|
Python
|
chap6/people_info.py
|
wikilike7/python-crash-course
|
85cd7a2ab6e43a554c282b6e0c1c44c415cca3a3
|
[
"MIT"
] | null | null | null |
chap6/people_info.py
|
wikilike7/python-crash-course
|
85cd7a2ab6e43a554c282b6e0c1c44c415cca3a3
|
[
"MIT"
] | null | null | null |
chap6/people_info.py
|
wikilike7/python-crash-course
|
85cd7a2ab6e43a554c282b6e0c1c44c415cca3a3
|
[
"MIT"
] | 1
|
2019-03-05T09:31:27.000Z
|
2019-03-05T09:31:27.000Z
|
# 6.1
people_info = {
'first_name': 'shichao',
'last_name': 'wang',
'age': '21',
'city': 'nanjing'
}
for person in people_info:
print(person.title() + ': ' + people_info[person].title())
# 6.2
favorite_numbers = {
'zibba': '13',
'Andy': '1',
'Amy': '2',
}
for favorite_number in favorite_numbers:
print(favorite_number.title() + '\'s favorite number is ' + favorite_numbers[favorite_number].title())
# 6.3
vocabularys = {
    'variable': 'presents a placeholder or container to fill in data',
    'Array': 'an ordered data set',
    'Tuple': 'same as Array but can\'t be changed',
    'Data type': 'presents what kind of data it is, like integer, string, float',
'condition statement': 'if condition',
}
for vocabulary in vocabularys:
print(vocabulary.title() + ': ' + vocabularys[vocabulary].title())
| 24.171429
| 106
| 0.632388
|
28ec1552de66fbf62e0108246ca5cdf2bd14a916
| 1,062
|
py
|
Python
|
scripts/ingestors/ncdc/xcheck_ghcn_stations.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | 1
|
2019-10-07T17:01:24.000Z
|
2019-10-07T17:01:24.000Z
|
scripts/ingestors/ncdc/xcheck_ghcn_stations.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | null | null | null |
scripts/ingestors/ncdc/xcheck_ghcn_stations.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | null | null | null |
"""Compare what we have for stations and what NCEI has for GHCN"""
from __future__ import print_function
import sys
import pandas as pd
from pyiem.network import Table as NetworkTable
def read_table(state):
"""Load up what NCEI has"""
rows = []
for line in open('ghcnd-stations.txt'):
if not line.startswith("US") or line[38:40] != state:
continue
fullid = line[:11]
name = line[41:71].strip()
rows.append(dict(name=name, fullid=fullid, lastfour=fullid[-4:]))
return pd.DataFrame(rows)
def main(argv):
"""Can we do it?"""
nt = NetworkTable("%sCLIMATE" % (argv[1], ))
ncei = read_table(argv[1])
for sid in nt.sts:
if sid[2] == 'C' or sid[-4:] == '0000':
continue
df = ncei[ncei['fullid'] == nt.sts[sid]['ncdc81']]
if len(df.index) == 1:
continue
print(("Resolve Conflict: iem: %s %s ncdc81: %s ncei: %s"
) % (sid, nt.sts[sid]['name'], nt.sts[sid]['ncdc81'], df))
if __name__ == '__main__':
main(sys.argv)
| 28.702703
| 73
| 0.576271
|
381da616768ff593aa1e45633c03aaf1d3e3f089
| 286
|
py
|
Python
|
django/social_network/feed/admin.py
|
sixfwa/django-examples
|
4da7f9d255e622482a8562f0eeb0417d623c9385
|
[
"MIT"
] | null | null | null |
django/social_network/feed/admin.py
|
sixfwa/django-examples
|
4da7f9d255e622482a8562f0eeb0417d623c9385
|
[
"MIT"
] | 24
|
2021-03-19T12:01:04.000Z
|
2022-02-10T12:21:49.000Z
|
django/social_network/feed/admin.py
|
sixfwa/django-examples
|
4da7f9d255e622482a8562f0eeb0417d623c9385
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import Post
class PostAdmin(admin.ModelAdmin):
list_display = ("id", "content", "author", "published_date")
list_filter = ("author",)
list_per_page = 25
admin.site.register(Post, PostAdmin)
| 15.888889
| 64
| 0.706294
|
a8b393545b4172630d018ea2430b0ae2615f6fed
| 3,937
|
py
|
Python
|
src/programy/clients/sanicrest.py
|
ItsPhant/program-y
|
c2b211fcaf8cedc7d6d95a8ea9470a913efa1622
|
[
"MIT"
] | null | null | null |
src/programy/clients/sanicrest.py
|
ItsPhant/program-y
|
c2b211fcaf8cedc7d6d95a8ea9470a913efa1622
|
[
"MIT"
] | null | null | null |
src/programy/clients/sanicrest.py
|
ItsPhant/program-y
|
c2b211fcaf8cedc7d6d95a8ea9470a913efa1622
|
[
"MIT"
] | 1
|
2020-02-21T17:58:05.000Z
|
2020-02-21T17:58:05.000Z
|
"""
Copyright (c) 2016-17 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#
# curl 'http://localhost:5000/api/v1.0/ask?question=hello+world&sessionid=1234567890'
#
##############################################################
# IMPORTANT
# Sanic is not supported on windows due to a dependency on
# uvloop. This code will not run on Windows
#
import logging
from sanic import Sanic
from sanic.response import json
from sanic.exceptions import ServerError
from programy.clients.rest import RestBotClient
class SanicRestBotClient(RestBotClient):
def __init__(self, argument_parser=None):
RestBotClient.__init__(self, "SanicRest", argument_parser)
def get_api_key(self, rest_request):
if 'apikey' not in rest_request.raw_args or rest_request.raw_args['apikey'] is None:
return None
return rest_request.raw_args['apikey']
def server_abort(self, message, status_code):
raise ServerError(message, status_code=status_code)
def get_question(self, rest_request):
if 'question' not in rest_request.raw_args or rest_request.raw_args['question'] is None:
print("'question' missing from rest_request")
if logging.getLogger().isEnabledFor(logging.ERROR):
logging.error("'question' missing from rest_request")
self.server_abort("'question' missing from rest_request", 500)
return rest_request.raw_args['question']
def get_sessionid(self, rest_request):
if 'sessionid' not in rest_request.raw_args or rest_request.raw_args['sessionid'] is None:
print("'sessionid' missing from rest_request")
if logging.getLogger().isEnabledFor(logging.ERROR):
logging.error("'sessionid' missing from rest_request")
self.server_abort("'sessionid' missing from rest_request", 500)
return rest_request.raw_args['sessionid']
REST_CLIENT = None
print("Initiating REST Service...")
APP = Sanic()
@APP.route('/api/v1.0/ask', methods=['GET'])
async def ask(request):
response, status = REST_CLIENT.process_request(request)
return json(response, status=status)
if __name__ == '__main__':
print("Loading, please wait...")
REST_CLIENT = SanicRestBotClient()
def run():
print("REST Client running on %s:%s" % (REST_CLIENT.configuration.client_configuration.host,
REST_CLIENT.configuration.client_configuration.port))
if REST_CLIENT.configuration.client_configuration.debug is True:
print("REST Client running in debug mode")
APP.run(host=REST_CLIENT.configuration.client_configuration.host,
port=REST_CLIENT.configuration.client_configuration.port,
debug=REST_CLIENT.configuration.client_configuration.debug,
workers=REST_CLIENT.configuration.client_configuration.workers)
run()
| 41.882979
| 120
| 0.715011
|
c022cec9261506341d84fbf4d3bf11a10045a971
| 936
|
py
|
Python
|
examples/plotting/server/lorenz.py
|
tswicegood/bokeh
|
2e74be5c9288306896e8c76af2e14a8c7513e0e3
|
[
"BSD-3-Clause"
] | 2
|
2015-07-23T21:19:52.000Z
|
2016-01-25T17:00:15.000Z
|
examples/plotting/server/lorenz.py
|
csaid/bokeh
|
4312b2de1a15fb24884fcd97eaf6442bf8b4bd7b
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plotting/server/lorenz.py
|
csaid/bokeh
|
4312b2de1a15fb24884fcd97eaf6442bf8b4bd7b
|
[
"BSD-3-Clause"
] | 2
|
2015-12-22T04:13:10.000Z
|
2021-07-06T21:18:04.000Z
|
# The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import numpy as np
from scipy.integrate import odeint
from bokeh.plotting import *
sigma = 10
rho = 28
beta = 8.0/3
theta = 3 * np.pi / 4
def lorenz(xyz, t):
x, y, z = xyz
x_dot = sigma * (y - x)
y_dot = x * rho - x * z - y
z_dot = x * y - beta* z
return [x_dot, y_dot, z_dot]
initial = (-10, -7, 35)
t = np.arange(0, 100, 0.001)
solution = odeint(lorenz, initial, t)
x = solution[:, 0]
y = solution[:, 1]
z = solution[:, 2]
xprime = np.cos(theta) * x - np.sin(theta) * y
colors = ["#C6DBEF", "#9ECAE1", "#6BAED6", "#4292C6", "#2171B5", "#08519C", "#08306B",]
output_server("lorenz")
multi_line(np.array_split(xprime, 7), np.array_split(z, 7),
line_color=colors, line_alpha=0.8, line_width=1.5,
tools="pan,wheel_zoom,box_zoom,reset,previewsave", title="lorenz example")
show() # open a browser
| 23.4
| 87
| 0.622863
|
a63fbcd877db10d43233565c64f94e78094bd3bc
| 1,486
|
py
|
Python
|
chrome/installer/mini_installer/generate_previous_version_mini_installer.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
chrome/installer/mini_installer/generate_previous_version_mini_installer.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113
|
2015-05-04T09:58:14.000Z
|
2022-01-31T19:35:03.000Z
|
chrome/installer/mini_installer/generate_previous_version_mini_installer.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates a mini_installer with a lower version than an existing one."""
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--alternate_version_generator',
help='Path to alternate_version_generator.')
parser.add_argument('--mini_installer', help='Path to input mini_installer')
parser.add_argument('--out', help='Path to the generated mini_installer.')
parser.add_argument('--path_7za', help='Path to 7za.exe')
args = parser.parse_args()
assert args.alternate_version_generator
assert args.mini_installer
assert args.out
assert args.path_7za
cmd = [args.alternate_version_generator,
'--force',
'--previous',
'--mini_installer=' + args.mini_installer,
'--out=' + args.out,
'--7za_path=' + args.path_7za,]
try:
# Run |cmd|, redirecting stderr to stdout in order for captured errors to be
# inline with corresponding stdout.
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise Exception("Error while running cmd: %s\n"
"Exit code: %s\n"
"Command output:\n%s" %
(e.cmd, e.returncode, e.output))
if '__main__' == __name__:
sys.exit(main())
| 33.022222
| 80
| 0.681696
|
879cea79e4099c550b6956a218c33a8c7eee4c9b
| 3,393
|
py
|
Python
|
guillotina/schema/__init__.py
|
diefenbach/guillotina
|
a8c7247fca8294752901f643b35c5ed1c5dee76d
|
[
"BSD-2-Clause"
] | null | null | null |
guillotina/schema/__init__.py
|
diefenbach/guillotina
|
a8c7247fca8294752901f643b35c5ed1c5dee76d
|
[
"BSD-2-Clause"
] | null | null | null |
guillotina/schema/__init__.py
|
diefenbach/guillotina
|
a8c7247fca8294752901f643b35c5ed1c5dee76d
|
[
"BSD-2-Clause"
] | null | null | null |
# XXX INFO
# This package is pulled out of guillotina.schema to give guillotina more control
# over our use of fields(async), to provide a nicer api, and to require fewer dependencies
# in order to work with guillotina
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
##############################################################################
from guillotina.schema._bootstrapinterfaces import NO_VALUE
from guillotina.schema._field import ASCII
from guillotina.schema._field import ASCIILine
from guillotina.schema._field import Bool
from guillotina.schema._field import Bytes
from guillotina.schema._field import BytesLine
from guillotina.schema._field import Choice
from guillotina.schema._field import Container
from guillotina.schema._field import Date
from guillotina.schema._field import Datetime
from guillotina.schema._field import Decimal
from guillotina.schema._field import Dict
from guillotina.schema._field import DottedName
from guillotina.schema._field import Field
from guillotina.schema._field import Float
from guillotina.schema._field import FrozenSet
from guillotina.schema._field import Id
from guillotina.schema._field import Int
from guillotina.schema._field import InterfaceField
from guillotina.schema._field import Iterable
from guillotina.schema._field import JSONField
from guillotina.schema._field import List
from guillotina.schema._field import MinMaxLen
from guillotina.schema._field import NativeString
from guillotina.schema._field import NativeStringLine
from guillotina.schema._field import Object
from guillotina.schema._field import Orderable
from guillotina.schema._field import Password
from guillotina.schema._field import Set
from guillotina.schema._field import SourceText
from guillotina.schema._field import Text
from guillotina.schema._field import TextLine
from guillotina.schema._field import Time
from guillotina.schema._field import Timedelta
from guillotina.schema._field import Tuple
from guillotina.schema._field import URI
from guillotina.schema._schema import get_fields
from guillotina.schema._schema import get_fields_in_order
from guillotina.schema._schema import getFieldNames
from guillotina.schema._schema import getFieldNamesInOrder
from guillotina.schema._schema import getSchemaValidationErrors
from guillotina.schema._schema import getValidationErrors
from guillotina.schema.accessors import accessors
from guillotina.schema.exceptions import ValidationError
getFields = get_fields # b/w
getFieldsInOrder = get_fields_in_order # b/w
# pep 8 friendliness
ASCII, ASCIILine, Bool, Bytes, BytesLine, Choice, Container, Date, Datetime
Decimal, Dict, DottedName, Field, Float, FrozenSet, Id, Int, InterfaceField
Iterable, List, MinMaxLen, NativeString, NativeStringLine, Object, Orderable
Password, Set, SourceText, Text, TextLine, Time, Timedelta, Tuple, URI
get_fields, get_fields_in_order, getFieldNames, getFieldNamesInOrder,
getValidationErrors, getSchemaValidationErrors, JSONField
accessors
ValidationError
NO_VALUE
| 44.644737
| 85
| 0.804008
|
cfb1d1aa1e616e6ac8d5a961b08e0a5c7f0839db
| 658
|
py
|
Python
|
braid/package.py
|
alex/braid
|
63016647ab975e56680704df041d3b0f5d4e201c
|
[
"MIT"
] | 1
|
2015-11-08T13:02:34.000Z
|
2015-11-08T13:02:34.000Z
|
braid/package.py
|
alex/braid
|
63016647ab975e56680704df041d3b0f5d4e201c
|
[
"MIT"
] | null | null | null |
braid/package.py
|
alex/braid
|
63016647ab975e56680704df041d3b0f5d4e201c
|
[
"MIT"
] | null | null | null |
from braid.api import sudo, abort
from braid.info import distroFamily
def update():
"""
Update package list.
"""
if distroFamily() == 'debian':
sudo('/usr/bin/apt-get update')
elif distroFamily() == 'fedora':
# Automatic
pass
else:
abort('Unknown distro.')
def install(packages):
"""
Install a list of packages.
"""
if distroFamily() == 'debian':
sudo('/usr/bin/apt-get --yes --quiet install {}'.format(" ".join(packages)))
elif distroFamily() == 'fedora':
sudo('/usr/bin/yum install -y {}'.format(" ".join(packages)))
else:
abort('Unknown distro.')
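# Illustrative usage (package names are only examples): install(['git', 'htop'])
# runs 'apt-get --yes --quiet install git htop' on Debian-family hosts and
# 'yum install -y git htop' on Fedora-family hosts.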
| 23.5
| 84
| 0.569909
|
3d856d4bb5a0e1be84105869419bb01581b1339d
| 1,719
|
py
|
Python
|
samples/snippets/simple_app.py
|
KoffieLabs/python-bigquery
|
33b317abdc6d69f33722cb0504bb0b78c1c80e30
|
[
"Apache-2.0"
] | 1
|
2022-03-25T21:07:44.000Z
|
2022-03-25T21:07:44.000Z
|
samples/snippets/simple_app.py
|
abecerrilsalas/python-bigquery
|
8da4fa9e77bcfd2b68818b5d65b38ccc59899a01
|
[
"Apache-2.0"
] | null | null | null |
samples/snippets/simple_app.py
|
abecerrilsalas/python-bigquery
|
8da4fa9e77bcfd2b68818b5d65b38ccc59899a01
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple application that performs a query with BigQuery."""
# [START bigquery_simple_app_all]
# [START bigquery_simple_app_deps]
from google.cloud import bigquery
# [END bigquery_simple_app_deps]
def query_stackoverflow() -> None:
# [START bigquery_simple_app_client]
client = bigquery.Client()
# [END bigquery_simple_app_client]
# [START bigquery_simple_app_query]
query_job = client.query(
"""
SELECT
CONCAT(
'https://stackoverflow.com/questions/',
CAST(id as STRING)) as url,
view_count
FROM `bigquery-public-data.stackoverflow.posts_questions`
WHERE tags like '%google-bigquery%'
ORDER BY view_count DESC
LIMIT 10"""
)
results = query_job.result() # Waits for job to complete.
# [END bigquery_simple_app_query]
# [START bigquery_simple_app_print]
for row in results:
print("{} : {} views".format(row.url, row.view_count))
# [END bigquery_simple_app_print]
if __name__ == "__main__":
query_stackoverflow()
# [END bigquery_simple_app_all]
| 31.254545
| 74
| 0.700407
|
eab68e72a961f36a34226a4d014217217815bced
| 8,826
|
py
|
Python
|
aem2segy/aem2segy.py
|
RichardScottOZ/AEM2SEG-Y
|
1d94ca7fd1f66e1da0aad5ac9b3b0bf85b6ac0ab
|
[
"Apache-2.0"
] | null | null | null |
aem2segy/aem2segy.py
|
RichardScottOZ/AEM2SEG-Y
|
1d94ca7fd1f66e1da0aad5ac9b3b0bf85b6ac0ab
|
[
"Apache-2.0"
] | null | null | null |
aem2segy/aem2segy.py
|
RichardScottOZ/AEM2SEG-Y
|
1d94ca7fd1f66e1da0aad5ac9b3b0bf85b6ac0ab
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#===============================================================================
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
'''
Created on 8/5/2019
@author: Neil Symington
Functions for converting the aseg gdf data to seg-y
'''
import numpy as np
import ast
from scipy import interpolate
# Define a function for parsing the control file
# From https://stackoverflow.com/questions/715417/converting-from-a-string-to-boolean-in-python
def to_bool(value):
"""
Converts 'something' to boolean. Raises exception for invalid formats
Possible True values: 1, True, "1", "TRue", "yes", "y", "t"
Possible False values: 0, False, None, [], {}, "", "0", "faLse", "no", "n", "f", 0.0, ...
:param value: string
:return: boolean: True or False
"""
if str(value).lower() in ("yes", "y", "true", "t", "1"): return True
if str(value).lower() in ("no", "n", "false", "f", "0", "0.0", "", "none", "[]", "{}"): return False
raise Exception('Invalid value for boolean conversion: ' + str(value))
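# Illustrative results of to_bool(), derived from the value tables above:
#   to_bool("Yes")   -> True
#   to_bool("0.0")   -> False
#   to_bool("maybe") -> raises Exception (not a recognised boolean spelling)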
def RepresentsInt(s):
"""
    Check if a string can be represented by an integer
:param s: string
:return: Boolean
"""
try:
int(s)
return True
except ValueError:
return False
def parse_control_file(infile):
"""
A function for parsing the control file
:param infile: path to control file
:return:
        dictionary with the key information needed for the conversion
"""
    # Create dictionary
var_dict = {}
# OPen the file
with open(infile, 'r') as f:
# Iterate through the lines in the file
for line in f:
s = line.strip()
# Pass for empty lines
if len(s.strip()) == 0:
pass
# Pass for comments
elif s.strip()[0] == '#':
pass
# Otherwise split the string on the equals and add to the dictionary with the key word as the key
else:
l = s.split('=')
var_dict[l[0].strip()] = l[1].strip()
return var_dict
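# A control file is expected to hold plain "key = value" lines; '#' lines and blank
# lines are ignored. Illustrative content (values are examples only):
#   easting = 2
#   data = 43-72
#   resistivity = yes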
def listify_data_columns(string):
"""
Take a string representing a range of integer values (e.g. 43-72) and create a pythonic range
:param string:
:return:
"""
d1 = int(string.split('-')[0])
d2 = int(string.split('-')[1])
return range(d1, d2+1)
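# For example, listify_data_columns("43-72") yields the integer range 43..72
# inclusive (range(43, 73) under Python 3).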
def check_range_string(s):
"""
Check if a string is a valid range of type "34-67"
:param s: string
:return: boolean
"""
L = s.split("-")
if (len(L) != 2):
return False
elif (RepresentsInt(L[0])) & (RepresentsInt(L[1])):
return True
else:
return False
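# For example, check_range_string("34-67") is True, while check_range_string("34:67")
# and check_range_string("34-67-99") are both False.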
def parse_AEM(AEM_file, var_dict):
"""
    This function parses the AEM ASCII file
:param AEM_file: path to file
:param var_dict: dictionary with column information
:return: dictionary with numpy arrays for numerical data with keyword from var_dict as the key
"""
# Dictionary for column numbers
col_dict = {}
data_dict = {}
# Find the column indices and rewrite them into a dictionary as lists
    # We will use this dictionary to extract the data
columns = ['easting', 'northing', 'elevation', 'fiducial', 'depth_of_investigation', 'data', 'depth_top']
# Flags
depth_top_in_file = True
# Iterate through columns
for item in columns[:-2]:
try:
# Extract entry from dictionary
entry = var_dict[item]
            # check whether the entry is an integer, a string, or something else
if type(entry) == int:
col_dict[item] = entry
elif type(entry) == str:
# get from header file
if RepresentsInt(entry):
col_dict[item] = entry
else:
print "Invalid string entry for ", item
except KeyError:
if item == 'depth_of_investigation':
data_dict[item] = None
else:
print "Please create a valid control file entry for variable ", item
return None
for item in columns[-2:]:
try:
# Extract entry from dictionary
entry = var_dict[item]
# check the data type is a string with a range or mapped to the .hdr file
if type(entry) == str:
                # Check if it is a valid range
if check_range_string(entry):
col_dict[item] = listify_data_columns(entry)
else:
# Raise flag
depth_top_in_file = False
col_dict[item] = np.array(ast.literal_eval(entry))
except KeyError:
print "Please create a valid control file entry for variable ", item
# Convert to pythonic indexing
first_col = 1
# Search for first_col keyword in case it has been included
if 'first_col' in var_dict.keys():
first_col = int(var_dict['first_col'])
t = (2 - first_col)
    # Now subtract the offset from all column entries in the col_dict dictionary
for item in columns[:-2]:
# Get the columns
cols = int(col_dict[item]) - t
# Extract as a numpy array
data_dict[item] = np.loadtxt(AEM_file, usecols= cols)
# Get the data cols
cols = [int(x) - t for x in col_dict['data']]
data_dict['data'] = np.loadtxt(AEM_file, usecols=cols)
# If data is resistivity, convert to conductivity
if to_bool(var_dict['resistivity']):
data_dict['data'] = 1./data_dict['data']
# Multiply data by the scaling factor
    data_dict['data'] = data_dict['data'] * float(var_dict['scaling_factor'])
# If the depth tops are in the file extract
if depth_top_in_file:
cols = [x - t for x in col_dict['depth_top']]
# Extract and tile
data_dict['depth_top'] = np.loadtxt(AEM_file, usecols = cols)
else:
# Otherwise extract the parsed list and tile it to fit the data
data_dict['depth_top'] = np.tile(np.array(col_dict['depth_top']),
(data_dict['data'].shape[0],1))
# Assert that the depth top and data array are the same shape
assert data_dict['depth_top'].shape == data_dict['data'].shape
return data_dict
# Function for nulling all values below the doi
def remove_below_doi(interpolated_data, z_new, doi, elevation):
"""
:param interpolated_data: numpy array with interpolated data
:param z_new: new elevation intervals for segy trace
:param doi: float with fiducial depth of investigation
    :param elevation: float with fiducial elevation
:return:
interpolated_data with below doi values changed to -1.
"""
doi_elevation = -1 * (elevation - doi)
# Find the indices that are below the depth of investigation
interpolated_data[np.where(z_new > doi_elevation)] = -1
return interpolated_data
# Interpolate so that we have a continuously spaced data
def interpolate_layer_data(depth_top, z_new, dat, elev, max_depth, datum):
    # First find the layer bottoms (the next layer top minus a small delta)
depth_bottom = depth_top[1:] - 0.01
# Now add the layer tops and bottoms into a single array and produce a
# corresponding conductivity array
# The aim is to book end each layer
z = []
new_dat = []
for i in range(len(depth_bottom)):
z.append(depth_top[i])
z.append(depth_bottom[i])
new_dat.append(dat[i])
new_dat.append(dat[i])
# Convert the depth to elevation (where negative values are above msl)
z = [x - elev for x in z]
    # Finally bookend the air and flag it with the null value -1
z.insert(0, z[0] - 0.01)
z.insert(0, datum * -1)
new_dat.insert(0, -1)
new_dat.insert(0, -1)
# Now bookend the bottom half-space to the max depth
z.append(z[-1] + 0.01)
z.append(-1 * max_depth * -1)
new_dat.append(dat[-1])
new_dat.append(dat[-1])
f = interpolate.interp1d(z, new_dat)
interpolated_dat = f(z_new)
return interpolated_dat
| 28.019048
| 109
| 0.5954
|
37638a659f24d1cbb5c7e53831f15db08a50a056
| 5,755
|
py
|
Python
|
course_access_groups/views.py
|
appsembler/course-access-groups
|
601b17b8edda8fc41594e7ea2f53ba1800e03c49
|
[
"MIT"
] | 4
|
2020-03-09T15:47:17.000Z
|
2021-09-08T09:17:42.000Z
|
course_access_groups/views.py
|
appsembler/course-access-groups
|
601b17b8edda8fc41594e7ea2f53ba1800e03c49
|
[
"MIT"
] | 51
|
2019-11-26T14:09:33.000Z
|
2022-03-09T08:27:59.000Z
|
course_access_groups/views.py
|
appsembler/course-access-groups
|
601b17b8edda8fc41594e7ea2f53ba1800e03c49
|
[
"MIT"
] | 3
|
2020-04-12T22:33:24.000Z
|
2021-09-30T20:28:03.000Z
|
# -*- coding: utf-8 -*-
"""
API Endpoints for Course Access Groups.
"""
from django.contrib.auth import get_user_model
from django_filters.rest_framework import DjangoFilterBackend
from opaque_keys.edx.keys import CourseKey
from organizations.models import OrganizationCourse, UserOrganizationMapping
from rest_framework import viewsets
from rest_framework.filters import SearchFilter
from rest_framework.pagination import LimitOffsetPagination
from .filters import CourseOverviewFilter, UserFilter
from .models import CourseAccessGroup, GroupCourse, Membership, MembershipRule, PublicCourse
from .openedx_modules import CourseOverview
from .permissions import CommonAuthMixin, get_current_organization
from .serializers import (
CourseAccessGroupSerializer,
CourseOverviewSerializer,
GroupCourseSerializer,
MembershipRuleSerializer,
MembershipSerializer,
PublicCourseSerializer,
UserSerializer
)
class CourseAccessGroupViewSet(CommonAuthMixin, viewsets.ModelViewSet):
"""REST API endpoints to manage Course Access Groups.
    These endpoints follow the standard Django Rest Framework ViewSet API structure.
GET /course-access-groups/
"""
model = CourseAccessGroup
pagination_class = LimitOffsetPagination
serializer_class = CourseAccessGroupSerializer
def perform_create(self, serializer):
organization = get_current_organization(self.request)
serializer.save(organization=organization)
def get_queryset(self):
organization = get_current_organization(self.request)
return self.model.objects.filter(organization=organization)
class CourseViewSet(CommonAuthMixin, viewsets.ReadOnlyModelViewSet):
"""
    API ViewSet to retrieve course information with its Course Access Group associations.
    This ViewSet provides only minimal course information, such as id and name.
    For more detailed course information, other specialised APIs should be used.
"""
model = CourseOverview
pagination_class = LimitOffsetPagination
serializer_class = CourseOverviewSerializer
lookup_url_kwarg = 'pk'
filterset_class = CourseOverviewFilter
filter_backends = [DjangoFilterBackend, SearchFilter]
search_fields = ['id', 'display_name']
def get_object(self):
"""
Override the GenericAPIView.get_object to fix CourseKey related issue.
"""
course_key = CourseKey.from_string(self.kwargs[self.lookup_url_kwarg])
self.kwargs[self.lookup_url_kwarg] = course_key
return super(CourseViewSet, self).get_object()
def get_queryset(self):
organization = get_current_organization(self.request)
return CourseOverview.objects.filter(
id__in=OrganizationCourse.objects.filter(
organization=organization,
active=True,
).values('course_id'),
)
class MembershipViewSet(CommonAuthMixin, viewsets.ModelViewSet):
model = Membership
pagination_class = LimitOffsetPagination
serializer_class = MembershipSerializer
def get_queryset(self):
organization = get_current_organization(self.request)
return self.model.objects.filter(
group__in=CourseAccessGroup.objects.filter(organization=organization),
)
class MembershipRuleViewSet(CommonAuthMixin, viewsets.ModelViewSet):
model = MembershipRule
pagination_class = LimitOffsetPagination
serializer_class = MembershipRuleSerializer
def get_queryset(self):
organization = get_current_organization(self.request)
return self.model.objects.filter(
group__in=CourseAccessGroup.objects.filter(organization=organization),
)
class PublicCourseViewSet(CommonAuthMixin, viewsets.ModelViewSet):
"""
API ViewSet to mark specific courses as public to circumvent the Course Access Group rules.
"""
model = PublicCourse
pagination_class = LimitOffsetPagination
serializer_class = PublicCourseSerializer
def get_queryset(self):
organization = get_current_organization(self.request)
course_links = OrganizationCourse.objects.filter(organization=organization, active=True)
return self.model.objects.filter(
course_id__in=course_links.values('course_id'),
)
class UserViewSet(CommonAuthMixin, viewsets.ReadOnlyModelViewSet):
"""
API ViewSet to retrieve user information with their Course Access Group associations.
    This ViewSet provides only minimal user information, such as email and username.
    For more detailed user information, other specialised APIs should be used.
"""
model = get_user_model()
pagination_class = LimitOffsetPagination
serializer_class = UserSerializer
filterset_class = UserFilter
filter_backends = [DjangoFilterBackend, SearchFilter]
search_fields = ['email', 'username', 'profile__name']
def get_queryset(self):
organization = get_current_organization(self.request)
return self.model.objects.filter(
pk__in=UserOrganizationMapping.objects.filter(
organization=organization,
is_active=True, # TODO: Add test for `is_active`
is_amc_admin=False, # Site admins shouldn't be included in the API.
).values('user_id'),
)
class GroupCourseViewSet(CommonAuthMixin, viewsets.ModelViewSet):
model = GroupCourse
pagination_class = LimitOffsetPagination
serializer_class = GroupCourseSerializer
def get_queryset(self):
organization = get_current_organization(self.request)
return self.model.objects.filter(
group__in=CourseAccessGroup.objects.filter(organization=organization),
)
| 35.306748
| 96
| 0.743354
|
8fd178cdf721f4daca631dac0dba9c508f73f3b7
| 18,137
|
py
|
Python
|
pibooth/config/parser.py
|
babou7635/pibooth
|
e82b26ad792df5d5da7d2d8d63392db8442a5eb4
|
[
"MIT"
] | 1
|
2020-09-03T07:50:53.000Z
|
2020-09-03T07:50:53.000Z
|
pibooth/config/parser.py
|
babou7635/pibooth
|
e82b26ad792df5d5da7d2d8d63392db8442a5eb4
|
[
"MIT"
] | null | null | null |
pibooth/config/parser.py
|
babou7635/pibooth
|
e82b26ad792df5d5da7d2d8d63392db8442a5eb4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Pibooth configuration.
"""
import io
import ast
import os
import os.path as osp
import itertools
import inspect
from collections import OrderedDict as odict
from pibooth.utils import LOGGER, open_text_editor
from pibooth import language
from pibooth.plugins import get_plugin_name
try:
from configparser import ConfigParser
except ImportError:
# Python 2.x fallback
from ConfigParser import ConfigParser
try:
basestring
except NameError:
# Python 3.x fallback
basestring = str
def values_list_repr(values):
"""Concatenate a list of values to a readable string.
"""
return "'{}' or '{}'".format("', '".join([str(i) for i in values[:-1]]), values[-1])
DEFAULT = odict((
("GENERAL",
odict((
("language",
("en",
"User interface language: {}".format(values_list_repr(language.get_supported_languages())),
"UI language", language.get_supported_languages())),
("directory",
("~/Pictures/pibooth",
"Path to save pictures (list of quoted paths accepted)",
None, None)),
("autostart",
(False,
"Start pibooth at Raspberry Pi startup",
"Auto-start", ['True', 'False'])),
("debug",
(False,
"In debug mode, exceptions are not caught, logs are more verbose, pictures are cleared at startup",
"Debug mode", ['True', 'False'])),
("plugins",
('',
"Path to custom plugin(s) not installed with pip (list of quoted paths accepted)",
None, None)),
))
),
("WINDOW",
odict((
("size",
((800, 480),
"The (width, height) of the display window or 'fullscreen'",
'Startup size', ['(800, 480)', 'fullscreen'])),
("background",
((0, 0, 0),
"Background RGB color or image path",
None, None)),
("text_color",
((255, 255, 255),
"Text RGB color",
"Text RGB color", (255, 255, 255))),
("flash",
(True,
"Blinking background when a capture is taken",
"Flash on capture", ['True', 'False'])),
("animate",
(False,
"Animate the last taken picture by displaying captures one by one",
"Animated picture", ['True', 'False'])),
("animate_delay",
(0.2,
"How long is displayed the capture in seconds before switching to the next one",
None, None)),
("final_image_delay",
(-1,
"How long is displayed the final picture in seconds before being hidden (-1 if never hidden)",
"Final image display time", ['-1'] + [str(i) for i in range(0, 121, 5)])),
("arrows",
('bottom',
"Show arrows to indicate physical buttons: 'bottom', 'top' or 'hidden'",
"Show button arrows", ['bottom', 'top', 'hidden'])),
("arrows_x_offset",
(0,
"Apply horizontal offset to arrows position",
None, None)),
("preview_delay",
(3,
"How long is the preview in seconds",
"Preview delay", [str(i) for i in range(1, 21)])),
("preview_countdown",
(True,
"Show a countdown timer during the preview",
"Preview countdown", ['True', 'False'])),
("preview_stop_on_capture",
(False,
"Stop the preview before taking the capture",
None, None)),
))
),
("PICTURE",
odict((
("orientation",
("auto",
"Orientation of the final picture: 'auto', 'portrait' or 'landscape'",
"Orientation", ['auto', 'portrait', 'landscape'])),
("captures",
((4, 1),
"Possible choice(s) of captures numbers (numbers between 1 to 4)",
"Number of captures", ['1', '2', '3', '4'] + [str(val) for val in itertools.permutations(range(1, 5), 2)])),
("captures_effects",
("none",
"Effect applied to the captures (list of quoted names accepted)",
None, None)),
("captures_cropping",
(False,
"Crop each capture border in order to fit the paper size",
"Crop captures", ['True', 'False'])),
("margin_thick",
(100,
"Thick (in pixels) between captures and picture borders/texts",
"Borders width", [str(i) for i in range(0, 210, 10)])),
("footer_text1",
("Footer 1",
"Main text displayed",
"Title", "")),
("footer_text2",
("Footer 2",
"Secondary text displayed",
"Sub-title", "")),
("text_colors",
((0, 0, 0),
"RGB colors used for footer texts (list of tuples accepted)",
None, None)),
("text_fonts",
(('Amatic-Bold', 'AmaticSC-Regular'),
"Fonts name or file path used for footer texts (list of quoted names accepted)",
None, None)),
("text_alignments",
('center',
"Alignments used for footer texts: 'left', 'center' or 'right' (list of quoted names accepted)",
None, None)),
("overlays",
('',
"Overlay path (PNG file) with same aspect ratio than final picture (list of quoted paths accepted)",
None, None)),
("backgrounds",
((255, 255, 255),
"Background RGB color or image path (list of tuples or quoted paths accepted)",
None, None)),
))
),
("CAMERA",
odict((
("iso",
(100,
"Adjust for lighting issues, normal is 100 or 200 and dark is 800 max",
None, None)),
("flip",
(False,
"Flip horizontally the capture",
None, None)),
("rotation",
(0,
"Rotation of the camera: 0, 90, 180 or 270",
None, None)),
("resolution",
((1934, 2464),
"Resolution for camera captures (preview will have same aspect ratio)",
None, None)),
("delete_internal_memory",
(False,
"Delete captures from camera internal memory (when applicable)",
None, None)),
))
),
("PRINTER",
odict((
("printer_name",
("default",
"Name of the printer defined in CUPS (or use the 'default' one)",
None, None)),
("printer_delay",
(10,
"How long is the print view in seconds (0 to skip it)",
"Time to show print screen", [str(i) for i in range(0, 21)])),
("max_pages",
(-1,
"Maximum number of printed pages before warning on paper/ink levels (-1 = infinite)",
'Maximum of printed pages', [str(i) for i in range(-1, 1000)])),
("max_duplicates",
(3,
"Maximum number of duplicate pages sent to the printer (avoid paper waste)",
'Maximum of printed duplicates', [str(i) for i in range(0, 10)])),
("pictures_per_page",
(1,
"Print 1, 2, 3 or 4 picture copies per page",
'Number of copies per page', [str(i) for i in range(1, 5)])),
))
),
("CONTROLS",
odict((
("debounce_delay",
(0.3,
"How long to debounce the hardware buttons in seconds",
None, None)),
("picture_btn_pin",
(11,
"Physical GPIO IN pin to take a picture",
None, None)),
("picture_led_pin",
(7,
"Physical GPIO OUT pin to light a LED when picture button is pressed",
None, None)),
("print_btn_pin",
(13,
"Physical GPIO IN pin to print a picture",
None, None)),
("print_led_pin",
(15,
"Physical GPIO OUT pin to light a LED when print button is pressed",
None, None)),
))
),
))
class PiConfigParser(ConfigParser):
"""Enhenced configuration file parser.
"""
def __init__(self, filename, plugin_manager):
ConfigParser.__init__(self)
self._pm = plugin_manager
self.filename = osp.abspath(osp.expanduser(filename))
if osp.isfile(self.filename):
self.load()
def _get_abs_path(self, path):
"""Return absolute path. In case of relative path given, the absolute
one is created using config file path as reference path.
"""
if not path: # Empty string, don't process it as it is not a path
return path
path = osp.expanduser(path)
if not osp.isabs(path):
path = osp.join(osp.relpath(osp.dirname(self.filename), '.'), path)
return osp.abspath(path)
def save(self, default=False):
"""Save the current or default values into the configuration file.
"""
LOGGER.info("Generate the configuration file in '%s'", self.filename)
dirname = osp.dirname(self.filename)
if not osp.isdir(dirname):
os.makedirs(dirname)
with io.open(self.filename, 'w', encoding="utf-8") as fp:
for section, options in DEFAULT.items():
fp.write("[{}]\n".format(section))
for name, value in options.items():
if default:
val = value[0]
else:
val = self.get(section, name)
fp.write("# {}\n{} = {}\n\n".format(value[1], name, val))
self.handle_autostart()
def load(self):
"""Load configuration from file.
"""
self.read(self.filename, encoding="utf-8")
self.handle_autostart()
def edit(self):
"""Open a text editor to edit the configuration.
"""
if open_text_editor(self.filename):
# Reload config to check if autostart has changed
self.load()
def handle_autostart(self):
"""Handle desktop file to start pibooth at the Raspberry Pi startup.
"""
filename = osp.expanduser('~/.config/autostart/pibooth.desktop')
dirname = osp.dirname(filename)
enable = self.getboolean('GENERAL', 'autostart')
if enable and not osp.isfile(filename):
if not osp.isdir(dirname):
os.makedirs(dirname)
LOGGER.info("Generate the auto-startup file in '%s'", dirname)
with open(filename, 'w') as fp:
fp.write("[Desktop Entry]\n")
fp.write("Name=pibooth\n")
fp.write("Exec=pibooth\n")
fp.write("Type=application\n")
elif not enable and osp.isfile(filename):
LOGGER.info("Remove the auto-startup file in '%s'", dirname)
os.remove(filename)
def join_path(self, *names):
"""Return the directory path of the configuration file
and join it the given names.
:param names: names to join to the directory path
:type names: str
"""
return osp.join(osp.dirname(self.filename), *names)
def add_option(self, section, option, default, description, menu_name=None, menu_choices=None):
"""Add a new option to the configuration and defines its default value.
:param section: section in which the option is declared
:type section: str
:param option: option name
:type option: str
:param default: default value of the option
:type default: any
:param description: description to put in the configuration
:type description: str
:param menu_name: option label on graphical menu (hidden if None)
:type menu_name: str
:param menu_choices: option possible choices on graphical menu
:type menu_choices: any
"""
assert section, "Section name can not be empty string"
assert option, "Option name can not be empty string"
assert description, "Description can not be empty string"
# Find the caller plugin
stack = inspect.stack()
if len(stack) < 2:
plugin_name = "Unknown"
else:
plugin = inspect.getmodule(inspect.stack()[1][0])
plugin_name = get_plugin_name(self._pm, plugin, False)
# Check that the option is not already created
if section in DEFAULT and option in DEFAULT[section]:
raise ValueError("The plugin '{}' try to define the option [{}][{}] "
"which is already defined.".format(plugin_name, section, option))
# Add the option to the default dictionary
description = "{}\n# Required by '{}' plugin".format(description, plugin_name)
DEFAULT.setdefault(section, odict())[option] = (default, description, menu_name, menu_choices)
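    # Illustrative plugin call (section/option names below are hypothetical):
    #   cfg.add_option('COUNTERS', 'taken', 0, "Number of captures taken",
    #                  "Captures taken", [str(i) for i in range(0, 100)])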
def get(self, section, option, **kwargs):
"""Override the default function of ConfigParser to add a
default value if section or option is not found.
:param section: config section name
:type section: str
:param option: option name
:type option: str
"""
if self.has_section(section) and self.has_option(section, option):
return ConfigParser.get(self, section, option, **kwargs)
return str(DEFAULT[section][option][0])
def set(self, section, option, value=None):
"""Override the default function of ConfigParser to create
the section if it does not exist."""
if not self.has_section(section):
self.add_section(section)
super(PiConfigParser, self).set(section, option, value)
def gettyped(self, section, option):
"""Get a value from config and try to convert it in a native Python
type (using the :py:mod:`ast` module).
:param section: config section name
:type section: str
:param option: option name
:type option: str
"""
value = self.get(section, option)
try:
return ast.literal_eval(value)
except (ValueError, SyntaxError):
return value
def getpath(self, section, option):
"""Get a path from config, evaluate the absolute path from configuration
file path.
:param section: config section name
:type section: str
:param option: option name
:type option: str
"""
return self._get_abs_path(self.get(section, option))
@staticmethod
def _get_authorized_types(types):
"""Get a tuple of authorized types and if the color and path are accepted
"""
if not isinstance(types, (tuple, list)):
types = [types]
else:
types = list(types)
if str in types: # Python 2.x compat
types[types.index(str)] = basestring
color = False
if 'color' in types:
types.remove('color')
types.append(tuple)
types.append(list)
color = True # Option accept color tuples
path = False
if 'path' in types:
types.remove('path')
types.append(basestring)
path = True # Option accept file path
types = tuple(types)
return types, color, path
def gettuple(self, section, option, types, extend=0):
"""Get a list of values from config. The values type shall be in the
list of authorized types. This method permits to get severals values
from the same configuration option.
If the option contains one value (with acceptable type), a tuple
with one element is created and returned.
:param section: config section name
:type section: str
:param option: option name
:type option: str
:param types: list of authorized types
:type types: list
:param extend: extend the tuple with the last value until length is reached
:type extend: int
"""
values = self.gettyped(section, option)
types, color, path = self._get_authorized_types(types)
if not isinstance(values, (tuple, list)):
if not isinstance(values, types):
raise ValueError("Invalid config value [{}][{}]={}".format(section, option, values))
values = (values,)
else:
# Check if one value is given or if it is a list of value
if color and len(values) == 3 and all(isinstance(elem, int) for elem in values):
values = (values,)
elif not all(isinstance(elem, types) for elem in values):
raise ValueError("Invalid config value [{}][{}]={}".format(section, option, values))
if path:
new_values = []
for v in values:
if isinstance(v, basestring):
new_values.append(self._get_abs_path(v))
else:
new_values.append(v)
values = tuple(new_values)
while len(values) < extend:
values += (values[-1],)
return values
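    # Illustrative call (option names come from the DEFAULT table above):
    #   cfg.gettuple('PICTURE', 'captures_effects', str, extend=4)
    # returns a 4-tuple of effect names, repeating the last one when fewer are set.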
| 37.395876
| 125
| 0.527099
|
240a2cff0673815fdc6aeb32f9603f9a1f79c650
| 65,559
|
py
|
Python
|
sarpy/geometry/point_projection.py
|
pressler-vsc/sarpy
|
fa6c951c42b9a7d9df2edfa53c771494cb0246fb
|
[
"MIT"
] | 1
|
2021-02-04T08:44:18.000Z
|
2021-02-04T08:44:18.000Z
|
sarpy/geometry/point_projection.py
|
pressler-vsc/sarpy
|
fa6c951c42b9a7d9df2edfa53c771494cb0246fb
|
[
"MIT"
] | null | null | null |
sarpy/geometry/point_projection.py
|
pressler-vsc/sarpy
|
fa6c951c42b9a7d9df2edfa53c771494cb0246fb
|
[
"MIT"
] | null | null | null |
"""
Functions to map between the coordinates in image pixel space and geographical
coordinates.
"""
import logging
from typing import Tuple
from types import MethodType # for binding a method dynamically to a class
import numpy
from sarpy.compliance import string_types, int_func
from sarpy.geometry.geocoords import ecf_to_geodetic, geodetic_to_ecf, wgs_84_norm
from sarpy.io.complex.sicd_elements.blocks import Poly2DType, XYZPolyType
from sarpy.io.DEM.DEM import DEMInterpolator
from sarpy.io.DEM.DTED import DTEDList, DTEDInterpolator
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Wade Schwartzkopf")
#############
# COA Projection definition
def _validate_adj_param(value, name):
"""
Validate the aperture adjustment vector parameters.
Parameters
----------
value : None|numpy.ndarray|list|tuple
name : str
Returns
-------
numpy.ndarray
"""
if value is None:
value = numpy.array([0, 0, 0], dtype='float64')
if not isinstance(value, numpy.ndarray):
value = numpy.array(value, dtype='float64')
if value.shape != (3,):
raise ValueError('{} must have shape (3, ). Got {}'.format(name, value.shape))
return value
def _ric_ecf_mat(rarp, varp, frame_type):
"""
Computes the ECF transformation matrix for RIC frame.
Parameters
----------
rarp : numpy.ndarray
varp : numpy.ndarray
frame_type : str
the final three characters should be one of ['ECI', 'ECF']
Returns
-------
numpy.ndarray
the RIC transform matrix (array)
"""
# Angular velocity of earth in radians/second, not including precession
w = 7292115.1467E-11
typ = frame_type.upper()[-3:]
vi = varp if typ == 'ECF' else varp + numpy.cross([0, 0, w], rarp)
r = rarp/numpy.linalg.norm(rarp)
c = numpy.cross(r, vi)
c /= numpy.linalg.norm(c) # NB: perpendicular to r
i = numpy.cross(c, r)
# this is the cross of two perpendicular normal vectors, so normal
return numpy.array([r, i, c], dtype='float64')
def _get_sicd_type_specific_projection(sicd):
"""
    Gets an intermediate, method-specific projection function with six required
calling arguments (self, row_transform, col_transform, time_coa, arp_coa, varp_coa).
Parameters
----------
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
Returns
-------
callable
"""
def pfa_projection():
SCP = sicd.GeoData.SCP.ECF.get_array()
pfa = sicd.PFA
polar_ang_poly = pfa.PolarAngPoly
spatial_freq_sf_poly = pfa.SpatialFreqSFPoly
polar_ang_poly_der = polar_ang_poly.derivative(der_order=1, return_poly=True)
spatial_freq_sf_poly_der = spatial_freq_sf_poly.derivative(der_order=1, return_poly=True)
# noinspection PyUnusedLocal, PyIncorrectDocstring
def method_projection(instance, row_transform, col_transform, time_coa, arp_coa, varp_coa):
"""
PFA specific intermediate projection.
Parameters
----------
row_transform : numpy.ndarray
col_transform : numpy.ndarray
time_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray, numpy.ndarray]
"""
ARP_minus_SCP = arp_coa - SCP
rSCPTgtCoa = numpy.linalg.norm(ARP_minus_SCP, axis=-1)
rDotSCPTgtCoa = numpy.sum(varp_coa * ARP_minus_SCP, axis=-1) / rSCPTgtCoa
thetaTgtCoa = polar_ang_poly(time_coa)
dThetaDtTgtCoa = polar_ang_poly_der(time_coa)
# Compute polar aperture scale factor (KSF) and derivative wrt polar angle
ksfTgtCoa = spatial_freq_sf_poly(thetaTgtCoa)
dKsfDThetaTgtCoa = spatial_freq_sf_poly_der(thetaTgtCoa)
# Compute spatial frequency domain phase slopes in Ka and Kc directions
# NB: sign for the phase may be ignored as it is cancelled in a subsequent computation.
dPhiDKaTgtCoa = row_transform * numpy.cos(thetaTgtCoa) + col_transform * numpy.sin(thetaTgtCoa)
dPhiDKcTgtCoa = -row_transform * numpy.sin(thetaTgtCoa) + col_transform * numpy.cos(thetaTgtCoa)
# Compute range relative to SCP
deltaRTgtCoa = ksfTgtCoa * dPhiDKaTgtCoa
# Compute derivative of range relative to SCP wrt polar angle.
# Scale by derivative of polar angle wrt time.
dDeltaRDThetaTgtCoa = dKsfDThetaTgtCoa * dPhiDKaTgtCoa + ksfTgtCoa * dPhiDKcTgtCoa
deltaRDotTgtCoa = dDeltaRDThetaTgtCoa * dThetaDtTgtCoa
return rSCPTgtCoa + deltaRTgtCoa, rDotSCPTgtCoa + deltaRDotTgtCoa
return method_projection
def rgazcomp_projection():
SCP = sicd.GeoData.SCP.ECF.get_array()
az_sf = sicd.RgAzComp.AzSF
# noinspection PyUnusedLocal, PyIncorrectDocstring
def method_projection(instance, row_transform, col_transform, time_coa, arp_coa, varp_coa):
"""
RgAzComp specific intermediate projection.
Parameters
----------
row_transform : numpy.ndarray
col_transform : numpy.ndarray
time_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray, numpy.ndarray]
"""
ARP_minus_SCP = arp_coa - SCP
rSCPTgtCoa = numpy.linalg.norm(ARP_minus_SCP, axis=-1)
rDotSCPTgtCoa = numpy.sum(varp_coa*ARP_minus_SCP, axis=-1)/rSCPTgtCoa
deltaRTgtCoa = row_transform
deltaRDotTgtCoa = -numpy.linalg.norm(varp_coa, axis=-1)*az_sf*col_transform
return rSCPTgtCoa + deltaRTgtCoa, rDotSCPTgtCoa + deltaRDotTgtCoa
return method_projection
def inca_projection():
inca = sicd.RMA.INCA
r_ca_scp = inca.R_CA_SCP
time_ca_poly = inca.TimeCAPoly
drate_sf_poly = inca.DRateSFPoly
# noinspection PyUnusedLocal, PyIncorrectDocstring
def method_projection(instance, row_transform, col_transform, time_coa, arp_coa, varp_coa):
"""
INCA specific intermediate projection.
Parameters
----------
row_transform : numpy.ndarray
col_transform : numpy.ndarray
time_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray, numpy.ndarray]
"""
# compute range/time of closest approach
R_CA_TGT = r_ca_scp + row_transform # Range at closest approach
t_CA_TGT = time_ca_poly(col_transform) # Time of closest approach
# Compute ARP velocity magnitude (actually squared, since that's how it's used) at t_CA_TGT
# noinspection PyProtectedMember
VEL2_CA_TGT = numpy.sum(instance._varp_poly(t_CA_TGT)**2, axis=-1)
# Compute the Doppler Rate Scale Factor for image Grid location
DRSF_TGT = drate_sf_poly(row_transform, col_transform)
# Difference between COA time and CA time
dt_COA_TGT = time_coa - t_CA_TGT
r_tgt_coa = numpy.sqrt(R_CA_TGT*R_CA_TGT + DRSF_TGT*VEL2_CA_TGT*dt_COA_TGT*dt_COA_TGT)
r_dot_tgt_coa = (DRSF_TGT/r_tgt_coa)*VEL2_CA_TGT*dt_COA_TGT
return r_tgt_coa, r_dot_tgt_coa
return method_projection
def plane_projection():
SCP = sicd.GeoData.SCP.ECF.get_array()
uRow = sicd.Grid.Row.UVectECF.get_array()
        uCol = sicd.Grid.Col.UVectECF.get_array()
# noinspection PyUnusedLocal, PyIncorrectDocstring
def method_projection(instance, row_transform, col_transform, time_coa, arp_coa, varp_coa):
"""
Plane specific intermediate projection.
Parameters
----------
row_transform : numpy.ndarray
col_transform : numpy.ndarray
time_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray, numpy.ndarray]
"""
ARP_minus_IPP = arp_coa - (SCP + numpy.outer(row_transform, uRow) + numpy.outer(col_transform, uCol))
r_tgt_coa = numpy.linalg.norm(ARP_minus_IPP, axis=-1)
r_dot_tgt_coa = numpy.sum(varp_coa * ARP_minus_IPP, axis=-1)/r_tgt_coa
return r_tgt_coa, r_dot_tgt_coa
return method_projection
# NB: sicd.can_project_coordinates() has been called, so all required attributes
# must be populated
if sicd.Grid.Type == 'RGAZIM':
if sicd.ImageFormation.ImageFormAlgo == 'PFA':
return pfa_projection()
elif sicd.ImageFormation.ImageFormAlgo == 'RGAZCOMP':
return rgazcomp_projection()
elif sicd.Grid.Type == 'RGZERO':
return inca_projection()
elif sicd.Grid.Type in ['XRGYCR', 'XCTYAT', 'PLANE']:
return plane_projection()
else:
# NB: this will have been noted by sicd.can_project_coordinates(), but is
# here for completeness
        raise ValueError('Unhandled Grid.Type {}'.format(sicd.Grid.Type))
def _get_sicd_adjustment_params(sicd, delta_arp, delta_varp, adj_params_frame):
"""
Gets the SICD adjustment params.
Parameters
----------
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
delta_arp : None|numpy.ndarray|list|tuple
delta_varp : None|numpy.ndarray|list|tuple
adj_params_frame : str
Returns
-------
(numpy.ndarray, numpy.ndarray)
"""
delta_arp = _validate_adj_param(delta_arp, 'delta_arp')
delta_varp = _validate_adj_param(delta_varp, 'delta_varp')
if adj_params_frame in ['RIC_ECI', 'RIC_ECF']:
if sicd.SCPCOA.ARPPos is None or sicd.SCPCOA.ARPVel is None:
raise ValueError(
'The adj_params_frame is of RIC type, but one of SCPCOA.ARPPos or '
'SCPCOA.ARPVel is not populated.')
ARP_SCP_COA = sicd.SCPCOA.ARPPos.get_array()
VARP_SCP_COA = sicd.SCPCOA.ARPVel.get_array()
ric_matrix = _ric_ecf_mat(ARP_SCP_COA, VARP_SCP_COA, adj_params_frame)
delta_arp = ric_matrix.dot(delta_arp)
delta_varp = ric_matrix.dot(delta_varp)
return delta_arp, delta_varp
def _get_sidd_type_projection(sidd):
"""
Gets an intermediate method specific projection method with six required
calling arguments (self, row_transform, col_transform, time_coa, arp_coa, varp_coa).
Parameters
----------
sidd : sarpy.io.product.sidd1_elements.SIDD.SIDDType1|sarpy.io.product.sidd2_elements.SIDD.SIDDType2
Returns
-------
(Poly2DType, callable)
"""
from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
def pgp(the_sidd):
"""
Parameters
----------
the_sidd : SIDDType2|SIDDType1
Returns
-------
callable
"""
plane_proj = the_sidd.Measurement.PlaneProjection
SRP = plane_proj.ReferencePoint.ECEF.get_array()
SRP_row = plane_proj.ReferencePoint.Point.Row
SRP_col = plane_proj.ReferencePoint.Point.Col
row_vector = plane_proj.ProductPlane.RowUnitVector.get_array()*plane_proj.SampleSpacing.Row
col_vector = plane_proj.ProductPlane.ColUnitVector.get_array()*plane_proj.SampleSpacing.Col
# noinspection PyUnusedLocal, PyIncorrectDocstring
def method_projection(instance, row_transform, col_transform, time_coa, arp_coa, varp_coa):
"""
Plane specific intermediate projection.
Parameters
----------
row_transform : numpy.ndarray
col_transform : numpy.ndarray
time_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray, numpy.ndarray]
"""
ARP_minus_IPP = arp_coa - \
(SRP + numpy.outer(row_transform - SRP_row, row_vector) +
numpy.outer(col_transform - SRP_col, col_vector))
r_tgt_coa = numpy.linalg.norm(ARP_minus_IPP, axis=-1)
r_dot_tgt_coa = numpy.sum(varp_coa * ARP_minus_IPP, axis=-1)/r_tgt_coa
return r_tgt_coa, r_dot_tgt_coa
return plane_proj.TimeCOAPoly, method_projection
if not isinstance(sidd, (SIDDType2, SIDDType1)):
raise TypeError('Got unhandled type {}'.format(type(sidd)))
if sidd.Measurement.PlaneProjection is not None:
return pgp(sidd)
else:
raise ValueError('Currently the only supported projection is PlaneProjection.')
def _get_sidd_adjustment_params(sidd, delta_arp, delta_varp, adj_params_frame):
"""
Get the SIDD adjustment parameters.
Parameters
----------
sidd : sarpy.io.product.sidd1_elements.SIDD.SIDDType1|sarpy.io.product.sidd2_elements.SIDD.SIDDType2
delta_arp : None|numpy.ndarray|list|tuple
delta_varp : None|numpy.ndarray|list|tuple
adj_params_frame : str
Returns
-------
(numpy.ndarray, numpy.ndarray)
"""
from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
if not isinstance(sidd, (SIDDType2, SIDDType1)):
raise TypeError('Got sidd of unhandled type {}'.format(type(sidd)))
delta_arp = _validate_adj_param(delta_arp, 'delta_arp')
delta_varp = _validate_adj_param(delta_varp, 'delta_varp')
if adj_params_frame in ['RIC_ECI', 'RIC_ECF']:
arp_pos_poly = sidd.Measurement.ARPPoly
arp_vel_poly = arp_pos_poly.derivative(der_order=1, return_poly=True)
if sidd.Measurement.PlaneProjection is not None:
srp_row = sidd.Measurement.PlaneProjection.ReferencePoint.Point.Row
srp_col = sidd.Measurement.PlaneProjection.ReferencePoint.Point.Col
srp_coa_time = sidd.Measurement.PlaneProjection.TimeCOAPoly(srp_row, srp_col)
srp_pos = arp_pos_poly(srp_coa_time)
srp_vel = arp_vel_poly(srp_coa_time)
ric_matrix = _ric_ecf_mat(srp_pos, srp_vel, adj_params_frame)
delta_arp = ric_matrix.dot(delta_arp)
delta_varp = ric_matrix.dot(delta_varp)
else:
raise ValueError('Got unhandled projection type {}'.format(sidd.Measurement.ProjectionType))
return delta_arp, delta_varp
class COAProjection(object):
"""
The Center of Aperture projection object, which provides common projection
functionality for all image to R/Rdot projection.
"""
__slots__ = (
'_time_coa_poly', '_arp_poly', '_varp_poly', '_method_proj',
'_row_shift', '_row_mult', '_col_shift', '_col_mult',
'_delta_arp', '_delta_varp', '_range_bias',)
def __init__(self, time_coa_poly, arp_poly, method_projection,
row_shift=0, row_mult=1, col_shift=0, col_mult=1,
delta_arp=None, delta_varp=None, range_bias=None):
"""
Parameters
----------
time_coa_poly : Poly2DType
The time center of aperture polynomial.
arp_poly : XYZPolyType
The aperture position polynomial.
method_projection : callable
The method specific projection for performing the projection from image
coordinates to R/Rdot space. The call signature is expected to be
`method_projection(instance, row_transform, col_transform, time_coa, arp_coa, varp_coa)`,
where `row_transform = row_mult*(row - row_shift)`,
`col_transform = col_mult*(col - col_shift)`,
`time_coa = time_coa_poly(row_transform, col_transform)`,
`arp_coa = arp_poly(time_coa)`, and `varp_coa = varp_poly(time_coa)`.
row_shift : int|float
The shift part of the affine row transformation for plugging into the
time coa polynomial.
row_mult : int|float
The multiple part of the affine row transformation for plugging into
the time coa polynomial.
col_shift : int|float
The shift part of the affine column transformation for plugging into
the time coa polynomial.
col_mult : int|float
The multiple part of the affine column transformation for plugging into
the time coa polynomial.
delta_arp : None|numpy.ndarray|list|tuple
ARP position adjustable parameter (ECF, m). Defaults to 0 in each coordinate.
delta_varp : None|numpy.ndarray|list|tuple
VARP position adjustable parameter (ECF, m/s). Defaults to 0 in each coordinate.
range_bias : float|int
Range bias adjustable parameter (m), defaults to 0.
"""
if not isinstance(time_coa_poly, Poly2DType):
raise TypeError('time_coa_poly must be a Poly2DType instance.')
self._time_coa_poly = time_coa_poly
if not isinstance(arp_poly, XYZPolyType):
raise TypeError('arp_poly must be an XYZPolyType instance.')
self._arp_poly = arp_poly
self._varp_poly = self._arp_poly.derivative(der_order=1, return_poly=True) # type: XYZPolyType
if not callable(method_projection):
raise TypeError('method_projection must be callable.')
self._method_proj = MethodType(method_projection, self)
# affine transform parameters
self._row_shift = float(row_shift)
self._row_mult = float(row_mult)
self._col_shift = float(col_shift)
self._col_mult = float(col_mult)
# aperture location adjustment parameters
self._delta_arp = _validate_adj_param(delta_arp, 'delta_arp')
self._delta_varp = _validate_adj_param(delta_varp, 'delta_varp')
self._range_bias = 0.0 if range_bias is None else float(range_bias) # type: float
@classmethod
def from_sicd(cls, sicd, delta_arp=None, delta_varp=None, range_bias=None, adj_params_frame='ECF'):
"""
Construct from a SICD structure.
Parameters
----------
sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
The SICD metadata structure.
delta_arp : None|numpy.ndarray|list|tuple
ARP position adjustable parameter (ECF, m). Defaults to 0 in each coordinate.
delta_varp : None|numpy.ndarray|list|tuple
VARP position adjustable parameter (ECF, m/s). Defaults to 0 in each coordinate.
range_bias : float|int
Range bias adjustable parameter (m), defaults to 0.
adj_params_frame : str
One of `('ECF', 'RIC_ECI', 'RIC_ECF')`.
Returns
-------
COAProjection
"""
if not sicd.can_project_coordinates():
raise ValueError('Insufficient metadata populated to formulate projection.')
time_coa_poly = sicd.Grid.TimeCOAPoly
# fall back to approximation if TimeCOAPoly is not populated
if time_coa_poly is None:
time_coa_poly = Poly2DType(Coefs=[[sicd.Timeline.CollectDuration/2, ], ])
logging.warning(
'Using (constant) approximation to TimeCOAPoly, which may result in poor projection results.')
arp_poly = sicd.Position.ARPPoly
# transform parameters
row_mult = sicd.Grid.Row.SS
row_shift = sicd.ImageData.SCPPixel.Row - sicd.ImageData.FirstRow
col_mult = sicd.Grid.Col.SS
col_shift = sicd.ImageData.SCPPixel.Col - sicd.ImageData.FirstCol
# location adjustment parameters
delta_arp, delta_varp = _get_sicd_adjustment_params(sicd, delta_arp, delta_varp, adj_params_frame)
return cls(time_coa_poly, arp_poly, _get_sicd_type_specific_projection(sicd),
row_shift=row_shift, row_mult=row_mult, col_shift=col_shift, col_mult=col_mult,
delta_arp=delta_arp, delta_varp=delta_varp, range_bias=range_bias)
@classmethod
def from_sidd(cls, sidd, delta_arp=None, delta_varp=None, range_bias=None, adj_params_frame='ECF'):
"""
Construct from the SIDD structure.
Parameters
----------
sidd : sarpy.io.product.sidd1_elements.SIDD.SIDDType1|sarpy.io.product.sidd2_elements.SIDD.SIDDType2
delta_arp : None|numpy.ndarray|list|tuple
ARP position adjustable parameter (ECF, m). Defaults to 0 in each coordinate.
delta_varp : None|numpy.ndarray|list|tuple
VARP position adjustable parameter (ECF, m/s). Defaults to 0 in each coordinate.
range_bias : float|int
Range bias adjustable parameter (m), defaults to 0.
adj_params_frame : str
One of `('ECF', 'RIC_ECI', 'RIC_ECF')`.
Returns
-------
COAProjection
"""
time_coa_poly, method_projection = _get_sidd_type_projection(sidd)
arp_poly = sidd.Measurement.ARPPoly
delta_arp, delta_varp = _get_sidd_adjustment_params(
sidd, delta_arp, delta_varp, adj_params_frame)
return cls(time_coa_poly, arp_poly, method_projection,
row_shift=0, row_mult=1, col_shift=0, col_mult=1,
delta_arp=delta_arp, delta_varp=delta_varp, range_bias=range_bias)
def _init_proj(self, im_points):
"""
Parameters
----------
im_points : numpy.ndarray
Returns
-------
Tuple[numpy.ndarray,...]
"""
row_transform = (im_points[:, 0] - self._row_shift)*self._row_mult
col_transform = (im_points[:, 1] - self._col_shift)*self._col_mult
time_coa = self._time_coa_poly(row_transform, col_transform)
# calculate aperture reference position and velocity at target time
arp_coa = self._arp_poly(time_coa)
varp_coa = self._varp_poly(time_coa)
return row_transform, col_transform, time_coa, arp_coa, varp_coa
def projection(self, im_points):
"""
Perform the projection from image coordinates to R/Rdot coordinates.
Parameters
----------
im_points : numpy.ndarray
This array of image point coordinates, **expected to have shape (N, 2)**.
Returns
-------
Tuple[numpy.ndarray,numpy.ndarray,numpy.ndarray,numpy.ndarray,numpy.ndarray]
* `r_tgt_coa` - range to the ARP at COA
* `r_dot_tgt_coa` - range rate relative to the ARP at COA
* `time_coa` - center of aperture time since CDP start for input ip
* `arp_coa` - aperture reference position at time_coa
* `varp_coa` - velocity at time_coa
"""
row_transform, col_transform, time_coa, arp_coa, varp_coa = self._init_proj(im_points)
r_tgt_coa, r_dot_tgt_coa = self._method_proj(row_transform, col_transform, time_coa, arp_coa, varp_coa)
# adjust parameters
arp_coa += self._delta_arp
varp_coa += self._delta_varp
r_tgt_coa += self._range_bias
return r_tgt_coa, r_dot_tgt_coa, time_coa, arp_coa, varp_coa
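# Illustrative usage sketch (not part of the library API): construct a COAProjection
# from an already-loaded SICD structure and project a block of pixel coordinates to
# R/Rdot space. `sicd` is assumed to be a populated SICDType instance and the pixel
# values below are hypothetical.
#
#     import numpy
#     proj = COAProjection.from_sicd(sicd)
#     pixels = numpy.array([[0, 0], [500, 750]], dtype='float64')
#     r, r_dot, t_coa, arp, varp = proj.projection(pixels)
#     # r/r_dot are range and range-rate relative to the (adjusted) ARP at COA.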
def _get_coa_projection(structure, use_structure_coa, **coa_args):
"""
Parameters
----------
structure
use_structure_coa : bool
coa_args
Returns
-------
COAProjection
"""
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
if use_structure_coa and structure.coa_projection is not None:
return structure.coa_projection
elif isinstance(structure, SICDType):
return COAProjection.from_sicd(structure, **coa_args)
elif isinstance(structure, (SIDDType2, SIDDType1)):
return COAProjection.from_sidd(structure, **coa_args)
else:
raise ValueError('Got unhandled type {}'.format(type(structure)))
###############
# General helper methods for extracting params from the sicd or sidd
def _get_reference_point(structure):
"""
Gets the reference point in ECF coordinates.
Parameters
----------
structure
Returns
-------
numpy.ndarray
"""
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
if isinstance(structure, SICDType):
return structure.GeoData.SCP.ECF.get_array(dtype='float64')
elif isinstance(structure, (SIDDType2, SIDDType1)):
proj_type = structure.Measurement.ProjectionType
if proj_type != 'PlaneProjection':
raise ValueError('Got unsupported projection type {}'.format(proj_type))
return structure.Measurement.PlaneProjection.ReferencePoint.ECEF.get_array(dtype='float64')
else:
raise TypeError('Got unhandled type {}'.format(type(structure)))
def _get_outward_norm(structure, gref):
"""
Gets the default outward unit norm.
Parameters
----------
structure
gref : numpy.ndarray
    The ground reference point (ECF, m) used to orient the outward norm.
Returns
-------
numpy.ndarray
"""
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
if isinstance(structure, SICDType):
if structure.ImageFormation.ImageFormAlgo == 'PFA':
return structure.PFA.FPN.get_array()
else:
return wgs_84_norm(gref)
elif isinstance(structure, (SIDDType2, SIDDType1)):
proj_type = structure.Measurement.ProjectionType
if proj_type != 'PlaneProjection':
raise ValueError('Got unsupported projection type {}'.format(proj_type))
the_proj = structure.Measurement.PlaneProjection
# image plane details
uRow = the_proj.ProductPlane.RowUnitVector.get_array(dtype='float64')
uCol = the_proj.ProductPlane.ColUnitVector.get_array(dtype='float64')
# outward unit norm for plane
uGPN = numpy.cross(uRow, uCol)
uGPN /= numpy.linalg.norm(uGPN)
if numpy.dot(uGPN, gref) < 0:
uGPN *= -1
return uGPN
else:
raise TypeError('Got unhandled type {}'.format(type(structure)))
def _extract_plane_params(structure):
"""
Extract the required parameters for projection from ground to plane for a SICD or SIDD.
Parameters
----------
structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
Returns
-------
tuple
    `(ref_point, ref_pixel, row_ss, col_ss, uRow, uCol, uGPN, uSPN)`
"""
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
if isinstance(structure, SICDType):
# reference point for the plane
ref_point = structure.GeoData.SCP.ECF.get_array()
ref_pixel = structure.ImageData.SCPPixel.get_array()
# pixel spacing
row_ss = structure.Grid.Row.SS
col_ss = structure.Grid.Col.SS
# image plane details
uRow = structure.Grid.Row.UVectECF.get_array() # unit normal in row direction
uCol = structure.Grid.Col.UVectECF.get_array() # unit normal in column direction
# outward unit norm
uGPN = structure.PFA.FPN.get_array() if structure.ImageFormation.ImageFormAlgo == 'PFA' \
else wgs_84_norm(ref_point)
# uSPN - defined in section 3.1 as normal to instantaneous slant plane that contains SCP at SCP COA is
# tangent to R/Rdot contour at SCP. Points away from center of Earth. Use look to establish sign.
ARP_SCP_COA = structure.SCPCOA.ARPPos.get_array()
VARP_SCP_COA = structure.SCPCOA.ARPVel.get_array()
uSPN = structure.SCPCOA.look*numpy.cross(VARP_SCP_COA, ref_point - ARP_SCP_COA)
uSPN /= numpy.linalg.norm(uSPN)
return ref_point, ref_pixel, row_ss, col_ss, uRow, uCol, uGPN, uSPN
elif isinstance(structure, (SIDDType1, SIDDType2)):
proj_type = structure.Measurement.ProjectionType
if proj_type != 'PlaneProjection':
raise ValueError('Got unsupported projection type {}'.format(proj_type))
the_proj = structure.Measurement.PlaneProjection
# reference point for the plane
ref_point = the_proj.ReferencePoint.ECEF.get_array(dtype='float64')
ref_pixel = the_proj.ReferencePoint.Point.get_array(dtype='float64')
# pixel spacing
row_ss = the_proj.SampleSpacing.Row
col_ss = the_proj.SampleSpacing.Col
# image plane details
uRow = the_proj.ProductPlane.RowUnitVector.get_array(dtype='float64')
uCol = the_proj.ProductPlane.ColUnitVector.get_array(dtype='float64')
# outward unit norm for plane
uGPN = numpy.cross(uRow, uCol)
uGPN /= numpy.linalg.norm(uGPN)
if numpy.dot(uGPN, ref_point) < 0:
uGPN *= -1
# slant plane is identical to outward unit norm
return ref_point, ref_pixel, row_ss, col_ss, uRow, uCol, uGPN, uGPN
else:
raise TypeError('Got unsupported structure type {}'.format(type(structure)))
#############
# Ground-to-Image (aka Scene-to-Image) projection.
def _validate_coords(coords):
if not isinstance(coords, numpy.ndarray):
coords = numpy.array(coords, dtype='float64')
orig_shape = coords.shape
if len(orig_shape) == 1:
coords = numpy.reshape(coords, (1, -1))
if coords.shape[-1] != 3:
raise ValueError(
'The coords array must represent an array of points in ECF coordinates, '
'so the final dimension of coords must have length 3. Have coords.shape = {}'.format(coords.shape))
return coords, orig_shape
def _ground_to_image(coords, coa_proj, uGPN,
ref_point, ref_pixel, uIPN, sf, row_ss, col_ss, uProj,
row_col_transform, ipp_transform, tolerance, max_iterations):
"""
Basic level helper function.
Parameters
----------
coords : numpy.ndarray|tuple|list
coa_proj : COAProjection
uGPN : numpy.ndarray
ref_point : numpy.ndarray
ref_pixel : numpy.ndarray
uIPN : numpy.ndarray
sf : float
row_ss : float
col_ss : float
uProj : numpy.ndarray
row_col_transform : numpy.ndarray
ipp_transform : numpy.ndarray
tolerance : float
max_iterations : int
Returns
-------
Tuple[numpy.ndarray, float, int]
* `image_points` - the determined image point array, of size `N x 2`. Following SICD convention,
the upper-left pixel is [0, 0].
* `delta_gpn` - residual ground plane displacement (m).
* `iterations` - the number of iterations performed.
"""
g_n = coords.copy()
im_points = numpy.zeros((coords.shape[0], 2), dtype='float64')
delta_gpn = numpy.zeros((coords.shape[0],), dtype='float64')
cont = True
iteration = 0
matrix_transform = numpy.dot(row_col_transform, ipp_transform)
# (3 x 2)*(2 x 2) = (3 x 2)
while cont:
# project ground plane to image plane iteration
iteration += 1
dist_n = numpy.dot(ref_point - g_n, uIPN)/sf # (N, )
i_n = g_n + numpy.outer(dist_n, uProj) # (N, 3)
delta_ipp = i_n - ref_point # (N, 3)
ip_iter = numpy.dot(delta_ipp, matrix_transform) # (N, 2)
im_points[:, 0] = ip_iter[:, 0]/row_ss + ref_pixel[0]
im_points[:, 1] = ip_iter[:, 1]/col_ss + ref_pixel[1]
# transform to ground plane containing the scene points and check how it compares
p_n = _image_to_ground_plane(im_points, coa_proj, g_n, uGPN)
# compute displacement between scene point and this new projected point
diff_n = coords - p_n
delta_gpn[:] = numpy.linalg.norm(diff_n, axis=1)
g_n += diff_n
# should we continue iterating?
cont = numpy.any(delta_gpn > tolerance) and (iteration < max_iterations)
return im_points, delta_gpn, iteration
def ground_to_image(coords, structure, tolerance=1e-2, max_iterations=10, block_size=50000,
use_structure_coa=True, **coa_args):
"""
Transforms a 3D ECF point to pixel (row/column) coordinates. This is
implemented in accordance with the SICD Image Projections Description Document.
**Really Scene-To-Image projection.**
Parameters
----------
coords : numpy.ndarray|tuple|list
ECF coordinates to map to image (pixel) coordinates, of size `N x 3`.
structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
The SICD or SIDD data structure.
tolerance : float|int
Ground plane displacement tolerance (m).
max_iterations : int
maximum number of iterations to perform
block_size : int|None
size of blocks of coordinates to transform at a time
use_structure_coa : bool
If structure.coa_projection is populated, use that one **ignoring the COAProjection parameters.**
coa_args
The keyword arguments from the COAProjection.from_sicd class method.
Returns
-------
Tuple[numpy.ndarray, float, int]
* `image_points` - the determined image point array, of size `N x 2`. Following
the SICD convention, the upper-left pixel is [0, 0].
* `delta_gpn` - residual ground plane displacement (m).
* `iterations` - the number of iterations performed.
"""
coords, orig_shape = _validate_coords(coords)
coa_proj = _get_coa_projection(structure, use_structure_coa, **coa_args)
ref_point, ref_pixel, row_ss, col_ss, uRow, uCol, \
uGPN, uSPN = _extract_plane_params(structure)
uIPN = numpy.cross(uRow, uCol) # NB: only outward pointing if Row/Col are right handed system
uIPN /= numpy.linalg.norm(uIPN) # NB: uRow/uCol may not be perpendicular
cos_theta = numpy.dot(uRow, uCol)
sin_theta = numpy.sqrt(1 - cos_theta*cos_theta)
ipp_transform = numpy.array(
[[1, -cos_theta], [-cos_theta, 1]], dtype='float64')/(sin_theta*sin_theta)
row_col_transform = numpy.zeros((3, 2), dtype='float64')
row_col_transform[:, 0] = uRow
row_col_transform[:, 1] = uCol
sf = float(numpy.dot(uSPN, uIPN)) # scale factor
tolerance = float(tolerance)
if tolerance < 1e-12:
logging.warning(
'minimum allowed tolerance is 1e-12 meters, resetting from {}'.format(tolerance))
tolerance = 1e-12
# prepare the work space
coords_view = numpy.reshape(coords, (-1, 3))  # flatten to 2-d if necessary
num_points = coords_view.shape[0]
if block_size is None or num_points <= block_size:
image_points, delta_gpn, iters = _ground_to_image(
coords_view, coa_proj, uGPN,
ref_point, ref_pixel, uIPN, sf, row_ss, col_ss, uSPN,
row_col_transform, ipp_transform, tolerance, max_iterations)
iters = numpy.full((num_points, ), iters)
else:
image_points = numpy.zeros((num_points, 2), dtype='float64')
delta_gpn = numpy.zeros((num_points, ), dtype='float64')
iters = numpy.zeros((num_points, ), dtype='int16')
# proceed with block processing
start_block = 0
while start_block < num_points:
end_block = min(start_block+block_size, num_points)
image_points[start_block:end_block, :], delta_gpn[start_block:end_block], \
iters[start_block:end_block] = _ground_to_image(
coords_view[start_block:end_block, :], coa_proj, uGPN,
ref_point, ref_pixel, uIPN, sf, row_ss, col_ss, uSPN,
row_col_transform, ipp_transform, tolerance, max_iterations)
start_block = end_block
if len(orig_shape) == 1:
image_points = numpy.reshape(image_points, (-1,))
elif len(orig_shape) > 1:
image_points = numpy.reshape(image_points, orig_shape[:-1]+(2, ))
delta_gpn = numpy.reshape(delta_gpn, orig_shape[:-1])
iters = numpy.reshape(iters, orig_shape[:-1])
return image_points, delta_gpn, iters
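# Illustrative usage sketch (hypothetical values, assuming `sicd` is a populated
# SICDType): project a scene point, given in ECF meters, into pixel space.
#
#     image_points, residual, iterations = ground_to_image([6378137.0, 0.0, 0.0], sicd)
#     # `image_points` follows the SICD convention (upper-left pixel at [0, 0]);
#     # `residual` is the remaining ground plane displacement (m) after `iterations` passes.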
def ground_to_image_geo(coords, structure, ordering='latlong', **kwargs):
"""
Transforms a 3D Lat/Lon/HAE point to pixel (row/column) coordinates.
This is implemented in accordance with the SICD Image Projections Description Document.
Parameters
----------
coords : numpy.ndarray|tuple|list
Lat/Lon/HAE coordinates to map to image (pixel) coordinates, of size `N x 3`.
structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
The SICD or SIDD structure.
ordering : str
If 'longlat', then the input is `[longitude, latitude, hae]`.
Otherwise, the input is `[latitude, longitude, hae]`. Passed through
to :func:`sarpy.geometry.geocoords.geodetic_to_ecf`.
kwargs
See the key word arguments of :func:`ground_to_image`
Returns
-------
Tuple[numpy.ndarray, float, int]
* `image_points` - the determined image point array, of size `N x 2`. Following SICD convention,
the upper-left pixel is [0, 0].
* `delta_gpn` - residual ground plane displacement (m).
* `iterations` - the number of iterations performed.
"""
return ground_to_image(geodetic_to_ecf(coords, ordering=ordering), structure, **kwargs)
############
# Image-To-Ground projections
def _validate_im_points(im_points):
"""
Parameters
----------
im_points : numpy.ndarray|list|tuple
Returns
-------
numpy.ndarray
"""
if im_points is None:
raise ValueError('The argument cannot be None')
if not isinstance(im_points, numpy.ndarray):
im_points = numpy.array(im_points, dtype='float64')
orig_shape = im_points.shape
if len(im_points.shape) == 1:
im_points = numpy.reshape(im_points, (1, -1))
if im_points.shape[-1] != 2:
raise ValueError(
'The im_points array must represent an array of points in pixel coordinates, '
'so the final dimension of im_points must have length 2. '
'Have im_points.shape = {}'.format(im_points.shape))
return im_points, orig_shape
def image_to_ground(
im_points, structure, block_size=50000, projection_type='HAE',
use_structure_coa=True, **kwargs):
"""
Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)
described in SICD Image Projections document.
Parameters
----------
im_points : numpy.ndarray|list|tuple
(row, column) coordinates of N points in image (or subimage if FirstRow/FirstCol are nonzero).
Following SICD convention, the upper-left pixel is [0, 0].
structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
The SICD or SIDD structure.
block_size : None|int
Size of blocks of coordinates to transform at a time. The entire array will be
transformed as a single block if `None`.
projection_type : str
One of ['PLANE', 'HAE', 'DEM'].
use_structure_coa : bool
If structure.coa_projection is populated, use that one **ignoring the COAProjection parameters.**
kwargs
keyword arguments relevant for the given projection type. See image_to_ground_plane/hae/dem methods.
Returns
-------
numpy.ndarray
Physical coordinates (in ECF) corresponding to the input image coordinates. The interpretation
of the physical coordinates depends on the chosen `projection_type`.
"""
p_type = projection_type.upper()
if p_type == 'PLANE':
return image_to_ground_plane(
im_points, structure, block_size=block_size, use_structure_coa=use_structure_coa, **kwargs)
elif p_type == 'HAE':
return image_to_ground_hae(
im_points, structure, block_size=block_size, use_structure_coa=use_structure_coa, **kwargs)
elif p_type == 'DEM':
return image_to_ground_dem(
im_points, structure, block_size=block_size, use_structure_coa=use_structure_coa, **kwargs)
else:
raise ValueError('Got unrecognized projection type {}'.format(projection_type))
def image_to_ground_geo(
im_points, structure, ordering='latlong', block_size=50000, projection_type='HAE',
use_structure_coa=True, **kwargs):
"""
Transforms image coordinates to ground plane Lat/Lon/HAE coordinate via the algorithm(s)
described in SICD Image Projections document.
Parameters
----------
im_points : numpy.ndarray|list|tuple
(row, column) coordinates of N points in image (or subimage if FirstRow/FirstCol are nonzero).
Following SICD convention, the upper-left pixel is [0, 0].
structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
The SICD or SIDD structure.
ordering : str
Determines whether return is ordered as `[lat, long, hae]` or `[long, lat, hae]`.
Passed through to :func:`sarpy.geometry.geocoords.ecf_to_geodetic`.
block_size : None|int
Size of blocks of coordinates to transform at a time. The entire array will be
transformed as a single block if `None`.
projection_type : str
One of ['PLANE', 'HAE', 'DEM'].
use_structure_coa : bool
If structure.coa_projection is populated, use that one **ignoring the COAProjection parameters.**
kwargs
See the keyword arguments in :func:`image_to_ground`.
Returns
-------
numpy.ndarray
Ground Plane Point (in Lat/Lon/HAE coordinates) along the R/Rdot contour.
"""
return ecf_to_geodetic(
image_to_ground(
im_points, structure, block_size=block_size, projection_type=projection_type,
use_structure_coa=use_structure_coa, **kwargs),
ordering=ordering)
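# Illustrative round-trip sketch (hypothetical pixel values, assuming `sicd` is a
# populated SICDType): project pixels to Lat/Lon/HAE with the default constant-HAE
# projection, then map the result back to pixel space.
#
#     pixels = [[128, 256], [1024, 2048]]
#     llh = image_to_ground_geo(pixels, sicd, projection_type='HAE')
#     round_trip, _, _ = ground_to_image_geo(llh, sicd)
#     # `round_trip` should agree with `pixels` to within a small fraction of a pixel.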
#####
# Image-to-Ground Plane
def _image_to_ground_plane_perform(r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, gref, uZ):
"""
Parameters
----------
r_tgt_coa : numpy.ndarray
r_dot_tgt_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
gref : numpy.ndarray
uZ : numpy.ndarray
Returns
-------
numpy.ndarray
"""
# Solve for the intersection of a R/Rdot contour and a ground plane.
arpZ = numpy.sum((arp_coa - gref)*uZ, axis=-1)
arpZ[arpZ > r_tgt_coa] = numpy.nan
# ARP ground plane nadir
aGPN = arp_coa - numpy.outer(arpZ, uZ)
# Compute ground plane distance (gd) from ARP nadir to circle of const range
gd = numpy.sqrt(r_tgt_coa*r_tgt_coa - arpZ*arpZ)
# Compute sine and cosine of grazing angle
cosGraz = gd/r_tgt_coa
sinGraz = arpZ/r_tgt_coa
# Velocity components normal to ground plane and parallel to ground plane.
vMag = numpy.linalg.norm(varp_coa, axis=-1)
vZ = numpy.dot(varp_coa, uZ)
vX = numpy.sqrt(vMag*vMag - vZ*vZ) # Note: For Vx = 0, no Solution
# Orient X such that Vx > 0 and compute unit vectors uX and uY
uX = (varp_coa - numpy.outer(vZ, uZ))/vX[:, numpy.newaxis]
uY = numpy.cross(uZ, uX)
# Compute cosine of azimuth angle to ground plane point
cosAz = (-r_dot_tgt_coa+vZ*sinGraz) / (vX * cosGraz)
cosAz[numpy.abs(cosAz) > 1] = numpy.nan # R/Rdot combination not possible in given plane
# Compute sine of azimuth angle. Use LOOK to establish sign.
look = numpy.sign(numpy.dot(numpy.cross(arp_coa-gref, varp_coa), uZ))
sinAz = look*numpy.sqrt(1-cosAz*cosAz)
# Compute Ground Plane Point in ground plane and along the R/Rdot contour
return aGPN + uX*(gd*cosAz)[:, numpy.newaxis] + uY*(gd*sinAz)[:, numpy.newaxis]
def _image_to_ground_plane(im_points, coa_projection, gref, uZ):
"""
Parameters
----------
im_points : numpy.ndarray
coa_projection : COAProjection
gref : numpy.ndarray
uZ : numpy.ndarray
Returns
-------
numpy.ndarray
"""
r_tgt_coa, r_dot_tgt_coa, time_coa, arp_coa, varp_coa = coa_projection.projection(im_points)
values = _image_to_ground_plane_perform(r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, gref, uZ)
return values
def image_to_ground_plane(
im_points, structure, block_size=50000, gref=None, ugpn=None,
use_structure_coa=True, **coa_args):
"""
Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)
described in SICD Image Projections document.
Parameters
----------
im_points : numpy.ndarray|list|tuple
the image coordinate array
structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
The SICD or SIDD structure.
block_size : None|int
Size of blocks of coordinates to transform at a time. The entire array will be
transformed as a single block if `None`.
gref : None|numpy.ndarray|list|tuple
Ground plane reference point ECF coordinates (m). The default is the SCP or Reference Point.
ugpn : None|numpy.ndarray|list|tuple
Vector normal to the plane to which we are projecting.
use_structure_coa : bool
If structure.coa_projection is populated, use that one **ignoring the COAProjection parameters.**
coa_args
keyword arguments for COAProjection.from_sicd class method.
Returns
-------
numpy.ndarray
Ground Plane Point (in ECF coordinates) corresponding to the input image coordinates.
"""
im_points, orig_shape = _validate_im_points(im_points)
coa_proj = _get_coa_projection(structure, use_structure_coa, **coa_args)
# method parameter validation
if gref is None:
gref = _get_reference_point(structure)
if not isinstance(gref, numpy.ndarray):
gref = numpy.array(gref, dtype='float64')
if gref.size != 3:
raise ValueError('gref must have three elements.')
if gref.ndim != 1:
gref = numpy.reshape(gref, (3, ))
if ugpn is None:
ugpn = _get_outward_norm(structure, gref)
if not isinstance(ugpn, numpy.ndarray):
ugpn = numpy.array(ugpn, dtype='float64')
if ugpn.size != 3:
raise ValueError('ugpn must have three elements.')
if ugpn.ndim != 1:
ugpn = numpy.reshape(ugpn, (3, ))
uZ = ugpn/numpy.linalg.norm(ugpn)
# prepare workspace
im_points_view = numpy.reshape(im_points, (-1, 2))  # flatten to 2-d if necessary
num_points = im_points_view.shape[0]
if block_size is None or num_points <= block_size:
coords = _image_to_ground_plane(im_points_view, coa_proj, gref, uZ)
else:
coords = numpy.zeros((num_points, 3), dtype='float64')
# proceed with block processing
start_block = 0
while start_block < num_points:
end_block = min(start_block + block_size, num_points)
coords[start_block:end_block, :] = _image_to_ground_plane(
im_points_view[start_block:end_block], coa_proj, gref, uZ)
start_block = end_block
if len(orig_shape) == 1:
coords = numpy.reshape(coords, (-1, ))
elif len(orig_shape) > 1:
coords = numpy.reshape(coords, orig_shape[:-1] + (3,))
return coords
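# Illustrative sketch (hypothetical values, assuming `sicd` is a populated SICDType):
# project pixels onto an explicit ground plane rather than the default plane through
# the SCP/reference point. The plane is defined by `gref` (ECF, m) and normal `ugpn`.
#
#     gref = sicd.GeoData.SCP.ECF.get_array() + numpy.array([0., 0., 50.])
#     ecf_points = image_to_ground_plane([[100, 100]], sicd, gref=gref, ugpn=wgs_84_norm(gref))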
#####
# Image-to-HAE
def _image_to_ground_hae_perform(
r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, ref_point, ugpn,
hae0, tolerance, max_iterations, ref_hae):
"""
Intermediate helper method.
Parameters
----------
r_tgt_coa : numpy.ndarray
r_dot_tgt_coa : numpy.ndarray
arp_coa : numpy.ndarray
varp_coa : numpy.ndarray
ref_point : numpy.ndarray
ugpn : numpy.ndarray
hae0 : float
tolerance : float
max_iterations : int
ref_hae : float
Returns
-------
numpy.ndarray
"""
# Determine the look side from the ARP position/velocity relative to the reference point.
look = numpy.sign(numpy.sum(numpy.cross(arp_coa, varp_coa)*(ref_point - arp_coa), axis=1))
gref = ref_point - (ref_hae - hae0)*ugpn
# iteration variables
gpp = None
delta_hae = None
cont = True
iters = 0
while cont:
iters += 1
# Compute the precise projection along the R/Rdot contour to Ground Plane.
gpp = _image_to_ground_plane_perform(r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, gref, ugpn)
# check our hae value versus hae0
gpp_llh = ecf_to_geodetic(gpp)
delta_hae = gpp_llh[:, 2] - hae0
max_abs_delta_hae = numpy.max(numpy.abs(delta_hae))
gref = gpp - (delta_hae[:, numpy.newaxis] * ugpn)
# should we stop our iteration?
cont = (max_abs_delta_hae > tolerance) and (iters < max_iterations)
# Compute the unit slant plane normal vector, uspn, that is tangent to the R/Rdot contour at point gpp
uspn = numpy.cross(varp_coa, (gpp - arp_coa))*look[:, numpy.newaxis]
uspn /= numpy.linalg.norm(uspn, axis=-1)[:, numpy.newaxis]
# For the final straight line projection, project from point gpp along
# the slant plane normal (as opposed to the ground plane normal that was
# used in the iteration) to point slp.
sf = numpy.sum(ugpn*uspn, axis=-1)
slp = gpp - uspn*(delta_hae/sf)[:, numpy.newaxis]
# Assign surface point SPP position by adjusting the HAE to be on the
# HAE0 surface.
spp_llh = ecf_to_geodetic(slp)
spp_llh[:, 2] = hae0
spp = geodetic_to_ecf(spp_llh)
return spp
def _image_to_ground_hae(
im_points, coa_projection, hae0, tolerance, max_iterations, ref_hae, ref_point):
"""
Intermediate helper function for projection.
Parameters
----------
im_points : numpy.ndarray
the image coordinate array
coa_projection : COAProjection
hae0 : float
tolerance : float
max_iterations : int
ref_hae : float
ref_point : numpy.ndarray
Returns
-------
numpy.ndarray
"""
# get (image formation specific) projection parameters
r_tgt_coa, r_dot_tgt_coa, time_coa, arp_coa, varp_coa = coa_projection.projection(im_points)
ugpn = wgs_84_norm(ref_point)
return _image_to_ground_hae_perform(
r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, ref_point, ugpn,
hae0, tolerance, max_iterations, ref_hae)
def image_to_ground_hae(im_points, structure, block_size=50000,
hae0=None, tolerance=1e-3, max_iterations=10, use_structure_coa=True, **coa_args):
"""
Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)
described in SICD Image Projections document.
Parameters
----------
im_points : numpy.ndarray|list|tuple
the image coordinate array
structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
The SICD or SIDD structure.
block_size : None|int
Size of blocks of coordinates to transform at a time. The entire array will be
transformed as a single block if `None`.
hae0 : None|float|int
Surface height (m) above the WGS-84 reference ellipsoid for projection point.
Defaults to HAE at the SCP or Reference Point.
tolerance : float|int
Height threshold for convergence of iterative constant HAE computation (m).
max_iterations : int
Maximum number of iterations allowed for constant hae computation.
use_structure_coa : bool
If structure.coa_projection is populated, use that one **ignoring the COAProjection parameters.**
coa_args
keyword arguments for COAProjection.from_sicd class method.
Returns
-------
numpy.ndarray
Ground Plane Point (in ECF coordinates) at the target HAE, corresponding to
the input image coordinates.
"""
# coa projection creation
im_points, orig_shape = _validate_im_points(im_points)
coa_proj = _get_coa_projection(structure, use_structure_coa, **coa_args)
tolerance = float(tolerance)
if tolerance < 1e-12:
logging.warning(
'minimum allowed tolerance is 1e-12, resetting from {0:8f}'.format(tolerance))
tolerance = 1e-12
max_iterations = int(max_iterations)
if max_iterations < 1:
logging.error('max_iterations must be a positive integer, resetting to 1 from {}'.format(max_iterations))
max_iterations = 1
if max_iterations > 100:
logging.error('maximum allowed max_iterations is 100, resetting from {}'.format(max_iterations))
max_iterations = 100
# method parameter validation
ref_point = _get_reference_point(structure)
ref_llh = ecf_to_geodetic(ref_point)
ref_hae = float(ref_llh[2])
if hae0 is None:
hae0 = ref_hae
# prepare workspace
im_points_view = numpy.reshape(im_points, (-1, 2))  # flatten to 2-d if necessary
num_points = im_points_view.shape[0]
if block_size is None or num_points <= block_size:
coords = _image_to_ground_hae(im_points_view, coa_proj, hae0, tolerance, max_iterations, ref_hae, ref_point)
else:
coords = numpy.zeros((num_points, 3), dtype='float64')
# proceed with block processing
start_block = 0
while start_block < num_points:
end_block = min(start_block + block_size, num_points)
coords[start_block:end_block, :] = _image_to_ground_hae(
im_points_view[start_block:end_block], coa_proj, hae0, tolerance, max_iterations, ref_hae, ref_point)
start_block = end_block
if len(orig_shape) == 1:
coords = numpy.reshape(coords, (-1,))
elif len(orig_shape) > 1:
coords = numpy.reshape(coords, orig_shape[:-1] + (3,))
return coords
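# Illustrative sketch (hypothetical values, assuming `sicd` is a populated SICDType):
# project pixels to the surface 100 m above the WGS-84 ellipsoid.
#
#     ecf_points = image_to_ground_hae([[10, 20], [30, 40]], sicd, hae0=100.0)
#     llh_points = ecf_to_geodetic(ecf_points)  # each HAE component should be ~100.0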
#####
# Image-to-DEM
def _do_dem_iteration(previous_ecf, previous_diff, this_ecf, this_diff):
mask = (this_diff < 0)
if numpy.any(mask):
d0 = (previous_diff[mask])
d1 = numpy.abs(this_diff[mask])
return mask, (d1[:, numpy.newaxis]*previous_ecf[mask] + d0[:, numpy.newaxis]*this_ecf[mask])/((d0+d1)[:, numpy.newaxis])
else:
return None
def _image_to_ground_dem(
im_points, coa_projection, dem_interpolator, min_dem, max_dem,
vertical_step_size, ref_hae, ref_point):
"""
Parameters
----------
im_points : numpy.ndarray
coa_projection : COAProjection
dem_interpolator : DEMInterpolator
min_dem : float
max_dem : float
vertical_step_size : float|int
ref_hae: float
ref_point : numpy.ndarray
Returns
-------
numpy.ndarray
"""
# get (image formation specific) projection parameters
r_tgt_coa, r_dot_tgt_coa, time_coa, arp_coa, varp_coa = coa_projection.projection(im_points)
ugpn = wgs_84_norm(ref_point)
tolerance = 1e-3
max_iterations = 10
# if max_dem - min_dem is sufficiently small, then pretend it's flat
if max_dem - min_dem < vertical_step_size:
return _image_to_ground_hae_perform(
r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, ref_point, ugpn, max_dem,
tolerance, max_iterations, ref_hae)
# set up workspace
out = numpy.zeros((im_points.shape[0], 3), dtype='float64')
cont_mask = numpy.ones((im_points.shape[0], ), dtype='bool')
cont = True
this_hae = max_dem
previous_coords = _image_to_ground_hae_perform(
r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, ref_point, ugpn, max_dem,
tolerance, max_iterations, ref_hae)
previous_llh = ecf_to_geodetic(previous_coords)
previous_diff = previous_llh[:, 2] - dem_interpolator.get_elevation_hae(
previous_llh[:, 0], previous_llh[:, 1])
while cont:
this_hae -= vertical_step_size
this_coords = _image_to_ground_hae_perform(
r_tgt_coa[cont_mask], r_dot_tgt_coa[cont_mask], arp_coa[cont_mask], varp_coa[cont_mask],
ref_point, ugpn, this_hae, tolerance, max_iterations, ref_hae)
this_llh = ecf_to_geodetic(this_coords)
this_diff = this_llh[:, 2] - dem_interpolator.get_elevation_hae(this_llh[:, 0], this_llh[:, 1])
result = _do_dem_iteration(previous_coords, previous_diff, this_coords, this_diff)
if result is not None:
this_mask, this_result = result
temp_mask = numpy.zeros((im_points.shape[0], ), dtype='bool')
temp_mask[cont_mask] = this_mask
out[temp_mask, :] = this_result
cont_mask[temp_mask] = False
cont = numpy.any(cont_mask)
if cont:
previous_coords = this_coords[~this_mask, :]
previous_diff = this_diff[~this_mask]
else:
previous_coords = this_coords
previous_diff = this_diff
return out
def _image_to_ground_dem_block(
im_points, coa_projection, dem_interpolator, horizontal_step, lat_lon_box, block_size,
lat_pad, lon_pad):
"""
Parameters
----------
im_points : numpy.ndarray
coa_projection : COAProjection
dem_interpolator : DEMInterpolator
horizontal_step : float
lat_lon_box : numpy.ndarray
block_size : int|None
lat_pad : float
lon_pad : float
Returns
-------
numpy.ndarray
"""
# determine reference point
ref_lat = 0.5*(lat_lon_box[0] + lat_lon_box[1])
ref_lon = 0.5*(lat_lon_box[2] + lat_lon_box[3])
ref_hae = float(dem_interpolator.get_elevation_hae(ref_lat, ref_lon))
ref_ecf = geodetic_to_ecf([ref_lat, ref_lon, ref_hae])
# determine max/min hae in the DEM region
padded_box = numpy.array([
max(-90, lat_lon_box[0] - 0.5*lat_pad), min(lat_lon_box[1] + 0.5*lat_pad, 90),
max(-180, lat_lon_box[2] - 0.5*lon_pad), min(lat_lon_box[3] + 0.5*lon_pad, 180)], dtype='float64')
min_dem = dem_interpolator.get_min_hae(padded_box) - 10
max_dem = dem_interpolator.get_max_hae(padded_box) + 10
# prepare workspace
num_points = im_points.shape[0]
if block_size is None or num_points <= block_size:
coords = _image_to_ground_dem(
im_points, coa_projection, dem_interpolator, min_dem, max_dem,
horizontal_step, ref_hae, ref_ecf)
else:
coords = numpy.zeros((num_points, 3), dtype='float64')
# proceed with block processing
start_block = 0
while start_block < num_points:
end_block = min(start_block + block_size, num_points)
coords[start_block:end_block, :] = _image_to_ground_dem(
im_points[start_block:end_block, :], coa_projection, dem_interpolator,
min_dem, max_dem, horizontal_step, ref_hae, ref_ecf)
start_block = end_block
return coords
def image_to_ground_dem(
im_points, structure, block_size=50000, dem_interpolator=None,
dem_type=None, geoid_file=None, pad_value=0.2,
vertical_step_size=10, use_structure_coa=True, **coa_args):
"""
Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)
described in SICD Image Projections document.
Parameters
----------
im_points : numpy.ndarray|list|tuple
the image coordinate array
structure : sarpy.io.complex.sicd_elements.SICD.SICDType|sarpy.io.product.sidd2_elements.SIDD.SIDDType|sarpy.io.product.sidd1_elements.SIDD.SIDDType
The SICD or SIDD structure.
block_size : None|int
Size of blocks of coordinates to transform at a time. The entire array
will be transformed as a single block if `None`.
dem_interpolator : str|DEMInterpolator
The DEMInterpolator. If this is a string, then a DTEDInterpolator will be
constructed assuming that this is the DTED root search directory.
dem_type : None|str|List[str]
The DEM type or list of DEM types in order of priority. Only used if
`dem_interpolator` is the search path.
geoid_file : None|str|GeoidHeight
The `GeoidHeight` object, an egm file name, or root directory containing
one of the egm files in the sub-directory "geoid". If `None`, then default
to the root directory of `dted_list`. Only used if `dem_interpolator` is
the search path.
pad_value : float
The degree value to pad by for the dem interpolator. Only used if
`dem_interpolator` is the search path.
vertical_step_size : float|int
Sampling along HAE altitude at the given resolution in meters. Bounds of
`[0.1, 100]` will be enforced by replacement.
use_structure_coa : bool
If structure.coa_projection is populated, use that one **ignoring the COAProjection parameters.**
coa_args
keyword arguments for COAProjection.from_sicd class method.
Returns
-------
numpy.ndarray
Physical coordinates (in ECF coordinates) corresponding to the input image
coordinates, assuming detected features actually correspond to the DEM.
"""
def append_grid_elements(this_lon_min, this_lon_max, the_list):
lat_start = lat_min
while lat_start < lat_max:
lon_start = this_lon_min
lat_end = min(lat_start + lat_grid_size, lat_max)
while lon_start < this_lon_max:
lon_end = min(lon_start + lon_grid_size, this_lon_max)
the_list.append((lat_start, lat_end, lon_start, lon_end))
lon_start = lon_end
lat_start = lat_end
# coa projection creation
im_points, orig_shape = _validate_im_points(im_points)
coa_proj = _get_coa_projection(structure, use_structure_coa, **coa_args)
vertical_step_size = float(vertical_step_size)
if vertical_step_size < 0.1:
vertical_step_size = 0.1
if vertical_step_size > 100:
vertical_step_size = 100
# reference point extraction
ref_ecf = _get_reference_point(structure)
ref_llh = ecf_to_geodetic(ref_ecf)
ref_hae = ref_llh[2]
# subgrid size definition
lat_grid_size = 0.03
lon_grid_size = min(10, lat_grid_size/numpy.sin(numpy.deg2rad(ref_llh[0])))
# validate the dem_interpolator
if dem_interpolator is None:
raise ValueError('dem_interpolator is None, this is unhandled.')
if isinstance(dem_interpolator, string_types):
dted_list = DTEDList(dem_interpolator)
dem_interpolator = DTEDInterpolator.from_reference_point(
ref_llh, dted_list, dem_type=dem_type, geoid_file=geoid_file, pad_value=pad_value)
if not isinstance(dem_interpolator, DEMInterpolator):
raise TypeError('dem_interpolator is of unsupported type {}'.format(type(dem_interpolator)))
# perform a projection to reference point hae for approximate lat/lon values
im_points_view = numpy.reshape(im_points, (-1, 2))  # flatten to 2-d if necessary
r_tgt_coa, r_dot_tgt_coa, time_coa, arp_coa, varp_coa = coa_proj.projection(im_points_view)
ugpn = wgs_84_norm(ref_ecf)
tolerance = 1e-3
max_iterations = 10
llh_rough = ecf_to_geodetic(_image_to_ground_hae_perform(
r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, ref_ecf, ugpn, ref_hae,
tolerance, max_iterations, ref_hae))
# segment into lat/lon grid of small size for more efficient dem lookup
lat_min = numpy.min(llh_rough[:, 0])
lat_max = numpy.max(llh_rough[:, 0])
lon_min = numpy.min(llh_rough[:, 1])
lon_max = numpy.max(llh_rough[:, 1])
lat_lon_grids = []
if (lon_min < -90) and (lon_max > 90):
# there is a -180/180 crossing
append_grid_elements(numpy.min(llh_rough[(llh_rough[:, 1] > 0), 1]), 180, lat_lon_grids)
append_grid_elements(-180, numpy.max(llh_rough[(llh_rough[:, 1] < 0), 1]), lat_lon_grids)
else:
append_grid_elements(lon_min, lon_max, lat_lon_grids)
if len(lat_lon_grids) == 1:
return _image_to_ground_dem_block(
im_points, coa_proj, dem_interpolator, vertical_step_size,
lat_lon_grids[0], block_size, lat_grid_size, lon_grid_size)
else:
num_points = im_points_view.shape[0]
coords = numpy.zeros((num_points, 3), dtype='float64')
for entry in lat_lon_grids:
mask = ((llh_rough[:, 0] >= entry[0]) & (llh_rough[:, 0] <= entry[1]) &
(llh_rough[:, 1] >= entry[2]) & (llh_rough[:, 1] <= entry[3]))
if numpy.any(mask):
coords[mask, :] = _image_to_ground_dem_block(
im_points[mask, :], coa_proj, dem_interpolator, vertical_step_size,
entry, block_size, lat_grid_size, lon_grid_size)
if len(orig_shape) == 1:
coords = numpy.reshape(coords, (-1,))
elif len(orig_shape) > 1:
coords = numpy.reshape(coords, orig_shape[:-1] + (3,))
return coords
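# Illustrative sketch (hypothetical path and values, assuming `sicd` is a populated
# SICDType): project pixels onto a DEM, supplying a DTED root directory so that a
# DTEDInterpolator is constructed internally; any DEMInterpolator may be passed instead.
#
#     ecf_points = image_to_ground_dem([[512, 512]], sicd, dem_interpolator='/data/dted')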
# ---- next file: tests/test_algorithms.py (repository OGKG/CGLib, Apache-2.0) ----
import unittest
from models import (
Point,
Vertex,
Graph,
Edge,
BinTree,
ChainsBinTree,
KdTree,
Node,
OrientedGraph,
OrientedEdge,
NodeWithParent,
RegionTree
)
from collections import OrderedDict
from algo.stripe_method import stripe
from algo.kd_tree_method import kd_tree
from algo.jarvis import jarvis
from algo.graham import graham
from algo.quickhull import quickhull
from algo.loci import Loci
from algo.chain_method import chain_method
from algo.dc_closest_points import closest_points
from algo.region_tree_method import region_tree_method
import math
import copy
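# The tests below can be run with the standard unittest runner, for example (assuming
# the repository layout implied by the imports above):
#
#     python -m unittest tests.test_algorithms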
class TestAlgorithms(unittest.TestCase):
"""Algorithm tests."""
def test_stripe(self):
p1 = Vertex(Point(7, 0))
p2 = Vertex(Point(2, 2.5))
p3 = Vertex(Point(12, 3))
p4 = Vertex(Point(8, 5))
p5 = Vertex(Point(0, 7))
p6 = Vertex(Point(13, 8))
p7 = Vertex(Point(6, 11))
g = Graph()
g.add_vertex(p1)
g.add_vertex(p2)
g.add_vertex(p3)
g.add_vertex(p4)
g.add_vertex(p5)
g.add_vertex(p6)
g.add_vertex(p7)
g.add_edge(p1, p2)
g.add_edge(p1, p3)
g.add_edge(p2, p3)
g.add_edge(p7, p6)
g.add_edge(p3, p6)
g.add_edge(p4, p6)
g.add_edge(p4, p5)
g.add_edge(p4, p7)
g.add_edge(p5, p7)
g.add_edge(p2, p5)
dot = Point(11.5, 5.5)
ans = list(stripe(g, dot))
self.assertEqual(
ans[0],
[
(-math.inf, 0.0),
(0.0, 2.5),
(2.5, 3.0),
(3.0, 5.0),
(5.0, 7.0),
(7.0, 8.0),
(8.0, 11.0),
(11.0, math.inf),
],
)
self.assertTrue(
TestAlgorithms.fragmentation_eq(
ans[1],
{
(-math.inf, 0.0): [],
(0.0, 2.5): [Edge(p1, p2), Edge(p1, p3)],
(2.5, 3.0): [Edge(p1, p3), Edge(p2, p3), Edge(p2, p5)],
(3.0, 5.0): [Edge(p2, p5), Edge(p3, p6)],
(5.0, 7.0): [
Edge(p2, p5),
Edge(p4, p5),
Edge(p4, p7),
Edge(p4, p6),
Edge(p3, p6),
],
(7.0, 8.0): [
Edge(p5, p7),
Edge(p4, p7),
Edge(p4, p6),
Edge(p3, p6),
],
(8.0, 11.0): [Edge(p5, p7), Edge(p4, p7), Edge(p7, p6)],
(11.0, math.inf): [],
},
)
)
self.assertEqual(ans[2], (5.0, 7.0))
self.assertEqual(ans[3], [Edge(p4, p6), Edge(p3, p6)])
@staticmethod
def fragmentation_eq(f1, f2):
for i in f1:
for item in f1[i]:
if item not in f2[i]:
return False
for i in f2:
for item in f2[i]:
if item not in f1[i]:
return False
return True
def test_jarvis1(self):
pts = [
Point(1, 4),
Point(0, 0),
Point(3, 3),
Point(3, 1),
Point(7, 0),
Point(5, 5),
Point(5, 2),
Point(9, 6),
]
hull = [Point(0, 0), Point(1, 4), Point(9, 6), Point(7, 0)]
ans = jarvis(pts)
self.assertEqual(ans, hull)
def test_jarvis2(self):
pts = [Point(3, 3), Point(1, 1), Point(5, 0)]
hull = [Point(1, 1), Point(3, 3), Point(5, 0)]
ans = jarvis(pts)
self.assertEqual(ans, hull)
def test_kd_tree(self):
pts = [
Point(0, 9),
Point(2, 3),
Point(3, 6),
Point(5, 8),
Point(6, 1),
Point(8, 13),
Point(10, 2),
Point(12, 4),
Point(14, 11),
Point(15, 5),
Point(17, 10),
]
rx = [3, 14]
ry = [0, 8]
tree = KdTree(Node(Point(8, 13)), [], [])
tree.root.left = Node(Point(3, 6))
tree.root.left.left = Node(Point(6, 1))
tree.root.left.left.left = Node(Point(2, 3))
tree.root.left.right = Node(Point(5, 8))
tree.root.left.right.left = Node(Point(0, 9))
tree.root.right = Node(Point(15, 5))
tree.root.right.left = Node(Point(12, 4))
tree.root.right.left.left = Node(Point(10, 2))
tree.root.right.right = Node(Point(17, 10))
tree.root.right.right.left = Node(Point(14, 11))
r_pts = [
Point(3, 6),
Point(5, 8),
Point(6, 1),
Point(10, 2),
Point(12, 4),
]
ans = kd_tree(pts, rx, ry)
self.assertEqual(sorted(pts), next(ans))
self.assertEqual(tree, next(ans))
self.assertEqual(r_pts, sorted(next(ans)))
def test_graham1(self):
pts = [Point(7, 0), Point(3, 3), Point(0, 0)]
centroid = Point(3.3333333333333335, 1.0)
ordered = [Point(0, 0), Point(7, 0), Point(3, 3)]
origin = Point(0, 0)
steps = [([0, 1, 2], True, 1), ([1, 2, 0], True, 2)]
hull = [Point(0, 0), Point(7, 0), Point(3, 3)]
ans = graham(pts)
self.assertAlmostEqual(centroid, next(ans))
self.assertEqual(ordered, next(ans))
self.assertEqual(origin, next(ans))
self.assertEqual(steps, next(ans))
self.assertEqual(hull, next(ans))
def test_graham2(self):
pts = [
Point(3, 10),
Point(6, 8),
Point(3, 5),
Point(2, 8),
Point(4, 8),
Point(5, 5),
Point(3, 3),
Point(7, 7),
Point(5, 0),
Point(0, 0),
Point(10, 3),
]
centroid = Point(4.0, 7.666666666666667)
ordered = [
Point(0, 0),
Point(3, 5),
Point(3, 3),
Point(5, 0),
Point(5, 5),
Point(10, 3),
Point(7, 7),
Point(6, 8),
Point(4, 8),
Point(3, 10),
Point(2, 8),
]
origin = Point(0, 0)
steps = [
([0, 1, 2], False, 1),
([0, 2, 3], False, 2),
([0, 3, 4], True, 3),
([3, 4, 5], False, 4),
([0, 3, 5], True, 3),
([3, 5, 6], True, 5),
([5, 6, 7], True, 6),
([6, 7, 8], True, 7),
([7, 8, 9], False, 8),
([6, 7, 9], True, 7),
([7, 9, 10], True, 9),
([9, 10, 0], True, 10)
]
hull = [
Point(0, 0),
Point(5, 0),
Point(10, 3),
Point(7, 7),
Point(6, 8),
Point(3, 10),
Point(2, 8),
]
ans = graham(pts)
self.assertAlmostEqual(centroid, next(ans))
self.assertEqual(ordered, next(ans))
self.assertEqual(origin, next(ans))
self.assertEqual(steps, next(ans))
self.assertEqual(hull, next(ans))
def test_graham3(self):
pts = [
Point(2, 8),
Point(5, 6),
Point(7, 8),
Point(8, 11),
Point(7, 5),
Point(10, 7),
Point(11, 5),
Point(8, 2),
Point(1, 3),
Point(5, 2),
]
centroid = Point(4.666666666666667, 7.333333333333333)
ordered = [
Point(5, 2),
Point(5, 6),
Point(8, 2),
Point(7, 5),
Point(11, 5),
Point(10, 7),
Point(7, 8),
Point(8, 11),
Point(2, 8),
Point(1, 3),
]
origin = Point(5, 2)
steps = [
([0, 1, 2], False, 1),
([0, 2, 3], True, 2),
([2, 3, 4], False, 3),
([0, 2, 4], True, 2),
([2, 4, 5], True, 4),
([4, 5, 6], True, 5),
([5, 6, 7], False, 6),
([4, 5, 7], False, 5),
([2, 4, 7], True, 4),
([4, 7, 8], True, 7),
([7, 8, 9], True, 8),
([8, 9, 0], True, 9)
]
hull = [
Point(5, 2),
Point(8, 2),
Point(11, 5),
Point(8, 11),
Point(2, 8),
Point(1, 3),
]
ans = graham(pts)
self.assertAlmostEqual(centroid, next(ans))
self.assertEqual(ordered, next(ans))
self.assertEqual(origin, next(ans))
self.assertEqual(steps, next(ans))
self.assertEqual(hull, next(ans))
def test_quickhull1(self):
pts = [Point(3, 4), Point(0, 0), Point(7, 2)]
tree = BinTree(Node([pts[1], pts[0], pts[2]]))
tree.root.left = Node([pts[1], pts[0], pts[2]])
tree.root.right = Node([pts[2], pts[1]])
tree.root.left.left = Node([pts[1], pts[0]])
tree.root.left.right = Node([pts[0], pts[2]])
hull = [pts[1], pts[0], pts[2]]
ans = quickhull(pts)
self.assertEqual(tree, next(ans))
self.assertEqual(hull, next(ans))
def test_quickhull2(self):
pts = [
Point(0, 6),
Point(8, 11),
Point(10, 4),
Point(7, 13),
Point(6, 3),
Point(3, 0),
Point(4, 2),
Point(12, 1),
Point(14, 10),
Point(5, 9),
Point(3, 11),
Point(1, 4),
]
tree = BinTree(
Node(
[
pts[0],
pts[10],
pts[9],
pts[3],
pts[1],
pts[8],
pts[7],
pts[2],
pts[4],
pts[6],
pts[5],
pts[11],
]
)
)
tree.root.left = Node([pts[0], pts[10], pts[9], pts[3], pts[1], pts[8]])
tree.root.right = Node(
[pts[8], pts[7], pts[2], pts[4], pts[6], pts[5], pts[11], pts[0]]
)
tree.root.left.left = Node([pts[0], pts[10], pts[3]])
tree.root.left.right = Node([pts[3], pts[8]])
tree.root.left.left.left = Node([pts[0], pts[10]])
tree.root.left.left.right = Node([pts[10], pts[3]])
tree.root.right.left = Node([pts[8], pts[7]])
tree.root.right.right = Node([pts[7], pts[4], pts[6], pts[5], pts[11], pts[0]])
tree.root.right.right.left = Node([pts[7], pts[5]])
tree.root.right.right.right = Node([pts[5], pts[0]])
hull = [pts[0], pts[10], pts[3], pts[8], pts[7], pts[5]]
ans = quickhull(pts)
self.assertEqual(tree, next(ans))
self.assertEqual(hull, next(ans))
def test_loci(self):
l = Loci()
p1 = Point(1, 1)
p2 = Point(2, 1)
p3 = Point(2, 3)
p4 = Point(2, 2)
l.append_points(p1, p2, p3, p4)
q = l.query(Point(2.5, 0.5))
self.assertEqual(q, 0)
res = l.get_points_in_rect(((1.5, 2.5), (0.5, 3.5)))
res2 = l.get_points_in_rect(((0.5, 2.5), (0.5, 3.5)))
self.assertEqual(res, 3)
self.assertEqual(res2, 4)
p1 = Point(2, 1)
p2 = Point(1, 2)
p3 = Point(0, 3)
l = Loci()
l.append_points(p1, p2, p3)
res = l.get_points_in_rect(((0.5, 2.5), (0.5, 2.5)))
self.assertEqual(res, 2)
def test_chain_method(self):
graph = OrientedGraph()
point = Point(4, 5)
v1 = Vertex(Point(4, 2))
v2 = Vertex(Point(2, 4))
v3 = Vertex(Point(6, 5))
v4 = Vertex(Point(5, 7))
e1 = OrientedEdge(v1, v2, 1)
e2 = OrientedEdge(v1, v3, 1)
e3 = OrientedEdge(v2, v3, 1)
e4 = OrientedEdge(v2, v4, 1)
e5 = OrientedEdge(v3, v4, 1)
graph.add_vertex(v1)
graph.add_vertex(v2)
graph.add_vertex(v3)
graph.add_vertex(v4)
graph.add_edge(v1, v2, 1)
graph.add_edge(v1, v3, 1)
graph.add_edge(v2, v3, 1)
graph.add_edge(v2, v4, 1)
graph.add_edge(v3, v4, 1)
ordered = [v1, v2, v3, v4]
weight_table = OrderedDict(
{
v1: {"vin": [], "vout": [e1, e2], "win": 0, "wout": 2},
v2: {"vin": [e1], "vout": [e4, e3], "win": 1, "wout": 2},
v3: {"vin": [e3, e2], "vout": [e5], "win": 2, "wout": 1},
v4: {"vin": [e4, e5], "vout": [], "win": 2, "wout": 0},
}
)
e1_balanced = copy.deepcopy(e1)
e1_balanced.weight = 2
e5_balanced = copy.deepcopy(e5)
e5_balanced.weight = 2
weight_table_balanced = {
v1: {"vin": [], "vout": [e1_balanced, e2], "win": 0, "wout": 3},
v2: {"vin": [e1_balanced], "vout": [e4, e3], "win": 2, "wout": 2},
v3: {"vin": [e3, e2], "vout": [e5_balanced], "win": 2, "wout": 2},
v4: {"vin": [e4, e5_balanced], "vout": [], "win": 3, "wout": 0},
}
e1_new = copy.deepcopy(e1)
e1_new.weight = 0
e2_new = copy.deepcopy(e2)
e2_new.weight = 0
e3_new = copy.deepcopy(e3)
e3_new.weight = 0
e4_new = copy.deepcopy(e4)
e4_new.weight = 0
e5_new = copy.deepcopy(e5)
e5_new.weight = 0
chains = [[e1_new, e4_new], [e1_new, e3_new, e5_new], [e2_new, e5_new]]
root = NodeWithParent(data=chains[1])
tree = ChainsBinTree(root)
tree.root.left = NodeWithParent(data=chains[0], parent=root)
tree.root.right = NodeWithParent(data=chains[2], parent=root)
point_between = (chains[0], chains[1])
ans = chain_method(graph, point)
self.assertEqual(ordered, next(ans))
self.assertEqual(weight_table, next(ans))
self.assertEqual(weight_table_balanced, next(ans))
self.assertEqual(chains, next(ans))
self.assertEqual(tree, next(ans))
self.assertEqual(point_between, next(ans))
def test_closest_points(self):
points_test = [Point(3, 3), Point(6, 2), Point(5, 6), Point(7, 4), Point(2, 9)]
close_pair_true = (Point(6, 2), Point(7, 4))
self.assertTupleEqual(closest_points(points_test), close_pair_true)
def test_region_tree_method(self):
pts = [Point(1, 9), Point(7, 13), Point(3, 3), Point(1.5, 3), Point(5, 7),
Point(9, 8), Point(6, 9), Point(7, 5), Point(7, 12), Point(4, 11), Point(1, 5)]
x_range, y_range = [2.2, 7.7], [6.6, 11.11]
pre = (sorted(pts), sorted(sorted(pts), key=lambda u: u.y))
projections = [
[Point(1, 5), Point(1, 9)],
[Point(1.5, 3)],
[Point(3, 3)],
[Point(4, 11)],
[Point(5, 7)],
[Point(6, 9)],
[Point(7, 5), Point(7, 12), Point(7, 13)],
[Point(9, 8)]
]
tree = BinTree(Node([[1, 8], [Point(1.5, 3),
Point(3, 3),
Point(1, 5),
Point(7, 5),
Point(5, 7),
Point(9, 8),
Point(1, 9),
Point(6, 9),
Point(4, 11),
Point(7, 12),
Point(7, 13)]]))
tree.root.left = Node([[1, 4], [Point(1.5, 3),
Point(3, 3),
Point(1, 5),
Point(1, 9),
Point(4, 11)]])
tree.root.left.left = Node([[1, 2], [Point(1.5, 3), Point(1, 5), Point(1, 9)]])
tree.root.left.right = Node([[2, 4], [Point(1.5, 3), Point(3, 3), Point(4, 11)]])
tree.root.left.right.left = Node([[2, 3], [Point(1.5, 3), Point(3, 3)]])
tree.root.left.right.right = Node([[3, 4], [Point(3, 3), Point(4, 11)]])
tree.root.right = Node([[4, 8], [Point(7, 5),
Point(5, 7),
Point(9, 8),
Point(6, 9),
Point(4, 11),
Point(7, 12),
Point(7, 13)]])
tree.root.right.left = Node([[4, 6], [Point(5, 7), Point(6, 9), Point(4, 11)]])
tree.root.right.left.left = Node([[4, 5], [Point(5, 7), Point(4, 11)]])
tree.root.right.left.right = Node([[5, 6], [Point(5, 7), Point(6, 9)]])
tree.root.right.right = Node([[6, 8], [Point(7, 5),
Point(9, 8),
Point(6, 9),
Point(7, 12),
Point(7, 13)]])
tree.root.right.right.left = Node([[6, 7], [Point(7, 5),
Point(6, 9),
Point(7, 12),
Point(7, 13)]])
tree.root.right.right.right = Node([[7, 8], [Point(7, 5),
Point(9, 8),
Point(7, 12),
Point(7, 13)]])
ps = [tree.root.left.right.right, tree.root.right.left, tree.root.right.right.left]
ss = [[Point(4, 11)], [Point(5, 7), Point(6, 9), Point(4, 11)], [Point(6, 9)]]
ans = region_tree_method(pts, x_range, y_range)
self.assertEqual(pre, next(ans))
self.assertEqual(projections, next(ans))
self.assertEqual(tree, next(ans))
self.assertEqual([3, 7], next(ans))
self.assertEqual(ps, next(ans))
self.assertEqual(ss, next(ans))
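# --- Illustrative sketch (not part of the original test module) ---
# The algorithm entry points exercised above (graham, quickhull, chain_method,
# region_tree_method) are step-by-step generators: each next() call yields the
# next intermediate artefact that the tests assert on. A minimal, hypothetical
# way to consume graham outside of unittest, assuming the same Point/graham
# imports this module already uses:
#
#     steps = graham(pts)
#     centroid = next(steps)   # centroid of the first three points
#     ordered = next(steps)    # input points ordered around that centroid
#     origin = next(steps)     # chosen origin point
#     scan = next(steps)       # per-step triples checked in test_graham above
#     hull = next(steps)       # final convex hull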
| 32.594306
| 94
| 0.41784
|
799389e659b5c231da36de7c3437f06c93c079c6
| 1,929
|
py
|
Python
|
saas/backend/apps/approval/audit.py
|
nannan00/bk-iam-saas
|
217600fa6e5fd466fff9c33c20c4dbd7c69f77d9
|
[
"MIT"
] | 7
|
2021-08-13T03:48:16.000Z
|
2021-12-20T15:31:38.000Z
|
saas/backend/apps/approval/audit.py
|
nannan00/bk-iam-saas
|
217600fa6e5fd466fff9c33c20c4dbd7c69f77d9
|
[
"MIT"
] | 456
|
2021-08-16T02:13:57.000Z
|
2022-03-30T10:02:49.000Z
|
saas/backend/apps/approval/audit.py
|
nannan00/bk-iam-saas
|
217600fa6e5fd466fff9c33c20c4dbd7c69f77d9
|
[
"MIT"
] | 17
|
2021-08-10T04:08:46.000Z
|
2022-03-14T14:24:36.000Z
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from backend.apps.role.audit import BaseRoleDataProvider
from backend.audit.audit import audit_context_getter
from backend.audit.constants import AuditType
class ApprovalProcessGlobalConfigAuditProvider(BaseRoleDataProvider):
type = AuditType.APPROVAL_GLOBAL_UPDATE.value
@property
def extra(self):
return {
"type": audit_context_getter(self.request, "type"),
"process_id": audit_context_getter(self.request, "process_id"),
}
class ApprovalProcessActionAuditProvider(BaseRoleDataProvider):
type = AuditType.APPROVAL_ACTION_UPDATE.value
@property
def extra(self):
return {
"system_id": audit_context_getter(self.request, "system_id"),
"action_ids": audit_context_getter(self.request, "action_ids"),
"process_id": audit_context_getter(self.request, "process_id"),
}
class ApprovalProcessGroupAuditProvider(BaseRoleDataProvider):
type = AuditType.APPROVAL_GROUP_UPDATE.value
@property
def extra(self):
return {
"group_ids": audit_context_getter(self.request, "group_ids"),
"process_id": audit_context_getter(self.request, "process_id"),
}
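# --- Illustrative sketch (not part of the original module) ---
# Every provider above follows the same pattern: subclass BaseRoleDataProvider,
# set `type` to an AuditType value, and expose request-scoped fields through
# audit_context_getter. A new provider would look like the hypothetical class
# below; APPROVAL_EXAMPLE_UPDATE is an invented enum member, not a real AuditType.
#
# class ApprovalProcessExampleAuditProvider(BaseRoleDataProvider):
#     type = AuditType.APPROVAL_EXAMPLE_UPDATE.value
#
#     @property
#     def extra(self):
#         return {"process_id": audit_context_getter(self.request, "process_id")}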
| 40.1875
| 115
| 0.736133
|
b93cadbbe4ea25ac36c3b35c1b890c3dd39e1c1b
| 1,372
|
py
|
Python
|
modules/platforms/python/examples/scans.py
|
FedorUporov/gridgain
|
883125f943743fa8198d88be98dfe61bde86ad96
|
[
"CC0-1.0"
] | null | null | null |
modules/platforms/python/examples/scans.py
|
FedorUporov/gridgain
|
883125f943743fa8198d88be98dfe61bde86ad96
|
[
"CC0-1.0"
] | null | null | null |
modules/platforms/python/examples/scans.py
|
FedorUporov/gridgain
|
883125f943743fa8198d88be98dfe61bde86ad96
|
[
"CC0-1.0"
] | null | null | null |
#
# Copyright 2019 GridGain Systems, Inc. and Contributors.
#
# Licensed under the GridGain Community Edition License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.gridgain.com/products/software/community-edition/gridgain-community-edition-license
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyignite import Client
client = Client()
client.connect('127.0.0.1', 10800)
my_cache = client.create_cache('my cache')
my_cache.put_all({'key_{}'.format(v): v for v in range(20)})
# {
# 'key_0': 0,
# 'key_1': 1,
# 'key_2': 2,
# ... 20 elements in total...
# 'key_18': 18,
# 'key_19': 19
# }
result = my_cache.scan()
for k, v in result:
print(k, v)
# 'key_17' 17
# 'key_10' 10
# 'key_6' 6
# ... 20 elements in total...
# 'key_16' 16
# 'key_12' 12
result = my_cache.scan()
print(dict(result))
# {
# 'key_17': 17,
# 'key_10': 10,
# 'key_6': 6,
# ... 20 elements in total...
# 'key_16': 16,
# 'key_12': 12
# }
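# --- Illustrative addition (not part of the original example) ---
# scan() returns key/value pairs in no particular order (see the comments
# above), so sort on the client side when deterministic output is needed.
# This reuses the same cache and keys created earlier in this script.
result = my_cache.scan()
for key, value in sorted(result, key=lambda kv: kv[1]):
    print(key, value)
# 'key_0' 0
# 'key_1' 1
# ... 20 elements in total, now ordered by value ...
# 'key_19' 19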
my_cache.destroy()
client.close()
| 24.5
| 101
| 0.663994
|
fd2313ec0dec46e00ba0eb58eef8490d66679307
| 57,659
|
py
|
Python
|
sdk/python/pulumi_aws/amplify/branch.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/amplify/branch.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/amplify/branch.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['BranchArgs', 'Branch']
@pulumi.input_type
class BranchArgs:
def __init__(__self__, *,
app_id: pulumi.Input[str],
branch_name: pulumi.Input[str],
backend_environment_arn: Optional[pulumi.Input[str]] = None,
basic_auth_credentials: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
enable_auto_build: Optional[pulumi.Input[bool]] = None,
enable_basic_auth: Optional[pulumi.Input[bool]] = None,
enable_notification: Optional[pulumi.Input[bool]] = None,
enable_performance_mode: Optional[pulumi.Input[bool]] = None,
enable_pull_request_preview: Optional[pulumi.Input[bool]] = None,
environment_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
framework: Optional[pulumi.Input[str]] = None,
pull_request_environment_name: Optional[pulumi.Input[str]] = None,
stage: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
ttl: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Branch resource.
:param pulumi.Input[str] app_id: The unique ID for an Amplify app.
:param pulumi.Input[str] branch_name: The name for the branch.
:param pulumi.Input[str] backend_environment_arn: The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.
:param pulumi.Input[str] basic_auth_credentials: The basic authorization credentials for the branch.
:param pulumi.Input[str] description: The description for the branch.
:param pulumi.Input[str] display_name: The display name for a branch. This is used as the default domain prefix.
:param pulumi.Input[bool] enable_auto_build: Enables auto building for the branch.
:param pulumi.Input[bool] enable_basic_auth: Enables basic authorization for the branch.
:param pulumi.Input[bool] enable_notification: Enables notifications for the branch.
:param pulumi.Input[bool] enable_performance_mode: Enables performance mode for the branch.
:param pulumi.Input[bool] enable_pull_request_preview: Enables pull request previews for this branch.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment_variables: The environment variables for the branch.
:param pulumi.Input[str] framework: The framework for the branch.
:param pulumi.Input[str] pull_request_environment_name: The Amplify environment name for the pull request.
:param pulumi.Input[str] stage: Describes the current stage for the branch. Valid values: `PRODUCTION`, `BETA`, `DEVELOPMENT`, `EXPERIMENTAL`, `PULL_REQUEST`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value mapping of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[str] ttl: The content Time To Live (TTL) for the website in seconds.
"""
pulumi.set(__self__, "app_id", app_id)
pulumi.set(__self__, "branch_name", branch_name)
if backend_environment_arn is not None:
pulumi.set(__self__, "backend_environment_arn", backend_environment_arn)
if basic_auth_credentials is not None:
pulumi.set(__self__, "basic_auth_credentials", basic_auth_credentials)
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if enable_auto_build is not None:
pulumi.set(__self__, "enable_auto_build", enable_auto_build)
if enable_basic_auth is not None:
pulumi.set(__self__, "enable_basic_auth", enable_basic_auth)
if enable_notification is not None:
pulumi.set(__self__, "enable_notification", enable_notification)
if enable_performance_mode is not None:
pulumi.set(__self__, "enable_performance_mode", enable_performance_mode)
if enable_pull_request_preview is not None:
pulumi.set(__self__, "enable_pull_request_preview", enable_pull_request_preview)
if environment_variables is not None:
pulumi.set(__self__, "environment_variables", environment_variables)
if framework is not None:
pulumi.set(__self__, "framework", framework)
if pull_request_environment_name is not None:
pulumi.set(__self__, "pull_request_environment_name", pull_request_environment_name)
if stage is not None:
pulumi.set(__self__, "stage", stage)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if ttl is not None:
pulumi.set(__self__, "ttl", ttl)
@property
@pulumi.getter(name="appId")
def app_id(self) -> pulumi.Input[str]:
"""
The unique ID for an Amplify app.
"""
return pulumi.get(self, "app_id")
@app_id.setter
def app_id(self, value: pulumi.Input[str]):
pulumi.set(self, "app_id", value)
@property
@pulumi.getter(name="branchName")
def branch_name(self) -> pulumi.Input[str]:
"""
The name for the branch.
"""
return pulumi.get(self, "branch_name")
@branch_name.setter
def branch_name(self, value: pulumi.Input[str]):
pulumi.set(self, "branch_name", value)
@property
@pulumi.getter(name="backendEnvironmentArn")
def backend_environment_arn(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.
"""
return pulumi.get(self, "backend_environment_arn")
@backend_environment_arn.setter
def backend_environment_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backend_environment_arn", value)
@property
@pulumi.getter(name="basicAuthCredentials")
def basic_auth_credentials(self) -> Optional[pulumi.Input[str]]:
"""
The basic authorization credentials for the branch.
"""
return pulumi.get(self, "basic_auth_credentials")
@basic_auth_credentials.setter
def basic_auth_credentials(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "basic_auth_credentials", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description for the branch.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
The display name for a branch. This is used as the default domain prefix.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="enableAutoBuild")
def enable_auto_build(self) -> Optional[pulumi.Input[bool]]:
"""
Enables auto building for the branch.
"""
return pulumi.get(self, "enable_auto_build")
@enable_auto_build.setter
def enable_auto_build(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_auto_build", value)
@property
@pulumi.getter(name="enableBasicAuth")
def enable_basic_auth(self) -> Optional[pulumi.Input[bool]]:
"""
Enables basic authorization for the branch.
"""
return pulumi.get(self, "enable_basic_auth")
@enable_basic_auth.setter
def enable_basic_auth(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_basic_auth", value)
@property
@pulumi.getter(name="enableNotification")
def enable_notification(self) -> Optional[pulumi.Input[bool]]:
"""
Enables notifications for the branch.
"""
return pulumi.get(self, "enable_notification")
@enable_notification.setter
def enable_notification(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_notification", value)
@property
@pulumi.getter(name="enablePerformanceMode")
def enable_performance_mode(self) -> Optional[pulumi.Input[bool]]:
"""
Enables performance mode for the branch.
"""
return pulumi.get(self, "enable_performance_mode")
@enable_performance_mode.setter
def enable_performance_mode(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_performance_mode", value)
@property
@pulumi.getter(name="enablePullRequestPreview")
def enable_pull_request_preview(self) -> Optional[pulumi.Input[bool]]:
"""
Enables pull request previews for this branch.
"""
return pulumi.get(self, "enable_pull_request_preview")
@enable_pull_request_preview.setter
def enable_pull_request_preview(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_pull_request_preview", value)
@property
@pulumi.getter(name="environmentVariables")
def environment_variables(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The environment variables for the branch.
"""
return pulumi.get(self, "environment_variables")
@environment_variables.setter
def environment_variables(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "environment_variables", value)
@property
@pulumi.getter
def framework(self) -> Optional[pulumi.Input[str]]:
"""
The framework for the branch.
"""
return pulumi.get(self, "framework")
@framework.setter
def framework(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "framework", value)
@property
@pulumi.getter(name="pullRequestEnvironmentName")
def pull_request_environment_name(self) -> Optional[pulumi.Input[str]]:
"""
The Amplify environment name for the pull request.
"""
return pulumi.get(self, "pull_request_environment_name")
@pull_request_environment_name.setter
def pull_request_environment_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pull_request_environment_name", value)
@property
@pulumi.getter
def stage(self) -> Optional[pulumi.Input[str]]:
"""
Describes the current stage for the branch. Valid values: `PRODUCTION`, `BETA`, `DEVELOPMENT`, `EXPERIMENTAL`, `PULL_REQUEST`.
"""
return pulumi.get(self, "stage")
@stage.setter
def stage(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stage", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value mapping of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def ttl(self) -> Optional[pulumi.Input[str]]:
"""
The content Time To Live (TTL) for the website in seconds.
"""
return pulumi.get(self, "ttl")
@ttl.setter
def ttl(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ttl", value)
@pulumi.input_type
class _BranchState:
def __init__(__self__, *,
app_id: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
associated_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
backend_environment_arn: Optional[pulumi.Input[str]] = None,
basic_auth_credentials: Optional[pulumi.Input[str]] = None,
branch_name: Optional[pulumi.Input[str]] = None,
custom_domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
destination_branch: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
enable_auto_build: Optional[pulumi.Input[bool]] = None,
enable_basic_auth: Optional[pulumi.Input[bool]] = None,
enable_notification: Optional[pulumi.Input[bool]] = None,
enable_performance_mode: Optional[pulumi.Input[bool]] = None,
enable_pull_request_preview: Optional[pulumi.Input[bool]] = None,
environment_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
framework: Optional[pulumi.Input[str]] = None,
pull_request_environment_name: Optional[pulumi.Input[str]] = None,
source_branch: Optional[pulumi.Input[str]] = None,
stage: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
ttl: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Branch resources.
:param pulumi.Input[str] app_id: The unique ID for an Amplify app.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) for the branch.
:param pulumi.Input[Sequence[pulumi.Input[str]]] associated_resources: A list of custom resources that are linked to this branch.
:param pulumi.Input[str] backend_environment_arn: The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.
:param pulumi.Input[str] basic_auth_credentials: The basic authorization credentials for the branch.
:param pulumi.Input[str] branch_name: The name for the branch.
:param pulumi.Input[Sequence[pulumi.Input[str]]] custom_domains: The custom domains for the branch.
:param pulumi.Input[str] description: The description for the branch.
:param pulumi.Input[str] destination_branch: The destination branch if the branch is a pull request branch.
:param pulumi.Input[str] display_name: The display name for a branch. This is used as the default domain prefix.
:param pulumi.Input[bool] enable_auto_build: Enables auto building for the branch.
:param pulumi.Input[bool] enable_basic_auth: Enables basic authorization for the branch.
:param pulumi.Input[bool] enable_notification: Enables notifications for the branch.
:param pulumi.Input[bool] enable_performance_mode: Enables performance mode for the branch.
:param pulumi.Input[bool] enable_pull_request_preview: Enables pull request previews for this branch.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment_variables: The environment variables for the branch.
:param pulumi.Input[str] framework: The framework for the branch.
:param pulumi.Input[str] pull_request_environment_name: The Amplify environment name for the pull request.
:param pulumi.Input[str] source_branch: The source branch if the branch is a pull request branch.
:param pulumi.Input[str] stage: Describes the current stage for the branch. Valid values: `PRODUCTION`, `BETA`, `DEVELOPMENT`, `EXPERIMENTAL`, `PULL_REQUEST`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value mapping of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
:param pulumi.Input[str] ttl: The content Time To Live (TTL) for the website in seconds.
"""
if app_id is not None:
pulumi.set(__self__, "app_id", app_id)
if arn is not None:
pulumi.set(__self__, "arn", arn)
if associated_resources is not None:
pulumi.set(__self__, "associated_resources", associated_resources)
if backend_environment_arn is not None:
pulumi.set(__self__, "backend_environment_arn", backend_environment_arn)
if basic_auth_credentials is not None:
pulumi.set(__self__, "basic_auth_credentials", basic_auth_credentials)
if branch_name is not None:
pulumi.set(__self__, "branch_name", branch_name)
if custom_domains is not None:
pulumi.set(__self__, "custom_domains", custom_domains)
if description is not None:
pulumi.set(__self__, "description", description)
if destination_branch is not None:
pulumi.set(__self__, "destination_branch", destination_branch)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if enable_auto_build is not None:
pulumi.set(__self__, "enable_auto_build", enable_auto_build)
if enable_basic_auth is not None:
pulumi.set(__self__, "enable_basic_auth", enable_basic_auth)
if enable_notification is not None:
pulumi.set(__self__, "enable_notification", enable_notification)
if enable_performance_mode is not None:
pulumi.set(__self__, "enable_performance_mode", enable_performance_mode)
if enable_pull_request_preview is not None:
pulumi.set(__self__, "enable_pull_request_preview", enable_pull_request_preview)
if environment_variables is not None:
pulumi.set(__self__, "environment_variables", environment_variables)
if framework is not None:
pulumi.set(__self__, "framework", framework)
if pull_request_environment_name is not None:
pulumi.set(__self__, "pull_request_environment_name", pull_request_environment_name)
if source_branch is not None:
pulumi.set(__self__, "source_branch", source_branch)
if stage is not None:
pulumi.set(__self__, "stage", stage)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if ttl is not None:
pulumi.set(__self__, "ttl", ttl)
@property
@pulumi.getter(name="appId")
def app_id(self) -> Optional[pulumi.Input[str]]:
"""
The unique ID for an Amplify app.
"""
return pulumi.get(self, "app_id")
@app_id.setter
def app_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_id", value)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon Resource Name (ARN) for the branch.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="associatedResources")
def associated_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of custom resources that are linked to this branch.
"""
return pulumi.get(self, "associated_resources")
@associated_resources.setter
def associated_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "associated_resources", value)
@property
@pulumi.getter(name="backendEnvironmentArn")
def backend_environment_arn(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.
"""
return pulumi.get(self, "backend_environment_arn")
@backend_environment_arn.setter
def backend_environment_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backend_environment_arn", value)
@property
@pulumi.getter(name="basicAuthCredentials")
def basic_auth_credentials(self) -> Optional[pulumi.Input[str]]:
"""
The basic authorization credentials for the branch.
"""
return pulumi.get(self, "basic_auth_credentials")
@basic_auth_credentials.setter
def basic_auth_credentials(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "basic_auth_credentials", value)
@property
@pulumi.getter(name="branchName")
def branch_name(self) -> Optional[pulumi.Input[str]]:
"""
The name for the branch.
"""
return pulumi.get(self, "branch_name")
@branch_name.setter
def branch_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "branch_name", value)
@property
@pulumi.getter(name="customDomains")
def custom_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The custom domains for the branch.
"""
return pulumi.get(self, "custom_domains")
@custom_domains.setter
def custom_domains(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "custom_domains", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description for the branch.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="destinationBranch")
def destination_branch(self) -> Optional[pulumi.Input[str]]:
"""
The destination branch if the branch is a pull request branch.
"""
return pulumi.get(self, "destination_branch")
@destination_branch.setter
def destination_branch(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "destination_branch", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
The display name for a branch. This is used as the default domain prefix.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="enableAutoBuild")
def enable_auto_build(self) -> Optional[pulumi.Input[bool]]:
"""
Enables auto building for the branch.
"""
return pulumi.get(self, "enable_auto_build")
@enable_auto_build.setter
def enable_auto_build(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_auto_build", value)
@property
@pulumi.getter(name="enableBasicAuth")
def enable_basic_auth(self) -> Optional[pulumi.Input[bool]]:
"""
Enables basic authorization for the branch.
"""
return pulumi.get(self, "enable_basic_auth")
@enable_basic_auth.setter
def enable_basic_auth(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_basic_auth", value)
@property
@pulumi.getter(name="enableNotification")
def enable_notification(self) -> Optional[pulumi.Input[bool]]:
"""
Enables notifications for the branch.
"""
return pulumi.get(self, "enable_notification")
@enable_notification.setter
def enable_notification(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_notification", value)
@property
@pulumi.getter(name="enablePerformanceMode")
def enable_performance_mode(self) -> Optional[pulumi.Input[bool]]:
"""
Enables performance mode for the branch.
"""
return pulumi.get(self, "enable_performance_mode")
@enable_performance_mode.setter
def enable_performance_mode(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_performance_mode", value)
@property
@pulumi.getter(name="enablePullRequestPreview")
def enable_pull_request_preview(self) -> Optional[pulumi.Input[bool]]:
"""
Enables pull request previews for this branch.
"""
return pulumi.get(self, "enable_pull_request_preview")
@enable_pull_request_preview.setter
def enable_pull_request_preview(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_pull_request_preview", value)
@property
@pulumi.getter(name="environmentVariables")
def environment_variables(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The environment variables for the branch.
"""
return pulumi.get(self, "environment_variables")
@environment_variables.setter
def environment_variables(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "environment_variables", value)
@property
@pulumi.getter
def framework(self) -> Optional[pulumi.Input[str]]:
"""
The framework for the branch.
"""
return pulumi.get(self, "framework")
@framework.setter
def framework(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "framework", value)
@property
@pulumi.getter(name="pullRequestEnvironmentName")
def pull_request_environment_name(self) -> Optional[pulumi.Input[str]]:
"""
The Amplify environment name for the pull request.
"""
return pulumi.get(self, "pull_request_environment_name")
@pull_request_environment_name.setter
def pull_request_environment_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pull_request_environment_name", value)
@property
@pulumi.getter(name="sourceBranch")
def source_branch(self) -> Optional[pulumi.Input[str]]:
"""
The source branch if the branch is a pull request branch.
"""
return pulumi.get(self, "source_branch")
@source_branch.setter
def source_branch(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_branch", value)
@property
@pulumi.getter
def stage(self) -> Optional[pulumi.Input[str]]:
"""
Describes the current stage for the branch. Valid values: `PRODUCTION`, `BETA`, `DEVELOPMENT`, `EXPERIMENTAL`, `PULL_REQUEST`.
"""
return pulumi.get(self, "stage")
@stage.setter
def stage(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stage", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value mapping of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@property
@pulumi.getter
def ttl(self) -> Optional[pulumi.Input[str]]:
"""
The content Time To Live (TTL) for the website in seconds.
"""
return pulumi.get(self, "ttl")
@ttl.setter
def ttl(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ttl", value)
class Branch(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_id: Optional[pulumi.Input[str]] = None,
backend_environment_arn: Optional[pulumi.Input[str]] = None,
basic_auth_credentials: Optional[pulumi.Input[str]] = None,
branch_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
enable_auto_build: Optional[pulumi.Input[bool]] = None,
enable_basic_auth: Optional[pulumi.Input[bool]] = None,
enable_notification: Optional[pulumi.Input[bool]] = None,
enable_performance_mode: Optional[pulumi.Input[bool]] = None,
enable_pull_request_preview: Optional[pulumi.Input[bool]] = None,
environment_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
framework: Optional[pulumi.Input[str]] = None,
pull_request_environment_name: Optional[pulumi.Input[str]] = None,
stage: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
ttl: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides an Amplify Branch resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.amplify.App("example")
master = aws.amplify.Branch("master",
app_id=example.id,
branch_name="master",
framework="React",
stage="PRODUCTION",
environment_variables={
"REACT_APP_API_SERVER": "https://api.example.com",
})
```
### Notifications
Amplify Console uses EventBridge (formerly known as CloudWatch Events) and SNS for email notifications. To implement the same functionality, you need to set `enable_notification` in a `amplify.Branch` resource, as well as creating an EventBridge Rule, an SNS topic, and SNS subscriptions.
```python
import pulumi
import json
import pulumi_aws as aws
example = aws.amplify.App("example")
master = aws.amplify.Branch("master",
app_id=example.id,
branch_name="master",
enable_notification=True)
# EventBridge Rule for Amplify notifications
amplify_app_master_event_rule = aws.cloudwatch.EventRule("amplifyAppMasterEventRule",
            description=pulumi.Output.all(example.id, master.branch_name).apply(lambda args: f"AWS Amplify build notifications for : App: {args[0]} Branch: {args[1]}"),
            event_pattern=pulumi.Output.all(example.id, master.branch_name).apply(lambda args: json.dumps({
                "detail": {
                    "appId": [args[0]],
                    "branchName": [args[1]],
                    "jobStatus": [
                        "SUCCEED",
                        "FAILED",
                        "STARTED",
                    ],
                },
                "detail-type": ["Amplify Deployment Status Change"],
                "source": ["aws.amplify"],
            })))
amplify_app_master_topic = aws.sns.Topic("amplifyAppMasterTopic")
amplify_app_master_event_target = aws.cloudwatch.EventTarget("amplifyAppMasterEventTarget",
rule=amplify_app_master_event_rule.name,
arn=amplify_app_master_topic.arn,
input_transformer=aws.cloudwatch.EventTargetInputTransformerArgs(
input_paths={
"jobId": "$.detail.jobId",
"appId": "$.detail.appId",
"region": "$.region",
"branch": "$.detail.branchName",
"status": "$.detail.jobStatus",
},
input_template="\"Build notification from the AWS Amplify Console for app: https://<branch>.<appId>.amplifyapp.com/. Your build status is <status>. Go to https://console.aws.amazon.com/amplify/home?region=<region>#<appId>/<branch>/<jobId> to view details on your build. \"",
))
# SNS Topic for Amplify notifications
        amplify_app_master_policy_document = pulumi.Output.all(master.arn, amplify_app_master_topic.arn).apply(lambda args: aws.iam.get_policy_document_output(statements=[aws.iam.GetPolicyDocumentStatementArgs(
            sid=f"Allow_Publish_Events {args[0]}",
            effect="Allow",
            actions=["SNS:Publish"],
            principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
                type="Service",
                identifiers=["events.amazonaws.com"],
            )],
            resources=[args[1]],
        )]))
amplify_app_master_topic_policy = aws.sns.TopicPolicy("amplifyAppMasterTopicPolicy",
arn=amplify_app_master_topic.arn,
policy=amplify_app_master_policy_document.json)
```
## Import
Amplify branch can be imported using `app_id` and `branch_name`, e.g.,
```sh
$ pulumi import aws:amplify/branch:Branch master d2ypk4k47z8u6/master
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_id: The unique ID for an Amplify app.
:param pulumi.Input[str] backend_environment_arn: The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.
:param pulumi.Input[str] basic_auth_credentials: The basic authorization credentials for the branch.
:param pulumi.Input[str] branch_name: The name for the branch.
:param pulumi.Input[str] description: The description for the branch.
:param pulumi.Input[str] display_name: The display name for a branch. This is used as the default domain prefix.
:param pulumi.Input[bool] enable_auto_build: Enables auto building for the branch.
:param pulumi.Input[bool] enable_basic_auth: Enables basic authorization for the branch.
:param pulumi.Input[bool] enable_notification: Enables notifications for the branch.
:param pulumi.Input[bool] enable_performance_mode: Enables performance mode for the branch.
:param pulumi.Input[bool] enable_pull_request_preview: Enables pull request previews for this branch.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment_variables: The environment variables for the branch.
:param pulumi.Input[str] framework: The framework for the branch.
:param pulumi.Input[str] pull_request_environment_name: The Amplify environment name for the pull request.
:param pulumi.Input[str] stage: Describes the current stage for the branch. Valid values: `PRODUCTION`, `BETA`, `DEVELOPMENT`, `EXPERIMENTAL`, `PULL_REQUEST`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value mapping of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[str] ttl: The content Time To Live (TTL) for the website in seconds.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BranchArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an Amplify Branch resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.amplify.App("example")
master = aws.amplify.Branch("master",
app_id=example.id,
branch_name="master",
framework="React",
stage="PRODUCTION",
environment_variables={
"REACT_APP_API_SERVER": "https://api.example.com",
})
```
### Notifications
Amplify Console uses EventBridge (formerly known as CloudWatch Events) and SNS for email notifications. To implement the same functionality, you need to set `enable_notification` in a `amplify.Branch` resource, as well as creating an EventBridge Rule, an SNS topic, and SNS subscriptions.
```python
import pulumi
import json
import pulumi_aws as aws
example = aws.amplify.App("example")
master = aws.amplify.Branch("master",
app_id=example.id,
branch_name="master",
enable_notification=True)
# EventBridge Rule for Amplify notifications
amplify_app_master_event_rule = aws.cloudwatch.EventRule("amplifyAppMasterEventRule",
            description=pulumi.Output.all(example.id, master.branch_name).apply(lambda args: f"AWS Amplify build notifications for : App: {args[0]} Branch: {args[1]}"),
            event_pattern=pulumi.Output.all(example.id, master.branch_name).apply(lambda args: json.dumps({
                "detail": {
                    "appId": [args[0]],
                    "branchName": [args[1]],
                    "jobStatus": [
                        "SUCCEED",
                        "FAILED",
                        "STARTED",
                    ],
                },
                "detail-type": ["Amplify Deployment Status Change"],
                "source": ["aws.amplify"],
            })))
amplify_app_master_topic = aws.sns.Topic("amplifyAppMasterTopic")
amplify_app_master_event_target = aws.cloudwatch.EventTarget("amplifyAppMasterEventTarget",
rule=amplify_app_master_event_rule.name,
arn=amplify_app_master_topic.arn,
input_transformer=aws.cloudwatch.EventTargetInputTransformerArgs(
input_paths={
"jobId": "$.detail.jobId",
"appId": "$.detail.appId",
"region": "$.region",
"branch": "$.detail.branchName",
"status": "$.detail.jobStatus",
},
input_template="\"Build notification from the AWS Amplify Console for app: https://<branch>.<appId>.amplifyapp.com/. Your build status is <status>. Go to https://console.aws.amazon.com/amplify/home?region=<region>#<appId>/<branch>/<jobId> to view details on your build. \"",
))
# SNS Topic for Amplify notifications
        amplify_app_master_policy_document = pulumi.Output.all(master.arn, amplify_app_master_topic.arn).apply(lambda args: aws.iam.get_policy_document_output(statements=[aws.iam.GetPolicyDocumentStatementArgs(
            sid=f"Allow_Publish_Events {args[0]}",
            effect="Allow",
            actions=["SNS:Publish"],
            principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
                type="Service",
                identifiers=["events.amazonaws.com"],
            )],
            resources=[args[1]],
        )]))
amplify_app_master_topic_policy = aws.sns.TopicPolicy("amplifyAppMasterTopicPolicy",
arn=amplify_app_master_topic.arn,
policy=amplify_app_master_policy_document.json)
```
## Import
Amplify branch can be imported using `app_id` and `branch_name`, e.g.,
```sh
$ pulumi import aws:amplify/branch:Branch master d2ypk4k47z8u6/master
```
:param str resource_name: The name of the resource.
:param BranchArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BranchArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_id: Optional[pulumi.Input[str]] = None,
backend_environment_arn: Optional[pulumi.Input[str]] = None,
basic_auth_credentials: Optional[pulumi.Input[str]] = None,
branch_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
enable_auto_build: Optional[pulumi.Input[bool]] = None,
enable_basic_auth: Optional[pulumi.Input[bool]] = None,
enable_notification: Optional[pulumi.Input[bool]] = None,
enable_performance_mode: Optional[pulumi.Input[bool]] = None,
enable_pull_request_preview: Optional[pulumi.Input[bool]] = None,
environment_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
framework: Optional[pulumi.Input[str]] = None,
pull_request_environment_name: Optional[pulumi.Input[str]] = None,
stage: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
ttl: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BranchArgs.__new__(BranchArgs)
if app_id is None and not opts.urn:
raise TypeError("Missing required property 'app_id'")
__props__.__dict__["app_id"] = app_id
__props__.__dict__["backend_environment_arn"] = backend_environment_arn
__props__.__dict__["basic_auth_credentials"] = basic_auth_credentials
if branch_name is None and not opts.urn:
raise TypeError("Missing required property 'branch_name'")
__props__.__dict__["branch_name"] = branch_name
__props__.__dict__["description"] = description
__props__.__dict__["display_name"] = display_name
__props__.__dict__["enable_auto_build"] = enable_auto_build
__props__.__dict__["enable_basic_auth"] = enable_basic_auth
__props__.__dict__["enable_notification"] = enable_notification
__props__.__dict__["enable_performance_mode"] = enable_performance_mode
__props__.__dict__["enable_pull_request_preview"] = enable_pull_request_preview
__props__.__dict__["environment_variables"] = environment_variables
__props__.__dict__["framework"] = framework
__props__.__dict__["pull_request_environment_name"] = pull_request_environment_name
__props__.__dict__["stage"] = stage
__props__.__dict__["tags"] = tags
__props__.__dict__["ttl"] = ttl
__props__.__dict__["arn"] = None
__props__.__dict__["associated_resources"] = None
__props__.__dict__["custom_domains"] = None
__props__.__dict__["destination_branch"] = None
__props__.__dict__["source_branch"] = None
__props__.__dict__["tags_all"] = None
super(Branch, __self__).__init__(
'aws:amplify/branch:Branch',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
app_id: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
associated_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
backend_environment_arn: Optional[pulumi.Input[str]] = None,
basic_auth_credentials: Optional[pulumi.Input[str]] = None,
branch_name: Optional[pulumi.Input[str]] = None,
custom_domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
destination_branch: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
enable_auto_build: Optional[pulumi.Input[bool]] = None,
enable_basic_auth: Optional[pulumi.Input[bool]] = None,
enable_notification: Optional[pulumi.Input[bool]] = None,
enable_performance_mode: Optional[pulumi.Input[bool]] = None,
enable_pull_request_preview: Optional[pulumi.Input[bool]] = None,
environment_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
framework: Optional[pulumi.Input[str]] = None,
pull_request_environment_name: Optional[pulumi.Input[str]] = None,
source_branch: Optional[pulumi.Input[str]] = None,
stage: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
ttl: Optional[pulumi.Input[str]] = None) -> 'Branch':
"""
Get an existing Branch resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_id: The unique ID for an Amplify app.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) for the branch.
:param pulumi.Input[Sequence[pulumi.Input[str]]] associated_resources: A list of custom resources that are linked to this branch.
:param pulumi.Input[str] backend_environment_arn: The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.
:param pulumi.Input[str] basic_auth_credentials: The basic authorization credentials for the branch.
:param pulumi.Input[str] branch_name: The name for the branch.
:param pulumi.Input[Sequence[pulumi.Input[str]]] custom_domains: The custom domains for the branch.
:param pulumi.Input[str] description: The description for the branch.
:param pulumi.Input[str] destination_branch: The destination branch if the branch is a pull request branch.
:param pulumi.Input[str] display_name: The display name for a branch. This is used as the default domain prefix.
:param pulumi.Input[bool] enable_auto_build: Enables auto building for the branch.
:param pulumi.Input[bool] enable_basic_auth: Enables basic authorization for the branch.
:param pulumi.Input[bool] enable_notification: Enables notifications for the branch.
:param pulumi.Input[bool] enable_performance_mode: Enables performance mode for the branch.
:param pulumi.Input[bool] enable_pull_request_preview: Enables pull request previews for this branch.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment_variables: The environment variables for the branch.
:param pulumi.Input[str] framework: The framework for the branch.
:param pulumi.Input[str] pull_request_environment_name: The Amplify environment name for the pull request.
:param pulumi.Input[str] source_branch: The source branch if the branch is a pull request branch.
:param pulumi.Input[str] stage: Describes the current stage for the branch. Valid values: `PRODUCTION`, `BETA`, `DEVELOPMENT`, `EXPERIMENTAL`, `PULL_REQUEST`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value mapping of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
:param pulumi.Input[str] ttl: The content Time To Live (TTL) for the website in seconds.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _BranchState.__new__(_BranchState)
__props__.__dict__["app_id"] = app_id
__props__.__dict__["arn"] = arn
__props__.__dict__["associated_resources"] = associated_resources
__props__.__dict__["backend_environment_arn"] = backend_environment_arn
__props__.__dict__["basic_auth_credentials"] = basic_auth_credentials
__props__.__dict__["branch_name"] = branch_name
__props__.__dict__["custom_domains"] = custom_domains
__props__.__dict__["description"] = description
__props__.__dict__["destination_branch"] = destination_branch
__props__.__dict__["display_name"] = display_name
__props__.__dict__["enable_auto_build"] = enable_auto_build
__props__.__dict__["enable_basic_auth"] = enable_basic_auth
__props__.__dict__["enable_notification"] = enable_notification
__props__.__dict__["enable_performance_mode"] = enable_performance_mode
__props__.__dict__["enable_pull_request_preview"] = enable_pull_request_preview
__props__.__dict__["environment_variables"] = environment_variables
__props__.__dict__["framework"] = framework
__props__.__dict__["pull_request_environment_name"] = pull_request_environment_name
__props__.__dict__["source_branch"] = source_branch
__props__.__dict__["stage"] = stage
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["ttl"] = ttl
return Branch(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="appId")
def app_id(self) -> pulumi.Output[str]:
"""
The unique ID for an Amplify app.
"""
return pulumi.get(self, "app_id")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) for the branch.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="associatedResources")
def associated_resources(self) -> pulumi.Output[Sequence[str]]:
"""
A list of custom resources that are linked to this branch.
"""
return pulumi.get(self, "associated_resources")
@property
@pulumi.getter(name="backendEnvironmentArn")
def backend_environment_arn(self) -> pulumi.Output[Optional[str]]:
"""
The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.
"""
return pulumi.get(self, "backend_environment_arn")
@property
@pulumi.getter(name="basicAuthCredentials")
def basic_auth_credentials(self) -> pulumi.Output[Optional[str]]:
"""
The basic authorization credentials for the branch.
"""
return pulumi.get(self, "basic_auth_credentials")
@property
@pulumi.getter(name="branchName")
def branch_name(self) -> pulumi.Output[str]:
"""
The name for the branch.
"""
return pulumi.get(self, "branch_name")
@property
@pulumi.getter(name="customDomains")
def custom_domains(self) -> pulumi.Output[Sequence[str]]:
"""
The custom domains for the branch.
"""
return pulumi.get(self, "custom_domains")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description for the branch.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="destinationBranch")
def destination_branch(self) -> pulumi.Output[str]:
"""
The destination branch if the branch is a pull request branch.
"""
return pulumi.get(self, "destination_branch")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
The display name for a branch. This is used as the default domain prefix.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="enableAutoBuild")
def enable_auto_build(self) -> pulumi.Output[Optional[bool]]:
"""
Enables auto building for the branch.
"""
return pulumi.get(self, "enable_auto_build")
@property
@pulumi.getter(name="enableBasicAuth")
def enable_basic_auth(self) -> pulumi.Output[Optional[bool]]:
"""
Enables basic authorization for the branch.
"""
return pulumi.get(self, "enable_basic_auth")
@property
@pulumi.getter(name="enableNotification")
def enable_notification(self) -> pulumi.Output[Optional[bool]]:
"""
Enables notifications for the branch.
"""
return pulumi.get(self, "enable_notification")
@property
@pulumi.getter(name="enablePerformanceMode")
def enable_performance_mode(self) -> pulumi.Output[Optional[bool]]:
"""
Enables performance mode for the branch.
"""
return pulumi.get(self, "enable_performance_mode")
@property
@pulumi.getter(name="enablePullRequestPreview")
def enable_pull_request_preview(self) -> pulumi.Output[Optional[bool]]:
"""
Enables pull request previews for this branch.
"""
return pulumi.get(self, "enable_pull_request_preview")
@property
@pulumi.getter(name="environmentVariables")
def environment_variables(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The environment variables for the branch.
"""
return pulumi.get(self, "environment_variables")
@property
@pulumi.getter
def framework(self) -> pulumi.Output[Optional[str]]:
"""
The framework for the branch.
"""
return pulumi.get(self, "framework")
@property
@pulumi.getter(name="pullRequestEnvironmentName")
def pull_request_environment_name(self) -> pulumi.Output[Optional[str]]:
"""
The Amplify environment name for the pull request.
"""
return pulumi.get(self, "pull_request_environment_name")
@property
@pulumi.getter(name="sourceBranch")
def source_branch(self) -> pulumi.Output[str]:
"""
The source branch if the branch is a pull request branch.
"""
return pulumi.get(self, "source_branch")
@property
@pulumi.getter
def stage(self) -> pulumi.Output[Optional[str]]:
"""
Describes the current stage for the branch. Valid values: `PRODUCTION`, `BETA`, `DEVELOPMENT`, `EXPERIMENTAL`, `PULL_REQUEST`.
"""
return pulumi.get(self, "stage")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value mapping of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
@property
@pulumi.getter
def ttl(self) -> pulumi.Output[Optional[str]]:
"""
The content Time To Live (TTL) for the website in seconds.
"""
return pulumi.get(self, "ttl")
| 46.574313
| 297
| 0.654954
|
a45592004ec3dec77d2b25fdcd4f1879d41e8de7
| 3,460
|
py
|
Python
|
liquid/builtin/tags/cycle_tag.py
|
jg-rp/liquid
|
a2946d3ee7f8ac0c8db05f943ff8deb10bfad18c
|
[
"MIT"
] | 19
|
2021-03-20T05:44:23.000Z
|
2022-03-24T18:34:34.000Z
|
liquid/builtin/tags/cycle_tag.py
|
jg-rp/liquid
|
a2946d3ee7f8ac0c8db05f943ff8deb10bfad18c
|
[
"MIT"
] | 50
|
2020-12-07T15:30:56.000Z
|
2022-03-25T17:10:10.000Z
|
liquid/builtin/tags/cycle_tag.py
|
jg-rp/liquid
|
a2946d3ee7f8ac0c8db05f943ff8deb10bfad18c
|
[
"MIT"
] | 3
|
2021-01-24T15:51:22.000Z
|
2022-03-25T16:00:17.000Z
|
"""Tag and node definition for the built-in "cycle" tag."""
import sys
from typing import Any
from typing import List
from typing import Optional
from typing import TextIO
from liquid.ast import Node
from liquid.context import Context
from liquid.exceptions import LiquidSyntaxError
from liquid.expression import Expression
from liquid.lex import tokenize_filtered_expression
from liquid.parse import expect
from liquid.parse import parse_expression
from liquid.parse import parse_string_or_identifier
from liquid.stream import TokenStream
from liquid.tag import Tag
from liquid.token import Token
from liquid.token import TOKEN_TAG
from liquid.token import TOKEN_EXPRESSION
from liquid.token import TOKEN_EOF
from liquid.token import TOKEN_COMMA
from liquid.token import TOKEN_COLON
TAG_CYCLE = sys.intern("cycle")
class CycleNode(Node):
"""Parse tree node for the built-in "cycle" tag."""
__slots__ = ("tok", "group", "args", "key")
def __init__(self, tok: Token, group: Optional[Expression], args: List[Any]):
self.tok = tok
self.group = group
self.args = args
def __str__(self) -> str:
buf = ["cycle ("]
if self.group:
buf.append(f"{self.group}: ")
buf.append(", ".join([str(arg) for arg in self.args]))
buf.append(")")
return "".join(buf)
def render_to_output(self, context: Context, buffer: TextIO) -> Optional[bool]:
if self.group:
group_name = str(self.group.evaluate(context))
else:
group_name = ""
args = [arg.evaluate(context) for arg in self.args]
buffer.write(str(next(context.cycle(group_name, args))))
return True
async def render_to_output_async(
self, context: Context, buffer: TextIO
) -> Optional[bool]:
if self.group:
group_name = str(await self.group.evaluate_async(context))
else:
group_name = ""
args = [await arg.evaluate_async(context) for arg in self.args]
buffer.write(str(next(context.cycle(group_name, args))))
return True
class CycleTag(Tag):
"""The built-in "cycle" tag."""
name = TAG_CYCLE
block = False
def parse(self, stream: TokenStream) -> Node:
expect(stream, TOKEN_TAG, value=TAG_CYCLE)
tok = stream.current
stream.next_token()
expect(stream, TOKEN_EXPRESSION)
expr_stream = TokenStream(tokenize_filtered_expression(stream.current.value))
group_name: Optional[Expression] = None
if ":" in stream.current.value:
group_name = parse_string_or_identifier(expr_stream, linenum=tok.linenum)
expr_stream.next_token()
expect(expr_stream, TOKEN_COLON)
expr_stream.next_token()
args = []
while expr_stream.current.type != TOKEN_EOF:
val = parse_expression(expr_stream)
args.append(val)
expr_stream.next_token()
if expr_stream.current.type == TOKEN_COMMA:
expr_stream.next_token() # Eat comma
elif expr_stream.current.type == TOKEN_EOF:
break
else:
raise LiquidSyntaxError(
f"expected a comma separated list of arguments, "
f"found {expr_stream.current.type}",
linenum=tok.linenum,
)
return CycleNode(tok, group_name, args)
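# Illustrative sketch (not part of the original module): rendering a template
# that exercises CycleTag/CycleNode through the public python-liquid API. The
# output shown is an expectation, not something asserted by this file.
#
#     from liquid import Environment
#
#     env = Environment()
#     template = env.from_string("{% cycle 'odd', 'even' %}-{% cycle 'odd', 'even' %}")
#     template.render()    # -> "odd-even"; both tags share the unnamed group
#
#     # A named group keeps its own counter, captured by `group` above:
#     env.from_string("{% cycle 'row': 'a', 'b' %}").render()   # -> "a"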
| 29.827586
| 85
| 0.643064
|
6c12e6fb6d21856a4eecf35c56cd8757cfa96c0a
| 22,801
|
py
|
Python
|
interfaces/ATS_VM/python_apps/api_clients/api_client.py
|
krattai/AEBL
|
a7b12c97479e1236d5370166b15ca9f29d7d4265
|
[
"BSD-2-Clause"
] | 4
|
2016-04-26T03:43:54.000Z
|
2016-11-17T08:09:04.000Z
|
interfaces/ATS_VM/python_apps/api_clients/api_client.py
|
krattai/AEBL
|
a7b12c97479e1236d5370166b15ca9f29d7d4265
|
[
"BSD-2-Clause"
] | 17
|
2015-01-05T21:06:22.000Z
|
2015-12-07T20:45:44.000Z
|
interfaces/ATS_VM/python_apps/api_clients/api_client.py
|
krattai/AEBL
|
a7b12c97479e1236d5370166b15ca9f29d7d4265
|
[
"BSD-2-Clause"
] | 3
|
2016-04-26T03:43:55.000Z
|
2020-11-06T11:02:08.000Z
|
###############################################################################
# This file holds the implementations for all the API clients.
#
# If you want to develop a new client, here are some suggestions: Get the fetch
# methods working first, then the push, then the liquidsoap notifier. You will
# probably want to create a script on your server side to automatically
# schedule a playlist one minute from the current time.
###############################################################################
import sys
import time
import urllib
import urllib2
import socket
import logging
import json
import base64
import traceback
from configobj import ConfigObj
AIRTIME_API_VERSION = "1.1"
# TODO : Place these functions in some common module. Right now, media
# monitor uses the same functions and it would be better to reuse them
# instead of copy pasting them around
def to_unicode(obj, encoding='utf-8'):
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding)
return obj
def encode_to(obj, encoding='utf-8'):
if isinstance(obj, unicode):
obj = obj.encode(encoding)
return obj
def convert_dict_value_to_utf8(md):
#list comprehension to convert all values of md to utf-8
return dict([(item[0], encode_to(item[1], "utf-8")) for item in md.items()])
api_config = {}
# URL to get the version number of the server API
api_config['version_url'] = 'version/api_key/%%api_key%%'
#URL to register a components IP Address with the central web server
api_config['register_component'] = 'register-component/format/json/api_key/%%api_key%%/component/%%component%%'
#media-monitor
api_config['media_setup_url'] = 'media-monitor-setup/format/json/api_key/%%api_key%%'
api_config['upload_recorded'] = 'upload-recorded/format/json/api_key/%%api_key%%/fileid/%%fileid%%/showinstanceid/%%showinstanceid%%'
api_config['update_media_url'] = 'reload-metadata/format/json/api_key/%%api_key%%/mode/%%mode%%'
api_config['list_all_db_files'] = 'list-all-files/format/json/api_key/%%api_key%%/dir_id/%%dir_id%%/all/%%all%%'
api_config['list_all_watched_dirs'] = 'list-all-watched-dirs/format/json/api_key/%%api_key%%'
api_config['add_watched_dir'] = 'add-watched-dir/format/json/api_key/%%api_key%%/path/%%path%%'
api_config['remove_watched_dir'] = 'remove-watched-dir/format/json/api_key/%%api_key%%/path/%%path%%'
api_config['set_storage_dir'] = 'set-storage-dir/format/json/api_key/%%api_key%%/path/%%path%%'
api_config['update_fs_mount'] = 'update-file-system-mount/format/json/api_key/%%api_key%%'
api_config['reload_metadata_group'] = 'reload-metadata-group/format/json/api_key/%%api_key%%'
api_config['handle_watched_dir_missing'] = 'handle-watched-dir-missing/format/json/api_key/%%api_key%%/dir/%%dir%%'
#show-recorder
api_config['show_schedule_url'] = 'recorded-shows/format/json/api_key/%%api_key%%'
api_config['upload_file_url'] = 'upload-file/format/json/api_key/%%api_key%%'
api_config['upload_retries'] = '3'
api_config['upload_wait'] = '60'
#pypo
api_config['export_url'] = 'schedule/api_key/%%api_key%%'
api_config['get_media_url'] = 'get-media/file/%%file%%/api_key/%%api_key%%'
api_config['update_item_url'] = 'notify-schedule-group-play/api_key/%%api_key%%/schedule_id/%%schedule_id%%'
api_config['update_start_playing_url'] = 'notify-media-item-start-play/api_key/%%api_key%%/media_id/%%media_id%%/'
api_config['get_stream_setting'] = 'get-stream-setting/format/json/api_key/%%api_key%%/'
api_config['update_liquidsoap_status'] = 'update-liquidsoap-status/format/json/api_key/%%api_key%%/msg/%%msg%%/stream_id/%%stream_id%%/boot_time/%%boot_time%%'
api_config['update_source_status'] = 'update-source-status/format/json/api_key/%%api_key%%/sourcename/%%sourcename%%/status/%%status%%'
api_config['check_live_stream_auth'] = 'check-live-stream-auth/format/json/api_key/%%api_key%%/username/%%username%%/password/%%password%%/djtype/%%djtype%%'
api_config['get_bootstrap_info'] = 'get-bootstrap-info/format/json/api_key/%%api_key%%'
api_config['get_files_without_replay_gain'] = 'get-files-without-replay-gain/api_key/%%api_key%%/dir_id/%%dir_id%%'
api_config['update_replay_gain_value'] = 'update-replay-gain-value/format/json/api_key/%%api_key%%'
api_config['notify_webstream_data'] = 'notify-webstream-data/api_key/%%api_key%%/media_id/%%media_id%%/format/json'
api_config['notify_liquidsoap_started'] = 'rabbitmq-do-push/api_key/%%api_key%%/format/json'
api_config['get_stream_parameters'] = 'get-stream-parameters/api_key/%%api_key%%/format/json'
api_config['push_stream_stats'] = 'push-stream-stats/api_key/%%api_key%%/format/json'
api_config['update_stream_setting_table'] = 'update-stream-setting-table/api_key/%%api_key%%/format/json'
api_config['get_files_without_silan_value'] = 'get-files-without-silan-value/api_key/%%api_key%%'
api_config['update_cue_values_by_silan'] = 'update-cue-values-by-silan/api_key/%%api_key%%'
################################################################################
# Airtime API Client
################################################################################
class UrlException(Exception): pass
class IncompleteUrl(UrlException):
def __init__(self, url): self.url = url
def __str__(self): return "Incomplete url: '%s'" % self.url
class UrlBadParam(UrlException):
def __init__(self, url, param):
self.url = url
self.param = param
def __str__(self):
return "Bad param '%s' passed into url: '%s'" % (self.param, self.url)
class ApcUrl(object):
    """ A safe, testable abstraction for filling in the parameters of the
    templated urls from api_client.cfg """
def __init__(self, base_url): self.base_url = base_url
def params(self, **params):
temp_url = self.base_url
for k, v in params.iteritems():
wrapped_param = "%%" + k + "%%"
if wrapped_param in temp_url:
temp_url = temp_url.replace(wrapped_param, str(v))
else: raise UrlBadParam(self.base_url, k)
return ApcUrl(temp_url)
def url(self):
if '%%' in self.base_url: raise IncompleteUrl(self.base_url)
else: return self.base_url
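# Illustrative sketch (not part of the original file): how ApcUrl fills in the
# %%param%% placeholders used by api_config above. The values are made up.
#
#     u = ApcUrl('get-media/file/%%file%%/api_key/%%api_key%%')
#     u = u.params(file='song.mp3', api_key='secret')
#     u.url()                          # -> 'get-media/file/song.mp3/api_key/secret'
#     ApcUrl('x/%%missing%%').url()    # raises IncompleteUrl
#     u.params(bogus=1)                # raises UrlBadParam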
class ApiRequest(object):
API_HTTP_REQUEST_TIMEOUT = 30 # 30 second HTTP request timeout
def __init__(self, name, url, logger=None):
self.name = name
self.url = url
self.__req = None
if logger is None: self.logger = logging
else: self.logger = logger
def __call__(self,_post_data=None, **kwargs):
final_url = self.url.params(**kwargs).url()
if _post_data is not None: _post_data = urllib.urlencode(_post_data)
self.logger.debug(final_url)
try:
req = urllib2.Request(final_url, _post_data)
f = urllib2.urlopen(req, timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT)
content_type = f.info().getheader('Content-Type')
response = f.read()
#Everything that calls an ApiRequest should be catching URLError explicitly
#(according to the other comments in this file and a cursory grep through the code)
#Note that URLError can occur for timeouts as well as socket.timeout
except socket.timeout:
self.logger.error('HTTP request to %s timed out', final_url)
raise
except Exception, e:
#self.logger.error('Exception: %s', e)
#self.logger.error("traceback: %s", traceback.format_exc())
raise
try:
if content_type == 'application/json':
data = json.loads(response)
return data
else:
raise InvalidContentType()
except Exception:
#self.logger.error(response)
#self.logger.error("traceback: %s", traceback.format_exc())
raise
def req(self, *args, **kwargs):
self.__req = lambda : self(*args, **kwargs)
return self
def retry(self, n, delay=5):
"""Try to send request n times. If after n times it fails then
we finally raise exception"""
for i in range(0,n-1):
try: return self.__req()
except Exception: time.sleep(delay)
return self.__req()
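# Illustrative sketch (not part of the original file): deferring a request with
# req() so it can be retried. The url is hypothetical, and calling retry()
# would actually perform HTTP requests.
#
#     ping = ApiRequest('ping', ApcUrl('http://example.com/ping'))
#     ping.req().retry(3, delay=2)     # re-issues the call up to 3 times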
class RequestProvider(object):
    """ Creates the available ApiRequest instances for the actions read from
    a config file """
def __init__(self, cfg):
self.config = cfg
self.requests = {}
if self.config["base_dir"].startswith("/"):
self.config["base_dir"] = self.config["base_dir"][1:]
self.url = ApcUrl("http://%s:%s/%s%s/%s" \
% (self.config["host"], str(self.config["base_port"]),
self.config["base_dir"], self.config["api_base"],
'%%action%%'))
# Now we must discover the possible actions
actions = dict( (k,v) for k,v in cfg.iteritems() if '%%api_key%%' in v)
for action_name, action_value in actions.iteritems():
new_url = self.url.params(action=action_value).params(
api_key=self.config['api_key'])
self.requests[action_name] = ApiRequest(action_name, new_url)
def available_requests(self) : return self.requests.keys()
def __contains__(self, request) : return request in self.requests
def __getattr__(self, attr):
if attr in self: return self.requests[attr]
else: return super(RequestProvider, self).__getattribute__(attr)
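# Illustrative sketch (not part of the original file): every config entry whose
# value contains %%api_key%% becomes a callable attribute on the provider. All
# configuration values below are placeholders.
#
#     cfg = {'host': 'localhost', 'base_port': '80', 'base_dir': 'airtime',
#            'api_base': '/api', 'api_key': 'secret',
#            'version_url': api_config['version_url']}
#     provider = RequestProvider(cfg)
#     'version_url' in provider     # True
#     provider.version_url()        # performs the HTTP call, expects JSON back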
class AirtimeApiClient(object):
def __init__(self, logger=None,config_path='/etc/airtime/api_client.cfg'):
if logger is None: self.logger = logging
else: self.logger = logger
# loading config file
try:
self.config = ConfigObj(config_path)
self.config.update(api_config)
self.services = RequestProvider(self.config)
except Exception, e:
self.logger.error('Error loading config file: %s', config_path)
self.logger.error("traceback: %s", traceback.format_exc())
sys.exit(1)
def __get_airtime_version(self):
try: return self.services.version_url()[u'airtime_version']
except Exception: return -1
def __get_api_version(self):
try: return self.services.version_url()[u'api_version']
except Exception: return -1
def is_server_compatible(self, verbose=True):
logger = self.logger
api_version = self.__get_api_version()
# logger.info('Airtime version found: ' + str(version))
if api_version == -1:
if verbose:
logger.info('Unable to get Airtime API version number.\n')
return False
elif api_version[0:3] != AIRTIME_API_VERSION[0:3]:
if verbose:
logger.info('Airtime API version found: ' + str(api_version))
logger.info('pypo is only compatible with API version: ' + AIRTIME_API_VERSION)
return False
else:
if verbose:
logger.info('Airtime API version found: ' + str(api_version))
logger.info('pypo is only compatible with API version: ' + AIRTIME_API_VERSION)
return True
def get_schedule(self):
# TODO : properly refactor this routine
# For now the return type is a little messed up for compatibility reasons
try: return (True, self.services.export_url())
except: return (False, None)
def notify_liquidsoap_started(self):
try:
self.services.notify_liquidsoap_started()
except Exception, e:
self.logger.error(str(e))
    def notify_media_item_start_playing(self, media_id):
        """ This is a callback from liquidsoap; we use it to notify the server
about the currently playing *song*. We get passed a JSON string
which we handed to liquidsoap in get_liquidsoap_data(). """
try:
return self.services.update_start_playing_url(media_id=media_id)
except Exception, e:
self.logger.error(str(e))
return None
def get_shows_to_record(self):
try:
return self.services.show_schedule_url()
except Exception, e:
self.logger.error(str(e))
return None
def upload_recorded_show(self, data, headers):
logger = self.logger
response = ''
retries = int(self.config["upload_retries"])
retries_wait = int(self.config["upload_wait"])
url = self.construct_url("upload_file_url")
logger.debug(url)
for i in range(0, retries):
logger.debug("Upload attempt: %s", i + 1)
try:
request = urllib2.Request(url, data, headers)
                response = urllib2.urlopen(request, timeout=ApiRequest.API_HTTP_REQUEST_TIMEOUT).read().strip()
logger.info("uploaded show result %s", response)
break
except urllib2.HTTPError, e:
logger.error("Http error code: %s", e.code)
except urllib2.URLError, e:
logger.error("Server is down: %s", e.args)
except Exception, e:
logger.error("Exception: %s", e)
#wait some time before next retry
time.sleep(retries_wait)
return response
def check_live_stream_auth(self, username, password, dj_type):
try:
return self.services.check_live_stream_auth(
username=username, password=password, djtype=dj_type)
except Exception, e:
self.logger.error(str(e))
return {}
def construct_url(self,config_action_key):
"""Constructs the base url for every request"""
        # TODO : Make other methods in this class use this method.
if self.config["base_dir"].startswith("/"):
self.config["base_dir"] = self.config["base_dir"][1:]
url = "http://%s:%s/%s%s/%s" % \
(self.config["host"], str(self.config["base_port"]),
self.config["base_dir"], self.config["api_base"],
self.config[config_action_key])
url = url.replace("%%api_key%%", self.config["api_key"])
return url
"""
Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen
"""
def setup_media_monitor(self):
return self.services.media_setup_url()
def send_media_monitor_requests(self, action_list, dry=False):
"""
        Send a batch of media monitor events at a time. action_list is a
        list of dictionaries where each dictionary represents an
        action. Every action dict must contain a 'mode' key that says
        what kind of action it is, and an optional 'is_record' key that
        says whether the show was recorded or not. The value of that key
        does not matter; only its presence does.
"""
# We are assuming that action_list is a list of dictionaries such
# that every dictionary represents the metadata of a file along
# with a special mode key that is the action to be executed by the
# controller.
valid_actions = []
# We could get a list of valid_actions in a much shorter way using
# filter but here we prefer a little more verbosity to help
# debugging
for action in action_list:
if not 'mode' in action:
self.logger.debug("Warning: Trying to send a request element without a 'mode'")
                self.logger.debug("Here is the request: '%s'" % str(action))
else:
# We alias the value of is_record to true or false no
# matter what it is based on if it's absent in the action
if 'is_record' not in action:
action['is_record'] = 0
valid_actions.append(action)
# Note that we must prefix every key with: mdX where x is a number
# Is there a way to format the next line a little better? The
# parenthesis make the code almost unreadable
md_list = dict((("md%d" % i), json.dumps(convert_dict_value_to_utf8(md))) \
for i,md in enumerate(valid_actions))
# For testing we add the following "dry" parameter to tell the
# controller not to actually do any changes
if dry: md_list['dry'] = 1
self.logger.info("Pumping out %d requests..." % len(valid_actions))
return self.services.reload_metadata_group(_post_data=md_list)
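    # Illustrative sketch (not part of the original file): the shape of the
    # action_list documented above. `client` is an AirtimeApiClient and the
    # metadata keys besides 'mode'/'is_record' are placeholders.
    #
    #     client.send_media_monitor_requests([
    #         {'mode': 'create', 'filepath': '/srv/music/a.mp3'},
    #         {'mode': 'delete', 'filepath': '/srv/music/b.mp3', 'is_record': 1},
    #     ], dry=True)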
#returns a list of all db files for a given directory in JSON format:
#{"files":["path/to/file1", "path/to/file2"]}
#Note that these are relative paths to the given directory. The full
#path is not returned.
def list_all_db_files(self, dir_id, all_files=True):
logger = self.logger
try:
all_files = u"1" if all_files else u"0"
response = self.services.list_all_db_files(dir_id=dir_id,
all=all_files)
except Exception, e:
response = {}
logger.error("Exception: %s", e)
try:
return response["files"]
except KeyError:
self.logger.error("Could not find index 'files' in dictionary: %s",
str(response))
return []
"""
Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen
"""
def list_all_watched_dirs(self):
return self.services.list_all_watched_dirs()
"""
Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen
"""
def add_watched_dir(self, path):
return self.services.add_watched_dir(path=base64.b64encode(path))
"""
Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen
"""
def remove_watched_dir(self, path):
return self.services.remove_watched_dir(path=base64.b64encode(path))
"""
Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen
"""
def set_storage_dir(self, path):
return self.services.set_storage_dir(path=base64.b64encode(path))
"""
Caller of this method needs to catch any exceptions such as
ValueError thrown by json.loads or URLError by urllib2.urlopen
"""
def get_stream_setting(self):
return self.services.get_stream_setting()
def register_component(self, component):
""" Purpose of this method is to contact the server with a "Hey its
me!" message. This will allow the server to register the component's
(component = media-monitor, pypo etc.) ip address, and later use it
to query monit via monit's http service, or download log files via a
http server. """
return self.services.register_component(component=component)
def notify_liquidsoap_status(self, msg, stream_id, time):
logger = self.logger
try:
post_data = {"msg_post": msg}
#encoded_msg is no longer used server_side!!
encoded_msg = urllib.quote('dummy')
self.services.update_liquidsoap_status.req(post_data,
msg=encoded_msg,
stream_id=stream_id,
boot_time=time).retry(5)
except Exception, e:
#TODO
logger.error("Exception: %s", e)
def notify_source_status(self, sourcename, status):
try:
logger = self.logger
return self.services.update_source_status.req(sourcename=sourcename,
status=status).retry(5)
except Exception, e:
#TODO
logger.error("Exception: %s", e)
    def get_bootstrap_info(self):
        """ Retrieve information needed at bootstrap time """
return self.services.get_bootstrap_info()
def get_files_without_replay_gain_value(self, dir_id):
"""
Download a list of files that need to have their ReplayGain value
calculated. This list of files is downloaded into a file and the path
to this file is the return value.
"""
#http://localhost/api/get-files-without-replay-gain/dir_id/1
try:
return self.services.get_files_without_replay_gain(dir_id=dir_id)
except Exception, e:
self.logger.error(str(e))
return []
def get_files_without_silan_value(self):
"""
Download a list of files that need to have their cue in/out value
calculated. This list of files is downloaded into a file and the path
to this file is the return value.
"""
try:
return self.services.get_files_without_silan_value()
except Exception, e:
self.logger.error(str(e))
return []
def update_replay_gain_values(self, pairs):
"""
'pairs' is a list of pairs in (x, y), where x is the file's database
row id and y is the file's replay_gain value in dB
"""
self.logger.debug(self.services.update_replay_gain_value(
_post_data={'data': json.dumps(pairs)}))
def update_cue_values_by_silan(self, pairs):
"""
'pairs' is a list of pairs in (x, y), where x is the file's database
row id and y is the file's cue values in dB
"""
return self.services.update_cue_values_by_silan(_post_data={'data': json.dumps(pairs)})
def notify_webstream_data(self, data, media_id):
"""
Update the server with the latest metadata we've received from the
external webstream
"""
self.logger.info( self.services.notify_webstream_data.req(
_post_data={'data':data}, media_id=str(media_id)).retry(5))
def get_stream_parameters(self):
response = self.services.get_stream_parameters()
self.logger.debug(response)
return response
def push_stream_stats(self, data):
# TODO : users of this method should do their own error handling
response = self.services.push_stream_stats(_post_data={'data': json.dumps(data)})
return response
def update_stream_setting_table(self, data):
try:
response = self.services.update_stream_setting_table(_post_data={'data': json.dumps(data)})
return response
except Exception, e:
#TODO
self.logger.error(str(e))
class InvalidContentType(Exception):
pass
| 42.539179
| 159
| 0.636069
|
25ca759e3aa528ea35e9d6b2fffdc214ec98cee1
| 1,133
|
py
|
Python
|
tests/unit/test_calibration_timing.py
|
VMS19/Inhalator
|
77ff3f063efa48e825d1c5ef648203b2d70b753e
|
[
"MIT"
] | 9
|
2020-03-30T08:27:57.000Z
|
2020-04-11T12:37:28.000Z
|
tests/unit/test_calibration_timing.py
|
VMS19/Inhalator
|
77ff3f063efa48e825d1c5ef648203b2d70b753e
|
[
"MIT"
] | 145
|
2020-03-25T20:41:24.000Z
|
2020-04-15T17:39:10.000Z
|
tests/unit/test_calibration_timing.py
|
VMS19/Inhalator
|
77ff3f063efa48e825d1c5ef648203b2d70b753e
|
[
"MIT"
] | 4
|
2020-03-22T09:57:27.000Z
|
2020-04-15T18:10:48.000Z
|
import pytest
from unittest.mock import patch
from logic.auto_calibration import TailDetector, AutoFlowCalibrator
from drivers.driver_factory import DriverFactory
@pytest.fixture
def calibrator():
return AutoFlowCalibrator(
dp_driver=DriverFactory(True).differential_pressure,
interval_length=100,
iterations=4,
iteration_length=4,
sample_threshold=8.0,
slope_threshold=10.0,
min_tail_length=12,
grace_length=5,
)
@patch("logic.auto_calibration.TailDetector.process")
@patch("drivers.mocks.sensor.DifferentialPressureMockSensor.get_calibration_offset")
def test_calibration_timing_interval(mock_pressure, mock_tail, calibrator):
for i in range(1000):
calibrator.get_offset(None, i)
assert mock_pressure.call_count == 10
@patch("logic.auto_calibration.TailDetector.process")
@patch("drivers.mocks.sensor.DifferentialPressureMockSensor.get_calibration_offset")
def test_calibration_process_timing(mock_pressure, mock_tail, calibrator):
for i in range(1000):
calibrator.get_offset(None, i)
assert mock_tail.call_count == 40
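# Worked check (not part of the original tests; the exact scheduling inside
# AutoFlowCalibrator is an assumption): with interval_length=100, the 1000
# simulated timestamps span 10 calibration intervals, hence the expected 10
# calls to get_calibration_offset(); with iterations=4 per interval the tail
# detector is expected to run 4 * 10 = 40 times, matching the two assertions.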
| 30.621622
| 84
| 0.762577
|
1d13c72090cd337c6131080d1b17c30d975868ed
| 3,658
|
py
|
Python
|
examples/dtq/train_val_script.py
|
nasioutz/DeepHash
|
963ca74037f0694955571a178d2fb0bc380e9706
|
[
"MIT"
] | null | null | null |
examples/dtq/train_val_script.py
|
nasioutz/DeepHash
|
963ca74037f0694955571a178d2fb0bc380e9706
|
[
"MIT"
] | null | null | null |
examples/dtq/train_val_script.py
|
nasioutz/DeepHash
|
963ca74037f0694955571a178d2fb0bc380e9706
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.io as sio
import warnings
import data_provider.image as dataset
import model.dtq as model
from pprint import pprint
import os
import argparse
warnings.filterwarnings("ignore", category = DeprecationWarning)
warnings.filterwarnings("ignore", category = FutureWarning)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
parser = argparse.ArgumentParser(description='Triplet Hashing')
parser.add_argument('--lr', '--learning-rate', default=0.00003, type=float)
parser.add_argument('--triplet-margin', default=30, type=float)
parser.add_argument('--select-strategy', default='margin', choices=['hard', 'all', 'margin'])
parser.add_argument('--output-dim', default=64, type=int) # 256, 128
parser.add_argument('--epochs', default=100, type=int)
parser.add_argument('--cq-lambda', default=0, type=float)
parser.add_argument('--subspace', default=4, type=int)
parser.add_argument('--subcenter', default=256, type=int)
parser.add_argument('--dataset', default='cifar10', type=str)
parser.add_argument('--gpus', default='0', type=str)
parser.add_argument('--log-dir', default='tflog', type=str)
parser.add_argument('--dist-type', default='euclidean2', type=str,
choices=['euclidean2', 'cosine', 'inner_product', 'euclidean'])
parser.add_argument('-b', '--batch-size', default=128, type=int)
parser.add_argument('-vb', '--val-batch-size', default=16, type=int)
parser.add_argument('--decay-step', default=10000, type=int)
parser.add_argument('--decay-factor', default=0.1, type=int)
tanh_parser = parser.add_mutually_exclusive_group(required=False)
tanh_parser.add_argument('--with-tanh', dest='with_tanh', action='store_true')
tanh_parser.add_argument('--without-tanh', dest='with_tanh', action='store_false')
parser.set_defaults(with_tanh=True)
parser.add_argument('--img-model', default='alexnet', type=str)
parser.add_argument('--model-weights', type=str,
default='../../deephash/architecture/pretrained_model/reference_pretrain.npy')
parser.add_argument('--finetune-all', default=True, type=bool)
parser.add_argument('--max-iter-update-b', default=3, type=int)
parser.add_argument('--max-iter-update-Cb', default=1, type=int)
parser.add_argument('--code-batch-size', default=500, type=int)
parser.add_argument('--n-part', default=20, type=int)
parser.add_argument('--triplet-thresold', default=64000, type=int)
parser.add_argument('--save-dir', default="./models/", type=str)
parser.add_argument('--data-dir', default="~/data/", type=str)
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true')
parser.add_argument('--val-freq', default=1, type=int)
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
label_dims = {'cifar10': 10, 'nuswide_81': 81, 'coco': 80, 'imagenet': 100}
Rs = {'cifar10': 54000, 'nuswide_81': 5000, 'coco': 5000, 'imagenet': 5000}
args.R = Rs[args.dataset]
args.label_dim = label_dims[args.dataset]
args.img_tr = os.path.join(args.data_dir, args.dataset, "train.txt")
args.img_te = os.path.join(args.data_dir, args.dataset, "test.txt")
args.img_db = os.path.join(args.data_dir, args.dataset, "database.txt")
pprint(vars(args))
data_root = os.path.join(args.data_dir, args.dataset)
query_img, database_img = dataset.import_validation(data_root, args.img_te, args.img_db)
if not args.evaluate:
train_img = dataset.import_train(data_root, args.img_tr)
model_weights = model.train(train_img, database_img, query_img, args)
args.model_weights = model_weights
else:
maps = model.validation(database_img, query_img, args)
for key in maps:
print(("{}\t{}".format(key, maps[key])))
pprint(vars(args))
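# Illustrative sketch (not part of the original script): typical invocations.
# Paths and hyper-parameter values are placeholders.
#
#     python train_val_script.py --dataset cifar10 --gpus 0 \
#         --data-dir ~/data/ --output-dim 64 --epochs 100
#
#     python train_val_script.py --evaluate --dataset cifar10 \
#         --model-weights ./models/model.npy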
| 45.725
| 98
| 0.734008
|
ea0a0c27eef1ca8e9724fe9a048f73541a89de42
| 819
|
py
|
Python
|
tests/integration/pillar/test_pillar_include.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 19
|
2016-01-29T14:37:52.000Z
|
2022-03-30T18:08:01.000Z
|
tests/integration/pillar/test_pillar_include.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 223
|
2016-03-02T16:39:41.000Z
|
2022-03-03T12:26:35.000Z
|
tests/integration/pillar/test_pillar_include.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 64
|
2016-02-04T19:45:26.000Z
|
2021-12-15T02:02:31.000Z
|
# -*- coding: utf-8 -*-
"""
Pillar include tests
"""
from __future__ import absolute_import, unicode_literals
from tests.support.case import ModuleCase
class PillarIncludeTest(ModuleCase):
def test_pillar_include(self):
"""
Test pillar include
"""
ret = self.minion_run("pillar.items")
assert "a" in ret["element"]
assert ret["element"]["a"] == {"a": ["Entry A"]}
assert "b" in ret["element"]
assert ret["element"]["b"] == {"b": ["Entry B"]}
def test_pillar_glob_include(self):
"""
Test pillar include via glob pattern
"""
ret = self.minion_run("pillar.items")
assert "glob-a" in ret
assert ret["glob-a"] == ["Entry A"]
assert "glob-b" in ret
assert ret["glob-b"] == ["Entry B"]
| 27.3
| 56
| 0.568987
|
cc4441a8e3f2dfa7a912100959f28d7d824ba958
| 4,301
|
py
|
Python
|
ant.py
|
hpharmsen/antology
|
080ae615d4d59a9df5abfdf5720c5b58d6aa66db
|
[
"Unlicense"
] | null | null | null |
ant.py
|
hpharmsen/antology
|
080ae615d4d59a9df5abfdf5720c5b58d6aa66db
|
[
"Unlicense"
] | null | null | null |
ant.py
|
hpharmsen/antology
|
080ae615d4d59a9df5abfdf5720c5b58d6aa66db
|
[
"Unlicense"
] | null | null | null |
import random
from arcade import Sprite, load_texture, check_for_collision_with_list
from activities import explore, backtrack, follow_the_food, find_the_food
from path import Path
class Ant(Sprite):
def __init__(self, x, y, arena, colony, scale=1, activity="wander"):
super().__init__(center_x=x, center_y=y, scale=scale)
self.arena = arena
self.colony = colony
self.speed = 1
self.textures = {
"black": load_texture("graphics/ant_black.png"),
"green": load_texture("graphics/ant_green.png"),
"red": load_texture("graphics/ant_red.png"),
"blue": load_texture("graphics/ant_blue.png"),
"black_green": load_texture("graphics/ant_black_green.png"),
}
self.set_activity(explore)
self.back_track_path = Path((x, y))
        self.food_search_timer = 0  # Used to get a limited number of turns to find food at the end of a promising path
def move(self):
if self.activity in (explore, find_the_food):
# Ant is exploring the environment in search of food
explore(self)
if check_for_collision_with_list(self, self.arena.wall_list):
# Hit a wall, backup
backtrack(self)
food_list = check_for_collision_with_list(self, self.arena.food_list)
if food_list:
# Food found! Take it and back to the colony
self.arena.food_list.remove(food_list[0])
# assert self.back_track_path.is_valid()
self.colony.found_food(self.back_track_path)
self.set_activity(backtrack)
self.food_search_timer = 0
elif self.food_search_timer:
# Ant followed the path to food and is now at the end of it. Where is it?
self.food_search_timer -= 1
if not self.food_search_timer:
# Searched at the end of the path but no food in sight. Report and continue exploring
# assert self.path_to_food.is_valid()
self.colony.no_food_at(self.path_to_food)
self.set_activity(explore)
elif random.random() < 0.001:
self.set_activity(backtrack)
self.texture = self.textures["black_green"]
elif self.activity == backtrack:
            # Ant has found food and is tracing back its steps to the colony
if not backtrack(self):
# No more backtracking left. We're back at the colony.
self.colony.deliver_food()
self.path_to_food = self.colony.get_path_to_follow()
if self.path_to_food:
# assert self.path_to_food.is_valid()
# Colony has instructed this ant to follow a path to food
self.set_activity(follow_the_food)
else:
# Colony has instructed this ant to go and find food
self.set_activity(explore)
elif self.activity == follow_the_food:
# Ant is following a path to where food should be
if not follow_the_food(self):
# End of the path, explore and get 10 turns to find the food
self.back_track_path = self.path_to_food.reverse()
                # assert self.back_track_path.is_valid()
self.food_search_timer = 10
self.set_activity(explore)
self.texture = self.textures["blue"]
self.update()
def set_activity(self, activity):
self.activity = activity
self.texture = self.textures[self.activity.color]
# if activity == explore:
# self.texture = self.textures['black']
# else:
# self.texture = self.textures['green']
def move_to(self, coo):
dx = coo[0] - self.center_x
dy = coo[1] - self.center_y
if dx < 0:
self.angle = 90
elif dx > 0:
self.angle = 270
elif dy > 0:
self.angle = 0
else:
self.angle = 180
self.speed = abs(dx) + abs(dy)
self.center_x = coo[0]
self.center_y = coo[1]
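# Illustrative sketch (not part of the original module): how move_to() turns a
# single-cell step into an orientation. Coordinates are made up; only the
# dx/dy branching comes from the code above.
#
#     ant.center_x, ant.center_y = 10, 10
#     ant.move_to((9, 10))    # dx < 0 -> angle 90
#     ant.move_to((9, 11))    # dx == 0, dy > 0 -> angle 0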
| 41.757282
| 113
| 0.57847
|
12652e3dccdc8638fffda78afd982ac66354304a
| 19,392
|
py
|
Python
|
aqc_utils/db_functions_julosdu13.py
|
truejulosdu13/NiCOlit
|
879c155e2bd017da4f8d89d4bfd24196039944c4
|
[
"MIT"
] | 2
|
2022-03-25T15:24:44.000Z
|
2022-03-25T15:24:54.000Z
|
aqc_utils/db_functions_julosdu13.py
|
truejulosdu13/NiCOlit
|
879c155e2bd017da4f8d89d4bfd24196039944c4
|
[
"MIT"
] | null | null | null |
aqc_utils/db_functions_julosdu13.py
|
truejulosdu13/NiCOlit
|
879c155e2bd017da4f8d89d4bfd24196039944c4
|
[
"MIT"
] | null | null | null |
import logging
try:
from openbabel import pybel # ob 3.0.0
except ImportError: # ob 2.4
import pybel
import re
import numpy as np
import pandas as pd
import pymongo
from bson.objectid import ObjectId
from rdkit import Chem
from rdkit.Chem import rdFMCS
from aqc_utils.helper_classes import config
from aqc_utils.helper_functions import add_numbers_to_repeated_items
logger = logging.getLogger(__name__)
desc_presets = ['global', 'min_max', 'substructure', 'core', 'labeled', 'transitions']
desc_presets_long = ['Global', 'Min Max Atomic', 'Substructure Atomic', 'Common Core Atomic', 'Labeled Atomic',
"Excited State Transitions"]
conf_options = ['boltzmann', 'max', 'min', 'mean', 'std', 'any']
conf_options_long = ['Boltzman Average', 'Lowest Energy Conformer', 'Highest Energy Conformer', 'Arithmetic Average',
'Standard Deviation', 'Random']
class InconsistentLabelsException(Exception):
"""Raised when a set of molecules is inconsistently labeled"""
pass
def db_connect(collection=None) -> pymongo.collection.Collection:
    """Create a connection to the database and return the named collection,
    or the database itself if no collection name is given.
    :return: pymongo.collection.Collection
"""
cli = pymongo.MongoClient("mongodb+srv://julosdu13:yPbcgFmN6Uji9Gt@cluster0.f4bxg.mongodb.net/Cluster0?retryWrites=true&w=majority")
#db = client.test
# cli = pymongo.MongoClient(config['mongoDB']['host'],
# username=config['mongoDB']['user'],
# password=config['mongoDB']['password'],
# port=config['mongoDB']['port'])
if collection is None:
return cli['dft_for_nicoupling']
else:
return cli['dft_for_nicoupling'][collection]
def db_upload_molecule(can, tags, metadata, weights, conformations, logs) -> ObjectId:
    """Upload a single molecule to the DB along with all of its child objects: tags,
    features and log files for its conformations"""
db = db_connect()
mols_coll = db['molecules']
tags_coll = db['tags']
# create molecule record and insert it
mol_data = {'can': can, 'metadata': metadata}
# try/except added by julosdu13
try:
ret = mols_coll.insert_one(mol_data)
mol_id = ret.inserted_id
# insert tag record
for tag in tags:
tags_coll.insert_one({'tag': tag, 'molecule_id': mol_id, 'can': can})
for weight, conformation, log in zip(weights, conformations, logs):
db_upload_conformation(mol_id, can, weight, conformation, log, check_mol_exists=False)
except:
mol_id = None
return mol_id
def db_upload_conformation(mol_id, can, weight, conformation, log, check_mol_exists=True):
    """Upload a single conformation's features and log file to the DB; requires the
    molecule to already be present"""
db = db_connect()
# check if the molecule with a given id exists in the DB
mols_coll = db["molecules"]
if check_mol_exists:
assert mols_coll.find_one({'_id': mol_id}) is not None
# connect to features and logs collections
feats_coll = db['qchem_descriptors']
logs_coll = db['log_files']
data = {'molecule_id': mol_id, 'weight': weight, 'can': can}
# update with descriptors
data.update(conformation)
# db insertion
feats_coll.insert_one(data)
logs_coll.insert_one({'molecule_id': mol_id, 'log': log, 'can': can})
def db_delete_molecule(mol_id):
"""Delete molecule from DB, cascade all child objects: tags, features and log files"""
db = db_connect()
if isinstance(mol_id, str):
mol_id = ObjectId(mol_id)
print(mol_id)
db['qchem_descriptors'].delete_many({"molecule_id": mol_id}) # features
db['log_files'].delete_many({"molecule_id": mol_id}) # log files
db['tags'].delete_many({"molecule_id": mol_id}) # tags
db['molecules'].delete_one({"_id": mol_id}) # molecule itself
def db_select_molecules(cls=None, subcls=None, type=None, subtype=None, tags=[], substructure="") -> pd.DataFrame:
"""Get a summary frame of molecules in the database
:param tags: a list of tags of the db records (if multiple an 'OR' is taken)
:type tags: list
:param substructure: substructure SMARTS string
:type substructure: str
:return: pandas.core.frame.DataFrame
"""
db = db_connect()
tags_coll = db['tags']
mols_coll = db['molecules']
feats_coll = db['qchem_descriptors']
tags_cur = tags_coll.find({'tag': {'$in': tags}} if tags else {})
tags_df = pd.DataFrame(tags_cur)
filter = {}
if cls != "" and cls is not None:
filter['metadata.class'] = cls
if subcls != "" and subcls is not None:
filter['metadata.subclass'] = subcls
if type != "" and type is not None:
filter['metadata.type'] = type
if subtype != "" and subtype is not None:
filter['metadata.subtype'] = subtype
filter['_id'] = {'$in': tags_df.molecule_id.tolist()}
mols_cur = mols_coll.find(filter)
mols_df = pd.DataFrame(mols_cur)
if 'name' not in mols_df.columns:
mols_df['name'] = None
if substructure:
pattern = pybel.Smarts(substructure)
mols_df['pybel_mol'] = mols_df['can'].map(lambda can: pybel.readstring("smi", can))
mols_df = mols_df[mols_df['pybel_mol'].map(lambda mol: bool(pattern.findall(mol)))]
mols_df = mols_df.drop('pybel_mol', axis=1)
# merge tags in an outer way
df = pd.merge(mols_df, tags_df, how='outer', left_on='_id', right_on='molecule_id', suffixes=('', '_tag'))
# make tags into a list of tags
df['metadata_str'] = df['metadata'].map(repr)
grouped = df.groupby(['can', 'metadata_str'])
# groupby tags
df = pd.concat([grouped['metadata', 'molecule_id', 'name'].first(),
grouped['tag'].apply(list)], axis=1).reset_index().drop('metadata_str', axis=1)
# fetch ids and weights
feats_cur = feats_coll.find({'molecule_id': {'$in': df.molecule_id.tolist()}},
{'_id': 1, 'weight': 1, 'molecule_id': 1})
feats_df = pd.DataFrame(feats_cur)
feats_df = feats_df.groupby('molecule_id').agg(list).reset_index()
feats_df = feats_df.rename(columns={'_id': '_ids', 'weight': 'weights'})
# merge into df
df = df.merge(feats_df, on='molecule_id')
df['num_conformers'] = df['_ids'].map(len)
return df
def db_check_exists(can, gaussian_config, max_num_conformers) -> tuple:
"""Check if a molecule is already present in the database with the same Gaussian config (theory, basis_sets, etc.)
:param can: canonical smiles
:type can: str
:param gaussian_config: gaussian config dictionary
:type gaussian_config: dict
:return: exists(bool), list of tags that are associated with the molecule if it exists
"""
db = db_connect()
mols_coll = db['molecules']
tags_coll = db['tags']
mol_id = mols_coll.find_one({"can": can,
"metadata.gaussian_config": gaussian_config,
"metadata.max_num_conformers": max_num_conformers},
{"molecule_id": 1})
exists, tags = False, []
if mol_id is not None:
exists = True
tags = tags_coll.distinct('tag', {'molecule_id': ObjectId(mol_id['_id'])})
return exists, tags
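# Illustrative sketch (not part of the original module): guarding an upload
# with db_check_exists(). The gaussian_config keys are placeholders and must
# match whatever was stored in the molecule metadata.
#
#     exists, tags = db_check_exists(can='CCO',
#                                    gaussian_config={'theory': 'B3LYP'},
#                                    max_num_conformers=5)
#     if not exists:
#         pass  # run the DFT workflow and then db_upload_molecule(...)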
def descriptors(cls, subcls, type, subtype, tags, presets, conf_option, substructure="") -> dict:
"""Retrieve DFT descriptors from the database
:param tag: metadata.tag of the db records
:type tag: str
:param presets: list of descriptor presets from 'global' (molecule level descriptors), \
'min_max' (min and max for each atomic descriptor across the molecule), 'substructure' \
(atomic descriptors for each atom in the substructure)
:type presets: list
:param conf_option: conformer averaging option: 'boltzmann' (Boltzmann average), \
'max' (conformer with highest weight), 'mean' (arithmetic average), 'min' (conformer with smallest weight), \
'any' (any single conformer), 'std' (std dev. over conformers)
:type conf_option: str
:param substructure: substructure SMARTS string
:type substructure: str
:return:
"""
# don't bother with extraction if there are not presets nor conf_option
if not presets or not conf_option:
logger.warning(f"One of options 'presets' or 'conf_option' is empty. Not extracting.")
return {}
# check that presets are ok
if not all(p in desc_presets for p in presets):
logger.warning(f"One of the presets in {presets} is not from allowed list {desc_presets}. Not extracting.")
return {}
# check that conf option is ok
if conf_option not in conf_options:
logger.warning(f"Conf_option {conf_option} is not one of the allowed options {conf_options}. Not extracting.")
return {}
mol_df = db_select_molecules(cls=cls, subcls=subcls, type=type, subtype=subtype,
tags=tags, substructure=substructure)
# TODO making DB queries inside a loop is very inefficient, this code should be reorganized to use single query
descs_df = mol_df.set_index('can')['_ids'].map(lambda l: descriptors_from_list_of_ids(l, conf_option=conf_option))
data = {}
if 'global' in presets:
dg = pd.concat([d['descriptors'] for can, d in descs_df.iteritems()], axis=1, sort=True)
dg.columns = descs_df.index
data['global'] = dg.T
if 'min_max' in presets:
dmin = pd.concat([d['atom_descriptors'].min() for can, d in descs_df.iteritems()], axis=1, sort=True)
dmax = pd.concat([d['atom_descriptors'].max() for can, d in descs_df.iteritems()], axis=1, sort=True)
dmin.columns = descs_df.index
dmax.columns = descs_df.index
data['min'] = dmin.T
data['max'] = dmax.T
if 'transitions' in presets:
# select top 3 transitions by oscillation strength
ts = pd.concat([d['transitions'].sort_values("ES_osc_strength",
ascending=False).head(10).reset_index(drop=True).unstack()
for can, d in descs_df.iteritems()], axis=1, sort=True)
ts.index = ts.index.map(lambda i: "_".join(map(str, i)))
ts.columns = descs_df.index
data['transitions'] = ts.T
if 'substructure' in presets and substructure:
sub = pybel.Smarts(substructure)
# these matches are numbered from 1, so subtract one from them
matches = descs_df.index.map(lambda c: sub.findall(pybel.readstring("smi", c))[0])
matches = matches.map(lambda x: (np.array(x) - 1).tolist())
# fetch atom labels for this smarts using the first molecule
sub_labels = pd.Series(descs_df.iloc[0]['labels']).loc[matches[0]].tolist()
sub_labels = add_numbers_to_repeated_items(sub_labels)
sub_labels = [f"atom{i + 1}" for i in range(len(matches[0]))]
# create a frame with descriptors large structure in one column, and substructure match
# indices in the second column
tmp_df = descs_df.to_frame('descs')
tmp_df['matches'] = matches
for i, label in enumerate(sub_labels):
# data[label] = pd.concat([row['descs']['atom_descriptors'].loc[row['matches'][i]]
# for c, row in tmp_df.iterrows()], axis=1)
to_concat = []
for c, row in tmp_df.iterrows():
atom_descs = row['descs']['atom_descriptors']
atom_descs['labels'] = row['descs']['labels']
to_concat.append(atom_descs.iloc[row['matches'][i]])
data[label] = pd.concat(to_concat, axis=1, sort=True)
data[label].columns = descs_df.index
data[label] = data[label].T
if 'core' in presets:
cans = mol_df['can'].tolist()
rd_mols = {can: Chem.MolFromSmiles(can) for can in cans}
        # occasionally rdkit cannot create a molecule from a can string that openbabel can,
        # typically because of dative bonds
for can, rd_mol in rd_mols.items():
if rd_mol is None:
logger.warning(f"Molecule with can: {can} cannot be constructed directly by rdkit.")
rd_mols[can] = Chem.MolFromSmarts(can) # create it from smarts
# run MCS if there is more than 1 molecule
if len(rd_mols) > 1:
core_smarts = rdFMCS.FindMCS(list(rd_mols.values())).smartsString
else: # otherwise use the entire smiles as smarts string
core_smarts = Chem.MolToSmarts(list(rd_mols.values())[0])
# create an rdkit smarts
core = Chem.MolFromSmarts(core_smarts)
# get the first match, if multiple substructure matches exist
matches = {can: rd_mols[can].GetSubstructMatches(core)[0] for can in cans}
matches = pd.Series(matches).map(list)
# create a frame with descriptors large structure in one column, and substructure match
# indices in the second column
tmp_df = descs_df.to_frame('descs')
tmp_df['matches'] = matches
# fetch atom labels for this smarts using the first molecule
row = tmp_df.iloc[0]
row_labels = pd.Series(row['descs']['labels'])
row_labels = row_labels[~row_labels.str.startswith('H')] # need to remove hydrogens
sub_labels = row_labels.iloc[row['matches']].tolist()
sub_labels = add_numbers_to_repeated_items(sub_labels)
for i, label in enumerate(sub_labels):
to_concat = []
for c, row in tmp_df.iterrows():
atom_descs = row['descs']['atom_descriptors']
atom_descs['labels'] = row['descs']['labels']
atom_descs = atom_descs[~atom_descs['labels'].str.startswith("H")] # need to remove hydrogens
to_concat.append(atom_descs.iloc[row['matches'][i]])
data[label] = pd.concat(to_concat, axis=1, sort=True)
data[label].columns = descs_df.index
data[label] = data[label].T
if 'labeled' in presets:
# extract the positions of the labeled atoms in the atom lists for each molecule
        labels = descs_df.map(lambda d: [re.sub(r"\D", "", l) for l in d['labels']])
labels = labels.map(lambda ls: [(index, l) for index, l in enumerate(ls) if l])
labels = labels.map(lambda ls: sorted(ls, key=lambda l: l[1]))
# verify that the atomic labels are consistent across all molecules
atom_numbers = labels.map(lambda ls: [l[1] for l in ls])
atom_numbers_dedup = atom_numbers.map(tuple).drop_duplicates()
if len(atom_numbers_dedup) == 1:
matches = labels.map(lambda ls: [l[0] for l in ls])
# create a frame with descriptors large structure in one column, and substructure match
# indices in the second column
tmp_df = descs_df.to_frame('descs')
tmp_df['matches'] = matches
for i, label in enumerate(atom_numbers_dedup.iloc[0]):
label = 'A' + label
data[label] = pd.concat([row['descs']['atom_descriptors'].loc[row['matches'][i]]
for c, row in tmp_df.iterrows()], axis=1, sort=True)
data[label].columns = descs_df.index
data[label] = data[label].T
else:
logger.warning("Atomic labels are inconsistent. Not all molecules have the same set of labeled atoms")
raise InconsistentLabelsException
return data
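# Illustrative sketch (not part of the original module): a typical call. Tag
# names are placeholders; the returned dict is keyed by preset (and by atom
# label for the atomic presets).
#
#     data = descriptors(cls=None, subcls=None, type=None, subtype=None,
#                        tags=['my_experiment'],
#                        presets=['global', 'min_max'],
#                        conf_option='boltzmann')
#     data['global']   # molecule-level descriptors, one row per molecule
#     data['min']      # per-molecule minimum of each atomic descriptor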
def descriptors_from_list_of_ids(ids, conf_option='max') -> dict:
"""Get and average descriptors using a list of db ids.
:param ids: list of db id's that correspond to a given molecule
:type ids: list
:param conf_option: conformer averaging option: 'boltzmann' (Boltzmann average), \
'max' (conformer with highest weight), 'mean' (arithmetic average), 'min' (conformer with smallest weight), \
'any' (any single conformer), 'std' (std dev. over conformers)
:type conf_option: str
:return: dict
"""
# check that conf option is ok
if conf_option not in conf_options:
logger.warning(f"Conf_option {conf_option} is not one of the allowed options {conf_options}. Not extracting.")
return {}
# connect to db
feats_coll = db_connect("qchem_descriptors")
# fetch db _ids and weights and can
cursor = feats_coll.find({"_id": {"$in": ids}}, {'weight': 1, 'molecule_id': 1})
recs = pd.DataFrame(cursor).sort_values('weight', ascending=False)
# assert that all ids come from the same can, and that weights sum to 1.
assert len(recs.molecule_id.unique()) == 1
assert abs(recs.weight.sum() - 1.) < 1e-6
# set trivial option for the case with only one conformation
if len(recs) == 1:
conf_option = 'any'
# single conf options
if conf_option in ['min', 'max', 'any']:
if conf_option == 'max':
_id = recs['_id'].iloc[0]
elif conf_option == 'min':
_id = recs['_id'].iloc[-1]
else:
_id = recs['_id'].sample(1).iloc[0]
# return pandatized record for a chosen id
return _pandatize_record(feats_coll.find_one({"_id": _id}))
rec = {}
if conf_option in ['boltzmann', 'mean', 'std']:
# fetch db records for these _ids
cursor = feats_coll.find({"_id": {"$in": ids}})
recs = [_pandatize_record(record) for record in cursor]
rec.update({"labels": recs[0]['labels']})
keys_to_reweight = ['descriptors', 'atom_descriptors', 'modes', 'transitions']
for key in keys_to_reweight:
if conf_option == 'boltzmann':
dfs = pd.concat(r[key] * r['weight'] for r in recs)
rec[key] = dfs.groupby(dfs.index, sort=False).sum()
if conf_option in ['mean', 'std']:
dfs = pd.concat(r[key] for r in recs)
if conf_option == 'mean':
rec[key] = dfs.groupby(dfs.index, sort=False).mean()
elif conf_option == 'std':
rec[key] = dfs.groupby(dfs.index, sort=False).std()
return rec
def _pandatize_record(record) -> dict:
"""Convert json structures to pandas structures for an individual
db record of a single conformation.
:param record: db record of a single conformation
:return: dict
"""
del record['descriptors']['stoichiometry']
record['descriptors'] = pd.Series(record['descriptors']).astype(float)
record['modes'] = pd.DataFrame(record['modes']).astype(float)
record['transitions'] = pd.DataFrame(record['transitions']).astype(float)
record['atom_descriptors'] = pd.DataFrame(record['atom_descriptors']).astype(float)
if record['mode_vectors'] is not None:
record['mode_vectors'] = pd.DataFrame(record['mode_vectors'])
record['mode_vectors']['atom_idx'] = list(range(len(record['labels']))) * 3 * record['modes'].shape[0]
record['mode_vectors'] = record['mode_vectors'].set_index(['mode_number', 'axis', 'atom_idx']).unstack(
['mode_number', 'axis'])
record['mode_vectors'] = record['mode_vectors'].droplevel(0, axis=1).astype(float)
else:
record['mode_vectors'] = pd.DataFrame()
return record
| 41.524625
| 136
| 0.633973
|
7fc62de5bf23c14cb081b4a988a02155c653bf4b
| 258
|
py
|
Python
|
cata/__meta__.py
|
iandennismiller/cata
|
d647a1199125f66cf69412e91fef1f3f0706483b
|
[
"MIT"
] | null | null | null |
cata/__meta__.py
|
iandennismiller/cata
|
d647a1199125f66cf69412e91fef1f3f0706483b
|
[
"MIT"
] | null | null | null |
cata/__meta__.py
|
iandennismiller/cata
|
d647a1199125f66cf69412e91fef1f3f0706483b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# catalog (c) Ian Dennis Miller
__project__ = 'catalog'
__version__ = '0.1.0'
__author__ = 'Ian Dennis Miller'
__email__ = 'ian@iandennismiller.com'
__url__ = 'https://catalog-data.readthedocs.io'
__copyright__ = 'Ian Dennis Miller'
| 25.8
| 47
| 0.713178
|
7cf083ae91e90f3ddf6fda4033e93d97f00b2689
| 1,218
|
py
|
Python
|
services/arlington/groups/migrations/0001_initial.py
|
ShadowServants/ctfcup2018
|
561ad0f40554afdbe2823528037abf7191c3db21
|
[
"WTFPL"
] | 4
|
2018-12-03T12:51:37.000Z
|
2019-04-08T10:14:02.000Z
|
services/arlington/groups/migrations/0001_initial.py
|
ctfcup/2018-attack-defense
|
561ad0f40554afdbe2823528037abf7191c3db21
|
[
"WTFPL"
] | 3
|
2020-02-11T23:29:21.000Z
|
2021-06-10T21:01:47.000Z
|
services/arlington/groups/migrations/0001_initial.py
|
ctfcup/2018-attack-defense
|
561ad0f40554afdbe2823528037abf7191c3db21
|
[
"WTFPL"
] | 1
|
2019-11-29T15:24:20.000Z
|
2019-11-29T15:24:20.000Z
|
# Generated by Django 2.1.3 on 2018-11-13 01:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('text', models.TextField()),
('rendered_file', models.FileField(null=True, upload_to='rendered_docs/')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.Group')),
],
),
migrations.CreateModel(
name='InviteCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=16)),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.Group')),
],
),
]
| 34.8
| 114
| 0.587028
|
fa09b1c0388426624eeb818266c013347e10a970
| 4,208
|
py
|
Python
|
setup.py
|
davidroberson/cutadapt
|
c15054978d35a14a9587caca81c204dc37663eb9
|
[
"MIT"
] | 1
|
2018-12-12T10:31:51.000Z
|
2018-12-12T10:31:51.000Z
|
setup.py
|
davidroberson/cutadapt
|
c15054978d35a14a9587caca81c204dc37663eb9
|
[
"MIT"
] | null | null | null |
setup.py
|
davidroberson/cutadapt
|
c15054978d35a14a9587caca81c204dc37663eb9
|
[
"MIT"
] | null | null | null |
"""
Build cutadapt.
"""
import sys
import os.path
from setuptools import setup, Extension
from distutils.version import LooseVersion
from distutils.command.sdist import sdist as _sdist
from distutils.command.build_ext import build_ext as _build_ext
import versioneer
MIN_CYTHON_VERSION = '0.24'
if sys.version_info < (2, 6):
sys.stdout.write("At least Python 2.6 is required.\n")
sys.exit(1)
def out_of_date(extensions):
"""
Check whether any pyx source is newer than the corresponding generated
C source or whether any C source is missing.
"""
for extension in extensions:
for pyx in extension.sources:
path, ext = os.path.splitext(pyx)
if ext not in ('.pyx', '.py'):
continue
if extension.language == 'c++':
csource = path + '.cpp'
else:
csource = path + '.c'
# When comparing modification times, allow five seconds slack:
# If the installation is being run from pip, modification
            # times are not preserved and therefore depend on the order in
# which files were unpacked.
if not os.path.exists(csource) or (
os.path.getmtime(pyx) > os.path.getmtime(csource) + 5):
return True
return False
def no_cythonize(extensions, **_ignore):
"""
Change file extensions from .pyx to .c or .cpp.
Copied from Cython documentation
"""
for extension in extensions:
sources = []
for sfile in extension.sources:
path, ext = os.path.splitext(sfile)
if ext in ('.pyx', '.py'):
if extension.language == 'c++':
ext = '.cpp'
else:
ext = '.c'
sfile = path + ext
sources.append(sfile)
extension.sources[:] = sources
def check_cython_version():
"""Exit if Cython was not found or is too old"""
try:
from Cython import __version__ as cyversion
except ImportError:
sys.stdout.write(
"ERROR: Cython is not installed. Install at least Cython version " +
str(MIN_CYTHON_VERSION) + " to continue.\n")
sys.exit(1)
if LooseVersion(cyversion) < LooseVersion(MIN_CYTHON_VERSION):
sys.stdout.write(
"ERROR: Your Cython is at version '" + str(cyversion) +
"', but at least version " + str(MIN_CYTHON_VERSION) + " is required.\n")
sys.exit(1)
extensions = [
Extension('cutadapt._align', sources=['cutadapt/_align.pyx']),
Extension('cutadapt._qualtrim', sources=['cutadapt/_qualtrim.pyx']),
Extension('cutadapt._seqio', sources=['cutadapt/_seqio.pyx']),
]
cmdclass = versioneer.get_cmdclass()
versioneer_build_ext = cmdclass.get('build_ext', _build_ext)
versioneer_sdist = cmdclass.get('sdist', _sdist)
class build_ext(versioneer_build_ext):
def run(self):
# If we encounter a PKG-INFO file, then this is likely a .tar.gz/.zip
# file retrieved from PyPI that already includes the pre-cythonized
# extension modules, and then we do not need to run cythonize().
if os.path.exists('PKG-INFO'):
no_cythonize(extensions)
else:
# Otherwise, this is a 'developer copy' of the code, and then the
# only sensible thing is to require Cython to be installed.
check_cython_version()
from Cython.Build import cythonize
self.extensions = cythonize(self.extensions)
versioneer_build_ext.run(self)
class sdist(versioneer_sdist):
def run(self):
# Make sure the compiled Cython files in the distribution are up-to-date
from Cython.Build import cythonize
check_cython_version()
cythonize(extensions)
versioneer_sdist.run(self)
cmdclass['build_ext'] = build_ext
cmdclass['sdist'] = sdist
setup(
name = 'cutadapt',
version = versioneer.get_version(),
author = 'Marcel Martin',
author_email = 'marcel.martin@scilifelab.se',
url = 'https://cutadapt.readthedocs.io/',
description = 'trim adapters from high-throughput sequencing reads',
license = 'MIT',
cmdclass = cmdclass,
ext_modules = extensions,
packages = ['cutadapt', 'cutadapt.scripts'],
scripts = ['bin/cutadapt'],
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Cython",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Bio-Informatics"
]
)
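The custom build_ext and sdist classes above switch between running Cython and reusing pre-generated C sources depending on whether a PKG-INFO file is present. Below is a minimal sketch of what no_cythonize() does to an extension's source list, assuming the function defined in this setup.py is in scope; it is illustrative only and not part of the original file:
from setuptools import Extension
def demo_no_cythonize():
    # no_cythonize() rewrites .pyx/.py sources in place to the generated .c
    # (or .cpp for C++ extensions) so end users do not need Cython installed.
    ext = Extension('cutadapt._align', sources=['cutadapt/_align.pyx'])
    no_cythonize([ext])
    assert ext.sources == ['cutadapt/_align.c']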
| 29.426573
| 76
| 0.710789
|
5321a92aa3f211d2a9e219aaf45dbc61de418d49
| 4,771
|
py
|
Python
|
nova/virt/xenapi/image/bittorrent.py
|
zaina/nova
|
181358c172d606b23c9cc14b58d677d911013c02
|
[
"Apache-2.0"
] | null | null | null |
nova/virt/xenapi/image/bittorrent.py
|
zaina/nova
|
181358c172d606b23c9cc14b58d677d911013c02
|
[
"Apache-2.0"
] | 1
|
2019-01-02T01:30:35.000Z
|
2019-01-02T01:38:02.000Z
|
nova/virt/xenapi/image/bittorrent.py
|
zaina/nova
|
181358c172d606b23c9cc14b58d677d911013c02
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import six.moves.urllib.parse as urlparse
from nova.i18n import _, _LW
from nova.virt.xenapi import vm_utils
LOG = logging.getLogger(__name__)
xenapi_torrent_opts = [
cfg.StrOpt('torrent_base_url',
help='Base URL for torrent files; must contain a slash'
' character (see RFC 1808, step 6)'),
cfg.FloatOpt('torrent_seed_chance',
default=1.0,
help='Probability that peer will become a seeder.'
' (1.0 = 100%)'),
cfg.IntOpt('torrent_seed_duration',
default=3600,
help='Number of seconds after downloading an image via'
' BitTorrent that it should be seeded for other peers.'),
cfg.IntOpt('torrent_max_last_accessed',
default=86400,
help='Cached torrent files not accessed within this number of'
' seconds can be reaped'),
cfg.IntOpt('torrent_listen_port_start',
default=6881,
help='Beginning of port range to listen on'),
cfg.IntOpt('torrent_listen_port_end',
default=6891,
help='End of port range to listen on'),
cfg.IntOpt('torrent_download_stall_cutoff',
default=600,
help='Number of seconds a download can remain at the same'
' progress percentage w/o being considered a stall'),
cfg.IntOpt('torrent_max_seeder_processes_per_host',
default=1,
help='Maximum number of seeder processes to run concurrently'
' within a given dom0. (-1 = no limit)')
]
CONF = cfg.CONF
CONF.register_opts(xenapi_torrent_opts, 'xenserver')
class BittorrentStore(object):
@staticmethod
def _lookup_torrent_url_fn():
"""Load a "fetcher" func to get the right torrent URL.
"""
if CONF.xenserver.torrent_base_url:
if '/' not in CONF.xenserver.torrent_base_url:
LOG.warn(_LW('Value specified in conf file for'
' xenserver.torrent_base_url does not contain a'
' slash character, therefore it will not be used'
' as part of the torrent URL. Specify a valid'
' base URL as defined by RFC 1808 (see step 6).'))
def _default_torrent_url_fn(image_id):
return urlparse.urljoin(CONF.xenserver.torrent_base_url,
"%s.torrent" % image_id)
return _default_torrent_url_fn
raise RuntimeError(_('Cannot create default bittorrent URL'
' without xenserver.torrent_base_url'
' configuration option set.'))
def download_image(self, context, session, instance, image_id):
params = {}
params['image_id'] = image_id
params['uuid_stack'] = vm_utils._make_uuid_stack()
params['sr_path'] = vm_utils.get_sr_path(session)
params['torrent_seed_duration'] = CONF.xenserver.torrent_seed_duration
params['torrent_seed_chance'] = CONF.xenserver.torrent_seed_chance
params['torrent_max_last_accessed'] = \
CONF.xenserver.torrent_max_last_accessed
params['torrent_listen_port_start'] = \
CONF.xenserver.torrent_listen_port_start
params['torrent_listen_port_end'] = \
CONF.xenserver.torrent_listen_port_end
params['torrent_download_stall_cutoff'] = \
CONF.xenserver.torrent_download_stall_cutoff
params['torrent_max_seeder_processes_per_host'] = \
CONF.xenserver.torrent_max_seeder_processes_per_host
lookup_fn = self._lookup_torrent_url_fn()
params['torrent_url'] = lookup_fn(image_id)
vdis = session.call_plugin_serialized(
'bittorrent', 'download_vhd', **params)
return vdis
def upload_image(self, context, session, instance, image_id, vdi_uuids):
raise NotImplementedError
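The helper _lookup_torrent_url_fn() above builds the torrent URL with urljoin(), which is why the option is expected to contain a slash (in practice a trailing slash) so the image id is appended rather than replacing the last path segment. A small hedged sketch, not part of the module, showing the RFC 1808 behaviour the warning refers to:
import six.moves.urllib.parse as urlparse
# With a trailing slash the image id is appended as a new path segment.
print(urlparse.urljoin('http://host/torrents/', 'abc.torrent'))
# -> http://host/torrents/abc.torrent
# Without a trailing slash, urljoin() replaces the last segment of the base.
print(urlparse.urljoin('http://host/torrents', 'abc.torrent'))
# -> http://host/abc.torrent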
| 42.221239
| 79
| 0.628589
|
73e421ff18cc69e99aba2bf8e5e38626d7092873
| 4,228
|
py
|
Python
|
profiles_api/views.py
|
sanydge/rest_api
|
afc078cbe011ed42504b6b759acd41eaf67768d8
|
[
"MIT"
] | null | null | null |
profiles_api/views.py
|
sanydge/rest_api
|
afc078cbe011ed42504b6b759acd41eaf67768d8
|
[
"MIT"
] | 5
|
2020-06-05T20:31:13.000Z
|
2021-06-10T18:13:25.000Z
|
profiles_api/views.py
|
sanydge/rest_api
|
afc078cbe011ed42504b6b759acd41eaf67768d8
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.permissions import IsAuthenticated
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
class HelloApiView(APIView):
"""Test API View"""
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
"""Returns a list of APIView features"""
an_apiview = [
'Uses HTTP methods as functions (get, post, patch, put, delete)',
'Is similar to a traditional Django View',
'Gives you the most control over your logic',
'Is mapped manually to URLs',
]
return Response({'message': 'Hello!', 'an_apiview': an_apiview})
def post(self, request):
"""Create a hello message with our name"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request, pk=None):
"""Handle updating an object"""
return Response({'method': 'PUT'})
def patch(self, request, pk=None):
"""Handle partial update of object"""
return Response({'method': 'PATCH'})
def delete(self, request, pk=None):
"""Delete an object"""
return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
"""Test API ViewSet"""
serializer_class = serializers.HelloSerializer
def list(self, request):
"""Return a hello message"""
a_viewset = [
'Uses actions (list, create, retrieve, update, partial_update)',
'Automatically maps to URLs using Routers',
'Provides more functionality with less code',
]
return Response({'message': 'Hello!', 'a_viewset': a_viewset})
def create(self, request):
"""Create a new hello message."""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
"""Handle getting an object by its ID"""
return Response({'http_method': 'GET'})
def update(self, request, pk=None):
"""Handle updating an object"""
return Response({'http_method': 'PUT'})
def partial_update(self, request, pk=None):
"""Handle updating part of an object"""
return Response({'http_method': 'PATCH'})
def destroy(self, request, pk=None):
"""Handle removing an object"""
return Response({'http_method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
"""Handle creating, creating and updating profiles"""
serializer_class = serializers.UserProfileSerializer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'email',)
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Handles creating, reading and updating profile feed items"""
authentication_classes = (TokenAuthentication,)
serializer_class = serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
permission_classes = (
permissions.UpdateOwnStatus, IsAuthenticated)
def perform_create(self, serializer):
"""Sets the user profile to the logged in user"""
serializer.save(user_profile=self.request.user)
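These views and viewsets are normally exposed through a router plus an explicit path. The sketch below is a hypothetical profiles_api/urls.py, not taken from the repository; the route prefixes and basenames are assumptions for illustration:
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, basename='hello-viewset')
router.register('profile', views.UserProfileViewSet)
router.register('feed', views.UserProfileFeedViewSet)
urlpatterns = [
    # The plain APIView is mapped manually; viewsets come from the router.
    path('hello-view/', views.HelloApiView.as_view()),
    path('', include(router.urls)),
]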
| 31.789474
| 77
| 0.651372
|
cd922135d173ed58f4443de4c708bcbaf6ecbc39
| 952
|
py
|
Python
|
tests/test_mem.py
|
dkostic/liboqs
|
3a56677a935e52b6b76edaa8f9312f0ba46a0398
|
[
"MIT"
] | null | null | null |
tests/test_mem.py
|
dkostic/liboqs
|
3a56677a935e52b6b76edaa8f9312f0ba46a0398
|
[
"MIT"
] | null | null | null |
tests/test_mem.py
|
dkostic/liboqs
|
3a56677a935e52b6b76edaa8f9312f0ba46a0398
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
import helpers
import pytest
from pathlib import Path
@helpers.filtered_test
@pytest.mark.parametrize('kem_name', helpers.available_kems_by_name())
def test_mem_kem(kem_name):
if not(helpers.is_kem_enabled_by_name(kem_name)):
pytest.skip('Not enabled')
Path('build/mem-benchmark').mkdir(parents=True, exist_ok=True)
for i in range(3):
helpers.run_subprocess([helpers.path_to_executable('test_kem_mem'), kem_name, str(i)])
@helpers.filtered_test
@pytest.mark.parametrize('sig_name', helpers.available_sigs_by_name())
def test_mem_sig(sig_name):
if not(helpers.is_sig_enabled_by_name(sig_name)):
pytest.skip('Not enabled')
Path('build/mem-benchmark').mkdir(parents=True, exist_ok=True)
for i in range(3):
helpers.run_subprocess([helpers.path_to_executable('test_sig_mem'), sig_name, str(i)])
if __name__ == "__main__":
import sys
pytest.main(sys.argv)
| 28.848485
| 93
| 0.737395
|
ed779bed4870d6a54131ac5d25b9e62cbd8e7f18
| 2,331
|
py
|
Python
|
prettyqt/widgets/itemeditorfactory.py
|
phil65/PrettyQt
|
26327670c46caa039c9bd15cb17a35ef5ad72e6c
|
[
"MIT"
] | 7
|
2019-05-01T01:34:36.000Z
|
2022-03-08T02:24:14.000Z
|
prettyqt/widgets/itemeditorfactory.py
|
phil65/PrettyQt
|
26327670c46caa039c9bd15cb17a35ef5ad72e6c
|
[
"MIT"
] | 141
|
2019-04-16T11:22:01.000Z
|
2021-04-14T15:12:36.000Z
|
prettyqt/widgets/itemeditorfactory.py
|
phil65/PrettyQt
|
26327670c46caa039c9bd15cb17a35ef5ad72e6c
|
[
"MIT"
] | 5
|
2019-04-17T11:48:19.000Z
|
2021-11-21T10:30:19.000Z
|
from __future__ import annotations
from prettyqt import widgets
from prettyqt.qt import QtCore, QtGui, QtWidgets
TYPES = {
bool: 1,
int: 2,
str: 10,
float: 38,
QtGui.QColor: 67,
QtGui.QCursor: 74,
QtCore.QDate: 14,
QtCore.QSize: 21,
QtCore.QTime: 15,
list: 9,
QtGui.QPolygon: 71,
QtGui.QPolygonF: 86,
QtGui.QColorSpace: 87,
QtCore.QSizeF: 22,
QtCore.QRectF: 20,
QtCore.QLine: 23,
QtGui.QTextLength: 77,
dict: 8,
QtGui.QIcon: 69,
QtGui.QPen: 76,
QtCore.QLineF: 24,
QtGui.QTextFormat: 78,
QtCore.QRect: 19,
QtCore.QPoint: 25,
QtCore.QUrl: 17,
QtCore.QRegularExpression: 44,
QtCore.QDateTime: 16,
QtCore.QPointF: 26,
QtGui.QPalette: 68,
QtGui.QFont: 64,
QtGui.QBrush: 66,
QtGui.QRegion: 72,
QtGui.QImage: 70,
QtGui.QKeySequence: 75,
QtWidgets.QSizePolicy: 121,
QtGui.QPixmap: 65,
QtCore.QLocale: 18,
QtGui.QBitmap: 73,
QtGui.QMatrix4x4: 81,
QtGui.QVector2D: 82,
QtGui.QVector3D: 83,
QtGui.QVector4D: 84,
QtGui.QQuaternion: 85,
QtCore.QEasingCurve: 29,
QtCore.QJsonValue: 45,
QtCore.QJsonDocument: 48,
QtCore.QModelIndex: 42,
QtCore.QPersistentModelIndex: 50,
QtCore.QUuid: 30,
"user": 1024,
}
class ItemEditorFactory(QtWidgets.QItemEditorFactory):
@classmethod
def register_default_editor(
cls, editor_cls: type[QtWidgets.QWidget], typ: int | None = None
):
factory = cls.defaultFactory()
factory.register_editor(editor_cls, typ)
cls.setDefaultFactory(factory)
def register_editor(
self,
editor_cls: type[QtWidgets.QWidget],
typ: int | None = None,
property_name: str = "",
):
class EditorCreator(widgets.ItemEditorCreatorBase):
def createWidget(self, parent: QtWidgets.QWidget) -> QtWidgets.QWidget:
return editor_cls(parent=parent)
def valuePropertyName(self) -> QtCore.QByteArray:
return QtCore.QByteArray(property_name.encode())
if typ is None:
typ = editor_cls.staticMetaObject.userProperty().userType()
self.registerEditor(typ, EditorCreator())
factory = ItemEditorFactory()
ItemEditorFactory.setDefaultFactory(factory)
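A hedged usage sketch for the factory above: it registers a stock QSpinBox, whose user property is "value", as the default editor for int values via the TYPES mapping. The editor choice and the QApplication setup are assumptions for the example, and ItemEditorFactory/TYPES refer to the names defined in this module:
from prettyqt.qt import QtWidgets
app = QtWidgets.QApplication([])
factory = ItemEditorFactory()
# TYPES[int] == 2, the QVariant/QMetaType id used by item views for int data.
factory.register_editor(QtWidgets.QSpinBox, typ=TYPES[int], property_name="value")
ItemEditorFactory.setDefaultFactory(factory)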
| 25.615385
| 83
| 0.646075
|
a225f106ed1c33be492b91267f82cd1742e05506
| 1,868
|
py
|
Python
|
lib/cogs/sell.py
|
namanyt/discord-lassan-bot
|
c95db8625ddf6cd0071988803cb1831a24ed18ab
|
[
"MIT"
] | 1
|
2021-08-04T11:13:19.000Z
|
2021-08-04T11:13:19.000Z
|
lib/cogs/sell.py
|
namanyt/discord-lassan-bot
|
c95db8625ddf6cd0071988803cb1831a24ed18ab
|
[
"MIT"
] | null | null | null |
lib/cogs/sell.py
|
namanyt/discord-lassan-bot
|
c95db8625ddf6cd0071988803cb1831a24ed18ab
|
[
"MIT"
] | null | null | null |
from json import load, dump
from discord.ext.commands import Cog, command, cooldown, BucketType
from lib.db import db
class Sell(Cog):
def __init__(self, bot):
self.bot = bot
@command(name='sell')
@cooldown(1, 2, type=BucketType.user)
async def sell_item(self, ctx, category, item_user):
user = ctx.author
with open('./data/json/shop.json', 'r') as f:
shop = load(f)
with open('./data/json/inv.json', 'r') as f:
inv = load(f)
if category in shop:
for items in shop[category]:
price = items['price']
item_name = items['item_name']
item_id = items['name']
item_desc = items['desc.']
if item_user in item_id:
if item_user in inv[str(user.id)]['inv']['item_id']:
inv[str(user.id)]['inv']['item_name'].remove(item_name)
inv[str(user.id)]['inv']['item_id'].remove(item_id)
inv[str(user.id)]['inv']['item_desc'].remove(item_desc)
db.execute("UPDATE economy SET Wallet = Wallet + ? WHERE UserID = ?",
price, user.id)
await ctx.send(f'{item_name} sold successfully')
with open('./data/json/inv.json', 'w') as f:
dump(inv, f)
return
else:
await ctx.send(f'{item_name} is not in your inventory')
return
else:
await ctx.send(f'{item_user} not available in store')
return
@Cog.listener()
async def on_ready(self):
if not self.bot.ready:
self.bot.cogs_ready.ready_up('sell')
def setup(bot):
bot.add_cog(Sell(bot))
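The sell command above indexes shop.json by category and expects each item to carry 'price', 'item_name', 'name' and 'desc.' keys, while inv.json keeps parallel lists per user id. A hedged sketch of that implied layout, with made-up file contents shown as Python literals:
example_shop = {
    "weapons": [
        {"name": "sword_01", "item_name": "Iron Sword", "price": 100, "desc.": "A plain sword"},
    ],
}
example_inv = {
    "123456789012345678": {  # Discord user id used as a string key
        "inv": {
            "item_id": ["sword_01"],
            "item_name": ["Iron Sword"],
            "item_desc": ["A plain sword"],
        },
    },
}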
| 32.77193
| 93
| 0.496788
|
234b527bfdfb267623f804d25a0d43d44460017c
| 31,927
|
py
|
Python
|
nova/auth/manager.py
|
bopopescu/openstack-12
|
2c7e0d1e63cae7aaa38095439843c9a2abb0382b
|
[
"Apache-2.0"
] | null | null | null |
nova/auth/manager.py
|
bopopescu/openstack-12
|
2c7e0d1e63cae7aaa38095439843c9a2abb0382b
|
[
"Apache-2.0"
] | null | null | null |
nova/auth/manager.py
|
bopopescu/openstack-12
|
2c7e0d1e63cae7aaa38095439843c9a2abb0382b
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WARNING: This code is deprecated and will be removed.
Keystone is the recommended solution for auth management.
Nova authentication management
"""
import os
import string # pylint: disable=W0402
import uuid
import zipfile
from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova import utils
from nova.auth import signer
auth_opts = [
cfg.ListOpt('allowed_roles',
default=[
'cloudadmin',
'itsec',
'sysadmin',
'netadmin',
'developer'
],
help='Allowed roles for project'),
# NOTE(vish): a user with one of these roles will be a superuser and
# have access to all api commands
cfg.ListOpt('superuser_roles',
default=['cloudadmin'],
help='Roles that ignore authorization checking completely'),
# NOTE(vish): a user with one of these roles will have it for every
# project, even if he or she is not a member of the project
cfg.ListOpt('global_roles',
default=['cloudadmin', 'itsec'],
help='Roles that apply to all projects'),
cfg.StrOpt('credentials_template',
default='$pybasedir/nova/auth/novarc.template',
help='Template for creating users rc file'),
cfg.StrOpt('vpn_client_template',
default='$pybasedir/nova/cloudpipe/client.ovpn.template',
help='Template for creating users vpn file'),
cfg.StrOpt('credential_vpn_file',
default='nova-vpn.conf',
help='Filename of vpn configuration in credentials zip'),
cfg.StrOpt('credential_key_file',
default='pk.pem',
help='Filename of private key in credentials zip'),
cfg.StrOpt('credential_cert_file',
default='cert.pem',
help='Filename of certificate in credentials zip'),
cfg.StrOpt('credential_rc_file',
default='%src',
help='Filename of rc in credentials zip %s will be replaced by '
'name of the region (nova by default)'),
cfg.StrOpt('auth_driver',
default='nova.auth.dbdriver.DbDriver',
help='Driver that auth manager uses'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(auth_opts)
flags.DECLARE('osapi_compute_listen_port', 'nova.service')
LOG = logging.getLogger(__name__)
if FLAGS.memcached_servers:
import memcache
else:
from nova.common import memorycache as memcache
class AuthBase(object):
"""Base class for objects relating to auth
Objects derived from this class should be stupid data objects with
an id member. They may optionally contain methods that delegate to
AuthManager, but should not implement logic themselves.
"""
@classmethod
def safe_id(cls, obj):
"""Safely get object id.
This method will return the id of the object if the object
is of this class, otherwise it will return the original object.
This allows methods to accept objects or ids as parameters.
"""
if isinstance(obj, cls):
return obj.id
else:
return obj
class User(AuthBase):
"""Object representing a user
The following attributes are defined:
``id``
A system identifier for the user. A string (for LDAP)
``name``
The user name, potentially in some more friendly format
``access``
The 'username' for EC2 authentication
``secret``
The 'password' for EC2 authentication
``admin``
???
"""
def __init__(self, id, name, access, secret, admin):
AuthBase.__init__(self)
assert isinstance(id, basestring)
self.id = id
self.name = name
self.access = access
self.secret = secret
self.admin = admin
def is_superuser(self):
return AuthManager().is_superuser(self)
def is_admin(self):
return AuthManager().is_admin(self)
def has_role(self, role):
return AuthManager().has_role(self, role)
def add_role(self, role):
return AuthManager().add_role(self, role)
def remove_role(self, role):
return AuthManager().remove_role(self, role)
def is_project_member(self, project):
return AuthManager().is_project_member(self, project)
def is_project_manager(self, project):
return AuthManager().is_project_manager(self, project)
def __repr__(self):
return "User('%s', '%s')" % (self.id, self.name)
class Project(AuthBase):
"""Represents a Project returned from the datastore"""
def __init__(self, id, name, project_manager_id, description, member_ids):
AuthBase.__init__(self)
self.id = id
self.name = name
self.project_manager_id = project_manager_id
self.description = description
self.member_ids = member_ids
@property
def project_manager(self):
return AuthManager().get_user(self.project_manager_id)
@property
def vpn_ip(self):
ip, _port = AuthManager().get_project_vpn_data(self)
return ip
@property
def vpn_port(self):
_ip, port = AuthManager().get_project_vpn_data(self)
return port
def has_manager(self, user):
return AuthManager().is_project_manager(user, self)
def has_member(self, user):
return AuthManager().is_project_member(user, self)
def add_role(self, user, role):
return AuthManager().add_role(user, role, self)
def remove_role(self, user, role):
return AuthManager().remove_role(user, role, self)
def has_role(self, user, role):
return AuthManager().has_role(user, role, self)
def get_credentials(self, user):
return AuthManager().get_credentials(user, self)
def __repr__(self):
return "Project('%s', '%s')" % (self.id, self.name)
class AuthManager(object):
"""Manager Singleton for dealing with Users, Projects, and Keypairs
Methods accept objects or ids.
AuthManager uses a driver object to make requests to the data backend.
See ldapdriver for reference.
AuthManager also manages associated data related to Auth objects that
need to be more accessible, such as vpn ips and ports.
"""
_instance = None
mc = None
def __new__(cls, *args, **kwargs):
"""Returns the AuthManager singleton"""
if not cls._instance or ('new' in kwargs and kwargs['new']):
cls._instance = super(AuthManager, cls).__new__(cls)
return cls._instance
def __init__(self, driver=None, *args, **kwargs):
"""Inits the driver from parameter or flag
__init__ is run every time AuthManager() is called, so we only
reset the driver if it is not set or a new driver is specified.
"""
self.network_manager = importutils.import_object(FLAGS.network_manager)
if driver or not getattr(self, 'driver', None):
self.driver = importutils.import_class(driver or FLAGS.auth_driver)
if AuthManager.mc is None:
AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
def authenticate(self, access, signature, params, verb='GET',
server_string='127.0.0.1:8773', path='/',
check_type='ec2', headers=None):
"""Authenticates AWS request using access key and signature
If the project is not specified, attempts to authenticate to
a project with the same name as the user. This way, older tools
that have no project knowledge will still work.
:type access: str
:param access: Access key for user in the form "access:project".
:type signature: str
:param signature: Signature of the request.
:type params: list of str
:param params: Web parameters used for the signature.
:type verb: str
:param verb: Web request verb ('GET' or 'POST').
:type server_string: str
:param server_string: Web request server string.
:type path: str
:param path: Web request path.
:type check_type: str
:param check_type: Type of signature to check. 'ec2' for EC2, 's3' for
S3. Any other value will cause signature not to be
checked.
:type headers: list
:param headers: HTTP headers passed with the request (only needed for
s3 signature checks)
:rtype: tuple (User, Project)
:return: User and project that the request represents.
"""
# TODO(vish): check for valid timestamp
(access_key, _sep, project_id) = access.partition(':')
LOG.debug(_('Looking up user: %r'), access_key)
user = self.get_user_from_access_key(access_key)
LOG.debug('user: %r', user)
if user is None:
LOG.audit(_("Failed authorization for access key %s"), access_key)
raise exception.AccessKeyNotFound(access_key=access_key)
# NOTE(vish): if we stop using project name as id we need better
# logic to find a default project for user
if project_id == '':
LOG.debug(_("Using project name = user name (%s)"), user.name)
project_id = user.name
project = self.get_project(project_id)
if project is None:
pjid = project_id
uname = user.name
LOG.audit(_("failed authorization: no project named %(pjid)s"
" (user=%(uname)s)") % locals())
raise exception.ProjectNotFound(project_id=project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
uname = user.name
uid = user.id
pjname = project.name
pjid = project.id
LOG.audit(_("Failed authorization: user %(uname)s not admin"
" and not member of project %(pjname)s") % locals())
raise exception.ProjectMembershipNotFound(project_id=pjid,
user_id=uid)
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
LOG.debug(_('user.secret: %s'), user.secret)
LOG.debug(_('expected_signature: %s'), expected_signature)
LOG.debug(_('signature: %s'), signature)
if not utils.strcmp_const_time(signature, expected_signature):
LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.InvalidSignature(signature=signature,
user=user)
elif check_type == 'ec2':
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
expected_signature = signer.Signer(user.secret.encode()).generate(
params, verb, server_string, path)
LOG.debug(_('user.secret: %s'), user.secret)
LOG.debug(_('expected_signature: %s'), expected_signature)
LOG.debug(_('signature: %s'), signature)
if not utils.strcmp_const_time(signature, expected_signature):
(addr_str, port_str) = utils.parse_server_string(server_string)
# If the given server_string contains port num, try without it.
if port_str != '':
host_only_signature = signer.Signer(
user.secret.encode()).generate(params, verb,
addr_str, path)
LOG.debug(_('host_only_signature: %s'),
host_only_signature)
if utils.strcmp_const_time(signature, host_only_signature):
return (user, project)
LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.InvalidSignature(signature=signature,
user=user)
return (user, project)
def get_access_key(self, user, project):
"""Get an access key that includes user and project"""
if not isinstance(user, User):
user = self.get_user(user)
return "%s:%s" % (user.access, Project.safe_id(project))
def is_superuser(self, user):
"""Checks for superuser status, allowing user to bypass authorization
:type user: User or uid
:param user: User to check.
:rtype: bool
:return: True for superuser.
"""
if not isinstance(user, User):
user = self.get_user(user)
# NOTE(vish): admin flag on user represents superuser
if user.admin:
return True
for role in FLAGS.superuser_roles:
if self.has_role(user, role):
return True
def is_admin(self, user):
"""Checks for admin status, allowing user to access all projects
:type user: User or uid
:param user: User to check.
:rtype: bool
:return: True for admin.
"""
if not isinstance(user, User):
user = self.get_user(user)
if self.is_superuser(user):
return True
for role in FLAGS.global_roles:
if self.has_role(user, role):
return True
def _build_mc_key(self, user, role, project=None):
key_parts = ['rolecache', User.safe_id(user), str(role)]
if project:
key_parts.append(Project.safe_id(project))
return utils.utf8('-'.join(key_parts))
def _clear_mc_key(self, user, role, project=None):
# NOTE(anthony): it would be better to delete the key
self.mc.set(self._build_mc_key(user, role, project), None)
def _has_role(self, user, role, project=None):
mc_key = self._build_mc_key(user, role, project)
rslt = self.mc.get(mc_key)
if rslt is None:
with self.driver() as drv:
rslt = drv.has_role(user, role, project)
self.mc.set(mc_key, rslt)
return rslt
else:
return rslt
def has_role(self, user, role, project=None):
"""Checks existence of role for user
If project is not specified, checks for a global role. If project
is specified, checks for the union of the global role and the
project role.
Role 'projectmanager' only works for projects and simply checks to
see if the user is the project_manager of the specified project. It
is the same as calling is_project_manager(user, project).
:type user: User or uid
:param user: User to check.
:type role: str
:param role: Role to check.
:type project: Project or project_id
:param project: Project in which to look for local role.
:rtype: bool
:return: True if the user has the role.
"""
if role == 'projectmanager':
if not project:
raise exception.NovaException(_("Must specify project"))
return self.is_project_manager(user, project)
global_role = self._has_role(User.safe_id(user),
role,
None)
if not global_role:
return global_role
if not project or role in FLAGS.global_roles:
return global_role
return self._has_role(User.safe_id(user),
role,
Project.safe_id(project))
def add_role(self, user, role, project=None):
"""Adds role for user
If project is not specified, adds a global role. If project
is specified, adds a local role.
The 'projectmanager' role is special and can't be added or removed.
:type user: User or uid
:param user: User to which to add role.
:type role: str
:param role: Role to add.
:type project: Project or project_id
:param project: Project in which to add local role.
"""
if role not in FLAGS.allowed_roles:
raise exception.UserRoleNotFound(role_id=role)
if project is not None and role in FLAGS.global_roles:
raise exception.GlobalRoleNotAllowed(role_id=role)
uid = User.safe_id(user)
pid = Project.safe_id(project)
if project:
LOG.audit(_("Adding role %(role)s to user %(uid)s"
" in project %(pid)s") % locals())
else:
LOG.audit(_("Adding sitewide role %(role)s to user %(uid)s")
% locals())
with self.driver() as drv:
self._clear_mc_key(uid, role, pid)
drv.add_role(uid, role, pid)
def remove_role(self, user, role, project=None):
"""Removes role for user
If project is not specified, removes a global role. If project
is specified, removes a local role.
The 'projectmanager' role is special and can't be added or removed.
:type user: User or uid
:param user: User from which to remove role.
:type role: str
:param role: Role to remove.
:type project: Project or project_id
:param project: Project in which to remove local role.
"""
uid = User.safe_id(user)
pid = Project.safe_id(project)
if project:
LOG.audit(_("Removing role %(role)s from user %(uid)s"
" on project %(pid)s") % locals())
else:
LOG.audit(_("Removing sitewide role %(role)s"
" from user %(uid)s") % locals())
with self.driver() as drv:
self._clear_mc_key(uid, role, pid)
drv.remove_role(uid, role, pid)
@staticmethod
def get_roles(project_roles=True):
"""Get list of allowed roles"""
if project_roles:
return list(set(FLAGS.allowed_roles) - set(FLAGS.global_roles))
else:
return FLAGS.allowed_roles
def get_user_roles(self, user, project=None):
"""Get user global or per-project roles"""
with self.driver() as drv:
return drv.get_user_roles(User.safe_id(user),
Project.safe_id(project))
def get_active_roles(self, user, project=None):
"""Get all active roles for context"""
if project:
roles = FLAGS.allowed_roles + ['projectmanager']
else:
roles = FLAGS.global_roles
return [role for role in roles if self.has_role(user, role, project)]
def get_project(self, pid):
"""Get project object by id"""
with self.driver() as drv:
project_dict = drv.get_project(pid)
if project_dict:
return Project(**project_dict)
def get_projects(self, user=None):
"""Retrieves list of projects, optionally filtered by user"""
with self.driver() as drv:
project_list = drv.get_projects(User.safe_id(user))
if not project_list:
return []
return [Project(**project_dict) for project_dict in project_list]
def create_project(self, name, manager_user, description=None,
member_users=None):
"""Create a project
:type name: str
:param name: Name of the project to create. The name will also be
used as the project id.
:type manager_user: User or uid
:param manager_user: This user will be the project manager.
:type description: str
:param description: Description of the project. If no description is
specified, the name of the project will be used.
:type member_users: list of User or uid
:param member_users: Initial project members. The project manager will always be
added as a member, even if he isn't specified in this list.
:rtype: Project
:return: The new project.
"""
if member_users:
member_users = [User.safe_id(u) for u in member_users]
with self.driver() as drv:
project_dict = drv.create_project(name,
User.safe_id(manager_user),
description,
member_users)
if project_dict:
LOG.audit(_("Created project %(name)s with"
" manager %(manager_user)s") % locals())
project = Project(**project_dict)
return project
def modify_project(self, project, manager_user=None, description=None):
"""Modify a project
:type name: Project or project_id
:param project: The project to modify.
:type manager_user: User or uid
:param manager_user: This user will be the new project manager.
:type description: str
:param description: This will be the new description of the project.
"""
LOG.audit(_("modifying project %s"), Project.safe_id(project))
if manager_user:
manager_user = User.safe_id(manager_user)
with self.driver() as drv:
drv.modify_project(Project.safe_id(project),
manager_user,
description)
def add_to_project(self, user, project):
"""Add user to project"""
uid = User.safe_id(user)
pid = Project.safe_id(project)
LOG.audit(_("Adding user %(uid)s to project %(pid)s") % locals())
with self.driver() as drv:
return drv.add_to_project(User.safe_id(user),
Project.safe_id(project))
def is_project_manager(self, user, project):
"""Checks if user is project manager"""
if not isinstance(project, Project):
project = self.get_project(project)
return User.safe_id(user) == project.project_manager_id
def is_project_member(self, user, project):
"""Checks to see if user is a member of project"""
if not isinstance(project, Project):
project = self.get_project(project)
return User.safe_id(user) in project.member_ids
def remove_from_project(self, user, project):
"""Removes a user from a project"""
uid = User.safe_id(user)
pid = Project.safe_id(project)
LOG.audit(_("Remove user %(uid)s from project %(pid)s") % locals())
with self.driver() as drv:
return drv.remove_from_project(uid, pid)
@staticmethod
def get_project_vpn_data(project):
"""Gets vpn ip and port for project
:type project: Project or project_id
:param project: Project from which to get associated vpn data
:rvalue: tuple of (str, str)
:return: A tuple containing (ip, port) or None, None if vpn has
not been allocated for user.
"""
networks = db.project_get_networks(context.get_admin_context(),
Project.safe_id(project), False)
if not networks:
return (None, None)
# TODO(tr3buchet): not sure what you guys plan on doing with this
# but it's possible for a project to have multiple sets of vpn data
# for now I'm just returning the first one
network = networks[0]
return (network['vpn_public_address'],
network['vpn_public_port'])
def delete_project(self, project):
"""Deletes a project"""
LOG.audit(_("Deleting project %s"), Project.safe_id(project))
with self.driver() as drv:
drv.delete_project(Project.safe_id(project))
def get_user(self, uid):
"""Retrieves a user by id"""
with self.driver() as drv:
user_dict = drv.get_user(uid)
if user_dict:
return User(**user_dict)
def get_user_from_access_key(self, access_key):
"""Retrieves a user by access key"""
with self.driver() as drv:
user_dict = drv.get_user_from_access_key(access_key)
if user_dict:
return User(**user_dict)
def get_users(self):
"""Retrieves a list of all users"""
with self.driver() as drv:
user_list = drv.get_users()
if not user_list:
return []
return [User(**user_dict) for user_dict in user_list]
def create_user(self, name, access=None, secret=None, admin=False):
"""Creates a user
:type name: str
:param name: Name of the user to create.
:type access: str
:param access: Access Key (defaults to a random uuid)
:type secret: str
:param secret: Secret Key (defaults to a random uuid)
:type admin: bool
:param admin: Whether to set the admin flag. The admin flag gives
superuser status regardless of roles specified for the user.
:type create_project: bool
:param: Whether to create a project for the user with the same name.
:rtype: User
:return: The new user.
"""
if access is None:
access = str(uuid.uuid4())
if secret is None:
secret = str(uuid.uuid4())
with self.driver() as drv:
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
rv = User(**user_dict)
rvname = rv.name
rvadmin = rv.admin
LOG.audit(_("Created user %(rvname)s"
" (admin: %(rvadmin)r)") % locals())
return rv
def delete_user(self, user):
"""Deletes a user
Additionally deletes all users key_pairs"""
uid = User.safe_id(user)
LOG.audit(_("Deleting user %s"), uid)
db.key_pair_destroy_all_by_user(context.get_admin_context(),
uid)
with self.driver() as drv:
drv.delete_user(uid)
def modify_user(self, user, access_key=None, secret_key=None, admin=None):
"""Modify credentials for a user"""
uid = User.safe_id(user)
if access_key:
LOG.audit(_("Access Key change for user %s"), uid)
if secret_key:
LOG.audit(_("Secret Key change for user %s"), uid)
if admin is not None:
LOG.audit(_("Admin status set to %(admin)r"
" for user %(uid)s") % locals())
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)
def get_credentials(self, user, project=None, use_dmz=True):
"""Get credential zip for user in project"""
if not isinstance(user, User):
user = self.get_user(user)
if project is None:
project = user.id
pid = Project.safe_id(project)
private_key, signed_cert = crypto.generate_x509_cert(user.id, pid)
with utils.tempdir() as tmpdir:
zf = os.path.join(tmpdir, "temp.zip")
zippy = zipfile.ZipFile(zf, 'w')
if use_dmz and FLAGS.region_list:
regions = {}
for item in FLAGS.region_list:
region, _sep, region_host = item.partition("=")
regions[region] = region_host
else:
regions = {'nova': FLAGS.ec2_host}
for region, host in regions.iteritems():
rc = self.__generate_rc(user,
pid,
use_dmz,
host)
zippy.writestr(FLAGS.credential_rc_file % region, rc)
zippy.writestr(FLAGS.credential_key_file, private_key)
zippy.writestr(FLAGS.credential_cert_file, signed_cert)
(vpn_ip, vpn_port) = self.get_project_vpn_data(project)
if vpn_ip:
configfile = open(FLAGS.vpn_client_template, "r")
s = string.Template(configfile.read())
configfile.close()
config = s.substitute(keyfile=FLAGS.credential_key_file,
certfile=FLAGS.credential_cert_file,
ip=vpn_ip,
port=vpn_port)
zippy.writestr(FLAGS.credential_vpn_file, config)
else:
LOG.warn(_("No vpn data for project %s"), pid)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid))
zippy.close()
with open(zf, 'rb') as f:
read_buffer = f.read()
return read_buffer
def get_environment_rc(self, user, project=None, use_dmz=True):
"""Get environment rc for user in project"""
if not isinstance(user, User):
user = self.get_user(user)
if project is None:
project = user.id
pid = Project.safe_id(project)
return self.__generate_rc(user, pid, use_dmz)
@staticmethod
def __generate_rc(user, pid, use_dmz=True, host=None):
"""Generate rc file for user"""
if use_dmz:
ec2_host = FLAGS.ec2_dmz_host
else:
ec2_host = FLAGS.ec2_host
# NOTE(vish): Always use the dmz since it is used from inside the
# instance
s3_host = FLAGS.s3_dmz
if host:
s3_host = host
ec2_host = host
rc = open(FLAGS.credentials_template).read()
# NOTE(vish): Deprecated auth uses an access key, no auth uses a
# the user_id in place of it.
if FLAGS.auth_strategy == 'deprecated':
access = user.access
else:
access = user.id
rc = rc % {'access': access,
'project': pid,
'secret': user.secret,
'ec2': '%s://%s:%s%s' % (FLAGS.ec2_scheme,
ec2_host,
FLAGS.ec2_port,
FLAGS.ec2_path),
's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port),
'os': '%s://%s:%s%s' % (FLAGS.osapi_scheme,
ec2_host,
FLAGS.osapi_compute_listen_port,
FLAGS.osapi_path),
'user': user.name,
'nova': FLAGS.ca_file,
'cert': FLAGS.credential_cert_file,
'key': FLAGS.credential_key_file}
return rc
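A hedged end-to-end sketch of the deprecated manager's own API as defined above; the user, project and role names are placeholders, and it assumes the auth driver, flags and database are configured elsewhere in nova:
def demo_auth_manager():
    manager = AuthManager()
    user = manager.create_user('alice', admin=False)
    project = manager.create_project('alice-project', user)
    # 'developer' is one of the default allowed_roles.
    manager.add_role(user, 'developer', project)
    assert manager.has_role(user, 'developer', project)
    # Returns the zipped novarc/key/cert bundle for this user in this project.
    return manager.get_credentials(user, project)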
| 37.210956
| 79
| 0.581013
|
6cf0e6345f51882e320e1dc1500facde92776175
| 133
|
py
|
Python
|
codeforces/1030A.py
|
bartekpacia/python-training
|
00a1047f70ab44cc5afed8619eb4eac0e406f3e3
|
[
"MIT"
] | null | null | null |
codeforces/1030A.py
|
bartekpacia/python-training
|
00a1047f70ab44cc5afed8619eb4eac0e406f3e3
|
[
"MIT"
] | null | null | null |
codeforces/1030A.py
|
bartekpacia/python-training
|
00a1047f70ab44cc5afed8619eb4eac0e406f3e3
|
[
"MIT"
] | null | null | null |
n = int(input())
nums = input().split(" ")
for num in nums:
if num == '1':
print("hard")
exit(0)
print("easy")
| 13.3
| 25
| 0.481203
|
3112d005daa9d2c2ed7e022dbe7129c8158a7e36
| 7,037
|
py
|
Python
|
azure-devops/azext_devops/dev/team/project.py
|
moerketh/azure-devops-cli-extension
|
634cf15e8704249c0053a5c8be8e7d7139184c25
|
[
"MIT"
] | 147
|
2017-11-15T20:39:05.000Z
|
2019-01-17T15:40:00.000Z
|
azure-devops/azext_devops/dev/team/project.py
|
moerketh/azure-devops-cli-extension
|
634cf15e8704249c0053a5c8be8e7d7139184c25
|
[
"MIT"
] | 139
|
2017-11-15T19:12:11.000Z
|
2019-01-22T07:56:23.000Z
|
azure-devops/azext_devops/dev/team/project.py
|
moerketh/azure-devops-cli-extension
|
634cf15e8704249c0053a5c8be8e7d7139184c25
|
[
"MIT"
] | 46
|
2017-11-17T09:15:29.000Z
|
2019-01-14T07:41:03.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import webbrowser
from knack.log import get_logger
from knack.util import CLIError
from azext_devops.devops_sdk.v5_0.core.models import TeamProject
from azext_devops.dev.common.operations import wait_for_long_running_operation
from azext_devops.dev.common.services import (get_core_client,
get_core_client_v51,
resolve_instance)
from azext_devops.dev.common.uri import uri_quote
logger = get_logger(__name__)
def create_project(name, organization=None, process=None, source_control='git', description=None,
visibility='private', detect=None, open=False): # pylint: disable=redefined-builtin
"""Create a team project.
:param name: Name of the new project.
:type name: str
:param process: Process to use. Default if not specified.
:type process: str
:param source_control: Source control type of the initial code repository created.
:type source_control: str
:param description: Description for the new project.
:type description: str
:param visibility: Project visibility.
:type visibility: str
:param open: Open the team project in the default web browser.
:type open: bool
:rtype: :class:`<TeamProject> <v5_0.core.models.TeamProject>`
"""
organization = resolve_instance(detect=detect, organization=organization)
team_project = TeamProject()
team_project.name = name
team_project.description = description
# private is the only allowed value by vsts right now.
team_project.visibility = visibility
core_client = get_core_client(organization)
# get process template id
process_id = None
process_list = core_client.get_processes()
if process is not None:
process_lower = process.lower()
for prc in process_list:
if prc.name.lower() == process_lower:
process_id = prc.id
break
if process_id is None:
raise CLIError('Could not find a process template with name: "{}"'.format(process))
if process_id is None:
for prc in process_list:
if prc.is_default:
process_id = prc.id
break
if process_id is None:
raise CLIError('Could not find a default process template: "{}"'.format(name))
# build capabilities
version_control_capabilities = {VERSION_CONTROL_CAPABILITY_ATTRIBUTE_NAME: source_control}
process_capabilities = {PROCESS_TEMPLATE_CAPABILITY_TEMPLATE_TYPE_ID_ATTRIBUTE_NAME: process_id}
team_project.capabilities = {VERSION_CONTROL_CAPABILITY_NAME: version_control_capabilities,
PROCESS_TEMPLATE_CAPABILITY_NAME: process_capabilities}
# queue project creation
operation_reference = core_client.queue_create_project(project_to_create=team_project)
operation = wait_for_long_running_operation(organization, operation_reference.id, 1)
status = operation.status.lower()
if status == 'failed':
raise CLIError('Project creation failed.')
if status == 'cancelled':
raise CLIError('Project creation was cancelled.')
team_project = core_client.get_project(project_id=name, include_capabilities=True)
if open:
_open_project(team_project)
return team_project
def delete_project(id, organization=None, detect=None): # pylint: disable=redefined-builtin
"""Delete team project.
:param id: The id of the project to delete.
:type id: str
"""
organization = resolve_instance(detect=detect, organization=organization)
core_client = get_core_client(organization)
operation_reference = core_client.queue_delete_project(project_id=id)
operation = wait_for_long_running_operation(organization, operation_reference.id, 1)
status = operation.status.lower()
if status == 'failed':
raise CLIError('Project deletion failed.')
if status == 'cancelled':
raise CLIError('Project deletion was cancelled.')
print('Deleted project {}'.format(id))
return operation
def show_project(project, organization=None, detect=None, open=False): # pylint: disable=redefined-builtin
"""Show team project.
:param project: The id or name of the project to show.
:type project: str
:param open: Open the team project in the default web browser.
:type open: bool
:rtype: :class:`<TeamProject> <v5_0.core.models.TeamProject>`
"""
organization = resolve_instance(detect=detect, organization=organization)
core_client = get_core_client(organization)
team_project = core_client.get_project(project_id=project, include_capabilities=True)
if open:
_open_project(team_project)
return team_project
def list_projects(organization=None,
top=None,
skip=None,
state_filter='all',
continuation_token=None,
get_default_team_image_url=None,
detect=None):
"""List team projects
:param top: Maximum number of results to list.
:type top: int
:param skip: Number of results to skip.
:type skip: int
:rtype: list of :class:`<TeamProject> <v5_0.core.models.TeamProject>`
"""
logger.debug('Opening web page: %s', 'Test CLI Release')
logger.debug('__________________________________________________________________________________________________')
organization = resolve_instance(detect=detect, organization=organization)
core_client = get_core_client_v51(organization)
team_projects = core_client.get_projects(state_filter=state_filter,
top=top,
skip=skip,
continuation_token=continuation_token,
get_default_team_image_url=get_default_team_image_url)
return team_projects
def _open_project(project):
"""Opens the project in the default browser.
"""
api_segment = '/_apis/'
pos = project.url.find(api_segment)
if pos >= 0:
url = project.url[:pos + 1] + uri_quote(project.name)
logger.debug('Opening web page: %s', url)
webbrowser.open_new(url=url)
else:
raise CLIError("Failed to open web browser, due to unrecognized url in response.")
# capability keys
VERSION_CONTROL_CAPABILITY_NAME = 'versioncontrol'
VERSION_CONTROL_CAPABILITY_ATTRIBUTE_NAME = 'sourceControlType'
PROCESS_TEMPLATE_CAPABILITY_NAME = 'processTemplate'
PROCESS_TEMPLATE_CAPABILITY_TEMPLATE_TYPE_ID_ATTRIBUTE_NAME = 'templateTypeId'
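A hedged sketch chaining the module-level functions above; the organization URL, process name and project name are placeholders rather than values from the repository:
def demo_project_lifecycle():
    org = 'https://dev.azure.com/fabrikam'
    project = create_project(name='demo-project',
                             organization=org,
                             process='Agile',
                             source_control='git',
                             description='Throwaway project',
                             visibility='private')
    print(show_project(project.id, organization=org).name)
    delete_project(project.id, organization=org)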
| 41.639053
| 118
| 0.67344
|
1d5f4e799f2541454dc0633bff16174549741af2
| 17,262
|
py
|
Python
|
python/html_checks.py
|
apluslms/grade-web
|
8f7eeb62c98bbd9b80b8499ed69d8853a1fc7bf3
|
[
"MIT"
] | null | null | null |
python/html_checks.py
|
apluslms/grade-web
|
8f7eeb62c98bbd9b80b8499ed69d8853a1fc7bf3
|
[
"MIT"
] | null | null | null |
python/html_checks.py
|
apluslms/grade-web
|
8f7eeb62c98bbd9b80b8499ed69d8853a1fc7bf3
|
[
"MIT"
] | 1
|
2021-03-01T10:24:39.000Z
|
2021-03-01T10:24:39.000Z
|
#!/usr/bin/env python3
import html5lib
from xml.dom.minidom import (Document, Element)
from html import escape
import tinycss
import esprima
class Logger:
def __init__(self, reporter, level=0, points=0):
self.reporter = reporter
self.level = level
self.level_max = points > 0
self.level_ok = True
self.rows = []
self.points = 0
self.max_points = points
def add_level(self, msg, points=0):
sublog = Logger(self.reporter, self.level + 1, points)
self.rows.append(('level', msg, points, sublog))
return sublog
def message(self, msg):
self.rows.append(('message', msg, 0))
def success(self, msg, points=0):
self.rows.append(('success', msg, points))
self.add_points(points, True)
def fail(self, msg, points=0):
self.rows.append(('fail', msg, points))
self.add_points(points, False)
self.level_ok = False
def __str__(self):
return self.reporter.format(self)
def add_points(self, points, earned):
self.points += points if earned else 0
self.max_points += 0 if self.level_max else points
def points_total(self):
points = min(self.points, self.max_points)
if self.level_max and self.level_ok and self.points == 0:
points = self.max_points
max_points = self.max_points
for row in self.rows:
if row[0] == 'level':
p, m = row[3].points_total()
points += p
max_points += m
return (points, max_points)
class Reporter:
FORMAT = {
'level': '{index:d}. {message}{points}:\n{body}',
'success': '* Success! {message}{points}',
'fail': '* Fail! {message}{points}',
'row': '* {message}{points}',
'points_wrap': ' ({:d}p)',
'separator': '\n',
}
def __init__(self):
pass
def format(self, logger):
res = []
for index, row in enumerate(logger.rows):
res.append(self.format_row(
logger.level, index + 1, row[0], row[1], row[2],
None if len(row) < 4 else str(row[3])
))
return (
self.wrap_level(logger.level, self.FORMAT['separator'].join(res))
+ '\n' + self.points_lines(logger)
)
def format_row(self, level, index, type, message, points, body):
key = type if type in self.FORMAT else 'row'
return self.wrap_row(level, index, self.FORMAT[key].format(
level=level, index=index, type=type,
message=message, points=self.wrap_points(points), body=body
))
def wrap_points(self, points):
return self.FORMAT['points_wrap'].format(points) if points > 0 else ''
def wrap_row(self, level, index, row):
return row
def wrap_level(self, level, body):
return body
def points_lines(self, logger):
return (
'TotalPoints: {:d}\nMaxPoints: {:d}\n'.format(*logger.points_total())
if logger.level == 0 else
''
)
class HtmlListReporter(Reporter):
FORMAT = {
'level': '<li class="check-level"><strong>{message}</strong>{points}\n{body}</li>',
'success': '<li class="check-success"><span class="text-success">✔</span> {message}{points}</li>',
'fail': '<li class="check-fail"><span class="text-danger">⨯</span> {message}{points}</li>',
'row': '<li class="check-message">{message}{points}</li>',
'level_wrap_numbered': '<ol>\n{}\n</ol>',
'level_wrap': '<ul>\n{}\n</ul>',
'points_wrap': ' ({:d}p)',
'separator': '\n',
}
def wrap_level(self, level, body):
return self.FORMAT['level_wrap_numbered' if level == 0 else 'level_wrap'].format(body)
def read_file(file_name):
import os
with open(os.path.join(os.getcwd(), file_name), 'r') as fp:
return fp.read()
def html_parse(text):
parser = html5lib.HTMLParser(tree=html5lib.getTreeBuilder("dom"), strict=True)
try:
return (parser.parse(text), tuple())
except:
return (
None,
('Line: {:d} Character: {:d} Error: {}'.format(
e[0][0], e[0][1], html5lib.constants.E[e[1]] % e[2]
) for e in parser.errors)
)
def html_node_text(node):
return ''.join(c.nodeValue for c in node.childNodes if c.nodeType == 3)
def html_cast_text(any):
if type(any) == Element:
return html_node_text(any)
return any
def html_has_text(node, text):
return ' '.join(html_node_text(node).split()) == text
def html_has_attributes(node, attrs):
for k,v in (attrs or {}).items():
if node.hasAttribute(k):
if v is False or (not v is True and node.getAttribute(k) != v):
return False
elif not v is False:
return False
return True
def html_find_children(node, name, attrs=None, recursion=False):
match = []
for i,child in enumerate(node.childNodes):
if child.localName == name and html_has_attributes(child, attrs):
match.append((i, child))
if recursion:
match.extend(html_find_children(child, name, attrs, recursion))
return match
def html_print_string(node, attrs=None):
if type(node) == Document:
return 'document root'
if type(node) == Element:
return html_print_string(
node.localName,
{ k: v.value for k,v in dict(node.attributes).items() }
)
parts = [node]
parts += ['{}="{}"'.format(k, v) for k,v in (attrs or {}).items() if not v is False and not v is True]
parts += ['{}'.format(k) for k,v in (attrs or {}).items() if v is True]
return '<' + ' '.join(parts) + '>'
def html_validate(logger, points, description_of_parse_location, text):
html, errors = html_parse(text)
if html:
logger.success(
'The {} contained proper HTML5 document, '
'e.g. all elements were recognized, '
'correctly closed '
'and having valid parent elements.'.format(
description_of_parse_location
),
points
)
return html
logger.fail(
'The {} did not contain a proper HTML5 document. '
'The possible reasons include unrecognized elements (tags), '
'failures to close an element with the corresponding ending </tag> '
'and elements that are located inside invalid parent element. '
'Below the raw output from the validator program is presented:\n'
'<ul>{}</ul>'.format(
description_of_parse_location,
'\n'.join('<li>{}</li>'.format(e) for e in errors)
),
points
)
return None
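# --- Illustrative sketch, not part of the original checker -----------------
# Shows how the Logger/Reporter pair and html_validate() compose; the HTML
# snippet and point values below are made up for the example, and this helper
# is defined only for demonstration and never called by the checks.
def _demo_html_check():
    log = Logger(HtmlListReporter())
    doc = html_validate(log, 1, 'submitted file',
                        '<!DOCTYPE html><html><head><title>t</title></head>'
                        '<body><p>hi</p></body></html>')
    if doc is not None:
        # html_require_child (defined below) walks the parsed DOM the same way.
        html_require_child(log, 1, doc, 'html')
    return str(log), log.points_total()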
def html_require_child(logger, points, node, name, attrs=None, recursion=False, parent_name=None):
match = html_find_children(node, name, attrs, recursion)
tag_str = html_print_string(name, attrs)
parent_str = parent_name or html_print_string(node)
if len(match) > 1:
logger.fail('More than one {} found inside {}.'.format(tag_str, parent_str), points)
return None
elif len(match) == 1:
logger.success('Found {} inside {}.'.format(tag_str, parent_str), points)
return match[0][1]
logger.fail('No {} found inside {}.'.format(tag_str, parent_str), points)
return None
def html_require_path(logger, points, node, path):
element = node
for name, attrs in path:
element = html_require_child(logger, 0, element, name, attrs)
if not element:
logger.add_points(points, False)
return None
logger.add_points(points, True)
return element
def html_require_text(logger, points, node, text, parent_name=None):
parent_str = parent_name or html_print_string(node)
if html_has_text(node, text):
logger.success('Element {} has text "{}".'.format(parent_str, text), points)
return node
wrong = ' '.join(html_node_text(node).split())
logger.fail('Element {} has not text "{}" but "{}".'.format(parent_str, text, wrong), points)
return None
def html_require_attributes(logger, points, node, attrs, parent_name=None):
parent_str = parent_name or html_print_string(node)
result = True
for k,v in (attrs or {}).items():
if node.hasAttribute(k):
if v is True:
logger.success('Element {} has attribute {}.'.format(parent_str, k))
elif v is False:
logger.fail('Element {} has forbidden attribute {}.'.format(parent_str, k))
result = False
elif node.getAttribute(k) == v:
logger.success('Element {} has expected attribute {}="{}".'.format(parent_str, k, v))
else:
logger.fail(
'Element {} has attribute {}="{}" '
'but value "{}" was expected.'.format(
parent_str, k, escape(node.getAttribute(k)), v)
)
result = False
elif v is False:
logger.success('Element {} does not have forbidden attribute {}.'.format(parent_str, k))
else:
logger.fail('Element {} does not have expected attribute {}.'.format(parent_str, k))
result = False
logger.add_points(points, result)
return node if result else None
def css_parse(text_or_node):
parser = tinycss.make_parser('page3')
css = parser.parse_stylesheet(html_cast_text(text_or_node))
if len(css.errors) == 0:
return (css, tuple())
return (
None,
('Line: {:d} Character: {:d} Error: {}'.format(
e.line, e.column, e.reason
) for e in css.errors)
)
def css_find_rules(css, selectors):
return [rule for rule in css.rules if rule.selector.as_css() in selectors]
def css_find_declarations(rules, properties):
return [dec for rule in rules for dec in rule.declarations if dec.name in properties]
def css_validate(logger, points, description_of_parse_location, text_or_node):
css, errors = css_parse(text_or_node)
if css:
logger.success(
'The {} contains valid CSS stylesheet syntax, '
'e.g. all ruleset declarations are enclosed in curly brackets <code>{{}}</code>, '
'all rules have property name and value separated by <code>:</code>-character '
'and end with <code>;</code>-character.'.format(
description_of_parse_location
),
points
)
return css
logger.fail(
'The {} did not contain valid CSS stylesheet syntax. The possible reasons include '
'failures to enclose ruleset declarions in curly brackets <code>{{}}</code>, '
'rules that do not separate name and value by <code>:</code>-character '
'or do not end with <code>;</code>-character. '
'Below the raw output from the validator program is presented:\n'
'<ul>{}</ul>'.format(
description_of_parse_location,
'\n'.join('<li>{}</li>'.format(e) for e in errors)
),
points
)
return None
def css_require_rule(logger, points, css, selectors):
rules = css_find_rules(css, selectors)
select_str = ', '.join(selectors)
if len(rules) == 0:
logger.fail(
'No rules found for selectors "{}".'.format(select_str),
points
)
elif len(rules) == 1:
logger.success(
'A rule for selectors "{}" found.'.format(select_str),
points
)
else:
logger.success(
'Multiple rules for selectors "{}" found. '
'The last one takes precedence.'.format(select_str),
points
)
return rules
def css_require_declarations(logger, points, rules, properties):
decs = css_find_declarations(rules, properties)
property_str = ', '.join(properties)
if len(decs) == 0:
logger.fail(
'No declarations found for properties "{}".'.format(property_str),
points
)
elif len(decs) == 1:
logger.success(
'A declaration for properties "{}" found.'.format(property_str),
points
)
else:
logger.success(
'Multiple declarations for properties "{}" found. '
'The last one takes precedence.'.format(property_str),
points
)
return decs
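# A minimal grading sketch (added illustration; the point values, selectors and file name
# are assumptions, not part of the original checker): validate a stylesheet, then require
# a rule and a declaration from it. `read_file`, `Logger` and `HtmlListReporter` are
# defined elsewhere in this module.
#
# logger = Logger(HtmlListReporter(), 1)
# css = css_validate(logger, 1, 'the file style.css', read_file('style.css'))
# if css:
#     rules = css_require_rule(logger, 1, css, ['body', 'html body'])
#     if rules:
#         css_require_declarations(logger, 1, rules, ['background-color', 'background'])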
def js_parse(text_or_node, module=False):
try:
js = (
esprima.parseScript(html_cast_text(text_or_node))
if not module else
esprima.parseModule(html_cast_text(text_or_node))
)
assert js.type == 'Program'
return (js, tuple())
except esprima.error_handler.Error as e:
return (None, [str(e)])
def js_validate(logger, points, description_of_parse_location, text_or_node, module=False):
js, errors = js_parse(text_or_node, module)
if js:
body = [s for s in js.body if s.type != 'EmptyStatement']
if len(body) == 0:
logger.fail(
'Empty JavaScript-code in {}.'.format(description_of_parse_location),
points
)
return None
logger.success(
'Validated JavaScript-code in {}.'.format(description_of_parse_location),
points
)
return body
logger.fail(
'Encountered a syntax error while parsing the JavaScript-code in {}. '
'Note that programming languages are picky and you need to write the commands precisely. '
'You should test your solution in a browser and check that no errors appear in the console panel. '
'The raw output from the parser program is shown below:\n'
'<ul>{}</ul>'.format(
description_of_parse_location,
'\n'.join('<li>{}</li>'.format(e) for e in errors)
),
points
)
return None
def js_find_variables(js, name, recursion=False):
vars = []
for s in js:
if s.type == 'VariableDeclaration':
vars.extend(js_find_variables(s.declarations, name, recursion))
if s.type == 'VariableDeclarator' and s.id.type == 'Identifier' and s.id.name == name:
vars.append(s.init)
if recursion and hasattr(s, 'body'):
bs = s.body if type(s.body) == list else [s.body]
vars.extend(js_find_variables(bs, name, recursion))
return vars
def js_find_functions(js, name, recursion=False):
funcs = []
for s in js:
if s.type == 'FunctionDeclaration' and s.id.type == 'Identifier' and s.id.name == name:
funcs.append(s)
if recursion and hasattr(s, 'body'):
bs = s.body if type(s.body) == list else [s.body]
funcs.extend(js_find_functions(bs, name, recursion))
funcs.extend(s for s in js_find_variables(js, name, recursion) if s.type == 'FunctionExpression')
return funcs
def js_require_variable(logger, points, js, name, recursion=False):
vars = js_find_variables(js, name, recursion)
if len(vars) == 0:
logger.fail('No variables found for name "{}".'.format(name), points)
elif len(vars) == 1:
logger.success('A variable of name "{}" found.'.format(name), points)
else:
logger.success(
'Multiple variables for name "{}" found. '
'The last one takes precedence.'.format(name),
points
)
return vars
def js_require_function(logger, points, js, name, recursion=False):
funcs = js_find_functions(js, name, recursion)
if len(funcs) == 0:
logger.fail('No functions found for name "{}".'.format(name), points)
elif len(funcs) == 1:
logger.success('A function of name "{}" found.'.format(name), points)
else:
logger.success(
'Multiple functions for name "{}" found. '
'The last one takes precedence.'.format(name),
points
)
return funcs
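# A minimal grading sketch (added illustration; the element and identifier names are
# assumptions): parse the contents of a <script> element and require one top-level
# function plus one variable, searching nested scopes for the latter.
#
# js = js_validate(logger, 1, 'the <script> element', script_node)
# if js:
#     js_require_function(logger, 1, js, 'initGame')
#     js_require_variable(logger, 1, js, 'score', recursion=True)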
# Command line interface:
def main(cmd, *arg):
logger = Logger(HtmlListReporter(), 1)
item = None
if cmd == 'html_parse' and len(arg) > 0:
item = html_validate(logger, 0, arg[0], read_file(arg[0]))
elif cmd == 'css_parse' and len(arg) > 0:
item = css_validate(logger, 0, arg[0], read_file(arg[0]))
elif cmd == 'js_parse' and len(arg) > 0:
item = js_validate(logger, 0, arg[0], read_file(arg[0]))
if item and len(arg) > 2:
if arg[1] == 'function':
if len(js_require_function(logger, 0, item, arg[2])) == 0:
item = None
elif arg[1] == 'variable':
if len(js_require_variable(logger, 0, item, arg[2])) == 0:
item = None
else:
logger.fail('Unknown command: {}'.format(cmd))
print(logger)
return item is not None
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print('Usage: cmd arguments..')
print(' html_parse file_name')
print(' css_parse file_name')
print(' js_parse file_name [function|variable name]')
sys.exit(0)
ok = main(sys.argv[1], *sys.argv[2:])
sys.exit(0 if ok else 1)
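# Example invocations (added illustration; the script file name is an assumption):
#   python3 checker.py html_parse index.html
#   python3 checker.py css_parse style.css
#   python3 checker.py js_parse app.js function initGame
# The process exits with status 0 when the check passes and 1 otherwise, so it can be
# chained in shell scripts with `&&`.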
| 35.9625
| 106
| 0.587939
|
1eeb430b7546a29f3f3580a0f36973e10eece483
| 167
|
py
|
Python
|
office_tracker/leave_tracker/admin.py
|
tanvir002700/tracker
|
567c3be2f36ac120fb412c06126cbd8fa72be4b9
|
[
"MIT"
] | null | null | null |
office_tracker/leave_tracker/admin.py
|
tanvir002700/tracker
|
567c3be2f36ac120fb412c06126cbd8fa72be4b9
|
[
"MIT"
] | 11
|
2020-06-05T18:04:42.000Z
|
2022-03-11T23:19:32.000Z
|
office_tracker/leave_tracker/admin.py
|
tanvir002700/tracker
|
567c3be2f36ac120fb412c06126cbd8fa72be4b9
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Leave, Season, UserSeason
admin.site.register(Leave)
admin.site.register(Season)
admin.site.register(UserSeason)
| 23.857143
| 45
| 0.820359
|
b4760799a86cfa3cb9c5b7217ed34eaf3f8e2d94
| 367
|
py
|
Python
|
chat/signals.py
|
subsystemcoding/socialpixel_backend
|
648f9441370e9a536896ea5807a581b594a9db78
|
[
"MIT"
] | null | null | null |
chat/signals.py
|
subsystemcoding/socialpixel_backend
|
648f9441370e9a536896ea5807a581b594a9db78
|
[
"MIT"
] | null | null | null |
chat/signals.py
|
subsystemcoding/socialpixel_backend
|
648f9441370e9a536896ea5807a581b594a9db78
|
[
"MIT"
] | 3
|
2021-01-28T10:05:15.000Z
|
2021-03-20T18:21:34.000Z
|
from django.db.models.signals import post_save
from .models import ChatRoom, Message
from django.dispatch import receiver
@receiver(post_save, sender=Message)
def update_last_messaged_timestamp(sender, instance, **kwargs):
chatroom = ChatRoom.objects.get(id = instance.room.id)
chatroom.last_messaged_timestamp = instance.timestamp
chatroom.save()
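# A minimal wiring sketch (added illustration; the app config below is an assumption,
# not part of this module): the @receiver decorator only connects the handler once this
# module is imported, which is conventionally done from AppConfig.ready().
#
# # chat/apps.py
# from django.apps import AppConfig
#
# class ChatConfig(AppConfig):
#     name = 'chat'
#
#     def ready(self):
#         from . import signals  # noqa: F401  -- registers update_last_messaged_timestamp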
| 30.583333
| 63
| 0.784741
|
e61c1e2f9a3ca87779d392d710b92721bf1308d0
| 6,349
|
py
|
Python
|
ax/modelbridge/transforms/stratified_standardize_y.py
|
EricZLou/Ax
|
3f8fc6f4a055e93cb69fda3799be41ee9572ef02
|
[
"MIT"
] | null | null | null |
ax/modelbridge/transforms/stratified_standardize_y.py
|
EricZLou/Ax
|
3f8fc6f4a055e93cb69fda3799be41ee9572ef02
|
[
"MIT"
] | null | null | null |
ax/modelbridge/transforms/stratified_standardize_y.py
|
EricZLou/Ax
|
3f8fc6f4a055e93cb69fda3799be41ee9572ef02
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from typing import TYPE_CHECKING, DefaultDict, List, Optional, Tuple
import numpy as np
from ax.core.observation import ObservationData, ObservationFeatures
from ax.core.optimization_config import OptimizationConfig
from ax.core.parameter import ChoiceParameter
from ax.core.search_space import SearchSpace
from ax.core.types import TConfig, TParamValue
from ax.modelbridge.transforms.base import Transform
from ax.modelbridge.transforms.standardize_y import compute_standardization_parameters
from ax.utils.common.logger import get_logger
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import modelbridge as modelbridge_module # noqa F401 # pragma: no cover
logger = get_logger("StratifiedStandardizeY")
class StratifiedStandardizeY(Transform):
"""Standardize Y, separately for each metric and for each value of a
ChoiceParameter.
The name of the parameter by which to stratify the standardization can be
specified in config["parameter_name"]. If not specified, will use a task
parameter if search space contains exactly 1 task parameter, and will raise
an exception otherwise.
The stratification parameter must be fixed during generation if there are
outcome constraints, in order to apply the standardization to the
constraints.
Transform is done in-place.
"""
def __init__(
self,
search_space: SearchSpace,
observation_features: List[ObservationFeatures],
observation_data: List[ObservationData],
config: Optional[TConfig] = None,
) -> None:
# Get parameter name for standardization.
if config is not None and "parameter_name" in config:
# pyre: Attribute `p_name` declared in class `ax.modelbridge.
# pyre: transforms.stratified_standardize_y.
# pyre: StratifiedStandardizeY` has type `str` but is used as type
# pyre-fixme[8]: `typing.Union[float, int, str]`.
self.p_name: str = config["parameter_name"]
strat_p = search_space.parameters[self.p_name]
if not isinstance(strat_p, ChoiceParameter):
raise ValueError(f"{self.p_name} not a ChoiceParameter")
else:
# See if there is a task parameter
task_parameters = [
p.name
for p in search_space.parameters.values()
if isinstance(p, ChoiceParameter) and p.is_task
]
if len(task_parameters) == 0:
raise ValueError(
"Must specify parameter for stratified standardization"
)
elif len(task_parameters) != 1:
raise ValueError(
"Must specify which task parameter to use for stratified "
"standardization"
)
self.p_name = task_parameters[0]
# Compute means and SDs
Ys: DefaultDict[Tuple[str, TParamValue], List[float]] = defaultdict(list)
for j, obsd in enumerate(observation_data):
v = observation_features[j].parameters[self.p_name]
for i, m in enumerate(obsd.metric_names):
Ys[(m, v)].append(obsd.means[i])
# Expected `DefaultDict[typing.Union[str, typing.Tuple[str,
# Optional[typing.Union[bool, float, str]]]], List[float]]` for 1st anonymous
# parameter to call
# `ax.modelbridge.transforms.standardize_y.compute_standardization_parameters`
# but got `DefaultDict[typing.Tuple[str, Optional[typing.Union[bool, float,
# str]]], List[float]]`.
# pyre-fixme[6]: Expected `DefaultDict[Union[str, Tuple[str, Optional[Union[b...
self.Ymean, self.Ystd = compute_standardization_parameters(Ys)
def transform_observation_data(
self,
observation_data: List[ObservationData],
observation_features: List[ObservationFeatures],
) -> List[ObservationData]:
# Transform observation data
for j, obsd in enumerate(observation_data):
v = observation_features[j].parameters[self.p_name]
means = np.array([self.Ymean[(m, v)] for m in obsd.metric_names])
stds = np.array([self.Ystd[(m, v)] for m in obsd.metric_names])
obsd.means = (obsd.means - means) / stds
obsd.covariance /= np.dot(stds[:, None], stds[:, None].transpose())
return observation_data
def transform_optimization_config(
self,
optimization_config: OptimizationConfig,
modelbridge: Optional["modelbridge_module.base.ModelBridge"],
fixed_features: ObservationFeatures,
) -> OptimizationConfig:
if len(optimization_config.outcome_constraints) == 0:
return optimization_config
if self.p_name not in fixed_features.parameters:
raise ValueError(
f"StratifiedStandardizeY transform requires {self.p_name} to be fixed "
"during generation."
)
v = fixed_features.parameters[self.p_name]
for c in optimization_config.outcome_constraints:
if c.relative:
raise ValueError(
"StratifiedStandardizeY transform does not support relative "
f"constraint {c}"
)
c.bound = (c.bound - self.Ymean[(c.metric.name, v)]) / self.Ystd[
(c.metric.name, v)
]
return optimization_config
def untransform_observation_data(
self,
observation_data: List[ObservationData],
observation_features: List[ObservationFeatures],
) -> List[ObservationData]:
for j, obsd in enumerate(observation_data):
v = observation_features[j].parameters[self.p_name]
means = np.array([self.Ymean[(m, v)] for m in obsd.metric_names])
stds = np.array([self.Ystd[(m, v)] for m in obsd.metric_names])
obsd.means = obsd.means * stds + means
obsd.covariance *= np.dot(stds[:, None], stds[:, None].transpose())
return observation_data
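# A minimal numeric sketch (added illustration with made-up values, not part of the Ax
# code): the transform keeps one (mean, std) pair per (metric, stratification value),
# so observations from different tasks are standardized independently of each other.
#
# from collections import defaultdict
# import numpy as np
# ys = defaultdict(list)
# ys[("m1", "task_a")] = [1.0, 2.0, 3.0]
# ys[("m1", "task_b")] = [100.0, 110.0, 120.0]
# params = {k: (np.mean(v), np.std(v)) for k, v in ys.items()}
# # ("m1", "task_a") -> mean 2.0,   std ~0.82
# # ("m1", "task_b") -> mean 110.0, std ~8.16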
| 44.090278
| 88
| 0.653016
|
18b38355278e48cd97042dd28f0bb33437ecfe95
| 84
|
py
|
Python
|
belt - 1104/Helper/File/__init__.py
|
jackson-code/Delta
|
ff4c1df4dc75d9ff88025d37d5cd3216a5f353ff
|
[
"Unlicense"
] | null | null | null |
belt - 1104/Helper/File/__init__.py
|
jackson-code/Delta
|
ff4c1df4dc75d9ff88025d37d5cd3216a5f353ff
|
[
"Unlicense"
] | null | null | null |
belt - 1104/Helper/File/__init__.py
|
jackson-code/Delta
|
ff4c1df4dc75d9ff88025d37d5cd3216a5f353ff
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 10 21:51:43 2021
@author: user
"""
| 10.5
| 35
| 0.559524
|
b483d620c592123fa8aea9f2a322539ec3a25c69
| 187
|
py
|
Python
|
recommendation/admin.py
|
Zeble1603/cv-django
|
329d8d471c92dc0ce5f4bfb2bb5212fc1c8c34b4
|
[
"MIT"
] | 1
|
2021-10-19T21:22:38.000Z
|
2021-10-19T21:22:38.000Z
|
recommendation/admin.py
|
Zeble1603/cv-django
|
329d8d471c92dc0ce5f4bfb2bb5212fc1c8c34b4
|
[
"MIT"
] | null | null | null |
recommendation/admin.py
|
Zeble1603/cv-django
|
329d8d471c92dc0ce5f4bfb2bb5212fc1c8c34b4
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.admin.decorators import register
from .models import Recommendation
# Register your models here.
admin.site.register(Recommendation)
| 26.714286
| 52
| 0.839572
|
435d0ffb24d74c90ebec2afa395514d31a850d55
| 6,209
|
py
|
Python
|
scripts/svi_gmm_tfp_original.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 2
|
2021-08-22T14:40:18.000Z
|
2021-12-07T02:46:00.000Z
|
scripts/svi_gmm_tfp_original.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 9
|
2021-03-31T20:18:21.000Z
|
2022-03-12T00:52:47.000Z
|
scripts/svi_gmm_tfp_original.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 1
|
2021-06-21T01:18:07.000Z
|
2021-06-21T01:18:07.000Z
|
# SVI for a GMM
# https://github.com/brendanhasz/svi-gaussian-mixture-model/blob/master/BayesianGaussianMixtureModel.ipynb
# MIT License
#pip install tf-nightly
#pip install --upgrade tfp-nightly -q
# Imports
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
from time import time
# Plot settings
#%config InlineBackend.figure_format = 'svg'
# Random seed
np.random.seed(12345)
tf.random.set_seed(12345)
# Generate some data
N = 3000
X = np.random.randn(N, 2).astype('float32')
X[:1000, :] += [2, 0]
X[1000:2000, :] -= [2, 4]
X[2000:, :] += [-2, 4]
# Plot the data
plt.plot(X[:, 0], X[:, 1], '.')
plt.axis('equal')
plt.show()
# Make a TensorFlow Dataset from that data
batch_size = 500
dataset = tf.data.Dataset.from_tensor_slices(
(X)).shuffle(10000).batch(batch_size)
class GaussianMixtureModel(tf.keras.Model):
"""A Bayesian Gaussian mixture model.
Assumes Gaussians' variances in each dimension are independent.
Parameters
----------
Nc : int > 0
Number of mixture components.
Nd : int > 0
Number of dimensions.
"""
def __init__(self, Nc, Nd):
# Initialize
super(GaussianMixtureModel, self).__init__()
self.Nc = Nc
self.Nd = Nd
# Variational distribution variables for means
self.locs = tf.Variable(tf.random.normal((Nc, Nd)))
self.scales = tf.Variable(tf.pow(tf.random.gamma((Nc, Nd), 5, 5), -0.5))
# Variational distribution variables for standard deviations
self.alpha = tf.Variable(tf.random.uniform((Nc, Nd), 4., 6.))
self.beta = tf.Variable(tf.random.uniform((Nc, Nd), 4., 6.))
# Variational distribution variables for component weights
self.counts = tf.Variable(2*tf.ones((Nc,)))
# Prior distributions for the means
self.mu_prior = tfd.Normal(tf.zeros((Nc, Nd)), tf.ones((Nc, Nd)))
# Prior distributions for the standard deviations
self.sigma_prior = tfd.Gamma(5*tf.ones((Nc, Nd)), 5*tf.ones((Nc, Nd)))
# Prior distributions for the component weights
self.theta_prior = tfd.Dirichlet(2*tf.ones((Nc,)))
def call(self, x, sampling=True, independent=True):
"""Compute losses given a batch of data.
Parameters
----------
x : tf.Tensor
A batch of data
sampling : bool
Whether to sample from the variational posterior
distributions (if True, the default), or just use the
mean of the variational distributions (if False).
Returns
-------
log_likelihoods : tf.Tensor
Log likelihood for each sample
kl_sum : tf.Tensor
Sum of the KL divergences between the variational
distributions and their priors
"""
# The variational distributions
mu = tfd.Normal(self.locs, self.scales)
sigma = tfd.Gamma(self.alpha, self.beta)
theta = tfd.Dirichlet(self.counts)
# Sample from the variational distributions
if sampling:
Nb = x.shape[0] #number of samples in the batch
mu_sample = mu.sample(Nb)
sigma_sample = tf.pow(sigma.sample(Nb), -0.5)
theta_sample = theta.sample(Nb)
else:
mu_sample = tf.reshape(mu.mean(), (1, self.Nc, self.Nd))
sigma_sample = tf.pow(tf.reshape(sigma.mean(), (1, self.Nc, self.Nd)), -0.5)
theta_sample = tf.reshape(theta.mean(), (1, self.Nc))
# The mixture density
density = tfd.Mixture(
cat=tfd.Categorical(probs=theta_sample),
components=[
tfd.MultivariateNormalDiag(loc=mu_sample[:, i, :],
scale_diag=sigma_sample[:, i, :])
for i in range(self.Nc)])
# Compute the mean log likelihood
log_likelihoods = density.log_prob(x)
# Compute the KL divergence sum
mu_div = tf.reduce_sum(tfd.kl_divergence(mu, self.mu_prior))
sigma_div = tf.reduce_sum(tfd.kl_divergence(sigma, self.sigma_prior))
theta_div = tf.reduce_sum(tfd.kl_divergence(theta, self.theta_prior))
kl_sum = mu_div + sigma_div + theta_div
# Return both losses
return log_likelihoods, kl_sum
# A GMM with 3 components in 2 dimensions
model = GaussianMixtureModel(3, 2)
# Use the Adam optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
@tf.function
def train_step(data):
with tf.GradientTape() as tape:
log_likelihoods, kl_sum = model(data)
elbo_loss = kl_sum/N - tf.reduce_mean(log_likelihoods)
gradients = tape.gradient(elbo_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
# Fit the model
EPOCHS = 1000
time_start = time()
for epoch in range(EPOCHS):
for data in dataset:
train_step(data)
elapsed_time = (time() - time_start)
#print('method {}'.format(method))
print(elapsed_time)
# Compute log likelihood at each point on a grid
Np = 100 #number of grid points
Xp, Yp = np.meshgrid(np.linspace(-6, 6, Np), np.linspace(-6, 6, Np))
Pp = np.column_stack([Xp.flatten(), Yp.flatten()]).astype('float32')
Z, _ = model(Pp, sampling=False)
Z = np.reshape(Z, (Np, Np))
# Show the fit mixture density
plt.figure()
plt.imshow(np.exp(Z),
extent=(-6, 6, -6, 6),
origin='lower')
cbar = plt.colorbar()
cbar.ax.set_ylabel('Likelihood')
model.locs
model.trainable_variables
# Sample from the std deviation variational posterior
stds = tf.pow(tfd.Gamma(model.alpha, model.beta).sample(10000), -0.5)
# Plot the samples
plt.figure()
sns.distplot(stds[:, 0, 0])
# Sample from the mean variational posterior
means = tfd.Normal(model.locs, model.scales).sample(10000)
# Plot the mean samples for a single
plt.figure()
sns.kdeplot(means[:, 0, 0].numpy(),
means[:, 0, 1].numpy(),
n_levels=10)
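# Added sketch (not part of the original notebook): hard cluster assignments from the
# variational posterior means, mirroring the model's sampling=False branch.
mu_hat = model.locs.numpy()                                           # (Nc, Nd) component means
sigma_hat = tf.pow(tfd.Gamma(model.alpha, model.beta).mean(), -0.5).numpy()
theta_hat = tfd.Dirichlet(model.counts).mean().numpy()                # (Nc,) mixture weights
log_joint = np.stack(
    [tfd.MultivariateNormalDiag(loc=mu_hat[i], scale_diag=sigma_hat[i]).log_prob(X).numpy()
     for i in range(model.Nc)], axis=1) + np.log(theta_hat)           # (N, Nc) joint log-probs
assignments = np.argmax(log_joint, axis=1)                            # hard cluster labels
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=assignments, s=3)
plt.axis('equal')
plt.show()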
| 31.045
| 106
| 0.620229
|
475faa198f20b2e730c6757fe7316a4108afddf6
| 702
|
py
|
Python
|
E1a_hello_world.py
|
charles-stan/learn_python_Stanier
|
740a7104fcbd739663d703d3770f9e31509300f8
|
[
"MIT"
] | 2
|
2019-10-04T14:53:20.000Z
|
2019-10-29T18:16:15.000Z
|
E1a_hello_world.py
|
charles-stan/learn_python_Stanier
|
740a7104fcbd739663d703d3770f9e31509300f8
|
[
"MIT"
] | null | null | null |
E1a_hello_world.py
|
charles-stan/learn_python_Stanier
|
740a7104fcbd739663d703d3770f9e31509300f8
|
[
"MIT"
] | null | null | null |
"""
Hello World Python Script
Learning Example for Dr. Stanier's classes
File: E1a_hello_world.py
Author: Charles Stanier, charles-stanier@uiowa.edu
Date: August 9, 2019
Written/Tested In: Python 3.7.3
Program Objective: Get students to open a python editor, write and save a script, choose a location for saving scripts.
Modifications: none so far
"""
# normally we would start with importing libraries but this script does not require any
print("Hello World!")
# want to see documentation for print, see https://www.programiz.com/python-programming/methods/built-in/print
# choose a location to save your python scripts, and use the same location throughout the course
| 31.909091
| 120
| 0.75641
|
089a76f14513978314693848b0871f8ba8757cfa
| 1,147
|
py
|
Python
|
packages/wes_adapter/amazon_genomics/wes/adapters/util/util.py
|
elliot-smith/amazon-genomics-cli
|
371c5e2fb0f34c892839218b594380a7b67e81ab
|
[
"Apache-2.0"
] | 49
|
2021-09-27T04:12:15.000Z
|
2022-03-30T15:49:45.000Z
|
packages/wes_adapter/amazon_genomics/wes/adapters/util/util.py
|
elliot-smith/amazon-genomics-cli
|
371c5e2fb0f34c892839218b594380a7b67e81ab
|
[
"Apache-2.0"
] | 155
|
2021-09-27T03:57:28.000Z
|
2022-03-31T17:01:52.000Z
|
packages/wes_adapter/amazon_genomics/wes/adapters/util/util.py
|
elliot-smith/amazon-genomics-cli
|
371c5e2fb0f34c892839218b594380a7b67e81ab
|
[
"Apache-2.0"
] | 35
|
2021-09-27T16:12:10.000Z
|
2022-03-17T04:53:01.000Z
|
def describe_batch_jobs_with_tag(tag_key, tag_value, aws_batch, aws_tags):
"""
Retrieve descriptions of all Batch jobs with the given tag
"""
pagination_token = None
all_descriptions = []
get_resources_kwargs = {
"TagFilters": [{"Key": tag_key, "Values": [tag_value]}],
"ResourceTypeFilters": ["batch:job"],
}
while True:
if pagination_token:
get_resources_kwargs["PaginationToken"] = pagination_token
resources = aws_tags.get_resources(**get_resources_kwargs)
resource_tag_mappings = resources.get("ResourceTagMappingList", [])
job_arns = map(
lambda tag_mapping: tag_mapping["ResourceARN"], resource_tag_mappings
)
job_ids = list(map(job_id_from_arn, job_arns))
if job_ids:
descriptions = aws_batch.describe_jobs(jobs=job_ids)["jobs"]
all_descriptions += descriptions
pagination_token = resources.get("PaginationToken", None)
if not pagination_token:
return all_descriptions
def job_id_from_arn(job_arn: str) -> str:
return job_arn[job_arn.rindex("/") + 1 :]
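# A minimal usage sketch (added illustration; the tag key/value and client setup are
# assumptions, not part of this module):
#
# import boto3
# aws_batch = boto3.client("batch")
# aws_tags = boto3.client("resourcegroupstaggingapi")
# jobs = describe_batch_jobs_with_tag("run-id", "run-1234", aws_batch, aws_tags)
# statuses = {job["jobId"]: job["status"] for job in jobs}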
| 38.233333
| 81
| 0.659111
|
c0bd46efc1fcb1835eba98979b5ad2d22583c213
| 1,476
|
py
|
Python
|
pype/hosts/maya/plugins/publish/validate_yeti_rig_input_in_instance.py
|
simonebarbieri/pype
|
a6dc83aa1300738749cbe8e5e2e6d2d1794e0289
|
[
"MIT"
] | null | null | null |
pype/hosts/maya/plugins/publish/validate_yeti_rig_input_in_instance.py
|
simonebarbieri/pype
|
a6dc83aa1300738749cbe8e5e2e6d2d1794e0289
|
[
"MIT"
] | null | null | null |
pype/hosts/maya/plugins/publish/validate_yeti_rig_input_in_instance.py
|
simonebarbieri/pype
|
a6dc83aa1300738749cbe8e5e2e6d2d1794e0289
|
[
"MIT"
] | null | null | null |
from maya import cmds
import pyblish.api
import pype.api
import pype.hosts.maya.api.action
class ValidateYetiRigInputShapesInInstance(pyblish.api.Validator):
"""Validate if all input nodes are part of the instance's hierarchy"""
order = pype.api.ValidateContentsOrder
hosts = ["maya"]
families = ["yetiRig"]
label = "Yeti Rig Input Shapes In Instance"
actions = [pype.hosts.maya.api.action.SelectInvalidAction]
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Yeti Rig has invalid input meshes")
@classmethod
def get_invalid(cls, instance):
input_set = next((i for i in instance if i == "input_SET"), None)
assert input_set, "Current %s instance has no `input_SET`" % instance
# Get all children, we do not care about intermediates
input_nodes = cmds.ls(cmds.sets(input_set, query=True), long=True)
dag = cmds.ls(input_nodes, dag=True, long=True)
shapes = cmds.ls(dag, long=True, shapes=True, noIntermediate=True)
# Allow publish without input meshes.
if not shapes:
cls.log.info("Found no input meshes for %s, skipping ..."
% instance)
return []
# check if input node is part of groomRig instance
instance_lookup = set(instance[:])
invalid = [s for s in shapes if s not in instance_lookup]
return invalid
| 32.8
| 77
| 0.651762
|
d33762966527038a974ba6064998f21701fcfe26
| 2,078
|
py
|
Python
|
experiment.py
|
embrace-inpe/cycle-slip-correction
|
c465dd4d45ea7df63a18749e26ba4bf0aa27eb59
|
[
"MIT"
] | 6
|
2019-05-20T21:23:41.000Z
|
2021-06-23T15:00:30.000Z
|
experiment.py
|
embrace-inpe/cycle-slip-correction
|
c465dd4d45ea7df63a18749e26ba4bf0aa27eb59
|
[
"MIT"
] | null | null | null |
experiment.py
|
embrace-inpe/cycle-slip-correction
|
c465dd4d45ea7df63a18749e26ba4bf0aa27eb59
|
[
"MIT"
] | 5
|
2018-12-27T16:46:45.000Z
|
2020-09-14T13:44:00.000Z
|
import numpy as np
import collections
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from scipy.signal import find_peaks
array_inteiro = np.array([0, 1, 2, 3, np.nan, np.nan, np.nan, np.nan, 23, 24, 25, np.nan, np.nan, 26, 27, np.nan, np.nan, np.nan, np.nan, 57, 58])
array_inteiro_not_nan = array_inteiro[~np.isnan(array_inteiro)]
print(array_inteiro)
valid_pos = np.where(~np.isnan(array_inteiro))
valid_pos = np.array(valid_pos).flatten().tolist()
nan_pos = np.where(np.isnan(array_inteiro))
nan_pos = np.array(nan_pos).flatten().tolist()
# fourth_der_array_inteiro_not_nan = np.diff(array_inteiro_not_nan, n=1)
# indexes_not_nan = find_peaks(abs(fourth_der_array_inteiro_not_nan))[0]
# indexes_not_nan = np.array(indexes_not_nan)
#
# indexes_before = []
#
# for index in indexes_not_nan:
# element = array_inteiro_not_nan.item(index)
# pos_before = np.where(array_inteiro == element)
# pos_before = np.array(pos_before).flatten().tolist()
# indexes_before.append(pos_before[0])
# np_zeros = np.zeros(len(nan_pos))
# array_inteiro_not_nan = np.concatenate((array_inteiro_not_nan, np_zeros), axis=0)
# nan_pos = tuple((item, np.nan) for item in nan_pos)
# print(nan_pos)
# shift each index by the number of preceding NaNs: np.insert indexes the pre-insert array
array_inteiro_not_nan = np.insert(array_inteiro_not_nan, np.array(nan_pos) - np.arange(len(nan_pos)), np.nan)
print(array_inteiro_not_nan)
# fig, axs = plt.subplots(3, 1)
#
# axs[0].plot(array_inteiro)
# axs[0].set_title('Teste')
# axs[0].set_ylabel('array')
# axs[0].grid(True)
#
# axs[1].plot(array_inteiro_not_nan)
# axs[1].set_ylabel('array_not_nan')
# axs[1].grid(True)
#
# axs[2].plot(fourth_der_array_inteiro_not_nan)
# axs[2].set_xlabel('Time')
# axs[2].set_ylabel('4th derivative')
# axs[2].grid(True)
#
# axs[1].scatter(indexes_not_nan, array_inteiro_not_nan[indexes_not_nan], marker='x', color='red', label='Cycle-slip')
# axs[2].scatter(indexes_not_nan, fourth_der_array_inteiro_not_nan[indexes_not_nan], marker='x', color='red', label='Cycle-slip')
#
# plt.savefig("Teste.pdf")
#
#
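# Added sanity check (not part of the original experiment): the rebuilt array should
# have NaNs exactly where the original did, and identical values everywhere else.
assert np.array_equal(np.isnan(array_inteiro), np.isnan(array_inteiro_not_nan))
assert np.array_equal(array_inteiro[valid_pos], array_inteiro_not_nan[valid_pos])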
| 30.558824
| 146
| 0.743985
|
ed919957566b03dee959a18ea597dda8d2f4716c
| 51,651
|
py
|
Python
|
ironic/tests/unit/drivers/modules/ansible/test_deploy.py
|
yanndegat/ironic
|
8857ec76443dea7778bb9c0d66568304e52495e5
|
[
"Apache-2.0"
] | 350
|
2015-01-02T09:35:49.000Z
|
2022-03-28T09:25:59.000Z
|
ironic/tests/unit/drivers/modules/ansible/test_deploy.py
|
yanndegat/ironic
|
8857ec76443dea7778bb9c0d66568304e52495e5
|
[
"Apache-2.0"
] | 7
|
2015-05-04T16:12:41.000Z
|
2021-08-31T12:27:27.000Z
|
ironic/tests/unit/drivers/modules/ansible/test_deploy.py
|
yanndegat/ironic
|
8857ec76443dea7778bb9c0d66568304e52495e5
|
[
"Apache-2.0"
] | 333
|
2015-01-06T09:09:22.000Z
|
2022-02-20T08:11:40.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from unittest import mock
from ironic_lib import utils as irlib_utils
from oslo_concurrency import processutils
from ironic.common import exception
from ironic.common import states
from ironic.common import utils as com_utils
from ironic.conductor import steps
from ironic.conductor import task_manager
from ironic.conductor import utils
from ironic.drivers.modules.ansible import deploy as ansible_deploy
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic.drivers.modules.network import flat as flat_network
from ironic.drivers.modules import pxe
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as object_utils
INSTANCE_INFO = {
'image_source': 'fake-image',
'image_url': 'http://image',
'image_checksum': 'checksum',
'image_disk_format': 'qcow2',
'root_mb': 5120,
'swap_mb': 0,
'ephemeral_mb': 0
}
DRIVER_INFO = {
'deploy_kernel': 'glance://deploy_kernel_uuid',
'deploy_ramdisk': 'glance://deploy_ramdisk_uuid',
'ansible_username': 'test',
'ansible_key_file': '/path/key',
'ipmi_address': '127.0.0.1',
}
DRIVER_INTERNAL_INFO = {
'is_whole_disk_image': True,
'clean_steps': []
}
class AnsibleDeployTestCaseBase(db_base.DbTestCase):
def setUp(self):
super(AnsibleDeployTestCaseBase, self).setUp()
self.config(enabled_hardware_types=['manual-management'],
enabled_deploy_interfaces=['ansible'],
enabled_power_interfaces=['fake'],
enabled_management_interfaces=['fake'])
node = {
'driver': 'manual-management',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
}
self.node = object_utils.create_test_node(self.context, **node)
class TestAnsibleMethods(AnsibleDeployTestCaseBase):
def test__parse_ansible_driver_info(self):
self.node.driver_info['ansible_deploy_playbook'] = 'spam.yaml'
playbook, user, key = ansible_deploy._parse_ansible_driver_info(
self.node, 'deploy')
self.assertEqual('spam.yaml', playbook)
self.assertEqual('test', user)
self.assertEqual('/path/key', key)
def test__parse_ansible_driver_info_defaults(self):
self.node.driver_info.pop('ansible_username')
self.node.driver_info.pop('ansible_key_file')
self.config(group='ansible',
default_username='spam',
default_key_file='/ham/eggs',
default_deploy_playbook='parrot.yaml')
playbook, user, key = ansible_deploy._parse_ansible_driver_info(
self.node, 'deploy')
# testing absolute path to the playbook
self.assertEqual('parrot.yaml', playbook)
self.assertEqual('spam', user)
self.assertEqual('/ham/eggs', key)
def test__parse_ansible_driver_info_no_playbook(self):
self.assertRaises(exception.IronicException,
ansible_deploy._parse_ansible_driver_info,
self.node, 'test')
def test__get_node_ip(self):
di_info = self.node.driver_internal_info
di_info['agent_url'] = 'http://1.2.3.4:5678'
self.node.driver_internal_info = di_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual('1.2.3.4',
ansible_deploy._get_node_ip(task))
@mock.patch.object(com_utils, 'execute', return_value=('out', 'err'),
autospec=True)
def test__run_playbook(self, execute_mock):
self.config(group='ansible', playbooks_path='/path/to/playbooks')
self.config(group='ansible', config_file_path='/path/to/config')
self.config(group='ansible', verbosity=3)
self.config(group='ansible', ansible_extra_args='--timeout=100')
extra_vars = {'foo': 'bar'}
ansible_deploy._run_playbook(self.node, 'deploy',
extra_vars, '/path/to/key',
tags=['spam'], notags=['ham'])
execute_mock.assert_called_once_with(
'env', 'ANSIBLE_CONFIG=/path/to/config',
'ansible-playbook', '/path/to/playbooks/deploy', '-i',
'/path/to/playbooks/inventory', '-e', '{"ironic": {"foo": "bar"}}',
'--tags=spam', '--skip-tags=ham',
'--private-key=/path/to/key', '-vvv', '--timeout=100')
@mock.patch.object(com_utils, 'execute', return_value=('out', 'err'),
autospec=True)
def test__run_playbook_default_verbosity_nodebug(self, execute_mock):
self.config(group='ansible', playbooks_path='/path/to/playbooks')
self.config(group='ansible', config_file_path='/path/to/config')
self.config(debug=False)
extra_vars = {'foo': 'bar'}
ansible_deploy._run_playbook(self.node, 'deploy', extra_vars,
'/path/to/key')
execute_mock.assert_called_once_with(
'env', 'ANSIBLE_CONFIG=/path/to/config',
'ansible-playbook', '/path/to/playbooks/deploy', '-i',
'/path/to/playbooks/inventory', '-e', '{"ironic": {"foo": "bar"}}',
'--private-key=/path/to/key')
@mock.patch.object(com_utils, 'execute', return_value=('out', 'err'),
autospec=True)
def test__run_playbook_default_verbosity_debug(self, execute_mock):
self.config(group='ansible', playbooks_path='/path/to/playbooks')
self.config(group='ansible', config_file_path='/path/to/config')
self.config(debug=True)
extra_vars = {'foo': 'bar'}
ansible_deploy._run_playbook(self.node, 'deploy', extra_vars,
'/path/to/key')
execute_mock.assert_called_once_with(
'env', 'ANSIBLE_CONFIG=/path/to/config',
'ansible-playbook', '/path/to/playbooks/deploy', '-i',
'/path/to/playbooks/inventory', '-e', '{"ironic": {"foo": "bar"}}',
'--private-key=/path/to/key', '-vvvv')
@mock.patch.object(com_utils, 'execute', return_value=('out', 'err'),
autospec=True)
def test__run_playbook_ansible_interpreter_python3(self, execute_mock):
self.config(group='ansible', playbooks_path='/path/to/playbooks')
self.config(group='ansible', config_file_path='/path/to/config')
self.config(group='ansible', verbosity=3)
self.config(group='ansible',
default_python_interpreter='/usr/bin/python3')
self.config(group='ansible', ansible_extra_args='--timeout=100')
extra_vars = {'foo': 'bar'}
ansible_deploy._run_playbook(self.node, 'deploy',
extra_vars, '/path/to/key',
tags=['spam'], notags=['ham'])
execute_mock.assert_called_once_with(
'env', 'ANSIBLE_CONFIG=/path/to/config',
'ansible-playbook', '/path/to/playbooks/deploy', '-i',
'/path/to/playbooks/inventory', '-e',
mock.ANY, '--tags=spam', '--skip-tags=ham',
'--private-key=/path/to/key', '-vvv', '--timeout=100')
all_vars = execute_mock.call_args[0][7]
self.assertEqual({"ansible_python_interpreter": "/usr/bin/python3",
"ironic": {"foo": "bar"}},
json.loads(all_vars))
@mock.patch.object(com_utils, 'execute', return_value=('out', 'err'),
autospec=True)
def test__run_playbook_ansible_interpreter_override(self, execute_mock):
self.config(group='ansible', playbooks_path='/path/to/playbooks')
self.config(group='ansible', config_file_path='/path/to/config')
self.config(group='ansible', verbosity=3)
self.config(group='ansible',
default_python_interpreter='/usr/bin/python3')
self.config(group='ansible', ansible_extra_args='--timeout=100')
self.node.driver_info['ansible_python_interpreter'] = (
'/usr/bin/python4')
extra_vars = {'foo': 'bar'}
ansible_deploy._run_playbook(self.node, 'deploy',
extra_vars, '/path/to/key',
tags=['spam'], notags=['ham'])
execute_mock.assert_called_once_with(
'env', 'ANSIBLE_CONFIG=/path/to/config',
'ansible-playbook', '/path/to/playbooks/deploy', '-i',
'/path/to/playbooks/inventory', '-e',
mock.ANY, '--tags=spam', '--skip-tags=ham',
'--private-key=/path/to/key', '-vvv', '--timeout=100')
all_vars = execute_mock.call_args[0][7]
self.assertEqual({"ansible_python_interpreter": "/usr/bin/python4",
"ironic": {"foo": "bar"}},
json.loads(all_vars))
@mock.patch.object(com_utils, 'execute',
side_effect=processutils.ProcessExecutionError(
description='VIKINGS!'),
autospec=True)
def test__run_playbook_fail(self, execute_mock):
self.config(group='ansible', playbooks_path='/path/to/playbooks')
self.config(group='ansible', config_file_path='/path/to/config')
self.config(debug=False)
extra_vars = {'foo': 'bar'}
exc = self.assertRaises(exception.InstanceDeployFailure,
ansible_deploy._run_playbook,
self.node, 'deploy', extra_vars,
'/path/to/key')
self.assertIn('VIKINGS!', str(exc))
execute_mock.assert_called_once_with(
'env', 'ANSIBLE_CONFIG=/path/to/config',
'ansible-playbook', '/path/to/playbooks/deploy', '-i',
'/path/to/playbooks/inventory', '-e', '{"ironic": {"foo": "bar"}}',
'--private-key=/path/to/key')
def test__parse_partitioning_info_root_msdos(self):
self.config(default_boot_mode='bios', group='deploy')
expected_info = {
'partition_info': {
'label': 'msdos',
'partitions': {
'root':
{'number': 1,
'part_start': '1MiB',
'part_end': '5121MiB',
'flags': ['boot']}
}}}
i_info = ansible_deploy._parse_partitioning_info(self.node)
self.assertEqual(expected_info, i_info)
def test__parse_partitioning_info_all_gpt(self):
in_info = dict(INSTANCE_INFO)
in_info['swap_mb'] = 128
in_info['ephemeral_mb'] = 256
in_info['ephemeral_format'] = 'ext4'
in_info['preserve_ephemeral'] = True
in_info['configdrive'] = 'some-fake-user-data'
in_info['capabilities'] = {'disk_label': 'gpt'}
self.node.instance_info = in_info
self.node.save()
expected_info = {
'partition_info': {
'label': 'gpt',
'ephemeral_format': 'ext4',
'preserve_ephemeral': 'yes',
'partitions': {
'bios':
{'number': 1,
'name': 'bios',
'part_start': '1MiB',
'part_end': '2MiB',
'flags': ['bios_grub']},
'ephemeral':
{'number': 2,
'part_start': '2MiB',
'part_end': '258MiB',
'name': 'ephemeral'},
'swap':
{'number': 3,
'part_start': '258MiB',
'part_end': '386MiB',
'name': 'swap'},
'configdrive':
{'number': 4,
'part_start': '386MiB',
'part_end': '450MiB',
'name': 'configdrive'},
'root':
{'number': 5,
'part_start': '450MiB',
'part_end': '5570MiB',
'name': 'root'}
}}}
i_info = ansible_deploy._parse_partitioning_info(self.node)
self.assertEqual(expected_info, i_info)
@mock.patch.object(ansible_deploy.images, 'download_size', autospec=True)
def test__calculate_memory_req(self, image_mock):
self.config(group='ansible', extra_memory=1)
image_mock.return_value = 2000000 # < 2MiB
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(2, ansible_deploy._calculate_memory_req(task))
image_mock.assert_called_once_with(task.context, 'fake-image')
def test__get_python_interpreter(self):
self.config(group='ansible',
default_python_interpreter='/usr/bin/python3')
self.node.driver_info['ansible_python_interpreter'] = (
'/usr/bin/python4')
python_interpreter = ansible_deploy._get_python_interpreter(self.node)
self.assertEqual('/usr/bin/python4', python_interpreter)
def test__get_configdrive_path(self):
self.config(tempdir='/path/to/tmpdir')
self.assertEqual('/path/to/tmpdir/spam.cndrive',
ansible_deploy._get_configdrive_path('spam'))
def test__prepare_extra_vars(self):
host_list = [('fake-uuid', '1.2.3.4', 'spam', 'ham'),
('other-uuid', '5.6.7.8', 'eggs', 'vikings')]
ansible_vars = {"foo": "bar"}
self.assertEqual(
{"nodes": [
{"name": "fake-uuid", "ip": '1.2.3.4',
"user": "spam", "extra": "ham"},
{"name": "other-uuid", "ip": '5.6.7.8',
"user": "eggs", "extra": "vikings"}],
"foo": "bar"},
ansible_deploy._prepare_extra_vars(host_list, ansible_vars))
def test__parse_root_device_hints(self):
hints = {"wwn": "fake wwn", "size": "12345", "rotational": True,
"serial": "HELLO"}
expected = {"wwn": "fake wwn", "size": 12345, "rotational": True,
"serial": "hello"}
props = self.node.properties
props['root_device'] = hints
self.node.properties = props
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(
expected, ansible_deploy._parse_root_device_hints(task.node))
def test__parse_root_device_hints_iinfo(self):
hints = {"wwn": "fake wwn", "size": "12345", "rotational": True,
"serial": "HELLO"}
expected = {"wwn": "fake wwn", "size": 12345, "rotational": True,
"serial": "hello"}
iinfo = self.node.instance_info
iinfo['root_device'] = hints
self.node.instance_info = iinfo
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(
expected, ansible_deploy._parse_root_device_hints(task.node))
def test__parse_root_device_hints_override(self):
hints = {"wwn": "fake wwn", "size": "12345", "rotational": True,
"serial": "HELLO"}
expected = {"wwn": "fake wwn", "size": 12345, "rotational": True,
"serial": "hello"}
props = self.node.properties
props['root_device'] = {'size': 'no idea'}
self.node.properties = props
iinfo = self.node.instance_info
iinfo['root_device'] = hints
self.node.instance_info = iinfo
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(
expected, ansible_deploy._parse_root_device_hints(task.node))
def test__parse_root_device_hints_fail_advanced(self):
hints = {"wwn": "s!= fake wwn",
"size": ">= 12345",
"name": "<or> spam <or> ham",
"rotational": True}
expected = {"wwn": "s!= fake%20wwn",
"name": "<or> spam <or> ham",
"size": ">= 12345"}
props = self.node.properties
props['root_device'] = hints
self.node.properties = props
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
exc = self.assertRaises(
exception.InvalidParameterValue,
ansible_deploy._parse_root_device_hints, task.node)
for key, value in expected.items():
self.assertIn(str(key), str(exc))
self.assertIn(str(value), str(exc))
def test__prepare_variables(self):
i_info = self.node.instance_info
i_info['image_mem_req'] = 3000
i_info['image_whatever'] = 'hello'
self.node.instance_info = i_info
self.node.save()
expected = {"image": {"url": "http://image",
"validate_certs": "yes",
"source": "fake-image",
"mem_req": 3000,
"disk_format": "qcow2",
"checksum": "md5:checksum",
"whatever": "hello"}}
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(expected,
ansible_deploy._prepare_variables(task))
def test__prepare_variables_root_device_hints(self):
props = self.node.properties
props['root_device'] = {"wwn": "fake-wwn"}
self.node.properties = props
self.node.save()
expected = {"image": {"url": "http://image",
"validate_certs": "yes",
"source": "fake-image",
"disk_format": "qcow2",
"checksum": "md5:checksum"},
"root_device_hints": {"wwn": "fake-wwn"}}
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(expected,
ansible_deploy._prepare_variables(task))
def test__prepare_variables_insecure_activated(self):
self.config(image_store_insecure=True, group='ansible')
i_info = self.node.instance_info
i_info['image_checksum'] = 'sha256:checksum'
self.node.instance_info = i_info
self.node.save()
expected = {"image": {"url": "http://image",
"validate_certs": "no",
"source": "fake-image",
"disk_format": "qcow2",
"checksum": "sha256:checksum"}}
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(expected,
ansible_deploy._prepare_variables(task))
def test__prepare_variables_configdrive_url(self):
i_info = self.node.instance_info
i_info['configdrive'] = 'http://configdrive_url'
self.node.instance_info = i_info
self.node.save()
expected = {"image": {"url": "http://image",
"validate_certs": "yes",
"source": "fake-image",
"disk_format": "qcow2",
"checksum": "md5:checksum"},
'configdrive': {'type': 'url',
'location': 'http://configdrive_url'}}
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(expected,
ansible_deploy._prepare_variables(task))
def test__prepare_variables_configdrive_file(self):
i_info = self.node.instance_info
i_info['configdrive'] = 'fake-content'
self.node.instance_info = i_info
self.node.save()
configdrive_path = ('%(tempdir)s/%(node)s.cndrive' %
{'tempdir': ansible_deploy.CONF.tempdir,
'node': self.node.uuid})
expected = {"image": {"url": "http://image",
"validate_certs": "yes",
"source": "fake-image",
"disk_format": "qcow2",
"checksum": "md5:checksum"},
'configdrive': {'type': 'file',
'location': configdrive_path}}
with mock.patch.object(ansible_deploy, 'open', mock.mock_open(),
create=True) as open_mock:
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(expected,
ansible_deploy._prepare_variables(task))
open_mock.assert_has_calls((
mock.call(configdrive_path, 'w'),
mock.call().__enter__(),
mock.call().write('fake-content'),
mock.call().__exit__(None, None, None)))
@mock.patch.object(utils, 'build_configdrive', autospec=True)
def test__prepare_variables_configdrive_json(self, mock_build_configdrive):
i_info = self.node.instance_info
i_info['configdrive'] = {'meta_data': {}}
self.node.instance_info = i_info
self.node.save()
mock_build_configdrive.return_value = 'fake-content'
configdrive_path = ('%(tempdir)s/%(node)s.cndrive' %
{'tempdir': ansible_deploy.CONF.tempdir,
'node': self.node.uuid})
expected = {"image": {"url": "http://image",
"validate_certs": "yes",
"source": "fake-image",
"disk_format": "qcow2",
"checksum": "md5:checksum"},
'configdrive': {'type': 'file',
'location': configdrive_path}}
with mock.patch.object(ansible_deploy, 'open', mock.mock_open(),
create=True) as open_mock:
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(expected,
ansible_deploy._prepare_variables(task))
mock_build_configdrive.assert_called_once_with(
task.node, {'meta_data': {}})
open_mock.assert_has_calls((
mock.call(configdrive_path, 'w'),
mock.call().__enter__(),
mock.call().write('fake-content'),
mock.call().__exit__(None, None, None)))
def test__validate_clean_steps(self):
steps = [{"interface": "deploy",
"name": "foo",
"args": {"spam": {"required": True, "value": "ham"}}},
{"name": "bar",
"interface": "deploy"}]
self.assertIsNone(ansible_deploy._validate_clean_steps(
steps, self.node.uuid))
def test__validate_clean_steps_missing(self):
steps = [{"name": "foo",
"interface": "deploy",
"args": {"spam": {"value": "ham"},
"ham": {"required": True}}},
{"name": "bar"},
{"interface": "deploy"}]
exc = self.assertRaises(exception.NodeCleaningFailure,
ansible_deploy._validate_clean_steps,
steps, self.node.uuid)
self.assertIn("name foo, field ham.value", str(exc))
self.assertIn("name bar, field interface", str(exc))
self.assertIn("name undefined, field name", str(exc))
def test__validate_clean_steps_names_not_unique(self):
steps = [{"name": "foo",
"interface": "deploy"},
{"name": "foo",
"interface": "deploy"}]
exc = self.assertRaises(exception.NodeCleaningFailure,
ansible_deploy._validate_clean_steps,
steps, self.node.uuid)
self.assertIn("unique names", str(exc))
@mock.patch.object(ansible_deploy.yaml, 'safe_load', autospec=True)
def test__get_clean_steps(self, load_mock):
steps = [{"interface": "deploy",
"name": "foo",
"args": {"spam": {"required": True, "value": "ham"}}},
{"name": "bar",
"interface": "deploy",
"priority": 100}]
load_mock.return_value = steps
expected = [{"interface": "deploy",
"step": "foo",
"priority": 10,
"abortable": False,
"argsinfo": {"spam": {"required": True}},
"args": {"spam": "ham"}},
{"interface": "deploy",
"step": "bar",
"priority": 100,
"abortable": False,
"argsinfo": {},
"args": {}}]
d_info = self.node.driver_info
d_info['ansible_clean_steps_config'] = 'custom_clean'
self.node.driver_info = d_info
self.node.save()
self.config(group='ansible', playbooks_path='/path/to/playbooks')
with mock.patch.object(ansible_deploy, 'open', mock.mock_open(),
create=True) as open_mock:
self.assertEqual(
expected,
ansible_deploy._get_clean_steps(
self.node, interface="deploy",
override_priorities={"foo": 10}))
open_mock.assert_has_calls((
mock.call('/path/to/playbooks/custom_clean'),))
load_mock.assert_called_once_with(
open_mock().__enter__.return_value)
class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
def setUp(self):
super(TestAnsibleDeploy, self).setUp()
self.driver = ansible_deploy.AnsibleDeploy()
def test_get_properties(self):
self.assertEqual(
set(list(ansible_deploy.COMMON_PROPERTIES)
+ ['agent_verify_ca', 'deploy_forces_oob_reboot']),
set(self.driver.get_properties()))
@mock.patch.object(deploy_utils, 'check_for_missing_params',
autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate(self, pxe_boot_validate_mock, check_params_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.validate(task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
check_params_mock.assert_called_once_with(
{'instance_info.image_source': INSTANCE_INFO['image_source']},
mock.ANY)
@mock.patch.object(deploy_utils, 'get_boot_option',
return_value='netboot', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate_not_iwdi_netboot(self, pxe_boot_validate_mock,
get_boot_mock):
driver_internal_info = dict(DRIVER_INTERNAL_INFO)
driver_internal_info['is_whole_disk_image'] = False
self.node.driver_internal_info = driver_internal_info
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.validate, task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
get_boot_mock.assert_called_once_with(task.node)
@mock.patch.object(ansible_deploy, '_calculate_memory_req', autospec=True,
return_value=2000)
@mock.patch.object(utils, 'node_power_action', autospec=True)
def test_deploy(self, power_mock, mem_req_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.deploy(task)
self.assertEqual(driver_return, states.DEPLOYWAIT)
power_mock.assert_called_once_with(task, states.REBOOT)
mem_req_mock.assert_called_once_with(task)
i_info = task.node.instance_info
self.assertEqual(i_info['image_mem_req'], 2000)
@mock.patch.object(utils, 'node_power_action', autospec=True)
def test_tear_down(self, power_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.tear_down(task)
power_mock.assert_called_once_with(task, states.POWER_OFF)
self.assertEqual(driver_return, states.DELETED)
@mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
@mock.patch('ironic.drivers.modules.deploy_utils.build_agent_options',
return_value={'op1': 'test1'}, autospec=True)
@mock.patch('ironic.drivers.modules.deploy_utils.'
'build_instance_info_for_deploy',
return_value={'test': 'test'}, autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
def test_prepare(self, pxe_prepare_ramdisk_mock,
build_instance_info_mock, build_options_mock,
power_action_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.node.provision_state = states.DEPLOYING
with mock.patch.object(task.driver.network,
'add_provisioning_network',
autospec=True) as net_mock:
self.driver.prepare(task)
net_mock.assert_called_once_with(task)
power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
build_instance_info_mock.assert_called_once_with(task)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
task.driver.boot, task, {'op1': 'test1'})
self.node.refresh()
self.assertEqual('test', self.node.instance_info['test'])
@mock.patch.object(ansible_deploy, '_get_configdrive_path',
return_value='/path/test', autospec=True)
@mock.patch.object(irlib_utils, 'unlink_without_raise', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_clean_up(self, pxe_clean_up_mock, unlink_mock,
get_cfdrive_path_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.clean_up(task)
pxe_clean_up_mock.assert_called_once_with(task.driver.boot, task)
get_cfdrive_path_mock.assert_called_once_with(self.node['uuid'])
unlink_mock.assert_called_once_with('/path/test')
@mock.patch.object(ansible_deploy, '_get_clean_steps', autospec=True)
def test_get_clean_steps(self, get_clean_steps_mock):
mock_steps = [{'priority': 10, 'interface': 'deploy',
'step': 'erase_devices'},
{'priority': 99, 'interface': 'deploy',
'step': 'erase_devices_metadata'},
]
get_clean_steps_mock.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
steps = self.driver.get_clean_steps(task)
get_clean_steps_mock.assert_called_once_with(
task.node, interface='deploy',
override_priorities={
'erase_devices': None,
'erase_devices_metadata': None})
self.assertEqual(mock_steps, steps)
@mock.patch.object(ansible_deploy, '_get_clean_steps', autospec=True)
def test_get_clean_steps_priority(self, mock_get_clean_steps):
self.config(erase_devices_priority=9, group='deploy')
self.config(erase_devices_metadata_priority=98, group='deploy')
mock_steps = [{'priority': 9, 'interface': 'deploy',
'step': 'erase_devices'},
{'priority': 98, 'interface': 'deploy',
'step': 'erase_devices_metadata'},
]
mock_get_clean_steps.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
steps = self.driver.get_clean_steps(task)
mock_get_clean_steps.assert_called_once_with(
task.node, interface='deploy',
override_priorities={'erase_devices': 9,
'erase_devices_metadata': 98})
self.assertEqual(mock_steps, steps)
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_extra_vars', autospec=True)
@mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
return_value=('test_pl', 'test_u', 'test_k'),
autospec=True)
def test_execute_clean_step(self, parse_driver_info_mock,
prepare_extra_mock, run_playbook_mock):
step = {'priority': 10, 'interface': 'deploy',
'step': 'erase_devices', 'args': {'tags': ['clean']}}
ironic_nodes = {
'ironic_nodes': [(self.node['uuid'], '127.0.0.1', 'test_u', {})]}
prepare_extra_mock.return_value = ironic_nodes
di_info = self.node.driver_internal_info
di_info['agent_url'] = 'http://127.0.0.1'
self.node.driver_internal_info = di_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.execute_clean_step(task, step)
parse_driver_info_mock.assert_called_once_with(
task.node, action='clean')
prepare_extra_mock.assert_called_once_with(
ironic_nodes['ironic_nodes'])
run_playbook_mock.assert_called_once_with(
task.node, 'test_pl', ironic_nodes, 'test_k', tags=['clean'])
@mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
return_value=('test_pl', 'test_u', 'test_k'),
autospec=True)
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(ansible_deploy, 'LOG', autospec=True)
def test_execute_clean_step_no_success_log(
self, log_mock, run_mock, parse_driver_info_mock):
run_mock.side_effect = exception.InstanceDeployFailure('Boom')
step = {'priority': 10, 'interface': 'deploy',
'step': 'erase_devices', 'args': {'tags': ['clean']}}
di_info = self.node.driver_internal_info
di_info['agent_url'] = 'http://127.0.0.1'
self.node.driver_internal_info = di_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InstanceDeployFailure,
self.driver.execute_clean_step,
task, step)
self.assertFalse(log_mock.info.called)
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(steps, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
@mock.patch('ironic.drivers.modules.deploy_utils.build_agent_options',
return_value={'op1': 'test1'}, autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
def test_prepare_cleaning(
self, prepare_ramdisk_mock, build_options_mock, power_action_mock,
set_node_cleaning_steps, run_playbook_mock):
step = {'priority': 10, 'interface': 'deploy',
'step': 'erase_devices', 'tags': ['clean']}
driver_internal_info = dict(DRIVER_INTERNAL_INFO)
driver_internal_info['clean_steps'] = [step]
self.node.driver_internal_info = driver_internal_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.network.add_cleaning_network = mock.Mock()
state = self.driver.prepare_cleaning(task)
set_node_cleaning_steps.assert_called_once_with(task)
task.driver.network.add_cleaning_network.assert_called_once_with(
task)
build_options_mock.assert_called_once_with(task.node)
prepare_ramdisk_mock.assert_called_once_with(
task.driver.boot, task, {'op1': 'test1'})
power_action_mock.assert_called_once_with(task, states.REBOOT)
self.assertFalse(run_playbook_mock.called)
self.assertEqual(states.CLEANWAIT, state)
@mock.patch.object(steps, 'set_node_cleaning_steps', autospec=True)
def test_prepare_cleaning_callback_no_steps(self,
set_node_cleaning_steps):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.network.add_cleaning_network = mock.Mock()
self.driver.prepare_cleaning(task)
set_node_cleaning_steps.assert_called_once_with(task)
self.assertFalse(task.driver.network.add_cleaning_network.called)
@mock.patch.object(utils, 'node_power_action', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_tear_down_cleaning(self, clean_ramdisk_mock, power_action_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.network.remove_cleaning_network = mock.Mock()
self.driver.tear_down_cleaning(task)
power_action_mock.assert_called_once_with(task, states.POWER_OFF)
clean_ramdisk_mock.assert_called_once_with(task.driver.boot, task)
(task.driver.network.remove_cleaning_network
.assert_called_once_with(task))
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_extra_vars', autospec=True)
@mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
return_value=('test_pl', 'test_u', 'test_k'),
autospec=True)
@mock.patch.object(ansible_deploy, '_parse_partitioning_info',
autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_variables', autospec=True)
def test__ansible_deploy(self, prepare_vars_mock, parse_part_info_mock,
parse_dr_info_mock, prepare_extra_mock,
run_playbook_mock):
ironic_nodes = {
'ironic_nodes': [(self.node['uuid'], '127.0.0.1', 'test_u')]}
prepare_extra_mock.return_value = ironic_nodes
_vars = {
'url': 'image_url',
'checksum': 'aa'}
prepare_vars_mock.return_value = _vars
driver_internal_info = dict(DRIVER_INTERNAL_INFO)
driver_internal_info['is_whole_disk_image'] = False
self.node.driver_internal_info = driver_internal_info
self.node.extra = {'ham': 'spam'}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver._ansible_deploy(task, '127.0.0.1')
prepare_vars_mock.assert_called_once_with(task)
parse_part_info_mock.assert_called_once_with(task.node)
parse_dr_info_mock.assert_called_once_with(task.node)
prepare_extra_mock.assert_called_once_with(
[(self.node['uuid'], '127.0.0.1', 'test_u', {'ham': 'spam'})],
variables=_vars)
run_playbook_mock.assert_called_once_with(
task.node, 'test_pl', ironic_nodes, 'test_k')
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_extra_vars', autospec=True)
@mock.patch.object(ansible_deploy, '_parse_ansible_driver_info',
return_value=('test_pl', 'test_u', 'test_k'),
autospec=True)
@mock.patch.object(ansible_deploy, '_parse_partitioning_info',
autospec=True)
@mock.patch.object(ansible_deploy, '_prepare_variables', autospec=True)
def test__ansible_deploy_iwdi(self, prepare_vars_mock,
parse_part_info_mock, parse_dr_info_mock,
prepare_extra_mock, run_playbook_mock):
ironic_nodes = {
'ironic_nodes': [(self.node['uuid'], '127.0.0.1', 'test_u')]}
prepare_extra_mock.return_value = ironic_nodes
_vars = {
'url': 'image_url',
'checksum': 'aa'}
prepare_vars_mock.return_value = _vars
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = True
instance_info = self.node.instance_info
del instance_info['root_mb']
self.node.driver_internal_info = driver_internal_info
self.node.instance_info = instance_info
self.node.extra = {'ham': 'spam'}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver._ansible_deploy(task, '127.0.0.1')
prepare_vars_mock.assert_called_once_with(task)
self.assertFalse(parse_part_info_mock.called)
parse_dr_info_mock.assert_called_once_with(task.node)
prepare_extra_mock.assert_called_once_with(
[(self.node['uuid'], '127.0.0.1', 'test_u', {'ham': 'spam'})],
variables=_vars)
run_playbook_mock.assert_called_once_with(
task.node, 'test_pl', ironic_nodes, 'test_k')
@mock.patch.object(fake.FakePower, 'get_power_state',
return_value=states.POWER_OFF, autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
def test_tear_down_agent_force_reboot(
self, power_action_mock, get_pow_state_mock):
d_info = self.node.driver_info
d_info['deploy_forces_oob_reboot'] = True
self.node.driver_info = d_info
self.node.save()
self.config(group='ansible',
post_deploy_get_power_state_retry_interval=0)
self.node.provision_state = states.DEPLOYING
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.tear_down_agent(task)
power_action_mock.assert_called_once_with(task, states.POWER_OFF)
get_pow_state_mock.assert_not_called()
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
def test_tear_down_agent_soft_poweroff_retry(
self, power_action_mock, run_playbook_mock):
self.config(group='ansible',
post_deploy_get_power_state_retry_interval=0)
self.config(group='ansible',
post_deploy_get_power_state_retries=1)
self.node.provision_state = states.DEPLOYING
di_info = self.node.driver_internal_info
di_info['agent_url'] = 'http://127.0.0.1'
self.node.driver_internal_info = di_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
with mock.patch.object(task.driver.power,
'get_power_state',
return_value=states.POWER_ON,
autospec=True) as p_mock:
self.driver.tear_down_agent(task)
p_mock.assert_called_with(task)
self.assertEqual(2, len(p_mock.mock_calls))
power_action_mock.assert_called_once_with(task, states.POWER_OFF)
run_playbook_mock.assert_called_once_with(
task.node, 'shutdown.yaml', mock.ANY, mock.ANY)
@mock.patch.object(utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(ansible_deploy, '_get_node_ip', autospec=True,
return_value='1.2.3.4')
def test_write_image(self, getip_mock, bootdev_mock):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
with mock.patch.multiple(self.driver, autospec=True,
_ansible_deploy=mock.DEFAULT,
reboot_to_instance=mock.DEFAULT):
result = self.driver.write_image(task)
self.assertIsNone(result)
getip_mock.assert_called_once_with(task)
self.driver._ansible_deploy.assert_called_once_with(
task, '1.2.3.4')
bootdev_mock.assert_called_once_with(task, 'disk',
persistent=True)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertEqual(states.DEPLOYING, task.node.provision_state)
@mock.patch.object(flat_network.FlatNetwork, 'add_provisioning_network',
autospec=True)
@mock.patch.object(utils, 'restore_power_state_if_needed', autospec=True)
@mock.patch.object(utils, 'power_on_node_if_needed', autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
@mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
@mock.patch.object(deploy_utils, 'build_instance_info_for_deploy',
autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
def test_prepare_with_smartnic_port(
self, pxe_prepare_ramdisk_mock,
build_instance_info_mock, build_options_mock,
power_action_mock, power_on_node_if_needed_mock,
restore_power_state_mock, net_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.node.provision_state = states.DEPLOYING
build_instance_info_mock.return_value = {'test': 'test'}
build_options_mock.return_value = {'op1': 'test1'}
power_on_node_if_needed_mock.return_value = states.POWER_OFF
self.driver.prepare(task)
power_action_mock.assert_called_once_with(
task, states.POWER_OFF)
build_instance_info_mock.assert_called_once_with(task)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
task.driver.boot, task, {'op1': 'test1'})
power_on_node_if_needed_mock.assert_called_once_with(task)
restore_power_state_mock.assert_called_once_with(
task, states.POWER_OFF)
self.node.refresh()
self.assertEqual('test', self.node.instance_info['test'])
@mock.patch.object(utils, 'restore_power_state_if_needed', autospec=True)
@mock.patch.object(utils, 'power_on_node_if_needed', autospec=True)
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(steps, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
@mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
def test_prepare_cleaning_with_smartnic_port(
self, prepare_ramdisk_mock, build_options_mock, power_action_mock,
set_node_cleaning_steps, run_playbook_mock,
power_on_node_if_needed_mock, restore_power_state_mock):
step = {'priority': 10, 'interface': 'deploy',
'step': 'erase_devices', 'tags': ['clean']}
driver_internal_info = dict(DRIVER_INTERNAL_INFO)
driver_internal_info['clean_steps'] = [step]
self.node.driver_internal_info = driver_internal_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.network.add_cleaning_network = mock.Mock()
build_options_mock.return_value = {'op1': 'test1'}
power_on_node_if_needed_mock.return_value = states.POWER_OFF
state = self.driver.prepare_cleaning(task)
set_node_cleaning_steps.assert_called_once_with(task)
task.driver.network.add_cleaning_network.assert_called_once_with(
task)
build_options_mock.assert_called_once_with(task.node)
prepare_ramdisk_mock.assert_called_once_with(
task.driver.boot, task, {'op1': 'test1'})
power_action_mock.assert_called_once_with(task, states.REBOOT)
self.assertFalse(run_playbook_mock.called)
self.assertEqual(states.CLEANWAIT, state)
power_on_node_if_needed_mock.assert_called_once_with(task)
restore_power_state_mock.assert_called_once_with(
task, states.POWER_OFF)
@mock.patch.object(utils, 'restore_power_state_if_needed', autospec=True)
@mock.patch.object(utils, 'power_on_node_if_needed', autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_tear_down_cleaning_with_smartnic_port(
self, clean_ramdisk_mock, power_action_mock,
power_on_node_if_needed_mock, restore_power_state_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.network.remove_cleaning_network = mock.Mock()
power_on_node_if_needed_mock.return_value = states.POWER_OFF
self.driver.tear_down_cleaning(task)
            power_action_mock.assert_called_once_with(task, states.POWER_OFF)
clean_ramdisk_mock.assert_called_once_with(task.driver.boot, task)
(task.driver.network.remove_cleaning_network
.assert_called_once_with(task))
power_on_node_if_needed_mock.assert_called_once_with(task)
restore_power_state_mock.assert_called_once_with(
task, states.POWER_OFF)
| 47.869323
| 79
| 0.5966
|
fcb4a9ccd8bc722e4954d75eaaae82b373451ac2
| 2,202
|
py
|
Python
|
fscognitive/controllers/led_controller.py
|
anhhoangiot/people_recognition_pi
|
92ceaebdef775a42023760360689d473662cb361
|
[
"MIT"
] | null | null | null |
fscognitive/controllers/led_controller.py
|
anhhoangiot/people_recognition_pi
|
92ceaebdef775a42023760360689d473662cb361
|
[
"MIT"
] | null | null | null |
fscognitive/controllers/led_controller.py
|
anhhoangiot/people_recognition_pi
|
92ceaebdef775a42023760360689d473662cb361
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-10-08
# @Author : Anh Hoang (anhhoang.work.mail@gmail.com)
# @Project : FSCognitive
# @Version : 1.0
import socket
from thread import *
import urllib2
import json
class LEDController(object):
HOST = ''
PORT = 8888
def __init__(self):
super(LEDController, self).__init__()
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Socket created!'
def start(self):
try:
            self.socket.bind((self.HOST, self.PORT))
except socket.error as message:
print 'Bind failed. Error Code : ' + str(message[0])
print 'Socket bind complete!'
self.socket.listen(10)
print 'Socket now listening!'
while True:
connection, address = self.socket.accept()
print 'Connected with ' + address[0] + ':' + str(address[1])
            start_new_thread(self.__receiveCommandThread, (connection,))
self.socket.close()
def __receiveCommandThread(self, connection):
connection.send('authorized')
while True:
try:
command = connection.recv(1024)
if not command:
break
print 'Received command: ' + command
self.__processReceivedCommand(command)
except Exception, e:
print 'Connection error: %s' % e
break
connection.close()
def __processReceivedCommand(self, command):
if command:
status = 'off'
if command == '1':
print 'Turning light on'
status = 'on'
else:
print 'Turning light off'
status = 'off'
self.__sendLedStatusToCloud(status)
def __sendLedStatusToCloud(self, status):
print 'Sending led status ' + status
data = {
'status': status,
'name': 'LED 1'
}
request = urllib2.Request('http://pociot.azurewebsites.net/')
request.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(request, json.dumps(data))
print response
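# Hedged usage sketch (not part of the original module): a minimal way to run
# the socket server, assuming the script is executed directly on the device
# that drives the LED. The __main__ guard is an illustrative addition.
if __name__ == '__main__':
    controller = LEDController()
    controller.start()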
| 30.164384
| 72
| 0.559491
|
fc5044545f8394b6b60265d0beab24c2289d0595
| 125
|
py
|
Python
|
Foresite/upload_csv/admin.py
|
khoamb/Foresite
|
97b155452d92fe1c487e7cbeffbc867604a1e726
|
[
"MIT"
] | null | null | null |
Foresite/upload_csv/admin.py
|
khoamb/Foresite
|
97b155452d92fe1c487e7cbeffbc867604a1e726
|
[
"MIT"
] | 6
|
2018-11-29T23:25:16.000Z
|
2018-11-30T01:17:33.000Z
|
Foresite/upload_csv/admin.py
|
PricelessAntonio/Foresite
|
4eec1ab5bf588b1ef6ec176a612bc62e8d55b424
|
[
"MIT"
] | 3
|
2018-09-05T18:57:03.000Z
|
2020-03-22T02:19:58.000Z
|
from django.contrib import admin
from .models import CsvUpload
# Register your models here.
admin.site.register(CsvUpload)
| 17.857143
| 32
| 0.808
|
294e59aeb3225bb55f4148fd3d3f3f0fcb347729
| 18,697
|
py
|
Python
|
janitor/finance.py
|
VPerrollaz/pyjanitor
|
2fb6fdc139349a4219b41f57c9cef8e37a965ee6
|
[
"MIT"
] | 2
|
2020-09-06T22:11:01.000Z
|
2022-03-19T23:57:24.000Z
|
janitor/finance.py
|
VPerrollaz/pyjanitor
|
2fb6fdc139349a4219b41f57c9cef8e37a965ee6
|
[
"MIT"
] | null | null | null |
janitor/finance.py
|
VPerrollaz/pyjanitor
|
2fb6fdc139349a4219b41f57c9cef8e37a965ee6
|
[
"MIT"
] | null | null | null |
"""
Finance-specific data cleaning functions.
"""
import json
from datetime import date, datetime
from functools import lru_cache
from typing import Optional
import pandas as pd
import pandas_flavor as pf
import requests
from janitor import check
from .utils import deprecated_alias
currency_set = {
"AUD",
"BGN",
"BRL",
"CAD",
"CHF",
"CNY",
"CZK",
"DKK",
"EUR",
"GBP",
"HKD",
"HRK",
"HUF",
"IDR",
"ILS",
"INR",
"ISK",
"JPY",
"KRW",
"MXN",
"MYR",
"NOK",
"NZD",
"PHP",
"PLN",
"RON",
"RUB",
"SEK",
"SGD",
"THB",
"TRY",
"USD",
"ZAR",
}
# Dictionary of recognized World Bank countries and their abbreviations
wb_country_dict = {
"Aruba": "ABW",
"Afghanistan": "AFG",
"Angola": "AGO",
"Albania": "ALB",
"Andorra": "AND",
"Arab World": "ARB",
"United Arab Emirates": "ARE",
"Argentina": "ARG",
"Armenia": "ARM",
"American Samoa": "ASM",
"Antigua and Barbuda": "ATG",
"Australia": "AUS",
"Austria": "AUT",
"Azerbaijan": "AZE",
"Burundi": "BDI",
"Belgium": "BEL",
"Benin": "BEN",
"Burkina Faso": "BFA",
"Bangladesh": "BGD",
"Bulgaria": "BGR",
"Bahrain": "BHR",
"Bahamas, The": "BHS",
"Bosnia and Herzegovina": "BIH",
"Belarus": "BLR",
"Belize": "BLZ",
"Bermuda": "BMU",
"Bolivia": "BOL",
"Brazil": "BRA",
"Barbados": "BRB",
"Brunei Darussalam": "BRN",
"Bhutan": "BTN",
"Botswana": "BWA",
"Central African Republic": "CAF",
"Canada": "CAN",
"Central Europe and the Baltics": "CEB",
"Switzerland": "CHE",
"Channel Islands": "CHI",
"Chile": "CHL",
"China": "CHN",
"Cote d'Ivoire": "CIV",
"Cameroon": "CMR",
"Congo, Dem. Rep.": "COD",
"Congo, Rep.": "COG",
"Colombia": "COL",
"Comoros": "COM",
"Cabo Verde": "CPV",
"Costa Rica": "CRI",
"Caribbean small states": "CSS",
"Cuba": "CUB",
"Curacao": "CUW",
"Cayman Islands": "CYM",
"Cyprus": "CYP",
"Czech Republic": "CZE",
"Germany": "DEU",
"Djibouti": "DJI",
"Dominica": "DMA",
"Denmark": "DNK",
"Dominican Republic": "DOM",
"Algeria": "DZA",
"East Asia & Pacific (excluding high income)": "EAP",
"Early-demographic dividend": "EAR",
"East Asia & Pacific": "EAS",
"Europe & Central Asia (excluding high income)": "ECA",
"Europe & Central Asia": "ECS",
"Ecuador": "ECU",
"Egypt, Arab Rep.": "EGY",
"Euro area": "EMU",
"Eritrea": "ERI",
"Spain": "ESP",
"Estonia": "EST",
"Ethiopia": "ETH",
"European Union": "EUU",
"Fragile and conflict affected situations": "FCS",
"Finland": "FIN",
"Fiji": "FJI",
"France": "FRA",
"Faroe Islands": "FRO",
"Micronesia, Fed. Sts.": "FSM",
"Gabon": "GAB",
"United Kingdom": "GBR",
"Georgia": "GEO",
"Ghana": "GHA",
"Gibraltar": "GIB",
"Guinea": "GIN",
"Gambia, The": "GMB",
"Guinea-Bissau": "GNB",
"Equatorial Guinea": "GNQ",
"Greece": "GRC",
"Grenada": "GRD",
"Greenland": "GRL",
"Guatemala": "GTM",
"Guam": "GUM",
"Guyana": "GUY",
"High income": "HIC",
"Hong Kong SAR, China": "HKG",
"Honduras": "HND",
"Heavily indebted poor countries (HIPC)": "HPC",
"Croatia": "HRV",
"Haiti": "HTI",
"Hungary": "HUN",
"IBRD only": "IBD",
"IDA & IBRD total": "IBT",
"IDA total": "IDA",
"IDA blend": "IDB",
"Indonesia": "IDN",
"IDA only": "IDX",
"Isle of Man": "IMN",
"India": "IND",
"Not classified": "INX",
"Ireland": "IRL",
"Iran, Islamic Rep.": "IRN",
"Iraq": "IRQ",
"Iceland": "ISL",
"Israel": "ISR",
"Italy": "ITA",
"Jamaica": "JAM",
"Jordan": "JOR",
"Japan": "JPN",
"Kazakhstan": "KAZ",
"Kenya": "KEN",
"Kyrgyz Republic": "KGZ",
"Cambodia": "KHM",
"Kiribati": "KIR",
"St. Kitts and Nevis": "KNA",
"Korea, Rep.": "KOR",
"Kuwait": "KWT",
"Latin America & Caribbean (excluding high income)": "LAC",
"Lao PDR": "LAO",
"Lebanon": "LBN",
"Liberia": "LBR",
"Libya": "LBY",
"St. Lucia": "LCA",
"Latin America & Caribbean": "LCN",
"Least developed countries: UN classification": "LDC",
"Low income": "LIC",
"Liechtenstein": "LIE",
"Sri Lanka": "LKA",
"Lower middle income": "LMC",
"Low & middle income": "LMY",
"Lesotho": "LSO",
"Late-demographic dividend": "LTE",
"Lithuania": "LTU",
"Luxembourg": "LUX",
"Latvia": "LVA",
"Macao SAR, China": "MAC",
"St. Martin (French part)": "MAF",
"Morocco": "MAR",
"Monaco": "MCO",
"Moldova": "MDA",
"Madagascar": "MDG",
"Maldives": "MDV",
"Middle East & North Africa": "MEA",
"Mexico": "MEX",
"Marshall Islands": "MHL",
"Middle income": "MIC",
"North Macedonia": "MKD",
"Mali": "MLI",
"Malta": "MLT",
"Myanmar": "MMR",
"Middle East & North Africa (excluding high income)": "MNA",
"Montenegro": "MNE",
"Mongolia": "MNG",
"Northern Mariana Islands": "MNP",
"Mozambique": "MOZ",
"Mauritania": "MRT",
"Mauritius": "MUS",
"Malawi": "MWI",
"Malaysia": "MYS",
"North America": "NAC",
"Namibia": "NAM",
"New Caledonia": "NCL",
"Niger": "NER",
"Nigeria": "NGA",
"Nicaragua": "NIC",
"Netherlands": "NLD",
"Norway": "NOR",
"Nepal": "NPL",
"Nauru": "NRU",
"New Zealand": "NZL",
"OECD members": "OED",
"Oman": "OMN",
"Other small states": "OSS",
"Pakistan": "PAK",
"Panama": "PAN",
"Peru": "PER",
"Philippines": "PHL",
"Palau": "PLW",
"Papua New Guinea": "PNG",
"Poland": "POL",
"Pre-demographic dividend": "PRE",
"Puerto Rico": "PRI",
"Korea, Dem. People's Rep.": "PRK",
"Portugal": "PRT",
"Paraguay": "PRY",
"West Bank and Gaza": "PSE",
"Pacific island small states": "PSS",
"Post-demographic dividend": "PST",
"French Polynesia": "PYF",
"Qatar": "QAT",
"Romania": "ROU",
"Russian Federation": "RUS",
"Rwanda": "RWA",
"South Asia": "SAS",
"Saudi Arabia": "SAU",
"Sudan": "SDN",
"Senegal": "SEN",
"Singapore": "SGP",
"Solomon Islands": "SLB",
"Sierra Leone": "SLE",
"El Salvador": "SLV",
"San Marino": "SMR",
"Somalia": "SOM",
"Serbia": "SRB",
"Sub-Saharan Africa (excluding high income)": "SSA",
"South Sudan": "SSD",
"Sub-Saharan Africa": "SSF",
"Small states": "SST",
"Sao Tome and Principe": "STP",
"Suriname": "SUR",
"Slovak Republic": "SVK",
"Slovenia": "SVN",
"Sweden": "SWE",
"Eswatini": "SWZ",
"Sint Maarten (Dutch part)": "SXM",
"Seychelles": "SYC",
"Syrian Arab Republic": "SYR",
"Turks and Caicos Islands": "TCA",
"Chad": "TCD",
"East Asia & Pacific (IDA & IBRD countries)": "TEA",
"Europe & Central Asia (IDA & IBRD countries)": "TEC",
"Togo": "TGO",
"Thailand": "THA",
"Tajikistan": "TJK",
"Turkmenistan": "TKM",
"Latin America & the Caribbean (IDA & IBRD countries)": "TLA",
"Timor-Leste": "TLS",
"Middle East & North Africa (IDA & IBRD countries)": "TMN",
"Tonga": "TON",
"South Asia (IDA & IBRD)": "TSA",
"Sub-Saharan Africa (IDA & IBRD countries)": "TSS",
"Trinidad and Tobago": "TTO",
"Tunisia": "TUN",
"Turkey": "TUR",
"Tuvalu": "TUV",
"Tanzania": "TZA",
"Uganda": "UGA",
"Ukraine": "UKR",
"Upper middle income": "UMC",
"Uruguay": "URY",
"United States": "USA",
"Uzbekistan": "UZB",
"St. Vincent and the Grenadines": "VCT",
"Venezuela, RB": "VEN",
"British Virgin Islands": "VGB",
"Virgin Islands (U.S.)": "VIR",
"Vietnam": "VNM",
"Vanuatu": "VUT",
"World": "WLD",
"Samoa": "WSM",
"Kosovo": "XKX",
"Yemen, Rep.": "YEM",
"South Africa": "ZAF",
"Zambia": "ZMB",
"Zimbabwe": "ZWE",
}
def _check_currency(currency: str):
if currency not in currency_set:
raise ValueError(
f"currency {currency} not in supported currency set, "
f"{currency_set}"
)
def _check_wb_country(country: str):
if (country not in wb_country_dict.keys()) & (
country not in wb_country_dict.values() # noqa: PD011
):
raise ValueError(
f"country {country} not in supported World Bank country dict, "
f"{wb_country_dict}"
)
def _check_wb_years(year: int):
if year < 1960:
raise ValueError("year value must be 1960 or later")
@lru_cache(maxsize=32)
def _convert_currency(
from_currency: str = None,
to_currency: str = None,
historical_date: Optional[date] = None,
) -> float:
"""
Currency conversion for Pandas DataFrame column.
Helper function for `convert_currency` method.
The API used is: https://exchangeratesapi.io/
"""
url = "https://api.exchangeratesapi.io"
if historical_date:
check("historical_date", historical_date, [datetime, date])
if isinstance(historical_date, datetime):
if historical_date < datetime(1999, 1, 4):
raise ValueError(
"historical_date:datetime must be later than 1999-01-04!"
)
string_date = str(historical_date)[:10]
else:
if historical_date < date(1999, 1, 4):
raise ValueError(
"historical_date:date must be later than 1999-01-04!"
)
string_date = str(historical_date)
url = url + "/%s" % string_date
else:
url = url + "/latest"
_check_currency(from_currency)
_check_currency(to_currency)
payload = {"base": from_currency, "symbols": to_currency}
result = requests.get(url, params=payload)
if result.status_code != 200:
raise ConnectionError(
"Exchange Rate API failed to receive a 200 "
"response from the server. "
"Please try again later."
)
currency_dict = json.loads(result.text)
rate = currency_dict["rates"][to_currency]
return rate
@pf.register_dataframe_method
@deprecated_alias(colname="column_name")
def convert_currency(
df: pd.DataFrame,
column_name: str = None,
from_currency: str = None,
to_currency: str = None,
historical_date: date = None,
make_new_column: bool = False,
) -> pd.DataFrame:
"""
Converts a column from one currency to another, with an option to
convert based on historical exchange values.
This method mutates the original DataFrame.
:param df: A pandas dataframe.
    :param column_name: Name of the column containing monetary values to
        convert. Should be a string, so that the (possibly new) column name
        stays compatible with the Feather binary format.
:param from_currency: The base currency to convert from.
May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
"CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
"ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
"PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
"ZAR"}
:param to_currency: The target currency to convert to.
May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
"CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
"ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
"PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
"ZAR"}
:param historical_date: If supplied, get exchange rate on a certain\
date. If not supplied, get the latest exchange rate. The exchange\
rates go back to Jan. 4, 1999.
:param make_new_column: Generates new column for converted currency if
True, otherwise, converts currency in place.
:Setup:
.. code-block:: python
import pandas as pd
import janitor
from datetime import date
data_dict = {
"a": [1.23452345, 2.456234, 3.2346125] * 3,
"Bell__Chart": [1/3, 2/7, 3/2] * 3,
"decorated-elephant": [1/234, 2/13, 3/167] * 3,
"animals": ["rabbit", "leopard", "lion"] * 3,
"cities": ["Cambridge", "Shanghai", "Basel"] * 3,
}
example_dataframe = pd.DataFrame(data_dict)
:Example: Converting a column from one currency to another using rates
from 01/01/2018:
.. code-block:: python
example_dataframe.convert_currency('a', from_currency='USD',
to_currency='EUR', historical_date=date(2018,1,1))
:Output:
.. code-block:: python
a Bell__Chart decorated-elephant animals cities
0 1.029370 0.333333 0.004274 rabbit Cambridge
1 2.048056 0.285714 0.153846 leopard Shanghai
2 2.697084 1.500000 0.017964 lion Basel
3 1.029370 0.333333 0.004274 rabbit Cambridge
4 2.048056 0.285714 0.153846 leopard Shanghai
5 2.697084 1.500000 0.017964 lion Basel
6 1.029370 0.333333 0.004274 rabbit Cambridge
7 2.048056 0.285714 0.153846 leopard Shanghai
8 2.697084 1.500000 0.017964 lion Basel
"""
rate = _convert_currency(from_currency, to_currency, historical_date)
if make_new_column:
new_column_name = column_name + "_" + to_currency
df[new_column_name] = df[column_name] * rate
else:
df[column_name] = df[column_name] * rate
return df
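# Hedged usage sketch (illustrative only; mirrors the docstring example above
# but keeps the original column by writing the converted values to "a_EUR"):
#
#   converted = example_dataframe.convert_currency(
#       column_name="a",
#       from_currency="USD",
#       to_currency="EUR",
#       historical_date=date(2018, 1, 1),
#       make_new_column=True,
#   )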
@lru_cache(maxsize=32)
def _inflate_currency(
country: str = None, currency_year: int = None, to_year: int = None
) -> float:
"""
Currency inflation for Pandas DataFrame column.
Helper function for `inflate_currency` method.
The API used is the World Bank Indicator API:
https://datahelpdesk.worldbank.org/knowledgebase/articles/889392-about-the-indicators-api-documentation
"""
# Check all inputs are correct data type
check("country", country, [str])
check("currency_year", currency_year, [int])
check("to_year", to_year, [int])
# Get WB country abbreviation
_check_wb_country(country)
if country in wb_country_dict.keys():
country = wb_country_dict[country]
else:
# `country` is already a correct abbreviation; do nothing
pass
_check_wb_years(currency_year)
_check_wb_years(to_year)
url = (
"https://api.worldbank.org/v2/country/"
+ country
+ "/indicator/FP.CPI.TOTL?date="
+ str(min(currency_year, to_year))
+ ":"
+ str(max(currency_year, to_year))
+ "&format=json"
)
result = requests.get(url)
if result.status_code != 200:
raise ConnectionError(
"WB Indicator API failed to receive a 200 "
"response from the server. "
"Please try again later."
)
# The API returns a list of two items;
# the second item in the list is what we want
inflation_dict = json.loads(result.text)[1]
# Error checking
if inflation_dict is None:
raise ValueError(
"The WB Indicator API returned nothing. "
"This likely means the currency_year and "
"to_year are outside of the year range for "
"which the WB has inflation data for the "
"specified country."
)
# Create new dict with only the year and inflation values
inflation_dict_ready = {
int(inflation_dict[i]["date"]): float(inflation_dict[i]["value"])
for i in range(len(inflation_dict))
if inflation_dict[i]["value"] is not None
}
# Error catching
if currency_year not in inflation_dict_ready.keys():
raise ValueError(
f"The WB Indicator API does not have inflation "
f"data for {currency_year} for {country}."
)
if to_year not in inflation_dict_ready.keys():
raise ValueError(
f"The WB Indicator API does not have inflation "
f"data for {to_year} for {country}."
)
inflator = (
inflation_dict_ready[to_year] / inflation_dict_ready[currency_year]
)
return inflator
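# Worked example of the inflator arithmetic above (hypothetical CPI index
# values, not real World Bank data):
#
#   cpi = {2015: 100.0, 2018: 106.0}
#   inflator = cpi[2018] / cpi[2015]   # 1.06
#   250.0 * inflator                   # 265.0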
@pf.register_dataframe_method
def inflate_currency(
df: pd.DataFrame,
column_name: str = None,
country: str = None,
currency_year: int = None,
to_year: int = None,
make_new_column: bool = False,
) -> pd.DataFrame:
"""
Inflates a column of monetary values from one year to another, based on
the currency's country.
The provided country can be any economy name or code from the World Bank
list of economies:
https://databank.worldbank.org/data/download/site-content/CLASS.xls.
This method mutates the original DataFrame.
Functional usage example:
.. code-block:: python
import pandas as pd
import janitor.finance
df = pd.DataFrame(...)
df = janitor.finance.inflate_currency(
df=df,
column_name='profit',
country='USA',
currency_year=2015,
to_year=2018,
make_new_column=True
)
Method chaining usage example:
.. code-block:: python
import pandas as pd
import janitor.finance
df = pd.DataFrame(...)
df = df.inflate_currency(
column_name='profit',
country='USA',
currency_year=2015,
to_year=2018,
make_new_column=True
)
:param df: A pandas dataframe.
:param column_name: Name of the column containing monetary
values to inflate.
:param country: The country associated with the currency being inflated.
May be any economy or code from the World Bank list of economies:
https://databank.worldbank.org/data/download/site-content/CLASS.xls.
:param currency_year: The currency year to inflate from.
The year should be 1960 or later.
:param to_year: The currency year to inflate to.
The year should be 1960 or later.
:param make_new_column: Generates new column for inflated currency if
True, otherwise, inflates currency in place.
"""
inflator = _inflate_currency(country, currency_year, to_year)
if make_new_column:
new_column_name = column_name + "_" + str(to_year)
df[new_column_name] = df[column_name] * inflator
else:
df[column_name] = df[column_name] * inflator
return df
| 28.545038
| 107
| 0.569503
|
4292aa1dd4e7a4fe5c049e274355eba1b82c26ba
| 703
|
py
|
Python
|
tests/example_classes.py
|
Einenlum/prophepy
|
9bddcf9f579d1ff4037978a5669587221cc8e21d
|
[
"MIT"
] | null | null | null |
tests/example_classes.py
|
Einenlum/prophepy
|
9bddcf9f579d1ff4037978a5669587221cc8e21d
|
[
"MIT"
] | null | null | null |
tests/example_classes.py
|
Einenlum/prophepy
|
9bddcf9f579d1ff4037978a5669587221cc8e21d
|
[
"MIT"
] | null | null | null |
class Calculator:
def __init__(self, name, **kwargs):
self.name = name
self.values = kwargs
def multiply(self, *args):
product = 1
for arg in args:
product = product * arg
return product
def add(self, *args):
return sum(args)
class Displayer:
def __init__(self, calculator: Calculator):
self.calculator = calculator
def display_addition(self, *args) -> str:
'''
If called with (3, 5) will return '3 + 5 = {sum given by the
calculator}'
'''
total = str(self.calculator.add(*args))
args = [str(arg) for arg in args]
return f"{' + '.join(args)} = {total}"
| 24.241379
| 68
| 0.550498
|
2f86d1b3e9819bbd5189ae6f98cc31abe6ac7215
| 1,015
|
py
|
Python
|
src/titles/migrations/0007_auto_20171213_1143.py
|
kierrez/movie-website
|
74f4ed018aba545dec190b70d62abe0ac6085462
|
[
"MIT"
] | 1
|
2019-03-02T20:06:16.000Z
|
2019-03-02T20:06:16.000Z
|
src/titles/migrations/0007_auto_20171213_1143.py
|
kierrez/movie-website
|
74f4ed018aba545dec190b70d62abe0ac6085462
|
[
"MIT"
] | 1
|
2022-01-07T22:57:41.000Z
|
2022-01-07T22:57:41.000Z
|
src/titles/migrations/0007_auto_20171213_1143.py
|
kierrez/movie-website
|
74f4ed018aba545dec190b70d62abe0ac6085462
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-13 10:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('titles', '0006_auto_20171212_1613'),
]
operations = [
migrations.CreateModel(
name='NowPlaying',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('update_date', models.DateField(unique=True)),
('titles', models.ManyToManyField(blank=True, related_name='nowplaying', to='titles.Title')),
],
options={
'ordering': ('-titles__release_date',),
},
),
migrations.AlterField(
model_name='crewtitle',
name='job',
field=models.IntegerField(blank=True, choices=[(0, 'Director'), (1, 'Screenplay'), (2, 'Creator')], null=True),
),
]
| 31.71875
| 123
| 0.570443
|
d8aa1792a1ac802fdb5fd758989198fa8059f618
| 2,638
|
py
|
Python
|
openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_check/Opengauss_Function_Tools_gs_check_Case0119.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_check/Opengauss_Function_Tools_gs_check_Case0119.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_check/Opengauss_Function_Tools_gs_check_Case0119.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : Server-side tools
Case Name : Check the ip_local_port_range setting range as the omm user
Description :
    1. Switch from the root user to the omm user
    2. Run the check in non-local mode: gs_check -i CheckSysPortRange
    3. Run the check in local mode: gs_check -i CheckSysPortRange -L
Expect :
    1. Switched to the omm user
    2. The non-local-mode check completes
    3. The local-mode check completes
History :
"""
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
logger = Logger()
class Tools(unittest.TestCase):
def setUp(self):
logger.info('--------------Opengauss_Function_Tools_gs_check_Case0119start-------------------')
self.dbuserNode = Node('dbuser')
self.Constant = Constant()
def test_server_tools(self):
        logger.info('------------------Check the ip_local_port_range setting range as the omm user in non-local mode------------------')
check_cmd1 = f'''
source {macro.DB_ENV_PATH}
gs_check -i CheckSysPortRange
'''
logger.info(check_cmd1)
msg1 = self.dbuserNode.sh(check_cmd1).result()
logger.info(msg1)
flag = (self.Constant.GS_CHECK_SUCCESS_MSG2[0] in msg1 or self.Constant.GS_CHECK_SUCCESS_MSG2[1] in msg1) and \
self.Constant.GS_CHECK_SUCCESS_MSG2[2] in msg1
self.assertTrue(flag)
        logger.info('------------------Check the ip_local_port_range setting range as the omm user in local mode------------------')
check_cmd2 = f'''
source {macro.DB_ENV_PATH}
gs_check -i CheckSysPortRange -L
'''
logger.info(check_cmd2)
msg2 = self.dbuserNode.sh(check_cmd2).result()
logger.info(msg2)
check_result_flag = False
for single_msg in self.Constant.GS_CHECK_SUCCESS_MSG1:
if single_msg in msg2:
check_result_flag = True
self.assertTrue(check_result_flag)
def tearDown(self):
        logger.info('--------------No environment cleanup needed-------------------')
logger.info('------------------Opengauss_Function_Tools_gs_check_Case0119finish------------------')
| 34.710526
| 119
| 0.615618
|
6be222a6604c89db3e8adbb7337cf5b66bec3127
| 380
|
py
|
Python
|
AlgoPy/SelectionSort.py
|
PasinduSan/Hello-world
|
0c3c976b94dceccc2ac1b83e036a721e68873495
|
[
"MIT"
] | 1
|
2018-12-25T14:02:08.000Z
|
2018-12-25T14:02:08.000Z
|
AlgoPy/SelectionSort.py
|
PasinduSan/Hello-world
|
0c3c976b94dceccc2ac1b83e036a721e68873495
|
[
"MIT"
] | null | null | null |
AlgoPy/SelectionSort.py
|
PasinduSan/Hello-world
|
0c3c976b94dceccc2ac1b83e036a721e68873495
|
[
"MIT"
] | null | null | null |
def selectionSort(nlist):
for fillslot in range(len(nlist)-1,0,-1):
maxpos=0
for location in range(1,fillslot+1):
if nlist[location]>nlist[maxpos]:
maxpos = location
temp = nlist[fillslot]
nlist[fillslot] = nlist[maxpos]
nlist[maxpos] = temp
nlist = [14,46,43,27,57,41,45,21,70]
selectionSort(nlist)
print(nlist)
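# expected output: [14, 21, 27, 41, 43, 45, 46, 57, 70]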
| 23.75
| 44
| 0.610526
|
d28333127f17b972d3c2895fa9d48f1bec3e027a
| 14,051
|
py
|
Python
|
sdk/python/pulumi_google_native/compute/beta/get_organization_security_policy.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/compute/beta/get_organization_security_policy.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/compute/beta/get_organization_security_policy.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetOrganizationSecurityPolicyResult',
'AwaitableGetOrganizationSecurityPolicyResult',
'get_organization_security_policy',
'get_organization_security_policy_output',
]
@pulumi.output_type
class GetOrganizationSecurityPolicyResult:
def __init__(__self__, adaptive_protection_config=None, advanced_options_config=None, associations=None, creation_timestamp=None, description=None, display_name=None, fingerprint=None, kind=None, label_fingerprint=None, labels=None, name=None, parent=None, recaptcha_options_config=None, rule_tuple_count=None, rules=None, self_link=None, self_link_with_id=None, type=None):
if adaptive_protection_config and not isinstance(adaptive_protection_config, dict):
raise TypeError("Expected argument 'adaptive_protection_config' to be a dict")
pulumi.set(__self__, "adaptive_protection_config", adaptive_protection_config)
if advanced_options_config and not isinstance(advanced_options_config, dict):
raise TypeError("Expected argument 'advanced_options_config' to be a dict")
pulumi.set(__self__, "advanced_options_config", advanced_options_config)
if associations and not isinstance(associations, list):
raise TypeError("Expected argument 'associations' to be a list")
pulumi.set(__self__, "associations", associations)
if creation_timestamp and not isinstance(creation_timestamp, str):
raise TypeError("Expected argument 'creation_timestamp' to be a str")
pulumi.set(__self__, "creation_timestamp", creation_timestamp)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if fingerprint and not isinstance(fingerprint, str):
raise TypeError("Expected argument 'fingerprint' to be a str")
pulumi.set(__self__, "fingerprint", fingerprint)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if label_fingerprint and not isinstance(label_fingerprint, str):
raise TypeError("Expected argument 'label_fingerprint' to be a str")
pulumi.set(__self__, "label_fingerprint", label_fingerprint)
if labels and not isinstance(labels, dict):
raise TypeError("Expected argument 'labels' to be a dict")
pulumi.set(__self__, "labels", labels)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if parent and not isinstance(parent, str):
raise TypeError("Expected argument 'parent' to be a str")
pulumi.set(__self__, "parent", parent)
if recaptcha_options_config and not isinstance(recaptcha_options_config, dict):
raise TypeError("Expected argument 'recaptcha_options_config' to be a dict")
pulumi.set(__self__, "recaptcha_options_config", recaptcha_options_config)
if rule_tuple_count and not isinstance(rule_tuple_count, int):
raise TypeError("Expected argument 'rule_tuple_count' to be a int")
pulumi.set(__self__, "rule_tuple_count", rule_tuple_count)
if rules and not isinstance(rules, list):
raise TypeError("Expected argument 'rules' to be a list")
pulumi.set(__self__, "rules", rules)
if self_link and not isinstance(self_link, str):
raise TypeError("Expected argument 'self_link' to be a str")
pulumi.set(__self__, "self_link", self_link)
if self_link_with_id and not isinstance(self_link_with_id, str):
raise TypeError("Expected argument 'self_link_with_id' to be a str")
pulumi.set(__self__, "self_link_with_id", self_link_with_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="adaptiveProtectionConfig")
def adaptive_protection_config(self) -> 'outputs.SecurityPolicyAdaptiveProtectionConfigResponse':
return pulumi.get(self, "adaptive_protection_config")
@property
@pulumi.getter(name="advancedOptionsConfig")
def advanced_options_config(self) -> 'outputs.SecurityPolicyAdvancedOptionsConfigResponse':
return pulumi.get(self, "advanced_options_config")
@property
@pulumi.getter
def associations(self) -> Sequence['outputs.SecurityPolicyAssociationResponse']:
"""
A list of associations that belong to this policy.
"""
return pulumi.get(self, "associations")
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> str:
"""
Creation timestamp in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter
def description(self) -> str:
"""
An optional description of this resource. Provide this property when you create the resource.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
        User-provided name of the Organization security policy. The name should be unique in the organization in which the security policy is created. This should only be used when SecurityPolicyType is FIREWALL. The name must be 1-63 characters long, and comply with https://www.ietf.org/rfc/rfc1035.txt. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def fingerprint(self) -> str:
"""
Specifies a fingerprint for this resource, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make get() request to the security policy.
"""
return pulumi.get(self, "fingerprint")
@property
@pulumi.getter
def kind(self) -> str:
"""
        [Output only] Type of the resource. Always compute#securityPolicy for security policies
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="labelFingerprint")
def label_fingerprint(self) -> str:
"""
A fingerprint for the labels being applied to this security policy, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. To see the latest fingerprint, make get() request to the security policy.
"""
return pulumi.get(self, "label_fingerprint")
@property
@pulumi.getter
def labels(self) -> Mapping[str, str]:
"""
Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parent(self) -> str:
"""
The parent of the security policy.
"""
return pulumi.get(self, "parent")
@property
@pulumi.getter(name="recaptchaOptionsConfig")
def recaptcha_options_config(self) -> 'outputs.SecurityPolicyRecaptchaOptionsConfigResponse':
return pulumi.get(self, "recaptcha_options_config")
@property
@pulumi.getter(name="ruleTupleCount")
def rule_tuple_count(self) -> int:
"""
Total count of all security policy rule tuples. A security policy can not exceed a set number of tuples.
"""
return pulumi.get(self, "rule_tuple_count")
@property
@pulumi.getter
def rules(self) -> Sequence['outputs.SecurityPolicyRuleResponse']:
"""
A list of rules that belong to this policy. There must always be a default rule (rule with priority 2147483647 and match "*"). If no rules are provided when creating a security policy, a default rule with action "allow" will be added.
"""
return pulumi.get(self, "rules")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> str:
"""
Server-defined URL for the resource.
"""
return pulumi.get(self, "self_link")
@property
@pulumi.getter(name="selfLinkWithId")
def self_link_with_id(self) -> str:
"""
Server-defined URL for this resource with the resource id.
"""
return pulumi.get(self, "self_link_with_id")
@property
@pulumi.getter
def type(self) -> str:
"""
The type indicates the intended use of the security policy. CLOUD_ARMOR - Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. CLOUD_ARMOR_EDGE - Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache.
"""
return pulumi.get(self, "type")
class AwaitableGetOrganizationSecurityPolicyResult(GetOrganizationSecurityPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetOrganizationSecurityPolicyResult(
adaptive_protection_config=self.adaptive_protection_config,
advanced_options_config=self.advanced_options_config,
associations=self.associations,
creation_timestamp=self.creation_timestamp,
description=self.description,
display_name=self.display_name,
fingerprint=self.fingerprint,
kind=self.kind,
label_fingerprint=self.label_fingerprint,
labels=self.labels,
name=self.name,
parent=self.parent,
recaptcha_options_config=self.recaptcha_options_config,
rule_tuple_count=self.rule_tuple_count,
rules=self.rules,
self_link=self.self_link,
self_link_with_id=self.self_link_with_id,
type=self.type)
def get_organization_security_policy(security_policy: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOrganizationSecurityPolicyResult:
"""
List all of the ordered rules present in a single specified policy.
"""
__args__ = dict()
__args__['securityPolicy'] = security_policy
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:compute/beta:getOrganizationSecurityPolicy', __args__, opts=opts, typ=GetOrganizationSecurityPolicyResult).value
return AwaitableGetOrganizationSecurityPolicyResult(
adaptive_protection_config=__ret__.adaptive_protection_config,
advanced_options_config=__ret__.advanced_options_config,
associations=__ret__.associations,
creation_timestamp=__ret__.creation_timestamp,
description=__ret__.description,
display_name=__ret__.display_name,
fingerprint=__ret__.fingerprint,
kind=__ret__.kind,
label_fingerprint=__ret__.label_fingerprint,
labels=__ret__.labels,
name=__ret__.name,
parent=__ret__.parent,
recaptcha_options_config=__ret__.recaptcha_options_config,
rule_tuple_count=__ret__.rule_tuple_count,
rules=__ret__.rules,
self_link=__ret__.self_link,
self_link_with_id=__ret__.self_link_with_id,
type=__ret__.type)
@_utilities.lift_output_func(get_organization_security_policy)
def get_organization_security_policy_output(security_policy: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetOrganizationSecurityPolicyResult]:
"""
List all of the ordered rules present in a single specified policy.
"""
...
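# Hedged usage sketch (not emitted by the Pulumi SDK Generator): inside a
# Pulumi program one could resolve a policy's rules roughly like this; the
# policy ID below is a placeholder, not a real resource.
#
#   policy = get_organization_security_policy(security_policy="123456789012")
#   pulumi.export("ruleCount", policy.rule_tuple_count)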
| 50.182143
| 602
| 0.700306
|
3d5d8918e8192c21262d92fb4146aaad66dc5546
| 9,427
|
py
|
Python
|
immersive_scaler/ui.py
|
kant/immersive_scaler
|
cd7dd0c0866ffeb6f9866c7bdaf9113d4b31e85e
|
[
"MIT"
] | null | null | null |
immersive_scaler/ui.py
|
kant/immersive_scaler
|
cd7dd0c0866ffeb6f9866c7bdaf9113d4b31e85e
|
[
"MIT"
] | null | null | null |
immersive_scaler/ui.py
|
kant/immersive_scaler
|
cd7dd0c0866ffeb6f9866c7bdaf9113d4b31e85e
|
[
"MIT"
] | null | null | null |
import bpy
from bpy.props import BoolProperty, EnumProperty, FloatProperty, IntProperty, CollectionProperty
from bpy.types import Scene
def make_annotations(cls):
bl_props = {k: v for k, v in cls.__dict__.items() if isinstance(v, tuple)}
if bl_props:
if '__annotations__' not in cls.__dict__:
setattr(cls, '__annotations__', {})
annotations = cls.__dict__['__annotations__']
for k, v in bl_props.items():
annotations[k] = v
delattr(cls, k)
return cls
def set_properties():
Scene.target_height = FloatProperty(
name = "Target Height",
description = "Desired height of the highest vertex in the model. If Scale to Eyes is set, Desired Eye Height",
default = 1.61,
step = 0.01,
precision = 3,
soft_min = 0,
soft_max = 3,
subtype = 'DISTANCE'
)
Scene.arm_to_legs = FloatProperty(
name = "Leg/Arm Scaling",
description = "What percentage of the needed rescaling should be done to the legs. Remaining scaling is done on the arms",
default = 55,
step = 1,
precision = 3,
soft_min = 0,
soft_max = 100,
subtype = 'PERCENTAGE'
)
Scene.arm_thickness = FloatProperty(
name = "Arm Thickness",
description = "How much arm thickness should be kept or added when scaling",
default = 50,
step = 1,
precision = 3,
soft_min = 0,
soft_max = 100,
subtype = 'PERCENTAGE'
)
Scene.leg_thickness = FloatProperty(
name = "Leg Thickness",
description = "How much leg thickness should be kept or added when scaling",
default = 50,
step = 1,
precision = 3,
soft_min = 0,
soft_max = 100,
subtype = 'PERCENTAGE'
)
Scene.extra_leg_length = FloatProperty(
name = "Extra Leg Length",
description = "How far beneath the real floor should the model's legs go - how far below the real floor should the vrchat floor be. This is calculated before scaling so the",
default = 0,
step = 0.01,
precision = 3,
soft_min = -1,
soft_max = 1,
subtype = 'DISTANCE'
)
Scene.thigh_percentage = FloatProperty(
name = "Upper Leg Percent",
description = "Percentage of the distance from the hips to the heel that should be taken up by the upper leg",
default = 53,
step = 1,
precision = 3,
soft_min = 10,
soft_max = 90,
subtype = 'PERCENTAGE'
)
Scene.scale_hand = BoolProperty(
name = "Scale hand",
description = "Toggle for scaling the hand with the arm",
default = False
)
Scene.scale_foot = BoolProperty(
name = "Scale foot",
description = "Toggle for scaling the foot with the leg",
default = False
)
Scene.center_model = BoolProperty(
name = "Center Model",
description = "Toggle for centering the model on x,y = 0,0",
default = False
)
Scene.debug_no_scale = BoolProperty(
name = "Skip Height Scaling",
description = "Toggle for the final scaling phase",
default = False
)
Scene.debug_no_floor = BoolProperty(
name = "Skip move to floor",
description = "Toggle for the scaling phase",
default = False
)
Scene.debug_no_adjust = BoolProperty(
name = "Skip Main Rescale",
description = "Toggle for the first adjustment phase",
default = False
)
Scene.scale_eyes = BoolProperty(
name = "Scale to Eyes",
description = "Target height targets eyes instead of the highest vertex",
default = False
)
# Finger spreading
Scene.spare_thumb = BoolProperty(
name = "Ignore thumb",
description = "Toggle if the thumb should be adjusted in addition to the body",
default = True
)
Scene.spread_factor = FloatProperty(
name = "Spread Factor",
description = "Value showing how much fingers should be rotated. 1 is default, and will cause the finger bone to point directly away from the head of the wrist bone.",
default = 1,
step = .1,
precision = 2,
soft_min = 0,
soft_max = 2,
subtype = 'FACTOR'
)
# UI options
bpy.types.Scene.imscale_show_customize = bpy.props.BoolProperty(name='Show customize panel', default=False)
bpy.types.Scene.imscale_show_sf_custom = bpy.props.BoolProperty(name='Show customize panel', default=False)
bpy.types.Scene.imscale_show_debug = bpy.props.BoolProperty(name='Show debug panel', default=False)
class ImmersiveScalerMenu(bpy.types.Panel):
bl_label = 'Immersive Scaler Menu'
bl_idname = "VIEW3D_PT_imscale"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = "IMScale"
def draw(self, context):
scn = context.scene
layout = self.layout
box = layout.box()
col=box.column(align=True)
col.label(text="Avatar Rescale")
# Armature Rescale
split = col.row(align=True)
row = split.row(align=True)
row.prop(bpy.context.scene, 'target_height', expand=True)
row = split.row(align=True)
row.alignment = 'RIGHT'
row.operator("armature.get_avatar_height", text="", icon="EMPTY_SINGLE_ARROW")
row = col.row(align=True)
row.prop(bpy.context.scene, 'arm_to_legs', expand=True)
# row = col.row(align=True)
# row.prop(bpy.context.scene, 'scale_hand', expand=True)
# row = col.row(align=True)
# row.prop(bpy.context.scene, 'scale_foot', expand=True)
# Customization options
row = col.row(align=False)
if scn.imscale_show_customize:
row.prop(scn, "imscale_show_customize", icon="DOWNARROW_HLT", text="", emboss=False)
else:
row.prop(scn, "imscale_show_customize", icon="RIGHTARROW", text="", emboss=False)
row.label(text="Customization")
if scn.imscale_show_customize:
row = col.row(align=True)
row.prop(bpy.context.scene, 'arm_thickness', expand=True)
row = col.row(align=True)
row.prop(bpy.context.scene, 'leg_thickness', expand=True)
row = col.row(align=True)
row.prop(bpy.context.scene, 'thigh_percentage', expand=True)
row = col.row(align=True)
row.prop(bpy.context.scene, 'extra_leg_length', expand=True)
row = col.row(align=True)
row.prop(bpy.context.scene, 'scale_eyes', expand=True)
# Debug/section toggle options
row = col.row(align=False)
if scn.imscale_show_debug:
row.prop(scn, "imscale_show_debug", icon="DOWNARROW_HLT", text="", emboss=False)
else:
row.prop(scn, "imscale_show_debug", icon="RIGHTARROW", text="", emboss=False)
row.label(text="Core functionality toggle")
if scn.imscale_show_debug:
row = col.row(align=True)
row.prop(bpy.context.scene, 'debug_no_adjust', expand=True)
row = col.row(align=True)
row.prop(bpy.context.scene, 'debug_no_floor', expand=True)
row = col.row(align=True)
row.prop(bpy.context.scene, 'debug_no_scale', expand=True)
row = col.row(align=True)
row.label(text="-------------")
row = col.row(align=True)
row.prop(bpy.context.scene, 'center_model', expand=True)
row = col.row(align=True)
row.scale_y=1.1
op = row.operator("armature.rescale", text="Rescale Armature")
# Spread Fingers
box = layout.box()
col=box.column(align=True)
col.label(text="Finger Spreading")
row = col.row(align=False)
if scn.imscale_show_sf_custom:
row.prop(scn, "imscale_show_sf_custom", icon="DOWNARROW_HLT", text="", emboss=False)
else:
row.prop(scn, "imscale_show_sf_custom", icon="RIGHTARROW", text="", emboss=False)
row.label(text="Customization")
if scn.imscale_show_sf_custom:
row = col.row(align=True)
row.prop(context.scene, 'spare_thumb')
row = col.row(align=False)
row.prop(context.scene, 'spread_factor')
row = col.row(align=True)
row.label(text="-------------")
row.scale_y=1.1
row = col.row(align=False)
row.operator("armature.spreadfingers", text="Spread Fingers")
# Shrink Hip
box = layout.box()
col=box.column(align=True)
col.label(text="Hip fix (beta)")
row.scale_y=1.1
row = col.row(align=True)
row.operator("armature.shrink_hips", text="Shrink Hip bone")
return None
def ui_register():
set_properties()
make_annotations(ImmersiveScalerMenu)
bpy.utils.register_class(ImmersiveScalerMenu)
def ui_unregister():
bpy.utils.unregister_class(ImmersiveScalerMenu)
if __name__ == "__main__":
    ui_register()
| 34.032491
| 183
| 0.587462
|
7f14b7d30035c55885f1ccc262c5b29a351d191c
| 16,545
|
py
|
Python
|
trestle/core/commands/author/ssp.py
|
guyzyl/compliance-trestle
|
b6fa6f5d8bfdb52e0a82fc0accd63c11d04d9afc
|
[
"Apache-2.0"
] | 1
|
2022-01-07T01:11:03.000Z
|
2022-01-07T01:11:03.000Z
|
trestle/core/commands/author/ssp.py
|
guyzyl/compliance-trestle
|
b6fa6f5d8bfdb52e0a82fc0accd63c11d04d9afc
|
[
"Apache-2.0"
] | null | null | null |
trestle/core/commands/author/ssp.py
|
guyzyl/compliance-trestle
|
b6fa6f5d8bfdb52e0a82fc0accd63c11d04d9afc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create ssp from catalog and profile."""
import argparse
import logging
import pathlib
import traceback
from typing import Dict, List, Set
from ruamel.yaml import YAML
from ruamel.yaml.error import YAMLError
import trestle.core.generators as gens
import trestle.oscal.common as com
import trestle.oscal.profile as prof
import trestle.oscal.ssp as ossp
from trestle.core import const, err
from trestle.core.catalog_interface import CatalogInterface
from trestle.core.commands.author.common import AuthorCommonCommand
from trestle.core.commands.common.return_codes import CmdReturnCodes
from trestle.core.profile_resolver import ProfileResolver
from trestle.core.utils import as_list, none_if_empty
from trestle.core.validator_helper import regenerate_uuids
from trestle.utils import fs, log
logger = logging.getLogger(__name__)
class SSPGenerate(AuthorCommonCommand):
"""Generate SSP in markdown form from a Profile."""
name = 'ssp-generate'
def _init_arguments(self) -> None:
file_help_str = 'Name of the profile model in the trestle workspace'
self.add_argument('-p', '--profile', help=file_help_str, required=True, type=str)
self.add_argument('-o', '--output', help=const.HELP_MARKDOWN_NAME, required=True, type=str)
self.add_argument('-y', '--yaml-header', help=const.HELP_YAML_PATH, required=False, type=str)
self.add_argument(
'-phv',
'--preserve-header-values',
help=const.HELP_PRESERVE_HEADER_VALUES,
required=False,
action='store_true',
default=False
)
sections_help_str = (
'Comma separated list of section:alias pairs for sections to output.' + ' Otherwise defaults to all.'
)
self.add_argument('-s', '--sections', help=sections_help_str, required=False, type=str)
@staticmethod
def _sections_from_args(args: argparse.Namespace) -> Dict[str, str]:
sections = {}
if args.sections is not None:
section_tuples = args.sections.strip("'").split(',')
for section in section_tuples:
if ':' in section:
s = section.split(':')
sections[s[0].strip()] = s[1].strip()
else:
sections[section] = section
if 'statement' in sections.keys():
raise err.TrestleError('"statement" sections are not allowed ')
return sections
def _run(self, args: argparse.Namespace) -> int:
log.set_log_level_from_args(args)
trestle_root = args.trestle_root
if not fs.allowed_task_name(args.output):
logger.warning(f'{args.output} is not an allowed directory name')
return CmdReturnCodes.COMMAND_ERROR.value
profile_path = trestle_root / f'profiles/{args.profile}/profile.json'
yaml_header: dict = {}
if 'yaml_header' in args and args.yaml_header is not None:
try:
logging.debug(f'Loading yaml header file {args.yaml_header}')
yaml = YAML()
yaml_header = yaml.load(pathlib.Path(args.yaml_header).open('r'))
except YAMLError as e:
logging.warning(f'YAML error loading yaml header for ssp generation: {e}')
return CmdReturnCodes.COMMAND_ERROR.value
markdown_path = trestle_root / args.output
profile_resolver = ProfileResolver()
try:
resolved_catalog = profile_resolver.get_resolved_profile_catalog(trestle_root, profile_path)
catalog_interface = CatalogInterface(resolved_catalog)
except Exception as e:
logger.error(f'Error creating the resolved profile catalog: {e}')
logger.debug(traceback.format_exc())
return CmdReturnCodes.COMMAND_ERROR.value
try:
sections = SSPGenerate._sections_from_args(args)
if sections == {}:
s_list = catalog_interface.get_sections()
for item in s_list:
sections[item] = item
logger.debug(f'ssp sections: {sections}')
except err.TrestleError:
logger.warning('"statement" section is not allowed.')
return CmdReturnCodes.COMMAND_ERROR.value
try:
catalog_interface.write_catalog_as_markdown(
markdown_path,
yaml_header,
sections,
True,
False,
None,
preserve_header_values=args.preserve_header_values
)
except Exception as e:
logger.error(f'Error writing the catalog as markdown: {e}')
logger.debug(traceback.format_exc())
return CmdReturnCodes.COMMAND_ERROR.value
return CmdReturnCodes.SUCCESS.value
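# Illustrative sketch (not executed by trestle itself; the section names below are made up)
# of how the --sections argument parses via _sections_from_args:
#
#   from argparse import Namespace
#   SSPGenerate._sections_from_args(Namespace(sections='ImplGuidance:Implementation Guidance,ExpectedEvidence'))
#   # -> {'ImplGuidance': 'Implementation Guidance', 'ExpectedEvidence': 'ExpectedEvidence'}
#
# An alias defaults to the section name when no ':' is given, and a 'statement' key raises TrestleError.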
class SSPAssemble(AuthorCommonCommand):
"""Assemble markdown files of controls into an SSP json file."""
name = 'ssp-assemble'
def _init_arguments(self) -> None:
file_help_str = 'Name of the input markdown file directory'
self.add_argument('-m', '--markdown', help=file_help_str, required=True, type=str)
output_help_str = 'Name of the output generated json SSP'
self.add_argument('-o', '--output', help=output_help_str, required=True, type=str)
self.add_argument('-r', '--regenerate', action='store_true', help=const.HELP_REGENERATE)
def _merge_imp_reqs(
self, ssp: ossp.SystemSecurityPlan, imp_reqs: List[ossp.ImplementedRequirement], regenerate: bool
) -> None:
"""
Merge the new imp_reqs into the ssp and optionally regenerate uuids.
If a statement has the same id and the same by_comp uuid as one already in the ssp, reuse the ssp version's uuids with the new description.
Otherwise just insert the statement.
When the statement was loaded it had access to the current components, so the uuids should match.
"""
id_map: Dict[str, Dict[str, ossp.Statement]] = {}
control_map: Dict[str, ossp.ImplementedRequirement] = {}
for imp_req in ssp.control_implementation.implemented_requirements:
control_map[imp_req.control_id] = imp_req
for statement in imp_req.statements:
for by_comp in statement.by_components:
id_ = statement.statement_id
if id_ not in id_map:
id_map[id_] = {}
id_map[id_][by_comp.component_uuid] = statement
for imp_req in imp_reqs:
if imp_req.control_id in control_map:
imp_req.uuid = control_map[imp_req.control_id].uuid
for statement in as_list(imp_req.statements):
id_ = statement.statement_id
# for each statement id match the statement per component to the original
if id_ in id_map:
comp_dict = id_map[id_]
for by_comp in as_list(statement.by_components):
if by_comp.component_uuid in comp_dict:
statement.uuid = comp_dict[by_comp.component_uuid].uuid
for orig_by_comp in as_list(comp_dict[by_comp.component_uuid].by_components):
if orig_by_comp.component_uuid == by_comp.component_uuid:
by_comp.uuid = orig_by_comp.uuid
break
ssp.control_implementation.implemented_requirements = imp_reqs
if regenerate:
regenerate_uuids(ssp)
def _generate_roles_in_metadata(self, ssp: ossp.SystemSecurityPlan) -> None:
"""Find all roles referenced by imp reqs and create role in metadata as needed."""
metadata = ssp.metadata
metadata.roles = as_list(metadata.roles)
known_role_ids = [role.id for role in metadata.roles]
for imp_req in ssp.control_implementation.implemented_requirements:
role_ids = [resp_role.role_id for resp_role in as_list(imp_req.responsible_roles)]
for role_id in role_ids:
if role_id not in known_role_ids:
role = com.Role(id=role_id, title=role_id)
metadata.roles.append(role)
known_role_ids.append(role_id)
metadata.roles = none_if_empty(metadata.roles)
def _run(self, args: argparse.Namespace) -> int:
log.set_log_level_from_args(args)
trestle_root = pathlib.Path(args.trestle_root)
md_path = trestle_root / args.markdown
# if ssp already exists - should load it rather than make new one
ssp_path = fs.path_for_top_level_model(
trestle_root, args.output, ossp.SystemSecurityPlan, fs.FileContentType.JSON
)
ssp: ossp.SystemSecurityPlan
comp_dict: Dict[str, ossp.SystemComponent] = {}
try:
# need to load imp_reqs from markdown but need component first
if ssp_path.exists():
# load the existing json ssp
_, _, ssp = fs.load_distributed(ssp_path, trestle_root)
for component in ssp.system_implementation.components:
comp_dict[component.title] = component
# read the new imp reqs from markdown and have them reference existing components
imp_reqs = CatalogInterface.read_catalog_imp_reqs(md_path, comp_dict)
self._merge_imp_reqs(ssp, imp_reqs, args.regenerate)
else:
# create a sample ssp to hold all the parts
ssp = gens.generate_sample_model(ossp.SystemSecurityPlan)
# load the imp_reqs from markdown and create components as needed, referenced by ### headers
imp_reqs = CatalogInterface.read_catalog_imp_reqs(md_path, comp_dict)
# create system implementation
system_imp: ossp.SystemImplementation = gens.generate_sample_model(ossp.SystemImplementation)
ssp.system_implementation = system_imp
# create a control implementation to hold the implementated requirements
control_imp: ossp.ControlImplementation = gens.generate_sample_model(ossp.ControlImplementation)
control_imp.implemented_requirements = imp_reqs
control_imp.description = const.SSP_SYSTEM_CONTROL_IMPLEMENTATION_TEXT
# insert the parts into the ssp
ssp.control_implementation = control_imp
ssp.system_implementation = system_imp
# we don't have access to the original profile so we don't know the href
import_profile: ossp.ImportProfile = gens.generate_sample_model(ossp.ImportProfile)
import_profile.href = 'REPLACE_ME'
ssp.import_profile = import_profile
# now that we know the complete list of needed components, add them to the sys_imp
# TODO if the ssp already existed then components may need to be removed if not ref'd by imp_reqs
ssp.system_implementation.components = []
for comp in comp_dict.values():
ssp.system_implementation.components.append(comp)
self._generate_roles_in_metadata(ssp)
except Exception as e:
logger.warning(f'Error assembling the ssp from markdown: {e}')
logger.debug(traceback.format_exc())
return CmdReturnCodes.COMMAND_ERROR.value
# write out the ssp as json
try:
fs.save_top_level_model(ssp, trestle_root, args.output, fs.FileContentType.JSON)
except Exception as e:
logger.warning(f'Error saving the generated ssp: {e}')
logger.debug(traceback.format_exc())
return CmdReturnCodes.COMMAND_ERROR.value
return CmdReturnCodes.SUCCESS.value
class SSPFilter(AuthorCommonCommand):
"""Filter the controls in an ssp based on files included by profile."""
name = 'ssp-filter'
def _init_arguments(self) -> None:
file_help_str = 'Name of the input ssp'
self.add_argument('-n', '--name', help=file_help_str, required=True, type=str)
file_help_str = 'Name of the input profile that defines set of controls in output ssp'
self.add_argument('-p', '--profile', help=file_help_str, required=True, type=str)
output_help_str = 'Name of the output generated SSP'
self.add_argument('-o', '--output', help=output_help_str, required=True, type=str)
self.add_argument('-r', '--regenerate', action='store_true', help=const.HELP_REGENERATE)
def _run(self, args: argparse.Namespace) -> int:
log.set_log_level_from_args(args)
trestle_root = pathlib.Path(args.trestle_root)
return self.filter_ssp(trestle_root, args.name, args.profile, args.output, args.regenerate)
def filter_ssp(self, trestle_root: pathlib.Path, ssp_name: str, profile_name: str, out_name: str, regenerate: bool):
"""Filter the ssp based on the profile and output new ssp."""
ssp: ossp.SystemSecurityPlan
try:
ssp, _ = fs.load_top_level_model(trestle_root, ssp_name, ossp.SystemSecurityPlan, fs.FileContentType.JSON)
profile_path = fs.path_for_top_level_model(
trestle_root, profile_name, prof.Profile, fs.FileContentType.JSON
)
prof_resolver = ProfileResolver()
catalog = prof_resolver.get_resolved_profile_catalog(trestle_root, profile_path)
catalog_interface = CatalogInterface(catalog)
# The input ssp should reference a superset of the controls referenced by the profile
# Need to cull references in the ssp to controls not in the profile
# Also make sure the output ssp contains imp reqs for all controls in the profile
control_imp = ssp.control_implementation
ssp_control_ids: Set[str] = set()
set_params = control_imp.set_parameters
new_set_params: List[ossp.SetParameter] = []
if set_params is not None:
for set_param in set_params:
control = catalog_interface.get_control_by_param_id(set_param.param_id)
if control is not None:
new_set_params.append(set_param)
ssp_control_ids.add(control.id)
control_imp.set_parameters = new_set_params if new_set_params else None
imp_requirements = control_imp.implemented_requirements
new_imp_requirements: List[ossp.ImplementedRequirement] = []
if imp_requirements is not None:
for imp_requirement in imp_requirements:
control = catalog_interface.get_control(imp_requirement.control_id)
if control is not None:
new_imp_requirements.append(imp_requirement)
ssp_control_ids.add(control.id)
control_imp.implemented_requirements = new_imp_requirements
# make sure all controls in the profile have implemented reqs in the final ssp
if not ssp_control_ids.issuperset(catalog_interface.get_control_ids()):
logger.warning('Unable to filter the ssp because the profile references controls not in it.')
logger.debug(traceback.format_exc())
return CmdReturnCodes.COMMAND_ERROR.value
ssp.control_implementation = control_imp
if regenerate:
regenerate_uuids(ssp)
fs.save_top_level_model(ssp, trestle_root, out_name, fs.FileContentType.JSON)
except Exception as e:
logger.warning(f'Error generating the filtered ssp: {e}')
logger.debug(traceback.format_exc())
return CmdReturnCodes.COMMAND_ERROR.value
return CmdReturnCodes.SUCCESS.value
| 47.002841
| 120
| 0.648837
|
1f172b3e1e4aeb161ce706b994566c0007212829
| 3,827
|
py
|
Python
|
setup.py
|
sparkfun/Qwiic_Micro_OLED_Py
|
d174b2f271dfd68714f67a77425c2c223735e156
|
[
"MIT"
] | 1
|
2021-11-25T05:52:50.000Z
|
2021-11-25T05:52:50.000Z
|
setup.py
|
sparkfun/Qwiic_Micro_OLED_Py
|
d174b2f271dfd68714f67a77425c2c223735e156
|
[
"MIT"
] | 2
|
2021-02-19T20:01:13.000Z
|
2021-10-07T04:49:29.000Z
|
setup.py
|
sparkfun/Qwiic_Micro_OLED_Py
|
d174b2f271dfd68714f67a77425c2c223735e156
|
[
"MIT"
] | 2
|
2020-01-28T13:40:41.000Z
|
2021-06-18T22:01:39.000Z
|
#------------------------------------------------------------------------
#
# This is a python install script written for a qwiic python package.
#
# Written by SparkFun Electronics, May 2021
#
# This python library supports the SparkFun Electronics qwiic
# ecosystem, providing a platform-independent interface to the
# I2C bus.
#
# More information on qwiic is at https://www.sparkfun.com/qwiic
#
# Do you like this library? Help support SparkFun. Buy a board!
#
#==================================================================================
# Copyright (c) 2021 SparkFun Electronics
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#==================================================================================
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from os import path
import io
here = path.abspath(path.dirname(__file__))
# get the long description
with io.open(path.join(here, "DESCRIPTION.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name='sparkfun_qwiic_micro_oled',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='0.10.0',
description='SparkFun Electronics qwiic Micro OLED package',
long_description=long_description,
# The project's main homepage.
url='http://www.sparkfun.com/qwiic',
# Author details
author='SparkFun Electronics',
author_email='info@sparkfun.com',
install_requires=['sparkfun_qwiic_i2c', "sparkfun_qwiic_oled_base"],
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# What does your project relate to?
keywords='electronics, maker',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
py_modules=["qwiic_micro_oled"],
)
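# Illustrative install note (an assumption based only on this metadata, not a confirmed release):
# if the package is published under the name above, `pip install sparkfun_qwiic_micro_oled` makes
# the single module declared in py_modules importable as `import qwiic_micro_oled`; the driver
# API itself lives in that module and is not shown in this file.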
| 36.798077
| 86
| 0.666841
|
8a0b8938229802a1ea70b0b86ba8108077b6f665
| 3,328
|
py
|
Python
|
python/dxa/deterministic_short_rate.py
|
portfolioscout/py4fi
|
9a65df340189ed52037456da221bf66fe89e787f
|
[
"CNRI-Python"
] | 15
|
2018-07-10T09:18:23.000Z
|
2021-12-30T06:35:09.000Z
|
python3/dxa/deterministic_short_rate.py
|
ioancw/py4fi
|
bbf7b41d375e4f7b0344bc9b1e97d7910ad1e6ec
|
[
"CNRI-Python"
] | 2
|
2020-10-27T19:44:15.000Z
|
2020-11-03T23:55:36.000Z
|
python3/dxa/deterministic_short_rate.py
|
ioancw/py4fi
|
bbf7b41d375e4f7b0344bc9b1e97d7910ad1e6ec
|
[
"CNRI-Python"
] | 13
|
2018-01-08T01:10:22.000Z
|
2021-05-26T17:35:35.000Z
|
'''
==== Deterministic Short Rate
Interest rates in general and short rates in particular are not constant over time. Rather, you observe something called a term structure of interest rates in financial markets. Simply speaking, this means that a ZCB maturing at latexmath:[$s \geq 0$] will have a different yield than another bond of the same type maturing at latexmath:[$t \geq s$]. _Yield_ in this case is defined as the quantity latexmath:[$y_t$] that solves the equation latexmath:[$D_0(t)=e^{-y_t t}$] for a ZCB maturing at latexmath:[$t$]. Analogously, yield is also the quantity latexmath:[$y_s$] that solves the equation latexmath:[$D_0(s)=e^{-y_s s}$] for a ZCB maturing at latexmath:[$s$].'''
from constant_short_rate import *
class deterministic_short_rate(object):
''' Class for discounting based on deterministic short rates,
derived from a term structure of unit Zero-Coupon Bond yields
Attributes
==========
name : string
name of the object
yield_list : list/array of (time, yield) tuples
input yields with time attached
Methods
=======
get_interpolated_yields :
return interpolated yield curve given a time list/array
get_forward_rates :
return forward rates given a time list/array
get_discount_factors :
return discount factors given a time list/array
'''
def __init__(self, name, yield_list):
self.name = name
self.yield_list = np.array(yield_list)
if np.sum(np.where(self.yield_list[:, 1] < 0, 1, 0)) > 0:
raise ValueError('Negative yield(s).')
def get_interpolated_yields(self, time_list, dtobjects=True):
''' time_list is either a list of datetime objects or a list of
year deltas as decimal numbers (dtobjects=False)
'''
if dtobjects is True:
tlist = get_year_deltas(time_list)
else:
tlist = time_list
dlist = get_year_deltas(self.yield_list[:, 0])
if len(time_list) <= 3:
k = 1
else:
k = 3
yield_spline = sci.splrep(dlist, self.yield_list[:, 1], k=k)
yield_curve = sci.splev(tlist, yield_spline, der=0)
yield_deriv = sci.splev(tlist, yield_spline, der=1)
return np.array([time_list, yield_curve, yield_deriv]).T
def get_forward_rates(self, time_list, dtobjects=True):
yield_curve = self.get_interpolated_yields(time_list, dtobjects)
if dtobjects is True:
tlist = get_year_deltas(time_list)
else:
tlist = time_list
forward_rate = yield_curve[:, 1] + yield_curve[:, 2] * tlist
return np.array((time_list, forward_rate)).T
def get_discount_factors(self, time_list, dtobjects=True):
discount_factors = []
if dtobjects is True:
dlist = get_year_deltas(time_list)
else:
dlist = time_list
forward_rate = self.get_forward_rates(time_list, dtobjects)
for no in range(len(dlist)):
factor = 0.0
for d in range(no, len(dlist) - 1):
factor += ((dlist[d + 1] - dlist[d])
* (0.5 * (forward_rate[d + 1, 1] + forward_rate[d, 1])))
discount_factors.append(np.exp(-factor))
return np.array((time_list, discount_factors)).T
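# A minimal usage sketch (assumptions: constant_short_rate exposes np and get_year_deltas, as the
# methods above already rely on, and the dates/yields below are invented for illustration):
if __name__ == '__main__':
    import datetime as dt
    yields = [(dt.datetime(2015, 1, 1), 0.02),
              (dt.datetime(2015, 7, 1), 0.01),
              (dt.datetime(2016, 1, 1), 0.015)]
    dsr = deterministic_short_rate('dsr', yields)
    times = [dt.datetime(2015, 1, 1), dt.datetime(2015, 7, 1), dt.datetime(2016, 1, 1)]
    # interpolated yields, instantaneous forward rates and discount factors on the same grid
    print(dsr.get_interpolated_yields(times))
    print(dsr.get_forward_rates(times))
    print(dsr.get_discount_factors(times))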
| 47.542857
| 668
| 0.640925
|
7c81df714cb95571c73a9a741a1de700f0fe6b1b
| 10,462
|
py
|
Python
|
tests/utils/test_utils_check_copies.py
|
dctelus/transformers
|
6786cbc4b14ebff0ac59c768cadd109391db9a08
|
[
"Apache-2.0"
] | 8,028
|
2018-11-05T15:19:44.000Z
|
2019-07-16T09:14:59.000Z
|
tests/utils/test_utils_check_copies.py
|
arron1227/transformers
|
b18dfd95e1f60ae65a959a7b255fc06522170d1b
|
[
"Apache-2.0"
] | 731
|
2018-11-05T21:35:52.000Z
|
2019-07-16T09:51:26.000Z
|
tests/utils/test_utils_check_copies.py
|
arron1227/transformers
|
b18dfd95e1f60ae65a959a7b255fc06522170d1b
|
[
"Apache-2.0"
] | 2,106
|
2018-11-05T15:29:15.000Z
|
2019-07-16T08:51:57.000Z
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class CopyCheckTester(unittest.TestCase):
def setUp(self):
self.transformer_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
check_copies.TRANSFORMER_PATH = self.transformer_dir
shutil.copy(
os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
)
def tearDown(self):
check_copies.TRANSFORMER_PATH = "src/transformers"
shutil.rmtree(self.transformer_dir)
def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
code = black.format_str(code, mode=mode)
fname = os.path.join(self.transformer_dir, "new_code.py")
with open(fname, "w", newline="\n") as f:
f.write(code)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
else:
check_copies.is_copy_consistent(f.name, overwrite=True)
with open(fname, "r") as f:
self.assertEqual(f.read(), expected)
def test_find_code_in_transformers(self):
code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
self.assertEqual(code, REFERENCE_CODE)
def test_is_copy_consistent(self):
# Base copy consistency
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
"BertLMPredictionHead",
REFERENCE_CODE + "\n",
)
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
"BertLMPredictionHead",
REFERENCE_CODE,
)
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
"TestModelLMPredictionHead",
re.sub("Bert", "TestModel", REFERENCE_CODE),
)
# Copy consistency with a really long name
long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
f"{long_class_name}LMPredictionHead",
re.sub("Bert", long_class_name, REFERENCE_CODE),
)
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
"TestModelLMPredictionHead",
REFERENCE_CODE,
overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
)
def test_convert_to_localized_md(self):
localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
md_list = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1. **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning."
localized_md_list = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
converted_md_list_sample = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1. **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文 [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。\n"
num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
md_list, localized_md_list, localized_readme["format_model_list"]
)
self.assertFalse(num_models_equal)
self.assertEqual(converted_md_list, converted_md_list_sample)
num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
md_list, converted_md_list, localized_readme["format_model_list"]
)
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(num_models_equal)
link_changed_md_list = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
link_unchanged_md_list = "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
converted_md_list_sample = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
)
# Check if the model link is synchronized.
self.assertEqual(converted_md_list, converted_md_list_sample)
| 67.064103
| 1,428
| 0.734754
|
39c6765592fd7320bfc86065a1162fe89ae255de
| 1,012
|
py
|
Python
|
kubernetes/test/test_v1beta2_stateful_set_list.py
|
Scalr/kubernetes-client-python
|
07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38
|
[
"Apache-2.0"
] | 3
|
2019-05-19T05:05:37.000Z
|
2020-03-20T04:56:20.000Z
|
kubernetes/test/test_v1beta2_stateful_set_list.py
|
Scalr/kubernetes-client-python
|
07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1beta2_stateful_set_list.py
|
Scalr/kubernetes-client-python
|
07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta2_stateful_set_list import V1beta2StatefulSetList
class TestV1beta2StatefulSetList(unittest.TestCase):
""" V1beta2StatefulSetList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta2StatefulSetList(self):
"""
Test V1beta2StatefulSetList
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1beta2_stateful_set_list.V1beta2StatefulSetList()
pass
if __name__ == '__main__':
unittest.main()
| 22.488889
| 105
| 0.72332
|
53a37d82757770667a009b3d0f2fa3a51b54d09f
| 2,171
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/ohlc/_hoverlabel.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 7
|
2022-01-16T12:28:16.000Z
|
2022-03-04T15:31:45.000Z
|
packages/python/plotly/plotly/validators/ohlc/_hoverlabel.py
|
jiangrongbo/plotly.py
|
df19fc702b309586cc24e25373b87e8bdbb3ff60
|
[
"MIT"
] | 14
|
2021-10-20T23:33:47.000Z
|
2021-12-21T04:50:37.000Z
|
packages/python/plotly/plotly/validators/ohlc/_hoverlabel.py
|
jiangrongbo/plotly.py
|
df19fc702b309586cc24e25373b87e8bdbb3ff60
|
[
"MIT"
] | 1
|
2021-11-29T22:55:05.000Z
|
2021-11-29T22:55:05.000Z
|
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="hoverlabel", parent_name="ohlc", **kwargs):
super(HoverlabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
data_docs=kwargs.pop(
"data_docs",
"""
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for `align`.
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for `bgcolor`.
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for `bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for `namelength`.
split
Show hover information (open, close, high, low)
in separate labels.
""",
),
**kwargs
)
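# Illustrative trace definition that this validator would check (a sketch assuming the standard
# plotly.graph_objects API; the sample values are made up):
#
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Ohlc(
#       x=['2021-01-04', '2021-01-05'],
#       open=[133, 135], high=[136, 137], low=[132, 134], close=[135, 136],
#       hoverlabel=dict(bgcolor='white', namelength=15, split=True),
#   ))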
| 39.472727
| 79
| 0.543528
|
5d1a16e0cea91567ea14d5b134ddb0ece2d82f3e
| 4,006
|
py
|
Python
|
code.py
|
kadc87/recommend-clothing-size
|
a8f177b4cd1de5f406ffce63c8cf68090847b12b
|
[
"MIT"
] | null | null | null |
code.py
|
kadc87/recommend-clothing-size
|
a8f177b4cd1de5f406ffce63c8cf68090847b12b
|
[
"MIT"
] | null | null | null |
code.py
|
kadc87/recommend-clothing-size
|
a8f177b4cd1de5f406ffce63c8cf68090847b12b
|
[
"MIT"
] | null | null | null |
# --------------
# import the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# Code starts here
df = pd.read_json(path, lines = True)
df.columns = ['bra_size', 'bust', 'category', 'cup_size', 'fit', 'height', 'hips',
'item_id', 'length', 'quality', 'review_summary', 'review_text',
'shoe_size', 'shoe_width', 'size', 'user_id', 'user_name', 'waist']
missing_data = pd.DataFrame({'total_missing': df.isnull().sum(), 'perc_missing': (df.isnull().sum()/82790)*100})
df.drop(columns = ['waist', 'bust', 'user_name', 'review_text', 'review_summary', 'shoe_size', 'shoe_width'], inplace = True)
#print(df.head(5))
X = df.drop(columns = ['fit'])
y = df['fit'].copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33, random_state= 6)
# Code ends here
# --------------
def plot_barh(df,col, cmap = None, stacked=False, norm = None):
df.plot(kind='barh', colormap=cmap, stacked=stacked)
fig = plt.gcf()
fig.set_size_inches(24,12)
plt.title("Category vs {}-feedback - cloth {}".format(col, '(Normalized)' if norm else ''), fontsize= 20)
plt.ylabel('Category', fontsize = 18)
plot = plt.xlabel('Frequency', fontsize=18)
# Code starts here
g_by_category = df.groupby('category')
cat_fit = g_by_category['fit'].value_counts()
cat_fit.unstack()
cat_fit.plot(kind = 'bar')
# Code ends here
# --------------
# Code starts here
cat_len = g_by_category['length'].value_counts()
cat_len = cat_len.unstack()
plot_barh(cat_len, 'length')
# Code ends here
# --------------
# function to convert a height given in feet and inches to centimetres
def get_cms(x):
if type(x) == type(1.0):
return
#print(x)
try:
return (int(x[0])*30.48) + (int(x[4:-2])*2.54)
except:
return (int(x[0])*30.48)
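# Worked example (the height string format is assumed from the slicing above): a value such as
# '5ft 6in' maps to 5*30.48 + 6*2.54 = 167.64 cm, while non-string values (e.g. NaN) return None
# and are dropped by the dropna call further down.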
# apply on train data
X_train.height = X_train.height.apply(get_cms)
# apply on testing set
X_test.height = X_test.height.apply(get_cms)
# --------------
# Code starts here
X_train.isnull().sum()
X_train.dropna(axis = 0, subset = ['height', 'length', 'quality'], inplace = True)
X_test.dropna(axis = 0, subset = ['height', 'length', 'quality'], inplace = True)
X_train['bra_size'].fillna((X_train['bra_size'].mean()), inplace=True)
X_test['bra_size'].fillna((X_test['bra_size'].mean()), inplace=True)
X_train['hips'].fillna((X_train['hips'].mean()), inplace=True)
X_test['hips'].fillna((X_test['hips'].mean()), inplace=True)
mode_1 = X_train['cup_size'].mode()[0]
mode_2 = X_test['cup_size'].mode()[0]
X_train['cup_size']=X_train['cup_size'].replace(np.nan,mode_1)
X_test['cup_size']=X_test['cup_size'].replace(np.nan,mode_1)
# Code ends here
# --------------
# Code starts here
X_train =pd.get_dummies(data=X_train,columns=["category", "cup_size","length"],prefix=["category", "cup_size","length"])
X_test = pd.get_dummies(data=X_test,columns=["category", "cup_size","length"],prefix=["category", "cup_size","length"])
# Code ends here
# --------------
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
# Code starts here
model = DecisionTreeClassifier(random_state = 6)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
score = accuracy_score(y_test, y_pred)
print(score)
precision = precision_score(y_test, y_pred, average = None)
print(precision)
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
# parameters for grid search
parameters = {'max_depth':[5,10],'criterion':['gini','entropy'],'min_samples_leaf':[0.5,1]}
# Code starts here
model = DecisionTreeClassifier(random_state = 6)
grid = GridSearchCV(estimator = model, param_grid = parameters)
grid.fit(X_train, y_train)
y_pred = grid.predict(X_test)
accuracy = grid.score(X_test, y_test)
print(accuracy)
# Code ends here
| 27.251701
| 125
| 0.681228
|
7cf3ab6a2aadb66558311e112c142e821fe19b4d
| 10,048
|
py
|
Python
|
blog/views.py
|
marthaurion/django_blog
|
98b2bc0baf72fa6fd6dee3562b74440162a00b41
|
[
"MIT"
] | 1
|
2017-04-25T10:16:59.000Z
|
2017-04-25T10:16:59.000Z
|
blog/views.py
|
marthaurion/blog_django
|
98b2bc0baf72fa6fd6dee3562b74440162a00b41
|
[
"MIT"
] | null | null | null |
blog/views.py
|
marthaurion/blog_django
|
98b2bc0baf72fa6fd6dee3562b74440162a00b41
|
[
"MIT"
] | null | null | null |
import datetime, time
from django.contrib.postgres.search import SearchVector
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormMixin
from django.views.generic.list import ListView
from django.views.generic.dates import YearArchiveView, MonthArchiveView, DayArchiveView
import logging
from taggit.models import Tag
from comments.views import CommentFormMixin
from .models import Post, Category, Media
from .helpers import PostPaginator
logger = logging.getLogger(__name__)
class MediaDetailView(DetailView):
model = Media
template_name = 'blog/image_detail.html'
context_object_name = 'img'
def get_object(self, *args, **kwargs):
return get_object_or_404(Media, image_name=self.kwargs['name'])
# for some of the shared stuff in these views
class PostListMixin(object):
paginate_by = 10
allow_empty = True
paginator_class = PostPaginator
context_object_name = 'post_list'
template_name = 'blog/post_index.html'
def get_queryset(self):
queryset = super().get_queryset()
return self.build_post_queryset(queryset)
def build_post_queryset(self, queryset):
return queryset.defer('body', 'body_html').select_related('category')
# display every published post
class PostIndexView(PostListMixin, ListView):
model = Post
ordering = '-pub_date'
def get_queryset(self):
return Post.published.all()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
working_page = 1
if 'page' in self.kwargs:
working_page = self.kwargs['page']
title = "Marth's Anime Blog"
if working_page > 1:
title += ' - Page ' + str(working_page)
context['page_title'] = title
context['base_url'] = '/blog/'
context['post_list'] = self.build_post_queryset(context['post_list'])
return context
# display all posts published in a given year
class PostYearView(PostListMixin, YearArchiveView):
model = Post
date_field = 'pub_date'
make_object_list = True
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
year = self.kwargs['year']
working_page = 1
if 'page' in self.kwargs:
working_page = self.kwargs['page']
dt = datetime.datetime(year, 1, 1)
title = "Posts from " + dt.strftime('%Y')
page_header = title
if working_page > 1:
title += ' - Page ' + str(working_page)
context['page_title'] = title
context['page_header'] = page_header
context['base_url'] = '/blog/%d/' % (year)
context['post_list'] = self.build_post_queryset(context['post_list'])
return context
# display all posts published in a given month
class PostMonthView(PostListMixin, MonthArchiveView):
model = Post
date_field = 'pub_date'
month_format = "%m"
make_object_list = True
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
year = self.kwargs['year']
month = self.kwargs['month']
working_page = 1
if 'page' in self.kwargs:
working_page = int(self.kwargs['page'])
dt = datetime.datetime(year, month, 1)
title = "Posts from " + dt.strftime('%B %Y')
page_header = title
if working_page > 1:
title += ' - Page ' + str(working_page)
context['page_title'] = title
context['page_header'] = page_header
context['base_url'] = '/blog/%d/%d/' % (year, month)
context['post_list'] = self.build_post_queryset(context['post_list'])
return context
# display all posts published on a given day
class PostDayView(PostListMixin, DayArchiveView):
model = Post
date_field = 'pub_date'
month_format = "%m"
make_object_list = True
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
year = self.kwargs['year']
month = self.kwargs['month']
day = self.kwargs['day']
working_page = 1
if 'page' in self.kwargs:
working_page = self.kwargs['page']
dt = datetime.datetime(year, month, day)
title = "Posts from " + dt.strftime('%B %d, %Y')
page_header = title
if working_page > 1:
title += ' - Page ' + str(working_page)
context['page_title'] = title
context['page_header'] = page_header
context['base_url'] = '/blog/%d/%d/%d/' % (year, month, day)
context['post_list'] = self.build_post_queryset(context['post_list'])
return context
# display all posts for a category
class CategoryListView(PostListMixin, ListView):
post_category = None
def get_queryset(self):
self.post_category = get_object_or_404(Category, slug=self.kwargs['slug'])
category_list = self.post_category.get_descendants(include_self=True)
posts = Post.published.filter(category__in=category_list)
return posts
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
slug = self.kwargs['slug']
working_page = 1
if 'page' in self.kwargs:
working_page = self.kwargs['page']
category = self.post_category
title = "Posts for Category: " + category.title
page_header = title
if working_page > 1:
title = title + " - Page " + str(working_page)
context['page_title'] = title
context['page_header'] = page_header
context['base_url'] = '/blog/category/%s/' % (slug)
context['post_list'] = self.build_post_queryset(context['post_list'])
return context
# display all posts for a tag
class TagListView(PostListMixin, ListView):
def get_queryset(self):
posts = Post.published.filter(tags__slug__in=[self.kwargs['slug']])
return posts
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
slug = self.kwargs['slug']
working_page = 1
if 'page' in self.kwargs:
working_page = self.kwargs['page']
tag = get_object_or_404(Tag, slug=slug)
title = "Posts for Tag: " + tag.name
page_header = title
if working_page > 1:
title = title + " - Page " + str(working_page)
context['page_title'] = title
context['page_header'] = page_header
context['base_url'] = '/blog/tag/%s/' % (slug)
context['post_list'] = self.build_post_queryset(context['post_list'])
return context
# display a single post
class PostDetailView(CommentFormMixin, FormMixin, DetailView):
model = Post
template_name = 'blog/post_detail.html'
month_format = "%m"
def get(self, request, *args, **kwargs):
if 'email' in request.GET and 'comment' in request.GET:
self.unsubscribe_comment(request.GET['comment'], request.GET['email'])
return super().get(request, *args, **kwargs)
# override get object so that it gives a 404 error if you're looking at a post in the future and you're not an admin
def get_object(self, *args, **kwargs):
obj = super().get_object(*args, **kwargs)
if obj.pub_date>timezone.now(): # don't show future posts
if not self.request.user.is_active and not self.request.user.is_superuser: # only block if not an admin
raise Http404()
return obj
# add comment notify to context from session
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['comment_notify'] = self.request.session.get('comment_notify')
context['comment_username'] = self.request.session.get('comment_username')
if context['comment_username']:
context['comment_hidden'] = ' hidden'
else:
context['comment_hidden'] = ''
context['comment_list'] = self.object.approved_comments().select_related('author')
context['post_comment_url'] = self.object.get_absolute_url()
return context
# override the post function to handle the form values and create a comment
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form()
if form.is_valid():
return self.comment_check(request, form, self.object)
else:
return self.form_invalid(form)
class SearchResultsView(PostListMixin, ListView):
template_name = 'blog/search_index.html'
def get_queryset(self):
query = self.request.GET.get('q')
# PostgreSQL full-text search: match the query against title and body_html and order results
# by ts_rank_cd relevance
posts = Post.published.extra(
select={'rank': "ts_rank_cd(to_tsvector('english', title || ' ' || body_html), plainto_tsquery(%s), 32)"},
select_params=(query,),
where=("to_tsvector('english', title || ' ' || body_html) @@ plainto_tsquery(%s)",),
params=(query,),
order_by=('-rank',)
)
return posts
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
working_page = 1
if 'page' in self.kwargs:
working_page = self.kwargs['page']
title = "Search Results"
page_header = title
if working_page > 1:
title = title + " - Page " + str(working_page)
context['page_title'] = title
context['page_header'] = page_header
context['search'] = True
context['query'] = self.request.GET.get('q')
return context
| 34.768166
| 120
| 0.616242
|
0a2d4c042fd399f7e797992e79b251bf947b4b91
| 5,973
|
py
|
Python
|
rasa/core/channels/rocketchat.py
|
Neural-Space/rasa
|
7e7e5ec8511df9799f737196070381db7f1528d7
|
[
"Apache-2.0"
] | 37
|
2019-06-07T07:39:00.000Z
|
2022-01-27T08:32:57.000Z
|
rasa/core/channels/rocketchat.py
|
Neural-Space/rasa
|
7e7e5ec8511df9799f737196070381db7f1528d7
|
[
"Apache-2.0"
] | 209
|
2020-03-18T18:28:12.000Z
|
2022-03-01T13:42:29.000Z
|
rasa/core/channels/rocketchat.py
|
Neural-Space/rasa
|
7e7e5ec8511df9799f737196070381db7f1528d7
|
[
"Apache-2.0"
] | 65
|
2019-05-21T12:16:53.000Z
|
2022-02-23T10:54:15.000Z
|
import logging
from sanic import Blueprint, response
from sanic.request import Request
from typing import Text, Dict, Any, List, Iterable, Optional, Callable, Awaitable
from rasa.core.channels.channel import UserMessage, OutputChannel, InputChannel
from sanic.response import HTTPResponse
logger = logging.getLogger(__name__)
class RocketChatBot(OutputChannel):
@classmethod
def name(cls) -> Text:
return "rocketchat"
def __init__(self, user, password, server_url) -> None:
from rocketchat_API.rocketchat import RocketChat
self.rocket = RocketChat(user, password, server_url=server_url)
@staticmethod
def _convert_to_rocket_buttons(buttons: List[Dict]) -> List[Dict]:
return [
{
"text": b["title"],
"msg": b["payload"],
"type": "button",
"msg_in_chat_window": True,
}
for b in buttons
]
async def send_text_message(
self, recipient_id: Text, text: Text, **kwargs: Any
) -> None:
"""Send message to output channel"""
for message_part in text.strip().split("\n\n"):
self.rocket.chat_post_message(message_part, room_id=recipient_id)
async def send_image_url(
self, recipient_id: Text, image: Text, **kwargs: Any
) -> None:
image_attachment = [{"image_url": image, "collapsed": False}]
return self.rocket.chat_post_message(
None, room_id=recipient_id, attachments=image_attachment
)
async def send_attachment(
self, recipient_id: Text, attachment: Text, **kwargs: Any
) -> None:
return self.rocket.chat_post_message(
None, room_id=recipient_id, attachments=[attachment]
)
async def send_text_with_buttons(
self,
recipient_id: Text,
text: Text,
buttons: List[Dict[Text, Any]],
**kwargs: Any,
) -> None:
# implementation is based on
# https://github.com/RocketChat/Rocket.Chat/pull/11473
# should work in rocket chat >= 0.69.0
button_attachment = [{"actions": self._convert_to_rocket_buttons(buttons)}]
return self.rocket.chat_post_message(
text, room_id=recipient_id, attachments=button_attachment
)
async def send_elements(
self, recipient_id: Text, elements: Iterable[Dict[Text, Any]], **kwargs: Any
) -> None:
return self.rocket.chat_post_message(
None, room_id=recipient_id, attachments=elements
)
async def send_custom_json(
self, recipient_id: Text, json_message: Dict[Text, Any], **kwargs: Any
) -> None:
text = json_message.pop("text")
if json_message.get("channel"):
if json_message.get("room_id"):
logger.warning(
"Only one of `channel` or `room_id` can be passed to a RocketChat "
"message post. Defaulting to `channel`."
)
del json_message["room_id"]
return self.rocket.chat_post_message(text, **json_message)
else:
json_message.setdefault("room_id", recipient_id)
return self.rocket.chat_post_message(text, **json_message)
class RocketChatInput(InputChannel):
"""RocketChat input channel implementation."""
@classmethod
def name(cls) -> Text:
return "rocketchat"
@classmethod
def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel:
if not credentials:
cls.raise_missing_credentials_exception()
return cls(
credentials.get("user"),
credentials.get("password"),
credentials.get("server_url"),
)
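# Illustrative credentials entry consumed by from_credentials above (a sketch: the
# credentials.yml file name and the sample values are assumptions; only the three keys
# come from the code):
#
#   rocketchat:
#     user: "bot_user"
#     password: "bot_password"
#     server_url: "https://chat.example.com"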
def __init__(self, user: Text, password: Text, server_url: Text) -> None:
self.user = user
self.password = password
self.server_url = server_url
async def send_message(
self,
text: Optional[Text],
sender_name: Optional[Text],
recipient_id: Optional[Text],
on_new_message: Callable[[UserMessage], Awaitable[Any]],
metadata: Optional[Dict],
):
if sender_name != self.user:
output_channel = self.get_output_channel()
user_msg = UserMessage(
text,
output_channel,
recipient_id,
input_channel=self.name(),
metadata=metadata,
)
await on_new_message(user_msg)
def blueprint(
self, on_new_message: Callable[[UserMessage], Awaitable[Any]]
) -> Blueprint:
rocketchat_webhook = Blueprint("rocketchat_webhook", __name__)
@rocketchat_webhook.route("/", methods=["GET"])
async def health(_: Request) -> HTTPResponse:
return response.json({"status": "ok"})
@rocketchat_webhook.route("/webhook", methods=["GET", "POST"])
async def webhook(request: Request) -> HTTPResponse:
output = request.json
metadata = self.get_metadata(request)
if output:
if "visitor" not in output:
sender_name = output.get("user_name", None)
text = output.get("text", None)
recipient_id = output.get("channel_id", None)
else:
messages_list = output.get("messages", None)
text = messages_list[0].get("msg", None)
sender_name = messages_list[0].get("username", None)
recipient_id = output.get("_id")
await self.send_message(
text, sender_name, recipient_id, on_new_message, metadata
)
return response.text("")
return rocketchat_webhook
def get_output_channel(self) -> OutputChannel:
return RocketChatBot(self.user, self.password, self.server_url)
| 33.745763
| 87
| 0.598192
|
56177e06bcb05f661bc6114e1bd6f84c7aaa0d5b
| 17,936
|
py
|
Python
|
SelectiveMemory/QasFeature/BipedalWalker_v7.py
|
ProGamerCode/FitML
|
3b44160bbf6c0587b8df198d3ceef10a42e2bfca
|
[
"MIT"
] | 171
|
2017-11-07T09:59:20.000Z
|
2022-03-29T13:59:18.000Z
|
SelectiveMemory/QasFeature/BipedalWalker_v7.py
|
ProGamerCode/FitML
|
3b44160bbf6c0587b8df198d3ceef10a42e2bfca
|
[
"MIT"
] | 1
|
2017-12-24T20:08:18.000Z
|
2018-01-31T22:26:49.000Z
|
SelectiveMemory/QasFeature/BipedalWalker_v7.py
|
ProGamerCode/FitML
|
3b44160bbf6c0587b8df198d3ceef10a42e2bfca
|
[
"MIT"
] | 44
|
2017-11-07T12:08:05.000Z
|
2022-01-04T15:53:12.000Z
|
'''
Bipedal Walker with Selective Memory Algorithm
solution by Michel Aka author of FitML github blog and repository
https://github.com/FitMachineLearning/FitML/
https://www.youtube.com/channel/UCi7_WxajoowBl4_9P0DhzzA/featured
Update
Deep Network
Adagrad optimizer
Using Selective Memory Average as feature discriminator
Much smaller SM
Order of magnitude better performance
'''
import numpy as np
import keras
import gym
import pybullet_envs
import pybullet
import pygal
import os
import h5py
import matplotlib.pyplot as plt
import math
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import LSTM
from keras import optimizers
num_env_variables = 24
num_env_actions = 4
num_initial_observation = 10
learning_rate = 0.007
apLearning_rate = 0.006
version_name = "BW-DSMQ-v16"
weigths_filename = version_name+"-weights.h5"
apWeights_filename = version_name+"-weights-ap.h5"
#range within which the SmartCrossEntropy action parameters will deviate from
#remembered optimal policy
sce_range = 0.2
b_discount = 0.97
max_memory_len = 2000000
experience_replay_size = 10000
random_every_n = 30
starting_explore_prob = 0.05
training_epochs = 3
mini_batch = 512
load_previous_weights = False
observe_and_train = True
save_weights = True
save_memory_arrays = True
load_memory_arrays = False
do_training = True
num_games_to_play = 160000
max_steps = 600
#Selective memory settings
sm_normalizer = 120
sm_memory_size = 1200
#One hot encoding array
possible_actions = np.arange(0,num_env_actions)
actions_1_hot = np.zeros((num_env_actions,num_env_actions))
actions_1_hot[np.arange(num_env_actions),possible_actions] = 1
#Create testing enviroment
env = gym.make('BipedalWalker-v2')
env.render(mode="human")
env.reset()
print("-- Observations",env.observation_space)
print("-- actionspace",env.action_space)
#initialize training matrix with random states and actions
dataX = np.random.random(( 5,num_env_variables+num_env_actions ))
#Only one output for the total score / reward
dataY = np.random.random((5,1))
#initialize training matrix with random states and actions
apdataX = np.random.random(( 5,num_env_variables ))
apdataY = np.random.random((5,num_env_actions))
def custom_error(y_true, y_pred, Qsa):
cce=0.001*(y_true - y_pred)*Qsa
return cce
#initialize the Reward predictor model
Qmodel = Sequential()
#model.add(Dense(num_env_variables+num_env_actions, activation='tanh', input_dim=dataX.shape[1]))
Qmodel.add(Dense(1024*2, activation='relu', input_dim=dataX.shape[1]))
#Qmodel.add(Dropout(0.2))
Qmodel.add(Dense(1024*1, activation='relu'))
#Qmodel.add(Dropout(0.2))
Qmodel.add(Dense(512, activation='relu'))
#Qmodel.add(Dropout(0.2))
#Qmodel.add(Dense(1024, activation='relu'))
#Qmodel.add(Dropout(0.2))
#Qmodel.add(Dense(512, activation='relu'))
#Qmodel.add(Dropout(0.2))
#Qmodel.add(Dense(256, activation='relu'))
#Qmodel.add(Dropout(0.2))
Qmodel.add(Dense(dataY.shape[1]))
#opt = optimizers.adam(lr=learning_rate)
opt = optimizers.Adagrad()
Qmodel.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
#initialize the action predictor model
action_predictor_model = Sequential()
#model.add(Dense(num_env_variables+num_env_actions, activation='tanh', input_dim=dataX.shape[1]))
action_predictor_model.add(Dense(1024*2, activation='relu', input_dim=apdataX.shape[1]))
#action_predictor_model.add(Dropout(0.2))
action_predictor_model.add(Dense(1024*1, activation='relu'))
#action_predictor_model.add(Dropout(0.2))
action_predictor_model.add(Dense(512, activation='relu'))
#action_predictor_model.add(Dropout(0.2))
#action_predictor_model.add(Dense(1024, activation='relu'))
#action_predictor_model.add(Dropout(0.2))
#action_predictor_model.add(Dense(512, activation='relu'))
#action_predictor_model.add(Dropout(0.2))
#action_predictor_model.add(Dense(64*8, activation='relu'))
action_predictor_model.add(Dense(apdataY.shape[1]))
#opt2 = optimizers.adam(lr=apLearning_rate)
opt2 = optimizers.Adagrad()
action_predictor_model.compile(loss='mse', optimizer=opt2, metrics=['accuracy'])
#load previous model weights if they exist
if load_previous_weights:
dir_path = os.path.realpath(".")
fn = dir_path + "/"+weigths_filename
print("filepath ", fn)
if os.path.isfile(fn):
print("loading weights")
Qmodel.load_weights(weigths_filename)
else:
print("File ",weigths_filename," does not exis. Retraining... ")
#load previous action predictor model weights if they exist
if load_previous_weights:
dir_path = os.path.realpath(".")
fn = dir_path + "/"+ apWeights_filename
print("filepath ", fn)
if os.path.isfile(fn):
print("loading weights")
action_predictor_model.load_weights(apWeights_filename)
else:
print("File ",apWeights_filename," does not exis. Retraining... ")
memorySA = np.zeros(shape=(1,num_env_variables+num_env_actions))
memoryS = np.zeros(shape=(1,num_env_variables))
memoryA = np.zeros(shape=(1,1))
memoryR = np.zeros(shape=(1,1))
memoryRR = np.zeros(shape=(1,1))
if load_memory_arrays:
if os.path.isfile(version_name+'memorySA.npy'):
print("Memory Files exist. Loading...")
memorySA = np.load(version_name+'memorySA.npy')
memoryRR = np.load(version_name+'memoryRR.npy')
memoryS = np.load(version_name+'memoryS.npy')
memoryA = np.load(version_name+'memoryA.npy')
memoryR = np.load(version_name+'memoryR.npy')
else:
print("No memory Files. Recreating")
mstats = []
mGames = []
mAverageScores = []
mSteps = []
mAP_Counts = 0
mAPPicks = []
def predictTotalRewards(qstate, action):
qs_a = np.concatenate((qstate,action), axis=0)
predX = np.zeros(shape=(1,num_env_variables+num_env_actions))
predX[0] = qs_a
#print("trying to predict reward at qs_a", predX[0])
pred = Qmodel.predict(predX[0].reshape(1,predX.shape[1]))
remembered_total_reward = pred[0][0]
return remembered_total_reward
def GetRememberedOptimalPolicy(qstate):
predX = np.zeros(shape=(1,num_env_variables))
predX[0] = qstate
#print("trying to predict reward at qs_a", predX[0])
pred = action_predictor_model.predict(predX[0].reshape(1,predX.shape[1]))
r_remembered_optimal_policy = pred[0]
return r_remembered_optimal_policy
def addToMemory(reward,stepReward,memMax,averegeReward,gameAverage):
#diff = reward - ((averegeReward+memMax)/2)
diff = reward - stepReward
gameFactor = ((gameAverage-averegeReward)/math.fabs(memMax-averegeReward) )
prob = 0.005
if gameFactor<0:
gameFactor = 0.05
else:
gameFactor = 1+gameFactor/2
if reward > averegeReward:
prob = prob + 0.95 * (diff / sm_normalizer)
#prob = prob * gameFactor
#prob = prob * (0.1+gameFactor)
#print("add reward",reward,"diff",diff,"prob",prob,"average", averegeReward,"max",memMax)
else:
prob = prob + 0.005/1000 * (diff / (40+math.fabs(diff)))
if diff < 0:
return False
if np.random.rand(1)<=prob :
#print("Adding reward",reward," based on prob ", prob)
#print("add reward",reward,"diff",diff,"prob",prob,"average", averegeReward,"max",memMax)
return True
else:
return False
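# Worked example of the acceptance probability above (made-up numbers, not called anywhere):
# with sm_normalizer = 120, reward = 60, stepReward = 20, averegeReward = 10:
#   diff = 40, reward > averegeReward, so prob = 0.005 + 0.95 * (40 / 120) ≈ 0.32
#   diff >= 0, so the sample is kept with probability ≈ 0.32
# Samples that only barely beat the running average stay near the 0.005 floor,
# which is what makes this replay memory "selective".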
if observe_and_train:
#Play the game for num_games_to_play episodes
for game in range(num_games_to_play):
gameSA = np.zeros(shape=(1,num_env_variables+num_env_actions))
gameS = np.zeros(shape=(1,num_env_variables))
gameA = np.zeros(shape=(1,num_env_actions))
gameR = np.zeros(shape=(1,1))
#Get the Q state
qs = env.reset()
mAP_Counts = 0
#print("qs ", qs)
'''
if game < num_initial_observation:
print("Observing game ", game)
else:
print("Learning & playing game ", game)
'''
for step in range (5000):
if game < num_initial_observation:
#take a random action
a = env.action_space.sample()
else:
prob = np.random.rand(1)
explore_prob = starting_explore_prob-(starting_explore_prob/num_games_to_play)*game
#Choose between prediction and chance
if prob < explore_prob or game%random_every_n==1:
#take a random action
a = env.action_space.sample()
else:
#Get remembered optimal policy
remembered_optimal_policy = GetRememberedOptimalPolicy(qs)
stock = np.zeros(15)
stockAction = np.zeros(shape=(15,num_env_actions))
for i in range(15):
stockAction[i] = env.action_space.sample()
stock[i] = predictTotalRewards(qs,stockAction[i])
best_index = np.argmax(stock)
randaction = stockAction[best_index]
#Compare R for SmartCrossEntropy action with remembered_optimal_policy and select the best
#if predictTotalRewards(qs,remembered_optimal_policy) > utility_possible_actions[best_sce_i]:
if predictTotalRewards(qs,remembered_optimal_policy) > predictTotalRewards(qs,randaction):
a = remembered_optimal_policy
mAP_Counts += 1
#print(" | selecting remembered_optimal_policy ",a)
else:
a = randaction
#print(" - selecting generated optimal policy ",a)
#a = remembered_optimal_policy
env.render()
qs_a = np.concatenate((qs,a), axis=0)
#get the target state and reward
s,r,done,info = env.step(a)
#on the first step of the game, initialize the per-game arrays
if step ==0:
gameSA[0] = qs_a
gameS[0] = qs
gameR[0] = np.array([r])
gameA[0] = np.array(a) #record the action taken (was mistakenly storing the reward)
else:
gameSA= np.vstack((gameSA, qs_a))
gameS= np.vstack((gameS, qs))
gameR = np.vstack((gameR, np.array([r])))
gameA = np.vstack((gameA, np.array([a])))
if step > max_steps:
done = True
if done :
tempGameSA = np.zeros(shape=(1,num_env_variables+num_env_actions))
tempGameS = np.zeros(shape=(1,num_env_variables))
tempGameA = np.zeros(shape=(1,num_env_actions))
tempGameR = np.zeros(shape=(1,1))
tempGameRR = np.zeros(shape=(1,1))
#Calculate Q values from end to start of game
#mstats.append(step)
for i in range(0,gameR.shape[0]):
#print("Updating total_reward at game epoch ",(gameY.shape[0]-1) - i)
if i==0:
#print("reward at the last step ",gameY[(gameY.shape[0]-1)-i][0])
gameR[(gameR.shape[0]-1)-i][0] = gameR[(gameR.shape[0]-1)-i][0]
else:
#print("local error before Bellman", gameY[(gameY.shape[0]-1)-i][0],"Next error ", gameY[(gameY.shape[0]-1)-i+1][0])
gameR[(gameR.shape[0]-1)-i][0] = gameR[(gameR.shape[0]-1)-i][0]+b_discount*gameR[(gameR.shape[0]-1)-i+1][0]
#print("reward at step",i,"away from the end is",gameY[(gameY.shape[0]-1)-i][0])
if memoryR.shape[0] ==1:
memorySA = gameSA
memoryR = gameR
memoryA = gameA
memoryS = gameS
memoryRR = gameR
tempGameA = tempGameA[1:]
tempGameS = tempGameS[1:]
tempGameRR = tempGameRR[1:]
tempGameR = tempGameR[1:]
tempGameSA = tempGameSA[1:]
for i in range(gameR.shape[0]):
tempGameSA = np.vstack((tempGameSA,gameSA[i]))
tempGameR = np.vstack((tempGameR,gameR[i]))
#Add experience to memory
#memorySA = np.concatenate((memorySA,gameSA),axis=0)
#memoryR = np.concatenate((memoryR,gameR),axis=0)
#print("memoryR average", memoryR.mean(axis=0)[0])
for i in range(0,gameR.shape[0]):
pr = predictTotalRewards(gameS[i],gameA[i])
# if you did better than expected then add to memory
#if game > 3 and addToMemory(gameR[i][0], pr ,memoryRR.max(),memoryR.mean(axis=0)[0],gameR.mean(axis=0)[0]):
if game > 3 and addToMemory(gameR[i][0], pr,memoryRR.max(),memoryR.mean(axis=0)[0],gameR.mean(axis=0)[0]):
tempGameA = np.vstack((tempGameA,gameA[i]))
tempGameS = np.vstack((tempGameS,gameS[i]))
tempGameRR = np.vstack((tempGameRR,gameR[i]))
if memoryR.shape[0] ==1:
memoryA = tempGameA
memoryS = tempGameS
memoryRR = tempGameRR
memoryR = tempGameR
memorySA = tempGameSA
else:
#Add experience to memory
memoryS = np.concatenate((memoryS,tempGameS),axis=0)
memoryRR = np.concatenate((memoryRR,tempGameRR),axis=0)
memoryA = np.concatenate((memoryA,tempGameA),axis=0)
memorySA = np.concatenate((memorySA,tempGameSA),axis=0)
memoryR = np.concatenate((memoryR,tempGameR),axis=0)
#if memory is full remove first element
if np.alen(memoryR) >= max_memory_len:
memorySA = memorySA[gameR.shape[0]:]
memoryR = memoryR[gameR.shape[0]:]
if np.alen(memoryA) >= sm_memory_size:
memoryA = memoryA[int(sm_memory_size/10):]
memoryS = memoryS[int(sm_memory_size/10):]
memoryRR = memoryRR[int(sm_memory_size/10):]
#Update the states
qs=s
#Retrain every X failures after num_initial_observation
if done and game >= num_initial_observation and do_training and game >= 5:
if game%2 == 0:
if game%25 == 0:
print("Training game# ", game,"momory size", memorySA.shape[0])
tSA = (memorySA)
tR = (memoryR)
tX = (memoryS)
tY = (memoryA)
#sw = (memoryAdv)
train_Q = np.random.randint(tR.shape[0],size=experience_replay_size)
train_A = np.random.randint(tY.shape[0],size=int(experience_replay_size/3))
tX = tX[train_A,:]
tY = tY[train_A,:]
#sw = sw[train_idx,:]
tR = tR[train_Q,:]
tSA = tSA[train_Q,:]
#training Reward predictor model
Qmodel.fit(tSA,tR, batch_size=mini_batch,epochs=training_epochs,verbose=0)
#training action predictor model
action_predictor_model.fit(tX,tY, batch_size=mini_batch, epochs=training_epochs,verbose=0)
if done and game >= num_initial_observation:
if save_weights and game%20 == 0 and game >35:
#Save model
#print("Saving weights")
Qmodel.save_weights(weigths_filename)
action_predictor_model.save_weights(apWeights_filename)
if save_memory_arrays and game%20 == 0 and game >35:
np.save(version_name+'memorySA.npy',memorySA)
np.save(version_name+'memoryRR.npy',memoryRR)
np.save(version_name+'memoryS.npy',memoryS)
np.save(version_name+'memoryA.npy',memoryA)
np.save(version_name+'memoryR.npy',memoryR)
if done:
if game%5==0:
print("Training Game #",game,"last everage",memoryR[:-1000].mean(),"percent AP picks", mAP_Counts/step*100 ,"game mean",gameR.mean(),"memoryR",memoryR.shape[0], "SelectiveMem Size ",memoryRR.shape[0],"Selective Mem mean",memoryRR.mean(axis=0)[0], " steps = ", step )
if game%5 ==0 and np.alen(memoryR)>1000:
mGames.append(game)
mSteps.append(step/1000*100)
mAPPicks.append(mAP_Counts/step*100)
mAverageScores.append(max(memoryR[:-1000].mean(), -40)/60*100)
bar_chart = pygal.HorizontalLine()
bar_chart.x_labels = map(str, mGames)
bar_chart.add('Average score', mAverageScores)
bar_chart.add('percent actor picks ', mAPPicks)
bar_chart.add('percent steps complete ', mSteps)
bar_chart.render_to_file(version_name+'Performance2_bar_chart.svg')
'''
#Game won conditions
if step > 197:
print("Game ", game," WON *** " )
else:
print("Game ",game," ended with positive reward ")
#Game ended - Break
'''
break
plt.plot(mstats)
plt.show()
if save_weights:
#Save model
print("Saving weights")
Qmodel.save_weights(weigths_filename)
action_predictor_model.save_weights(apWeights_filename)
| 36.455285
| 286
| 0.602085
|
9327fa7c001140ba644d5b24e2d26627e1d3ca08
| 972
|
py
|
Python
|
Binary-Tree/Binary-Tree-master/Python Codes/rangeSum.py
|
SrijaniSom/dsa-code-store
|
148292c8f963214629f271ec8601e73d3d0e145e
|
[
"MIT"
] | 3
|
2021-02-19T07:09:46.000Z
|
2021-10-04T10:12:45.000Z
|
Binary-Tree/Binary-Tree-master/Python Codes/rangeSum.py
|
SrijaniSom/dsa-code-store
|
148292c8f963214629f271ec8601e73d3d0e145e
|
[
"MIT"
] | 6
|
2021-02-21T19:35:18.000Z
|
2021-05-06T11:51:37.000Z
|
Binary-Tree/Binary-Tree-master/Python Codes/rangeSum.py
|
SrijaniSom/dsa-code-store
|
148292c8f963214629f271ec8601e73d3d0e145e
|
[
"MIT"
] | 6
|
2021-02-21T19:28:03.000Z
|
2021-10-04T03:35:57.000Z
|
class Node:
def __init__(self, val):
self.left = None
self.right = None
self.val = val
def insert(self, val):
if self.val:
if val < self.val:
if self.left is None:
self.left = Node(val)
else:
self.left.insert(val)
else:
if self.right is None:
self.right = Node(val)
else:
self.right.insert(val)
else:
self.val = val
class BinaryTreeAlgorithms:
def rangeSumBST(self, root,L,R):
if root is None:
return 0
sum1 = 0; sum2 = 0
if(root.left):
sum1 = self.rangeSumBST(root.left,L,R)
if(root.right):
sum2 = self.rangeSumBST(root.right,L,R)
if L <= root.val <= R:
return root.val + sum1 + sum2
else:
return sum1 + sum2
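# Minimal usage sketch (values are illustrative, not part of the original file):
if __name__ == '__main__':
    root = Node(10)
    for v in (5, 15, 3, 7, 18):
        root.insert(v)
    bta = BinaryTreeAlgorithms()
    # keys inside [7, 15] are 7, 10 and 15, so this prints 32
    print(bta.rangeSumBST(root, 7, 15))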
| 27
| 51
| 0.452675
|
8a80a770597dba7d4cfc11258d28aa44eb9cd57a
| 306
|
py
|
Python
|
constants.py
|
xianc78/Tank-Game
|
bc1267874d3d01242b74463b57ad89fd358dfb13
|
[
"Zlib"
] | null | null | null |
constants.py
|
xianc78/Tank-Game
|
bc1267874d3d01242b74463b57ad89fd358dfb13
|
[
"Zlib"
] | null | null | null |
constants.py
|
xianc78/Tank-Game
|
bc1267874d3d01242b74463b57ad89fd358dfb13
|
[
"Zlib"
] | null | null | null |
# Colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREY = (128, 128, 128) # Alright. Is it spelled as gray or grey?
BLUE = (0, 0, 255)
# SCREEN_SIZES
SCREEN_WIDTH = 640
SCREEN_HEIGHT = 480
# TILE SIZES
TILE_SIZE = (32, 32)
# Frames Per Second
FPS = 40
# Game title
TITLE = "Tank Game"
| 16.105263
| 64
| 0.637255
|
dc4ab655d55149a37ab4369336ff3a16b066d926
| 2,986
|
py
|
Python
|
src/rawsalad/papi/urls.py
|
CCLab/Raw-Salad
|
1ec028985e2b910aca31302fb57ed0677778756e
|
[
"BSD-3-Clause"
] | null | null | null |
src/rawsalad/papi/urls.py
|
CCLab/Raw-Salad
|
1ec028985e2b910aca31302fb57ed0677778756e
|
[
"BSD-3-Clause"
] | null | null | null |
src/rawsalad/papi/urls.py
|
CCLab/Raw-Salad
|
1ec028985e2b910aca31302fb57ed0677778756e
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls.defaults import *
# URLs for search: temporary update by Denis Kolokol, marked with comment "DK"
urlpatterns = patterns( 'papi.papi',
(r'^$', 'get_formats' ),
(r'^(?P<serializer>[a-z]+)/$', 'get_datasets' ),
(r'^(?P<serializer>[a-z]+)/search/$', 'search_data' ), # DK
(r'^(?P<serializer>[a-z]+)/meta/$', 'get_datasets_meta' ),
(r'^(?P<serializer>[a-z]+)/dataset/$', 'get_datasets' ),
(r'^(?P<serializer>[a-z]+)/dataset/search/$', 'search_data' ), # DK
(r'^(?P<serializer>[a-z]+)/dataset/meta/$', 'get_datasets_meta' ),
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/$', 'get_views' ),
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/search/$', 'search_data' ), # DK
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/meta/$', 'get_views_meta' ),
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/$', 'get_views' ),
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/search/$', 'search_data' ), # DK
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/meta/$', 'get_views_meta' ),
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/(?P<view_idef>\d+)/$', 'get_issues' ),
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/(?P<view_idef>\d+)/search/$', 'search_data' ), # DK
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/(?P<view_idef>\d+)/meta/$', 'get_issues_meta' ),
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/(?P<view_idef>\d+)/issue/$', 'get_issues' ),
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/(?P<view_idef>\d+)/issue/search/$', 'search_data' ), # DK
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/(?P<view_idef>\d+)/issue/meta/$', 'get_issues_meta' ),
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/(?P<view_idef>\d+)/issue/(?P<issue>\d+)/search/$', 'search_data' ), # DK
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/(?P<view_idef>\d+)/issue/(?P<issue>\d+)/meta/$', 'get_metadata' ),
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/(?P<view_idef>\d+)/issue/(?P<issue>\d+)/tree/$', 'get_tree' ),
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/(?P<view_idef>\d+)/issue/(?P<issue>\d+)/$', 'get_data' ),
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/(?P<view_idef>\d+)/issue/(?P<issue>\d+)/(?P<path>[0-9a-zA-Z/\-]*)/meta/$', 'get_metadata' ),
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/(?P<view_idef>\d+)/issue/(?P<issue>\d+)/(?P<path>[0-9a-zA-Z/\-]*)/tree/$', 'get_tree' ),
(r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/(?P<view_idef>\d+)/issue/(?P<issue>\d+)/(?P<path>[0-9a-zA-Z/\-\+(AND|TO)\[\]]*)/$', 'get_data' ),
# (r'^(?P<serializer>[a-z]+)/dataset/(?P<dataset_idef>\d+)/view/(?P<view_idef>\d+)/issue/(?P<issue>\d+)/(?P<path>([a-z]+/|[0-9\-\+(AND|TO)\[\]]*/)+)$', 'get_data' ),
)
| 78.578947
| 168
| 0.573677
|
58fa362db24e44f9e2d65d44d79f339231de4743
| 1,375
|
py
|
Python
|
miscset/__init__.py
|
setempler/miscset.py
|
312fa3e4def0224d9337302bbdbe2eba1d40182e
|
[
"MIT"
] | null | null | null |
miscset/__init__.py
|
setempler/miscset.py
|
312fa3e4def0224d9337302bbdbe2eba1d40182e
|
[
"MIT"
] | null | null | null |
miscset/__init__.py
|
setempler/miscset.py
|
312fa3e4def0224d9337302bbdbe2eba1d40182e
|
[
"MIT"
] | null | null | null |
# miscset
"""Main module and public API.
Version
-------
The library version can be identified by the `version` object.
.. exec_code::
:caption: Example code:
:caption_output: Result:
import miscset
print(miscset.version)
Direct Imports
--------------
The module imports to all submodules relevant for public usage,
so that a direct import is not necessary. This allows:
.. exec_code::
:caption: Example code:
:caption_output: Result:
import miscset
print(miscset.sh.run)
Logging
-------
Defines a default :py:mod:`logging` handler as a
:py:class:`logging.NullHandler` to allow usage of loggers
in methods of this package.
The handler can be redefined by a custom python module
importing methods from `miscset` and to custom logs:
.. exec_code::
:caption: Example code:
:caption_output: Result:
import logging
import miscset
handler = logging.StreamHandler()
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# this command prints now any debug messages using the log handler specified above
out = miscset.sh.run("echo hello")
print(out)
"""
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
from . import dt
from . import io
from . import sh
from . import files
from . import tables
from ._version import version
| 19.927536
| 86
| 0.711273
|
90a1603c26a43d2900ddeec655a90ee92a0dc991
| 22,406
|
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_compute_management_client_enums.py
|
dmarx/azure-sdk-for-python
|
86ac35b947c0ed3d5edb1cac03f5ad20a34a6fda
|
[
"MIT"
] | 1
|
2021-09-07T18:43:20.000Z
|
2021-09-07T18:43:20.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_compute_management_client_enums.py
|
dmarx/azure-sdk-for-python
|
86ac35b947c0ed3d5edb1cac03f5ad20a34a6fda
|
[
"MIT"
] | 2
|
2021-11-03T06:10:36.000Z
|
2021-12-01T06:29:39.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_compute_management_client_enums.py
|
msyyc/azure-sdk-for-python
|
e2dba75181f8b4336ae57e75aa391322c12c3123
|
[
"MIT"
] | 1
|
2021-05-19T02:55:10.000Z
|
2021-05-19T02:55:10.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class AvailabilitySetSkuTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the sku of an Availability Set. Use 'Aligned' for virtual machines with managed disks
and 'Classic' for virtual machines with unmanaged disks. Default value is 'Classic'.
"""
CLASSIC = "Classic"
ALIGNED = "Aligned"
class CachingTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the caching requirements. :code:`<br>`:code:`<br>` Possible values are:
:code:`<br>`:code:`<br>` **None** :code:`<br>`:code:`<br>` **ReadOnly**
:code:`<br>`:code:`<br>` **ReadWrite** :code:`<br>`:code:`<br>` Default: **None for Standard
storage. ReadOnly for Premium storage**
"""
NONE = "None"
READ_ONLY = "ReadOnly"
READ_WRITE = "ReadWrite"
class DedicatedHostLicenseTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the software license type that will be applied to the VMs deployed on the dedicated
host. :code:`<br>`:code:`<br>` Possible values are: :code:`<br>`:code:`<br>` **None**
:code:`<br>`:code:`<br>` **Windows_Server_Hybrid** :code:`<br>`:code:`<br>`
**Windows_Server_Perpetual** :code:`<br>`:code:`<br>` Default: **None**
"""
NONE = "None"
WINDOWS_SERVER_HYBRID = "Windows_Server_Hybrid"
WINDOWS_SERVER_PERPETUAL = "Windows_Server_Perpetual"
class DiffDiskOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the ephemeral disk option for operating system disk.
"""
LOCAL = "Local"
class DiffDiskPlacement(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the ephemeral disk placement for operating system disk. This property can be used by
user in the request to choose the location i.e, cache disk or resource disk space for Ephemeral
OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer
Ephemeral OS disk size requirements for Windows VM at https://docs.microsoft.com/en-
us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VM at
https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-
requirements
"""
CACHE_DISK = "CacheDisk"
RESOURCE_DISK = "ResourceDisk"
class DiskCreateOptionTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies how the virtual machine should be created.:code:`<br>`:code:`<br>` Possible values
are::code:`<br>`:code:`<br>` **Attach** \u2013 This value is used when you are using a
specialized disk to create the virtual machine.:code:`<br>`:code:`<br>` **FromImage** \u2013
This value is used when you are using an image to create the virtual machine. If you are using
a platform image, you also use the imageReference element described above. If you are using a
marketplace image, you also use the plan element previously described.
"""
FROM_IMAGE = "FromImage"
EMPTY = "Empty"
ATTACH = "Attach"
class ExecutionState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Script execution status.
"""
UNKNOWN = "Unknown"
PENDING = "Pending"
RUNNING = "Running"
FAILED = "Failed"
SUCCEEDED = "Succeeded"
TIMED_OUT = "TimedOut"
CANCELED = "Canceled"
class HyperVGenerationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the HyperVGeneration Type associated with a resource
"""
V1 = "V1"
V2 = "V2"
class HyperVGenerationTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the HyperVGeneration Type
"""
V1 = "V1"
V2 = "V2"
class InGuestPatchMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the mode of in-guest patching to IaaS virtual machine.:code:`<br />`:code:`<br />`
Possible values are::code:`<br />`:code:`<br />` **Manual** - You control the application of
patches to a virtual machine. You do this by applying patches manually inside the VM. In this
mode, automatic updates are disabled; the property WindowsConfiguration.enableAutomaticUpdates
must be false:code:`<br />`:code:`<br />` **AutomaticByOS** - The virtual machine will
automatically be updated by the OS. The property WindowsConfiguration.enableAutomaticUpdates
must be true. :code:`<br />`:code:`<br />` **AutomaticByPlatform** - the virtual machine will
automatically be updated by the platform. The properties provisionVMAgent and
WindowsConfiguration.enableAutomaticUpdates must be true
"""
MANUAL = "Manual"
AUTOMATIC_BY_OS = "AutomaticByOS"
AUTOMATIC_BY_PLATFORM = "AutomaticByPlatform"
class IntervalInMins(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Interval value in minutes used to create LogAnalytics call rate logs.
"""
THREE_MINS = "ThreeMins"
FIVE_MINS = "FiveMins"
THIRTY_MINS = "ThirtyMins"
SIXTY_MINS = "SixtyMins"
class IPVersion(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Available from Api-Version 2017-03-30 onwards, it represents whether the specific
ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and
'IPv6'.
"""
I_PV4 = "IPv4"
I_PV6 = "IPv6"
class MaintenanceOperationResultCodeTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The Last Maintenance Operation Result Code.
"""
NONE = "None"
RETRY_LATER = "RetryLater"
MAINTENANCE_ABORTED = "MaintenanceAborted"
MAINTENANCE_COMPLETED = "MaintenanceCompleted"
class OperatingSystemStateTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The OS State.
"""
GENERALIZED = "Generalized" #: Generalized image. Needs to be provisioned during deployment time.
SPECIALIZED = "Specialized" #: Specialized image. Contains already provisioned OS Disk.
class OperatingSystemTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The operating system of the osDiskImage.
"""
WINDOWS = "Windows"
LINUX = "Linux"
class OrchestrationServiceNames(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The name of the service.
"""
AUTOMATIC_REPAIRS = "AutomaticRepairs"
DUMMY_ORCHESTRATION_SERVICE_NAME = "DummyOrchestrationServiceName"
class OrchestrationServiceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The current state of the service.
"""
NOT_RUNNING = "NotRunning"
RUNNING = "Running"
SUSPENDED = "Suspended"
class OrchestrationServiceStateAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The action to be performed.
"""
RESUME = "Resume"
SUSPEND = "Suspend"
class PatchAssessmentState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes the outcome of an install operation for a given patch.
"""
INSTALLED = "Installed"
FAILED = "Failed"
EXCLUDED = "Excluded"
NOT_SELECTED = "NotSelected"
PENDING = "Pending"
AVAILABLE = "Available"
class PatchOperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The overall success or failure status of the operation. It remains "InProgress" until the
operation completes. At that point it will become "Failed", "Succeeded", or
"CompletedWithWarnings."
"""
IN_PROGRESS = "InProgress"
FAILED = "Failed"
SUCCEEDED = "Succeeded"
COMPLETED_WITH_WARNINGS = "CompletedWithWarnings"
class ProtocolTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the protocol of WinRM listener. :code:`<br>`:code:`<br>` Possible values are:
:code:`<br>`\ **http** :code:`<br>`:code:`<br>` **https**
"""
HTTP = "Http"
HTTPS = "Https"
class ProximityPlacementGroupType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the type of the proximity placement group. :code:`<br>`:code:`<br>` Possible values
are: :code:`<br>`:code:`<br>` **Standard** : Co-locate resources within an Azure region or
Availability Zone. :code:`<br>`:code:`<br>` **Ultra** : For future use.
"""
STANDARD = "Standard"
ULTRA = "Ultra"
class RebootStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The reboot status of the machine after the patch operation. It will be in "NotNeeded" status if
reboot is not needed after the patch operation. "Required" will be the status once the patch is
applied and machine is required to reboot. "Started" will be the reboot status when the machine
has started to reboot. "Failed" will be the status if the machine is failed to reboot.
"Completed" will be the status once the machine is rebooted successfully
"""
NOT_NEEDED = "NotNeeded"
REQUIRED = "Required"
STARTED = "Started"
FAILED = "Failed"
COMPLETED = "Completed"
class ResourceIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity used for the virtual machine. The type 'SystemAssigned, UserAssigned'
includes both an implicitly created identity and a set of user assigned identities. The type
'None' will remove any identities from the virtual machine.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
NONE = "None"
class RollingUpgradeActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The last action performed on the rolling upgrade.
"""
START = "Start"
CANCEL = "Cancel"
class RollingUpgradeStatusCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Code indicating the current status of the upgrade.
"""
ROLLING_FORWARD = "RollingForward"
CANCELLED = "Cancelled"
COMPLETED = "Completed"
FAULTED = "Faulted"
class SettingNames(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the name of the setting to which the content applies. Possible values are:
FirstLogonCommands and AutoLogon.
"""
AUTO_LOGON = "AutoLogon"
FIRST_LOGON_COMMANDS = "FirstLogonCommands"
class SoftwareUpdateRebootBehavior(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes the reboot requirements of the patch.
"""
NEVER_REBOOTS = "NeverReboots"
ALWAYS_REQUIRES_REBOOT = "AlwaysRequiresReboot"
CAN_REQUEST_REBOOT = "CanRequestReboot"
class StatusLevelTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The level code.
"""
INFO = "Info"
WARNING = "Warning"
ERROR = "Error"
class StorageAccountTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the storage account type for the managed disk. Managed OS disk storage account type
can only be set when you create the scale set. NOTE: UltraSSD_LRS can only be used with data
disks. It cannot be used with OS Disk. Standard_LRS uses Standard HDD. StandardSSD_LRS uses
Standard SSD. Premium_LRS uses Premium SSD. UltraSSD_LRS uses Ultra disk. For more information
regarding disks supported for Windows Virtual Machines, refer to https://docs.microsoft.com/en-
us/azure/virtual-machines/windows/disks-types and, for Linux Virtual Machines, refer to
https://docs.microsoft.com/en-us/azure/virtual-machines/linux/disks-types
"""
STANDARD_LRS = "Standard_LRS"
PREMIUM_LRS = "Premium_LRS"
STANDARD_SSD_LRS = "StandardSSD_LRS"
ULTRA_SSD_LRS = "UltraSSD_LRS"
class UpgradeMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the mode of an upgrade to virtual machines in the scale set.:code:`<br />`:code:`<br
/>` Possible values are::code:`<br />`:code:`<br />` **Manual** - You control the application
of updates to virtual machines in the scale set. You do this by using the manualUpgrade
action.:code:`<br />`:code:`<br />` **Automatic** - All virtual machines in the scale set are
automatically updated at the same time.
"""
AUTOMATIC = "Automatic"
MANUAL = "Manual"
ROLLING = "Rolling"
class UpgradeOperationInvoker(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Invoker of the Upgrade Operation
"""
UNKNOWN = "Unknown"
USER = "User"
PLATFORM = "Platform"
class UpgradeState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Code indicating the current status of the upgrade.
"""
ROLLING_FORWARD = "RollingForward"
CANCELLED = "Cancelled"
COMPLETED = "Completed"
FAULTED = "Faulted"
class VirtualMachineEvictionPolicyTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the eviction policy for the Azure Spot VM/VMSS
"""
DEALLOCATE = "Deallocate"
DELETE = "Delete"
class VirtualMachinePriorityTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the priority for a standalone virtual machine or the virtual machines in the scale
set. :code:`<br>`:code:`<br>` 'Low' enum will be deprecated in the future, please use 'Spot' as
the enum to deploy Azure Spot VM/VMSS.
"""
REGULAR = "Regular"
LOW = "Low"
SPOT = "Spot"
class VirtualMachineScaleSetScaleInRules(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
DEFAULT = "Default"
OLDEST_VM = "OldestVM"
NEWEST_VM = "NewestVM"
class VirtualMachineScaleSetSkuScaleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The scale type applicable to the sku.
"""
AUTOMATIC = "Automatic"
NONE = "None"
class VirtualMachineSizeTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the size of the virtual machine. For more information about virtual machine sizes,
see `Sizes for virtual machines <https://docs.microsoft.com/en-us/azure/virtual-
machines/sizes>`_. :code:`<br>`:code:`<br>` The available VM sizes depend on region and
availability set. For a list of available sizes use these APIs: :code:`<br>`:code:`<br>` `List
all available virtual machine sizes in an availability set
<https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes>`_
:code:`<br>`:code:`<br>` `List all available virtual machine sizes in a region
<https://docs.microsoft.com/en-us/rest/api/compute/resourceskus/list>`_
:code:`<br>`:code:`<br>` `List all available virtual machine sizes for resizing
<https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes>`_.
:code:`<br>`:code:`<br>` This list of sizes is no longer updated and the
**VirtualMachineSizeTypes** string constants will be removed from the subsequent REST API
specification. Use `List all available virtual machine sizes in a region
<https://docs.microsoft.com/en-us/rest/api/compute/resourceskus/list>`_ to get the latest
sizes.
"""
BASIC_A0 = "Basic_A0"
BASIC_A1 = "Basic_A1"
BASIC_A2 = "Basic_A2"
BASIC_A3 = "Basic_A3"
BASIC_A4 = "Basic_A4"
STANDARD_A0 = "Standard_A0"
STANDARD_A1 = "Standard_A1"
STANDARD_A2 = "Standard_A2"
STANDARD_A3 = "Standard_A3"
STANDARD_A4 = "Standard_A4"
STANDARD_A5 = "Standard_A5"
STANDARD_A6 = "Standard_A6"
STANDARD_A7 = "Standard_A7"
STANDARD_A8 = "Standard_A8"
STANDARD_A9 = "Standard_A9"
STANDARD_A10 = "Standard_A10"
STANDARD_A11 = "Standard_A11"
STANDARD_A1_V2 = "Standard_A1_v2"
STANDARD_A2_V2 = "Standard_A2_v2"
STANDARD_A4_V2 = "Standard_A4_v2"
STANDARD_A8_V2 = "Standard_A8_v2"
STANDARD_A2_M_V2 = "Standard_A2m_v2"
STANDARD_A4_M_V2 = "Standard_A4m_v2"
STANDARD_A8_M_V2 = "Standard_A8m_v2"
STANDARD_B1_S = "Standard_B1s"
STANDARD_B1_MS = "Standard_B1ms"
STANDARD_B2_S = "Standard_B2s"
STANDARD_B2_MS = "Standard_B2ms"
STANDARD_B4_MS = "Standard_B4ms"
STANDARD_B8_MS = "Standard_B8ms"
STANDARD_D1 = "Standard_D1"
STANDARD_D2 = "Standard_D2"
STANDARD_D3 = "Standard_D3"
STANDARD_D4 = "Standard_D4"
STANDARD_D11 = "Standard_D11"
STANDARD_D12 = "Standard_D12"
STANDARD_D13 = "Standard_D13"
STANDARD_D14 = "Standard_D14"
STANDARD_D1_V2 = "Standard_D1_v2"
STANDARD_D2_V2 = "Standard_D2_v2"
STANDARD_D3_V2 = "Standard_D3_v2"
STANDARD_D4_V2 = "Standard_D4_v2"
STANDARD_D5_V2 = "Standard_D5_v2"
STANDARD_D2_V3 = "Standard_D2_v3"
STANDARD_D4_V3 = "Standard_D4_v3"
STANDARD_D8_V3 = "Standard_D8_v3"
STANDARD_D16_V3 = "Standard_D16_v3"
STANDARD_D32_V3 = "Standard_D32_v3"
STANDARD_D64_V3 = "Standard_D64_v3"
STANDARD_D2_S_V3 = "Standard_D2s_v3"
STANDARD_D4_S_V3 = "Standard_D4s_v3"
STANDARD_D8_S_V3 = "Standard_D8s_v3"
STANDARD_D16_S_V3 = "Standard_D16s_v3"
STANDARD_D32_S_V3 = "Standard_D32s_v3"
STANDARD_D64_S_V3 = "Standard_D64s_v3"
STANDARD_D11_V2 = "Standard_D11_v2"
STANDARD_D12_V2 = "Standard_D12_v2"
STANDARD_D13_V2 = "Standard_D13_v2"
STANDARD_D14_V2 = "Standard_D14_v2"
STANDARD_D15_V2 = "Standard_D15_v2"
STANDARD_DS1 = "Standard_DS1"
STANDARD_DS2 = "Standard_DS2"
STANDARD_DS3 = "Standard_DS3"
STANDARD_DS4 = "Standard_DS4"
STANDARD_DS11 = "Standard_DS11"
STANDARD_DS12 = "Standard_DS12"
STANDARD_DS13 = "Standard_DS13"
STANDARD_DS14 = "Standard_DS14"
STANDARD_DS1_V2 = "Standard_DS1_v2"
STANDARD_DS2_V2 = "Standard_DS2_v2"
STANDARD_DS3_V2 = "Standard_DS3_v2"
STANDARD_DS4_V2 = "Standard_DS4_v2"
STANDARD_DS5_V2 = "Standard_DS5_v2"
STANDARD_DS11_V2 = "Standard_DS11_v2"
STANDARD_DS12_V2 = "Standard_DS12_v2"
STANDARD_DS13_V2 = "Standard_DS13_v2"
STANDARD_DS14_V2 = "Standard_DS14_v2"
STANDARD_DS15_V2 = "Standard_DS15_v2"
STANDARD_DS13_4_V2 = "Standard_DS13-4_v2"
STANDARD_DS13_2_V2 = "Standard_DS13-2_v2"
STANDARD_DS14_8_V2 = "Standard_DS14-8_v2"
STANDARD_DS14_4_V2 = "Standard_DS14-4_v2"
STANDARD_E2_V3 = "Standard_E2_v3"
STANDARD_E4_V3 = "Standard_E4_v3"
STANDARD_E8_V3 = "Standard_E8_v3"
STANDARD_E16_V3 = "Standard_E16_v3"
STANDARD_E32_V3 = "Standard_E32_v3"
STANDARD_E64_V3 = "Standard_E64_v3"
STANDARD_E2_S_V3 = "Standard_E2s_v3"
STANDARD_E4_S_V3 = "Standard_E4s_v3"
STANDARD_E8_S_V3 = "Standard_E8s_v3"
STANDARD_E16_S_V3 = "Standard_E16s_v3"
STANDARD_E32_S_V3 = "Standard_E32s_v3"
STANDARD_E64_S_V3 = "Standard_E64s_v3"
STANDARD_E32_16_V3 = "Standard_E32-16_v3"
STANDARD_E32_8_S_V3 = "Standard_E32-8s_v3"
STANDARD_E64_32_S_V3 = "Standard_E64-32s_v3"
STANDARD_E64_16_S_V3 = "Standard_E64-16s_v3"
STANDARD_F1 = "Standard_F1"
STANDARD_F2 = "Standard_F2"
STANDARD_F4 = "Standard_F4"
STANDARD_F8 = "Standard_F8"
STANDARD_F16 = "Standard_F16"
STANDARD_F1_S = "Standard_F1s"
STANDARD_F2_S = "Standard_F2s"
STANDARD_F4_S = "Standard_F4s"
STANDARD_F8_S = "Standard_F8s"
STANDARD_F16_S = "Standard_F16s"
STANDARD_F2_S_V2 = "Standard_F2s_v2"
STANDARD_F4_S_V2 = "Standard_F4s_v2"
STANDARD_F8_S_V2 = "Standard_F8s_v2"
STANDARD_F16_S_V2 = "Standard_F16s_v2"
STANDARD_F32_S_V2 = "Standard_F32s_v2"
STANDARD_F64_S_V2 = "Standard_F64s_v2"
STANDARD_F72_S_V2 = "Standard_F72s_v2"
STANDARD_G1 = "Standard_G1"
STANDARD_G2 = "Standard_G2"
STANDARD_G3 = "Standard_G3"
STANDARD_G4 = "Standard_G4"
STANDARD_G5 = "Standard_G5"
STANDARD_GS1 = "Standard_GS1"
STANDARD_GS2 = "Standard_GS2"
STANDARD_GS3 = "Standard_GS3"
STANDARD_GS4 = "Standard_GS4"
STANDARD_GS5 = "Standard_GS5"
STANDARD_GS4_8 = "Standard_GS4-8"
STANDARD_GS4_4 = "Standard_GS4-4"
STANDARD_GS5_16 = "Standard_GS5-16"
STANDARD_GS5_8 = "Standard_GS5-8"
STANDARD_H8 = "Standard_H8"
STANDARD_H16 = "Standard_H16"
STANDARD_H8_M = "Standard_H8m"
STANDARD_H16_M = "Standard_H16m"
STANDARD_H16_R = "Standard_H16r"
STANDARD_H16_MR = "Standard_H16mr"
STANDARD_L4_S = "Standard_L4s"
STANDARD_L8_S = "Standard_L8s"
STANDARD_L16_S = "Standard_L16s"
STANDARD_L32_S = "Standard_L32s"
STANDARD_M64_S = "Standard_M64s"
STANDARD_M64_MS = "Standard_M64ms"
STANDARD_M128_S = "Standard_M128s"
STANDARD_M128_MS = "Standard_M128ms"
STANDARD_M64_32_MS = "Standard_M64-32ms"
STANDARD_M64_16_MS = "Standard_M64-16ms"
STANDARD_M128_64_MS = "Standard_M128-64ms"
STANDARD_M128_32_MS = "Standard_M128-32ms"
STANDARD_NC6 = "Standard_NC6"
STANDARD_NC12 = "Standard_NC12"
STANDARD_NC24 = "Standard_NC24"
STANDARD_NC24_R = "Standard_NC24r"
STANDARD_NC6_S_V2 = "Standard_NC6s_v2"
STANDARD_NC12_S_V2 = "Standard_NC12s_v2"
STANDARD_NC24_S_V2 = "Standard_NC24s_v2"
STANDARD_NC24_RS_V2 = "Standard_NC24rs_v2"
STANDARD_NC6_S_V3 = "Standard_NC6s_v3"
STANDARD_NC12_S_V3 = "Standard_NC12s_v3"
STANDARD_NC24_S_V3 = "Standard_NC24s_v3"
STANDARD_NC24_RS_V3 = "Standard_NC24rs_v3"
STANDARD_ND6_S = "Standard_ND6s"
STANDARD_ND12_S = "Standard_ND12s"
STANDARD_ND24_S = "Standard_ND24s"
STANDARD_ND24_RS = "Standard_ND24rs"
STANDARD_NV6 = "Standard_NV6"
STANDARD_NV12 = "Standard_NV12"
STANDARD_NV24 = "Standard_NV24"
class VmDiskTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""VM disk types which are disallowed.
"""
NONE = "None"
UNMANAGED = "Unmanaged"
| 39.656637
| 102
| 0.716505
|
4d8c1cd368b833620328eabb826e9c145ba82e26
| 455
|
py
|
Python
|
microservices/services/apis/serializers.py
|
imohitawasthi/agile-tracker
|
9b4ded9dd3394cce3b0917a6972c214919f1d721
|
[
"MIT"
] | null | null | null |
microservices/services/apis/serializers.py
|
imohitawasthi/agile-tracker
|
9b4ded9dd3394cce3b0917a6972c214919f1d721
|
[
"MIT"
] | null | null | null |
microservices/services/apis/serializers.py
|
imohitawasthi/agile-tracker
|
9b4ded9dd3394cce3b0917a6972c214919f1d721
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from . import models
class AtBucketSerializerV1(serializers.ModelSerializer):
class Meta:
model=models.AtBucketV1
fields = "__all__"
class AtTaskSerializerV1(serializers.ModelSerializer):
class Meta:
model=models.AtTaskV1
fields = "__all__"
class AtTaskSerializerV1_1(serializers.ModelSerializer):
class Meta:
model=models.AtTaskV1_1
fields = "__all__"
| 25.277778
| 56
| 0.723077
|
ecb6bfacd0dc3edbdcf61f165c4be16293b1b849
| 5,979
|
py
|
Python
|
felica/kururu_reader.py
|
thinkAmi-sandbox/nfcpy-sample
|
06daa02caee3bc26a074c9f4cf016aab27a7e549
|
[
"Unlicense"
] | null | null | null |
felica/kururu_reader.py
|
thinkAmi-sandbox/nfcpy-sample
|
06daa02caee3bc26a074c9f4cf016aab27a7e549
|
[
"Unlicense"
] | null | null | null |
felica/kururu_reader.py
|
thinkAmi-sandbox/nfcpy-sample
|
06daa02caee3bc26a074c9f4cf016aab27a7e549
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
# KURURU reading written with reference to the repository below. Thanks to m2wasabi.
# https://github.com/m2wasabi/nfcpy-suica-sample/blob/master/suica_read.py
import struct
import textwrap
import nfc
import nfc.tag.tt3
KURURU_SERVICE_CODE = 0x000f
class HistoryRecord(object):
def __init__(self, data):
# Byte string interpreted as big-endian
# 1-byte fields use B, 2-byte fields use H, and 4-byte fields use I
self.row_be = struct.unpack('>HBHBHHBBI', data)
# Byte string interpreted as little-endian (not actually used for KURURU)
self.row_le = struct.unpack('<HBHBHHBBI', data)
def is_empty(self):
# If year/month/day are all zero, treat the record as empty (no history)
return not all([
self.fetch_year(),
self.fetch_month(),
self.fetch_day(),
])
def fetch_year(self):
# Year: take the upper 7 bits of the 16-bit field
# 1. Bits 8-16 (the lower 9 bits) are not needed, so shift right by 9 to discard them
#    For 6988 (decimal), bin(6988 >> 9) gives '0b1101'
# 2. AND the remaining bits with the 7-bit mask needed for the year
#    0b1101 AND 0b1111111 (0x7f in hex) leaves 0b1101
#    -> the result is 13 as a decimal int
return (self.row_be[0] >> 9) & 0b1111111
def fetch_month(self):
# Month: take bits 8-11 (counting from the top) of the 16-bit field
# 1. Bits 12-16 are not needed, so shift right by 5 to discard them
#    For 6988 (decimal), bin(6988 >> 5) gives '0b11011010'
# 2. AND the remaining bits with the lower 4-bit mask needed for the month
#    0b11011010 AND 0b1111 (0x0f in hex) leaves 0b1010
#    -> the result is 10 as a decimal int
return (self.row_be[0] >> 5) & 0b1111
def fetch_day(self):
# Day: take the lower 5 bits of the 16-bit field
# 1. These are already the lowest bits, so there is nothing to discard
#    For 6988 (decimal), bin(6988 >> 0) is simply the original value, '0b1101101001100',
#    so no shift is applied this time
# 2. AND the remaining bits with the 5-bit mask needed for the day
#    0b1101101001100 AND 0b11111 (0x1f in hex) leaves 0b1100
#    -> the result is 12 as a decimal int
return self.row_be[0] & 0b11111
def fetch_alighting_time(self):
return self.format_time(self.row_be[1])
def fetch_machine_no(self):
return self.row_be[2]
def fetch_boarding_time(self):
return self.format_time(self.row_be[3])
def fetch_boarding_stop(self):
return self.row_be[4]
def fetch_alighting_stop(self):
return self.row_be[5]
def fetch_place(self):
# The upper 4 bits hold the place, so drop the lower 4 bits and AND the remaining 4 bits
place = (self.row_be[6] >> 4) & 0b1111
# The value is a hex-style int, so use the hex int as the dictionary key
# print type(place) # => int
# print hex(place) # => 0xe
# print place # => 14
# Dictionary keys are written in hex notation to match the Suica version
result = {
0x05: '車内 ({})',
0x07: '営業所 ({})',
0x0E: '券売機 ({})',
}.get(place, '不明 ({})')
return result.format(hex(place))
def fetch_category(self):
# The lower 4 bits hold the category, so AND with the lower 4-bit mask
category = self.row_be[6] & 0b1111
result = {
0x00: '入金 ({})',
0x02: '支払 ({})',
}.get(category, '不明 ({})')
return result.format(hex(category))
def fetch_company(self):
company = (self.row_be[7] >> 4) & 0b1111
result = {
0x00: '長電バス ({})',
0x03: 'アルピコバス ({})',
}.get(company, '不明 ({})')
return result.format(hex(company))
def fetch_discount(self):
discount = self.row_be[7] & 0b1111
result = {
0x00: '入金 ({})',
0x01: 'なし ({})',
}.get(discount, '不明 ({})')
return result.format(hex(discount))
def fetch_balance(self):
return self.row_be[8]
def format_time(self, usage_time):
# usage_time looks like a decimal int but is really a hex-encoded int,
# so it has to be converted to a true decimal int
# Turn the hex int into its hexadecimal string representation
hex_time = hex(usage_time)
# Turn the hexadecimal string into a decimal number
int_time = int(hex_time, 16)
# The stored value has been divided by 10, so multiply it back
origin_time = int_time * 10
# Get the quotient (hours) and remainder (minutes)
# The original value is in minutes, so convert it to hours and minutes
hm = divmod(origin_time, 60)
return '{hm[0]:02d}:{hm[1]:02d}:00'.format(hm=hm)
def connected(tag):
# Arguments of the ServiceCode constructor:
# - first argument: the service number (upper 10 bits of the service code);
#   the unneeded lower 6 bits are dropped
# - second argument: the attribute value (lower 6 bits of the service code);
#   binary 111111 is 0x3f in hex (the AND mask used to extract those lower 6 bits)
sc = nfc.tag.tt3.ServiceCode(KURURU_SERVICE_CODE >> 6, KURURU_SERVICE_CODE & 0x3f)
for i in range(0, 10):
bc = nfc.tag.tt3.BlockCode(i, service=0)
data = tag.read_without_encryption([sc], [bc, ])
history = HistoryRecord(bytes(data))
if history.is_empty():
continue
result = """
Block: {history_no}
日付: {yyyy}/{mm}/{dd}
機番: {machine}
乗車時刻: {boarding_time}
乗車停留所: {boarding_stop}
降車時刻: {alighting_time}
降車停留所: {alighting_stop}
場所: {place}
種別: {category}
会社: {company}
割引: {discount}
残高: {balance:,}円
""".format(
history_no=i + 1,
yyyy=history.fetch_year() + 2000,
mm='{:02d}'.format(history.fetch_month()),
dd='{:02d}'.format(history.fetch_day()),
machine=history.fetch_machine_no(),
boarding_time=history.fetch_boarding_time(),
boarding_stop=history.fetch_boarding_stop(),
alighting_time=history.fetch_alighting_time(),
alighting_stop=history.fetch_alighting_stop(),
place=history.fetch_place(),
category=history.fetch_category(),
company=history.fetch_company(),
discount=history.fetch_discount(),
balance=history.fetch_balance(),
)
print '-' * 30
print textwrap.dedent(result)
def main():
with nfc.ContactlessFrontend('usb') as clf:
clf.connect(rdwr={'on-connect': connected})
if __name__ == '__main__':
# Reference
# https://stackoverflow.com/questions/2611858/struct-error-unpack-requires-a-string-argument-of-length-4/2612851
f = struct.calcsize('=HBHBHHBBHH')
print 'フォーマットの桁数:{}'.format(f)
main()
| 31.140625
| 116
| 0.588727
|
be0e4b31e6b19169b1841796f614c1b33bd082cb
| 411
|
py
|
Python
|
main.py
|
freshskates/Graph-Theory
|
a93311d9453ffe986b5b8b82b3b34277b71d6602
|
[
"MIT"
] | null | null | null |
main.py
|
freshskates/Graph-Theory
|
a93311d9453ffe986b5b8b82b3b34277b71d6602
|
[
"MIT"
] | null | null | null |
main.py
|
freshskates/Graph-Theory
|
a93311d9453ffe986b5b8b82b3b34277b71d6602
|
[
"MIT"
] | null | null | null |
# from Nodes.node import Node
from linked_list.linkedlist import LinkedList
def main():
list_node = LinkedList()
list_node.add("data")
list_node.add("data1")
list_node.add("data2")
list_node.add("data3")
list_node.show()
print(list_node.vectorize())
list_node.remove(1)
list_node.remove(0)
list_node.remove(0)
list_node.show()
if __name__ == '__main__':
main()
| 21.631579
| 45
| 0.671533
|
25349641f6de6d6a7781623e9b135442bcf04a8a
| 234
|
py
|
Python
|
src/config.py
|
takeruadelbert/epass-barrier-gate
|
62af69bef52dddc6da74b74bd2fdaff1ee166988
|
[
"Unlicense"
] | null | null | null |
src/config.py
|
takeruadelbert/epass-barrier-gate
|
62af69bef52dddc6da74b74bd2fdaff1ee166988
|
[
"Unlicense"
] | null | null | null |
src/config.py
|
takeruadelbert/epass-barrier-gate
|
62af69bef52dddc6da74b74bd2fdaff1ee166988
|
[
"Unlicense"
] | null | null | null |
# Server Configuration
ip_address_server = "http://192.168.88.204"
url = "/epass2018/parking_outs/api_member_out"
timeout_connection = 30 # in second(s)
retry_connect = 3 # in second(s)
# HID Configuration
hid_name = "Sycreader RFID"
| 29.25
| 46
| 0.760684
|
c741aecf0d3f642004a16cf2c6626e3c22bdc189
| 2,827
|
py
|
Python
|
examples/model_hosting/scheduled_model/generate_equipment_data.py
|
dmivankov/cognite-python-docs
|
cc01b5ab6f3fe382e646d457427eb8fa6cd61ff0
|
[
"Apache-2.0"
] | null | null | null |
examples/model_hosting/scheduled_model/generate_equipment_data.py
|
dmivankov/cognite-python-docs
|
cc01b5ab6f3fe382e646d457427eb8fa6cd61ff0
|
[
"Apache-2.0"
] | null | null | null |
examples/model_hosting/scheduled_model/generate_equipment_data.py
|
dmivankov/cognite-python-docs
|
cc01b5ab6f3fe382e646d457427eb8fa6cd61ff0
|
[
"Apache-2.0"
] | null | null | null |
import random
import string
from datetime import datetime, timedelta
from time import sleep
import pandas as pd
from cognite.client import CogniteClient
from cognite.client.data_classes.time_series import TimeSeries
client = CogniteClient()
NUMBER_OF_DATAPOINTS = 20000
prefix = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
def random_walk(min_val, max_val, num_of_points):
points = [random.randrange(min_val, max_val)]
for i in range(1, num_of_points):
move = 1 if random.random() > 0.5 else -1
point = points[i - 1] + move * random.random()
points.append(max(min(point, max_val), min_val))
return points
def fake_prod_rate(temp, pressure, rpm):
return (temp + pressure) - (rpm * 0.5)
def generate_data():
data = {}
one_day_ago = datetime.now() - timedelta(days=1)
one_day_ahead = datetime.now() + timedelta(days=1)
one_day_ago_ms = int(round(one_day_ago.timestamp() * 1000))
one_day_ahead_ms = int(round(one_day_ahead.timestamp() * 1000))
step = (one_day_ahead_ms - one_day_ago_ms) // NUMBER_OF_DATAPOINTS
timestamps = [timestamp for timestamp in range(one_day_ago_ms, one_day_ahead_ms, step)][:NUMBER_OF_DATAPOINTS]
data["timestamps"] = timestamps
data["{}_temp".format(prefix)] = random_walk(75, 125, NUMBER_OF_DATAPOINTS)
data["{}_pressure".format(prefix)] = random_walk(150, 300, NUMBER_OF_DATAPOINTS)
data["{}_rpm".format(prefix)] = random_walk(100, 200, NUMBER_OF_DATAPOINTS)
data["{}_production_rate".format(prefix)] = [
fake_prod_rate(
data["{}_temp".format(prefix)][i], data["{}_pressure".format(prefix)][i], data["{}_rpm".format(prefix)][i]
)
for i in range(NUMBER_OF_DATAPOINTS)
]
return data
def post_data(data):
time_series_to_post = [TimeSeries(name=name) for name in data if name != "timestamps"]
# Create a time series for the prediction output as well
time_series_to_post.append(TimeSeries(name="{}_predicted_prod_rate".format(prefix)))
client.time_series.create(time_series_to_post)
created_time_series = []
while len(created_time_series) != 5:
created_time_series = client.time_series.search(name=prefix)
sleep(0.5)
ts_dict = {"_".join(ts.name.split("_")[1:]): ts.id for ts in created_time_series}
print(ts_dict)
datapoints = []
for ts in created_time_series:
# Only add datapoints to the input time series, i.e. skip the predicted_prod_rate timeseries.
if ts.name.endswith("_predicted_prod_rate"):
continue
datapoints.append({"id": ts.id, "datapoints": list(zip(data["timestamps"], data[ts.name]))})
client.datapoints.insert_multiple(datapoints)
if __name__ == "__main__":
data = generate_data()
post_data(data)
| 35.3375
| 118
| 0.696852
|
80db16348b41171c43ec72280be405072511ed3b
| 6,222
|
py
|
Python
|
acq4/analysis/atlas/AuditoryCortex/CortexROI.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 1
|
2020-06-04T17:04:53.000Z
|
2020-06-04T17:04:53.000Z
|
acq4/analysis/atlas/AuditoryCortex/CortexROI.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 24
|
2016-09-27T17:25:24.000Z
|
2017-03-02T21:00:11.000Z
|
acq4/analysis/atlas/AuditoryCortex/CortexROI.py
|
sensapex/acq4
|
9561ba73caff42c609bd02270527858433862ad8
|
[
"MIT"
] | 4
|
2016-10-19T06:39:36.000Z
|
2019-09-30T21:06:45.000Z
|
from __future__ import print_function
import acq4.pyqtgraph as pg
from acq4.pyqtgraph.graphicsItems import ROI
from acq4.pyqtgraph.Point import Point
from acq4.util import Qt
import math
class CortexROI(ROI.PolyLineROI):
def __init__(self, pos, state=None):
ROI.PolyLineROI.__init__(self, [[0,0], [2,0], [2,1], [0,1]], pos=pos, closed=True, pen=pg.mkPen(50,50, 255, 200))
## don't let the user add handles to the sides, only to the top and bottom
self.segments[0].setAcceptedMouseButtons(Qt.Qt.NoButton)
#self.segments[1].setAcceptedMouseButtons(Qt.Qt.NoButton) ## there was a change in PolylineROI that affected the order of segments, so now 0 and 2 are the sides instead of 1 and 3 (2013.12.12)
self.segments[2].setAcceptedMouseButtons(Qt.Qt.NoButton)
#self.segments[3].setAcceptedMouseButtons(Qt.Qt.NoButton)
if state is not None:
self.setState(state)
def setState(self, state):
self.blockSignals(True)
try:
ROI.PolyLineROI.setState(self, state)
handles = state['handles']
n = len(handles)
## set positions of 4 corners
self.handles[0]['item'].setPos(self.mapFromParent(Qt.QPointF(*handles[0])))
self.handles[1]['item'].setPos(self.mapFromParent(Qt.QPointF(*handles[n/2-1])))
self.handles[2]['item'].setPos(self.mapFromParent(Qt.QPointF(*handles[n/2])))
self.handles[3]['item'].setPos(self.mapFromParent(Qt.QPointF(*handles[-1])))
for i in range(1, n/2-1):
#self.segmentClicked(self.segments[i-1], pos=self.mapFromParent(Qt.QPointF(*handles[i])))
self.segmentClicked(self.segments[i], pos=self.mapFromParent(Qt.QPointF(*handles[i])))
for i, h in enumerate(self.handles):
h['item'].setPos(self.mapFromParent(Qt.QPointF(*handles[i])))
finally:
self.blockSignals(False)
def segmentClicked(self, segment, ev=None, pos=None): ## ev/pos should be in this item's coordinate system
if ev != None:
pos = ev.pos()
elif pos != None:
pos = pos
else:
raise Exception("Either an event or a position must be specified")
## figure out which segment to add corresponding handle to
n = len(self.segments)
ind = self.segments.index(segment)
#mirrorInd = (n - ind) - 2
mirrorInd= n-ind
## figure out position at which to add second handle:
h1 = pg.Point(self.mapFromItem(segment, segment.handles[0]['item'].pos()))
h2 = pg.Point(self.mapFromItem(segment, segment.handles[1]['item'].pos()))
dist = (h1-pos).length()/(h1-h2).length()
h3 = pg.Point(self.mapFromItem(self.segments[mirrorInd], self.segments[mirrorInd].handles[0]['item'].pos()))
h4 = pg.Point(self.mapFromItem(self.segments[mirrorInd], self.segments[mirrorInd].handles[1]['item'].pos()))
mirrorPos = h4 - (h4-h3)*dist
## add handles:
if mirrorInd > ind:
ROI.PolyLineROI.segmentClicked(self, self.segments[mirrorInd], pos=mirrorPos)
ROI.PolyLineROI.segmentClicked(self, segment, pos=pos)
ROI.LineSegmentROI([pos, mirrorPos], [0,0], handles=(self.segments[ind].handles[1]['item'], self.segments[mirrorInd+1].handles[1]['item']), pen=self.pen, movable=False, parent=self)
else:
ROI.PolyLineROI.segmentClicked(self, segment, pos=pos)
ROI.PolyLineROI.segmentClicked(self, self.segments[mirrorInd], pos=mirrorPos)
ROI.LineSegmentROI([mirrorPos, pos], [0,0], handles=(self.segments[mirrorInd].handles[1]['item'], self.segments[ind+1].handles[1]['item']), pen=self.pen, movable=False, parent=self)
def getQuadrilaterals(self):
"""Return a list of quadrilaterals (each a list of 4 points, in self.parentItem coordinates) formed by the ROI."""
n = len(self.handles)
quads = []
positions = self.getHandlePositions()
for i in range(n/2-1):
quad=[]
quad.append(positions[i])
quad.append(positions[i+1])
quad.append(positions[-(i+2)])
quad.append(positions[-(i+1)])
quads.append(quad)
return quads
def getNormalizedRects(self):
"""Return a list of rectangles (each a list of 4 points, in self.parentItem coordinates) for quadrilaterals to be mapped into."""
quads = self.getQuadrilaterals()
widths = []
for i, q in enumerate(quads):
w = abs(Point((q[0]+(q[3]-q[0])/2.)-(q[1]+(q[2]-q[1])/2.)).length())
widths.append(w)
if Qt.QPolygonF(q).containsPoint(Qt.QPointF(0., 0.0002), Qt.Qt.OddEvenFill):
ind = i
mids = (quads[ind][0]+(quads[ind][3]-quads[ind][0])/2.),(quads[ind][1]+(quads[ind][2]-quads[ind][1])/2.)
xPos = -(Point(mids[0]).length()*math.sin(Point(mids[0]).angle(Point(0,1)))*(math.pi/180.))
rects = []
for i, q in enumerate(quads):
rect = []
if i < ind:
rect.append([-sum(widths[i:ind])+xPos, 0.])
elif i == ind:
rect.append([xPos, 0.])
elif i > ind:
rect.append([sum(widths[ind:i])-xPos, 0.])
rect.append([rect[0][0] + widths[i], 0.])
rect.append([rect[0][0] + widths[i], 0.001])
rect.append([rect[0][0], 0.001])
rects.append(rect)
return rects
def getHandlePositions(self):
"""Return a list handle positions in self.parentItem's coordinates. These are the coordinates that are marked by the grid."""
positions = []
for h in self.handles:
positions.append(self.mapToParent(h['item'].pos()))
return positions
def saveState(self):
state = ROI.PolyLineROI.saveState(self)
state['handles'] = [(p.x(), p.y()) for p in self.getHandlePositions()]
return state
| 46.432836
| 200
| 0.583735
|
260f1f3bceffbc53200f684770271b68bb0d96f2
| 7,541
|
py
|
Python
|
app.py
|
asher1112/trial_aws
|
cb45c87c3f0d9f9017c3b3a29e0bf6f95972b932
|
[
"MIT"
] | null | null | null |
app.py
|
asher1112/trial_aws
|
cb45c87c3f0d9f9017c3b3a29e0bf6f95972b932
|
[
"MIT"
] | null | null | null |
app.py
|
asher1112/trial_aws
|
cb45c87c3f0d9f9017c3b3a29e0bf6f95972b932
|
[
"MIT"
] | null | null | null |
import time
import os
import numpy as np
import matplotlib.pyplot as plt
import streamlit as st
import pandas as pd
import cv2
import librosa
import librosa.display
import sound
from tensorflow.keras.models import load_model
# load models
model = load_model("model.h5")
# tmodel = load_model("tmodel_all.h5")
# constants
CAT6 = ['fear', 'angry', 'neutral', 'happy', 'sad', 'surprise']
CAT3 = ["positive", "neutral", "negative"]
# page settings
st.set_page_config(layout="wide")
max_width = 1000
padding_top = 0
padding_right = "20%"
padding_left = "10%"
padding_bottom = 0
COLOR = "#1f1f2e"
BACKGROUND_COLOR = "#d1d1e0"
st.markdown(
f"""
<style>
.reportview-container .main .block-container{{
max-width: {max_width}px;
padding-top: {padding_top}rem;
padding-right: {padding_right}rem;
padding-left: {padding_left}rem;
padding-bottom: {padding_bottom}rem;
}}
.reportview-container .main {{
color: {COLOR};
background-color: {BACKGROUND_COLOR};
}}
</style>
""",
unsafe_allow_html=True,
)
@st.cache
def save_audio(file):
with open(os.path.join("audio", file.name), "wb") as f:
f.write(file.getbuffer())
@st.cache
def get_melspec(audio):
y, sr = librosa.load(audio, sr=44100)
X = librosa.stft(y)
Xdb = librosa.amplitude_to_db(abs(X))
img = np.stack((Xdb,) * 3,-1)
img = img.astype(np.uint8)
grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
grayImage = cv2.resize(grayImage, (224, 224))
rgbImage = np.repeat(grayImage[..., np.newaxis], 3, -1)
return (rgbImage, Xdb)
@st.cache
def get_mfccs(audio, limit):
y, sr = librosa.load(audio, sr=44100)
a = librosa.feature.mfcc(y, sr=44100, n_mfcc = 20)
    if a.shape[1] > limit:
        mfccs = a[:, :limit]
    else:
        # zero-pad (or copy unchanged when lengths match) so mfccs is always defined
        mfccs = np.zeros((a.shape[0], limit))
        mfccs[:, :a.shape[1]] = a
return mfccs
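# A minimal sketch (assumed shapes) of how these cached feature helpers feed the model
# loaded above: get_mfccs() returns a (20, limit) array, which is reshaped into a batch
# of one before model.predict(); "audio/sample.wav" is a hypothetical path used only
# for illustration.
#
#   mfccs = get_mfccs("audio/sample.wav", model.input_shape[-1])  # (20, limit)
#   batch = mfccs.reshape(1, *mfccs.shape)                        # (1, 20, limit)
#   probabilities = model.predict(batch)[0]                       # one score per CAT6 label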
@st.cache
def get_title(predictions, categories=CAT6):
title = f"Detected emotion: {categories[predictions.argmax()]} \
- {predictions.max() * 100:.2f}%"
return title
@st.cache
def plot_emotions(fig, data6, data3=None, title="Detected emotion",
categories6=CAT6, categories3=CAT3):
color_dict = {"neutral":"grey",
"positive":"green",
"happy": "green",
"surprise":"orange",
"fear":"purple",
"negative":"red",
"angry":"red",
"sad":"lightblue"}
if data3 is None:
pos = data6[3] + data6[5]
neu = data6[2]
neg = data6[0] + data6[1] + data6[4]
data3 = np.array([pos, neu, neg])
ind = categories6[data6.argmax()]
color6 = color_dict[ind]
data6 = list(data6)
n = len(data6)
data6 += data6[:1]
angles6 = [i/float(n)*2*np.pi for i in range(n)]
angles6 += angles6[:1]
ind = categories3[data3.argmax()]
color3 = color_dict[ind]
data3 = list(data3)
n = len(data3)
data3 += data3[:1]
angles3 = [i/float(n)*2*np.pi for i in range(n)]
angles3 += angles3[:1]
# fig = plt.figure(figsize=(10, 4))
fig.set_facecolor('#d1d1e0')
    ax = plt.subplot(122, polar=True)
# ax.set_facecolor('#d1d1e0')
plt.polar(angles6, data6, color=color6)
plt.fill(angles6, data6, facecolor=color6, alpha=0.25)
ax.spines['polar'].set_color('lightgrey')
ax.set_theta_offset(np.pi / 3)
ax.set_theta_direction(-1)
plt.xticks(angles6[:-1], categories6)
ax.set_rlabel_position(0)
plt.yticks([0, .25, .5, .75, 1], color="grey", size=8)
plt.title("BIG 6", color=color6)
plt.ylim(0, 1)
    ax = plt.subplot(121, polar=True)
# ax.set_facecolor('#d1d1e0')
plt.polar(angles3, data3, color=color3, linewidth=2, linestyle="--", alpha=.8)
plt.fill(angles3, data3, facecolor=color3, alpha=0.25)
ax.spines['polar'].set_color('lightgrey')
ax.set_theta_offset(np.pi / 6)
ax.set_theta_direction(-1)
plt.xticks(angles3[:-1], categories3)
ax.set_rlabel_position(0)
plt.yticks([0, .25, .5, .75, 1], color="grey", size=8)
plt.title("BIG 3", color=color3)
plt.ylim(0, 1)
plt.suptitle(title)
plt.subplots_adjust(top=0.75)
def main():
st.title("Speech Emotion Recognition")
    st.sidebar.markdown("## Use the menu to navigate the site")
menu = ["Upload audio", "Dataset analysis", "About"]
choice = st.sidebar.selectbox("Menu", menu)
if choice == "Upload audio":
st.subheader("Upload audio")
audio_file = st.file_uploader("Upload audio file", type=['wav'])
if st.button('Record'):
            with st.spinner('Recording for 3 seconds ...'):
st.write("Recording...")
time.sleep(3)
st.success("Recording completed")
if audio_file is not None:
st.title("Analyzing...")
file_details = {"Filename": audio_file.name, "FileSize": audio_file.size}
st.write(file_details)
# st.subheader(f"File {file_details['Filename']}")
st.audio(audio_file, format='audio/wav', start_time=0)
path = os.path.join("audio", audio_file.name)
save_audio(audio_file)
# extract features
wav, sr = librosa.load(path, sr=44100)
Xdb = get_melspec(path)[1]
fig, ax = plt.subplots(1, 2, figsize=(12, 4), sharex=True)
fig.set_facecolor('#d1d1e0')
plt.subplot(211)
plt.title("Wave-form")
librosa.display.waveplot(wav, sr=sr)
plt.gca().axes.get_yaxis().set_visible(False)
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.spines["right"].set_visible(False)
plt.gca().axes.spines["left"].set_visible(False)
plt.gca().axes.spines["top"].set_visible(False)
plt.gca().axes.spines["bottom"].set_visible(False)
plt.gca().axes.set_facecolor('#d1d1e0')
plt.subplot(212)
plt.title("Mel-log-spectrogram")
librosa.display.specshow(Xdb, sr=sr, x_axis='time', y_axis='hz')
plt.gca().axes.get_yaxis().set_visible(False)
plt.gca().axes.spines["right"].set_visible(False)
plt.gca().axes.spines["left"].set_visible(False)
plt.gca().axes.spines["top"].set_visible(False)
st.write(fig)
data3 = np.array([.8, .9, .2])
st.title("Getting the result...")
mfccs = get_mfccs(path, model.input_shape[-1])
mfccs = mfccs.reshape(1, *mfccs.shape)
pred = model.predict(mfccs)[0]
txt = get_title(pred)
fig = plt.figure(figsize=(10, 4))
plot_emotions(data6=pred, fig=fig, title=txt)
st.write(fig)
# mel = get_melspec(path)
# mel = mel.reshape(1, *mel.shape)
# tpred = model.predict(mel)[0]
# txt = get_title(tpred)
# fig = plt.figure(figsize=(10, 4))
# plot_emotions(data3=data3, data6=tpred, fig=fig, title=txt)
# st.write(fig)
elif choice == "Dataset analysis":
st.subheader("Dataset analysis")
# with st.echo(code_location='below'):
else:
st.subheader("About")
st.info("maria.s.startseva@gmail.com")
st.info("talbaram3192@gmail.com")
st.info("asherholder123@gmail.com")
if __name__ == '__main__':
main()
# Streamlit widgets automatically run the script from top to bottom. Since
# this button is not connected to any other logic, it just causes a plain
# rerun.
st.button("Re-run")
| 30.043825
| 85
| 0.603766
|
060f8231b335b301573fa1b6ddbbae68234f182a
| 3,597
|
py
|
Python
|
scalabel/label/io_test.py
|
batcer/scalabel
|
8d4a178bcf91207bf8b0d336c770d863cffb5701
|
[
"Apache-2.0"
] | null | null | null |
scalabel/label/io_test.py
|
batcer/scalabel
|
8d4a178bcf91207bf8b0d336c770d863cffb5701
|
[
"Apache-2.0"
] | null | null | null |
scalabel/label/io_test.py
|
batcer/scalabel
|
8d4a178bcf91207bf8b0d336c770d863cffb5701
|
[
"Apache-2.0"
] | null | null | null |
"""Test cases for io.py."""
import json
from ..unittest.util import get_test_file
from .io import dump, group_and_sort, load, parse
from .typing import Frame
def test_parse() -> None:
"""Test parse label string."""
raw = json.loads(
'{"name": 1, "videoName": "a", "size": [10, 20], '
'"labels":[{"id": 1, "box2d": '
'{"x1": 1, "y1": 2, "x2": 3, "y2": 4}, "attributes":'
'{"crowd": false, "trafficLightColor": "G", "speed": 10}}]}'
)
frame = parse(raw)
assert frame.name == "1"
assert frame.video_name == "a"
assert isinstance(frame.labels, list)
assert len(frame.labels) == 1
assert frame.labels[0].id == "1"
assert frame.labels[0].attributes is not None
assert frame.labels[0].attributes["crowd"] is False
assert frame.labels[0].attributes["traffic_light_color"] == "G"
assert frame.labels[0].attributes["speed"] == 10.0
b = frame.labels[0].box_2d
assert b is not None
assert b.y2 == 4
def test_load() -> None:
"""Test loading labels."""
filepath = get_test_file("image_list_with_auto_labels.json")
def assert_correctness(inputs: str, nprocs: int) -> None:
frames = load(inputs, nprocs)
assert len(frames) == 10
assert (
frames[0].url == "https://s3-us-west-2.amazonaws.com/bdd-label/"
"bdd100k/frames-20000/val/c1ba5ee6-b2cb1e51.jpg"
)
assert frames[0].frame_index == 0
assert frames[-1].frame_index == 9
assert frames[0].labels is not None
assert frames[-1].labels is not None
assert frames[0].labels[0].id == "0"
assert frames[0].labels[0].box_2d is not None
assert frames[-1].labels[-1].box_2d is not None
box = frames[-1].labels[-1].box_2d
assert box.x1 == 218.7211456298828
assert box.x2 == 383.5201416015625
assert box.y1 == 362.24761962890625
assert box.y2 == 482.4760437011719
assert frames[0].labels[0].poly_2d is not None
polys = frames[0].labels[0].poly_2d
assert isinstance(polys, list)
poly = polys[0]
assert len(poly.vertices) == len(poly.types)
assert len(poly.vertices[0]) == 2
for char in poly.types:
assert char in ["C", "L"]
assert_correctness(filepath, nprocs=0)
assert_correctness(filepath, nprocs=2)
def test_group_and_sort() -> None:
"""Check the group and sort results."""
frames = [
Frame(name="bbb-1", video_name="bbb", frame_index=1, labels=[]),
Frame(name="aaa-2", video_name="aaa", frame_index=2, labels=[]),
Frame(name="aaa-2", video_name="aaa", frame_index=1, labels=[]),
]
frames_list = group_and_sort(frames)
assert len(frames_list) == 2
assert len(frames_list[0]) == 2
assert len(frames_list[1]) == 1
assert str(frames_list[0][0].video_name) == "aaa"
assert frames_list[0][1].name == "aaa-2"
assert frames_list[0][1].frame_index == 2
def test_dump() -> None:
"""Test dump labels."""
filepath = get_test_file("image_list_with_auto_labels.json")
labels = load(filepath)
labels_dict = dump(labels)
assert labels_dict[0]["frameIndex"] == labels[0].frame_index
assert labels_dict[-1]["frameIndex"] == labels[-1].frame_index
assert "box3d" not in labels_dict[0]["labels"][0]
assert "box2d" in labels_dict[0]["labels"][0]
assert labels[0].labels is not None
assert labels[0].labels[0].box_2d is not None
assert (
labels_dict[0]["labels"][0]["box2d"]["x1"]
== labels[0].labels[0].box_2d.x1
)
| 35.613861
| 76
| 0.614401
|
a28df8be9c71380abb2471f81b5cadbbabc3d118
| 559
|
py
|
Python
|
learning_logs/migrations/0001_initial.py
|
benjithorpe/learning_log
|
cc26ca432d8532af2a1d727736698a25d170979b
|
[
"MIT"
] | null | null | null |
learning_logs/migrations/0001_initial.py
|
benjithorpe/learning_log
|
cc26ca432d8532af2a1d727736698a25d170979b
|
[
"MIT"
] | null | null | null |
learning_logs/migrations/0001_initial.py
|
benjithorpe/learning_log
|
cc26ca432d8532af2a1d727736698a25d170979b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2021-10-05 19:29
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Topic',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=200)),
('date_added', models.DateTimeField(auto_now_add=True)),
],
),
]
| 24.304348
| 117
| 0.581395
|
cba5378b23565146ef48e79d92b67b0b74d37043
| 319
|
py
|
Python
|
B03898_05_Codes/zero_coupon_bond.py
|
prakharShuklaOfficial/Mastering-Python-for-Finance-source-codes
|
7b3a74a0dc78aaa977f8fe752cc3fa4f54063f33
|
[
"MIT"
] | 446
|
2015-06-10T06:15:51.000Z
|
2022-03-28T22:16:03.000Z
|
B03898_05_Codes/zero_coupon_bond.py
|
prakharShuklaOfficial/Mastering-Python-for-Finance-source-codes
|
7b3a74a0dc78aaa977f8fe752cc3fa4f54063f33
|
[
"MIT"
] | 8
|
2016-11-25T09:27:15.000Z
|
2020-07-14T21:00:26.000Z
|
B03898_05_Codes/zero_coupon_bond.py
|
prakharShuklaOfficial/Mastering-Python-for-Finance-source-codes
|
7b3a74a0dc78aaa977f8fe752cc3fa4f54063f33
|
[
"MIT"
] | 277
|
2015-06-11T07:50:18.000Z
|
2022-03-22T12:54:46.000Z
|
"""
README
======
This file contains Python codes.
======
"""
def zero_coupon_bond(par, y, t):
"""
Price a zero coupon bond.
Par - face value of the bond.
y - annual yield or rate of the bond.
t - time to maturity in years.
"""
return par/(1+y)**t
print(zero_coupon_bond(100, 0.05, 5))
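# Worked example (added for illustration): a 100 face-value bond at a 5% annual
# yield maturing in 5 years is priced at 100 / 1.05**5, roughly 78.35.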
| 17.722222
| 41
| 0.586207
|
5e62127cc7a97de5bcaf0c3d7facd282213d6bc6
| 8,044
|
py
|
Python
|
homeassistant/components/starline/config_flow.py
|
bg1000/core
|
4ee4d674d8931927eae5222e3bf8dd6e26f3c6e5
|
[
"Apache-2.0"
] | 1
|
2021-03-23T07:20:03.000Z
|
2021-03-23T07:20:03.000Z
|
homeassistant/components/starline/config_flow.py
|
bg1000/core
|
4ee4d674d8931927eae5222e3bf8dd6e26f3c6e5
|
[
"Apache-2.0"
] | 51
|
2020-08-03T07:30:44.000Z
|
2022-03-22T06:02:42.000Z
|
homeassistant/components/starline/config_flow.py
|
bg1000/core
|
4ee4d674d8931927eae5222e3bf8dd6e26f3c6e5
|
[
"Apache-2.0"
] | null | null | null |
"""Config flow to configure StarLine component."""
from __future__ import annotations
from starline import StarlineAuth
import voluptuous as vol
from homeassistant import config_entries, core
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from .const import ( # pylint: disable=unused-import
_LOGGER,
CONF_APP_ID,
CONF_APP_SECRET,
CONF_CAPTCHA_CODE,
CONF_MFA_CODE,
DATA_EXPIRES,
DATA_SLID_TOKEN,
DATA_SLNET_TOKEN,
DATA_USER_ID,
DOMAIN,
ERROR_AUTH_APP,
ERROR_AUTH_MFA,
ERROR_AUTH_USER,
)
class StarlineFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a StarLine config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize flow."""
self._app_id: str | None = None
self._app_secret: str | None = None
self._username: str | None = None
self._password: str | None = None
self._mfa_code: str | None = None
self._app_code = None
self._app_token = None
self._user_slid = None
self._user_id = None
self._slnet_token = None
self._slnet_token_expires = None
self._captcha_image = None
self._captcha_sid = None
self._captcha_code = None
self._phone_number = None
self._auth = StarlineAuth()
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
return await self.async_step_auth_app(user_input)
async def async_step_auth_app(self, user_input=None, error=None):
"""Authenticate application step."""
if user_input is not None:
self._app_id = user_input[CONF_APP_ID]
self._app_secret = user_input[CONF_APP_SECRET]
return await self._async_authenticate_app(error)
return self._async_form_auth_app(error)
async def async_step_auth_user(self, user_input=None, error=None):
"""Authenticate user step."""
if user_input is not None:
self._username = user_input[CONF_USERNAME]
self._password = user_input[CONF_PASSWORD]
return await self._async_authenticate_user(error)
return self._async_form_auth_user(error)
async def async_step_auth_mfa(self, user_input=None, error=None):
"""Authenticate mfa step."""
if user_input is not None:
self._mfa_code = user_input[CONF_MFA_CODE]
return await self._async_authenticate_user(error)
return self._async_form_auth_mfa(error)
async def async_step_auth_captcha(self, user_input=None, error=None):
"""Captcha verification step."""
if user_input is not None:
self._captcha_code = user_input[CONF_CAPTCHA_CODE]
return await self._async_authenticate_user(error)
return self._async_form_auth_captcha(error)
@core.callback
def _async_form_auth_app(self, error=None):
"""Authenticate application form."""
errors = {}
if error is not None:
errors["base"] = error
return self.async_show_form(
step_id="auth_app",
data_schema=vol.Schema(
{
vol.Required(
CONF_APP_ID, default=self._app_id or vol.UNDEFINED
): str,
vol.Required(
CONF_APP_SECRET, default=self._app_secret or vol.UNDEFINED
): str,
}
),
errors=errors,
)
@core.callback
def _async_form_auth_user(self, error=None):
"""Authenticate user form."""
errors = {}
if error is not None:
errors["base"] = error
return self.async_show_form(
step_id="auth_user",
data_schema=vol.Schema(
{
vol.Required(
CONF_USERNAME, default=self._username or vol.UNDEFINED
): str,
vol.Required(
CONF_PASSWORD, default=self._password or vol.UNDEFINED
): str,
}
),
errors=errors,
)
@core.callback
def _async_form_auth_mfa(self, error=None):
"""Authenticate mfa form."""
errors = {}
if error is not None:
errors["base"] = error
return self.async_show_form(
step_id="auth_mfa",
data_schema=vol.Schema(
{
vol.Required(
CONF_MFA_CODE, default=self._mfa_code or vol.UNDEFINED
): str
}
),
errors=errors,
description_placeholders={"phone_number": self._phone_number},
)
@core.callback
def _async_form_auth_captcha(self, error=None):
"""Captcha verification form."""
errors = {}
if error is not None:
errors["base"] = error
return self.async_show_form(
step_id="auth_captcha",
data_schema=vol.Schema(
{
vol.Required(
CONF_CAPTCHA_CODE, default=self._captcha_code or vol.UNDEFINED
): str
}
),
errors=errors,
description_placeholders={
"captcha_img": '<img src="' + self._captcha_image + '"/>'
},
)
async def _async_authenticate_app(self, error=None):
"""Authenticate application."""
try:
self._app_code = await self.hass.async_add_executor_job(
self._auth.get_app_code, self._app_id, self._app_secret
)
self._app_token = await self.hass.async_add_executor_job(
self._auth.get_app_token, self._app_id, self._app_secret, self._app_code
)
return self._async_form_auth_user(error)
except Exception as err: # pylint: disable=broad-except
_LOGGER.error("Error auth StarLine: %s", err)
return self._async_form_auth_app(ERROR_AUTH_APP)
async def _async_authenticate_user(self, error=None):
"""Authenticate user."""
try:
state, data = await self.hass.async_add_executor_job(
self._auth.get_slid_user_token,
self._app_token,
self._username,
self._password,
self._mfa_code,
self._captcha_sid,
self._captcha_code,
)
if state == 1:
self._user_slid = data["user_token"]
return await self._async_get_entry()
if "phone" in data:
self._phone_number = data["phone"]
if state == 0:
error = ERROR_AUTH_MFA
return self._async_form_auth_mfa(error)
if "captchaSid" in data:
self._captcha_sid = data["captchaSid"]
self._captcha_image = data["captchaImg"]
return self._async_form_auth_captcha(error)
raise Exception(data)
except Exception as err: # pylint: disable=broad-except
_LOGGER.error("Error auth user: %s", err)
return self._async_form_auth_user(ERROR_AUTH_USER)
async def _async_get_entry(self):
"""Create entry."""
(
self._slnet_token,
self._slnet_token_expires,
self._user_id,
) = await self.hass.async_add_executor_job(
self._auth.get_user_id, self._user_slid
)
return self.async_create_entry(
title=f"Application {self._app_id}",
data={
DATA_USER_ID: self._user_id,
DATA_SLNET_TOKEN: self._slnet_token,
DATA_SLID_TOKEN: self._user_slid,
DATA_EXPIRES: self._slnet_token_expires,
},
)
| 33.516667
| 88
| 0.574714
|
5243dcf05cfdafff69d28c49ba0b2942989adbe3
| 914
|
py
|
Python
|
flexmeasures/cli/tests/utils.py
|
FlexMeasures/flexmeasures
|
a4367976d37ac5721b8eb3ce8a2414595e52c678
|
[
"Apache-2.0"
] | 12
|
2021-12-18T10:41:10.000Z
|
2022-03-29T23:00:29.000Z
|
flexmeasures/cli/tests/utils.py
|
FlexMeasures/flexmeasures
|
a4367976d37ac5721b8eb3ce8a2414595e52c678
|
[
"Apache-2.0"
] | 103
|
2021-12-07T08:51:15.000Z
|
2022-03-31T13:28:48.000Z
|
flexmeasures/cli/tests/utils.py
|
FlexMeasures/flexmeasures
|
a4367976d37ac5721b8eb3ce8a2414595e52c678
|
[
"Apache-2.0"
] | 3
|
2022-01-18T04:45:48.000Z
|
2022-03-14T09:48:22.000Z
|
from typing import List, Callable
from click.core import Command as ClickCommand
def to_flags(cli_input: dict) -> list:
"""Turn dictionary of CLI input into a list of CLI flags ready for use in FlaskCliRunner.invoke().
Example:
cli_input = {
"year": 2020,
"country": "NL",
}
cli_flags = to_flags(cli_input) # ["--year", 2020, "--country", "NL"]
runner = app.test_cli_runner()
result = runner.invoke(some_cli_function, to_flags(cli_input))
"""
return [
item
for sublist in zip(
[f"--{key.replace('_', '-')}" for key in cli_input.keys()],
cli_input.values(),
)
for item in sublist
]
def get_click_commands(module) -> List[Callable]:
return [
getattr(module, attr)
for attr in dir(module)
if type(getattr(module, attr)) == ClickCommand
]
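# A minimal usage sketch (``some_cli_module`` and ``app`` are hypothetical stand-ins
# for a module that defines click commands and the Flask app under test):
#
#   commands = get_click_commands(some_cli_module)
#   runner = app.test_cli_runner()
#   result = runner.invoke(commands[0], to_flags({"year": 2020, "country": "NL"}))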
| 26.882353
| 102
| 0.577681
|
0f80a9e3135560f0591099bad1bb7a5ddbb51087
| 3,826
|
py
|
Python
|
src/dirbs/dimensions/duplicate_threshold.py
|
bryang-qti-qualcomm/DIRBS-Core
|
6b48457715338cce4eb6b3948940297ebd789189
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
src/dirbs/dimensions/duplicate_threshold.py
|
bryang-qti-qualcomm/DIRBS-Core
|
6b48457715338cce4eb6b3948940297ebd789189
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
src/dirbs/dimensions/duplicate_threshold.py
|
bryang-qti-qualcomm/DIRBS-Core
|
6b48457715338cce4eb6b3948940297ebd789189
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
"""
DIRBS dimension function for duplicate threshold within a time period.
Copyright (c) 2018 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from psycopg2 import sql
from .duplicate_abstract_base import DuplicateAbstractBase
class DuplicateThreshold(DuplicateAbstractBase):
"""Implementation of the DuplicateThreshold classification dimension."""
def __init__(self, *, threshold, period_days=None, period_months=None, **kwargs):
"""Constructor."""
super().__init__(period_days=period_days, period_months=period_months, **kwargs)
try:
self._threshold = int(threshold)
except (TypeError, ValueError):
raise ValueError('\'threshold\' parameter must be an integer, got \'{0}\' instead...'.format(threshold))
@property
def algorithm_name(self):
"""Overrides Dimension.algorithm_name."""
return 'Duplicate threshold'
def _matching_imeis_sql(self, conn, app_config, virt_imei_range_start, virt_imei_range_end, curr_date=None):
"""Overrides Dimension._matching_imeis_sql."""
analysis_start_date, analysis_end_date = self._calc_analysis_window(conn, curr_date)
return sql.SQL(
"""SELECT imei_norm
FROM (SELECT DISTINCT imei_norm, imsi
FROM monthly_network_triplets_country
WHERE imei_norm IS NOT NULL
AND last_seen >= {analysis_start_date}
AND first_seen < {analysis_end_date}
AND virt_imei_shard >= {virt_imei_range_start}
AND virt_imei_shard < {virt_imei_range_end}
AND is_valid_imsi(imsi)) all_seen_imei_imsis
GROUP BY imei_norm HAVING COUNT(*) >= {threshold}
""").format(analysis_start_date=sql.Literal(analysis_start_date),
analysis_end_date=sql.Literal(analysis_end_date),
virt_imei_range_start=sql.Literal(virt_imei_range_start),
virt_imei_range_end=sql.Literal(virt_imei_range_end),
threshold=sql.Literal(self._threshold)).as_string(conn)
dimension = DuplicateThreshold
| 50.342105
| 118
| 0.715369
|
238949fa51e9cd34fca3d2b53dacfb22c8e656a1
| 8,225
|
py
|
Python
|
untitled7/apps/trade/views.py
|
pop993012/111122
|
cee79d5e6eb5b1c9714e3f712503cce9fccfb8c2
|
[
"Apache-2.0"
] | null | null | null |
untitled7/apps/trade/views.py
|
pop993012/111122
|
cee79d5e6eb5b1c9714e3f712503cce9fccfb8c2
|
[
"Apache-2.0"
] | null | null | null |
untitled7/apps/trade/views.py
|
pop993012/111122
|
cee79d5e6eb5b1c9714e3f712503cce9fccfb8c2
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework import mixins
from .models import ShopCar, OrderInfo, OrderGoods
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from .serializer import ShopCarSerializers, PostShopCarSerializers
from apps.goods.models import Goods
from .serializer import OrderInfoSerializer, OrderGoodsSerializer, OrderDetailSerializer
from django.views.decorators.csrf import csrf_exempt, csrf_protect
class ShopCarView(viewsets.ModelViewSet):
queryset = ShopCar.objects.all()
permission_classes = (IsAuthenticated,)
def get_serializer_class(self):
if self.action == 'list':
return ShopCarSerializers
return PostShopCarSerializers
authentication_classes = [BasicAuthentication, JSONWebTokenAuthentication]
def get_queryset(self):
return ShopCar.objects.filter(user=self.request.user)
def perform_create(self, serializer):
print(999955441)
user = self.request.user
goods = serializer.data['goods']
shop = ShopCar.objects.filter(user=user, goods_id=goods).first()
gs = Goods.objects.filter(id=goods).first()
gs.goods_num -= serializer.data['nums']
gs.save()
if shop:
shop.nums += serializer.data['nums']
shop.save()
else:
ShopCar.objects.create(goods_id=goods, nums=serializer.data['nums'], user=user)
def perform_update(self, serializer):
print(8)
# car_id=serializer.data['id']
goos_id = serializer.data['goods']
print(goos_id)
shopgoods = ShopCar.objects.filter(user=self.request.user, goods_id=goos_id).first()
print('OK')
print(shopgoods.nums)
print(serializer.initial_data['nums'])
print(shopgoods.goods)
goods = Goods.objects.filter(id=goos_id).first()
max = shopgoods.nums - serializer.initial_data['nums']
print(max)
print(serializer.data['nums'])
shopgoods.nums = serializer.initial_data['nums']
shopgoods.save()
goods.goods_num += max
goods.save()
def perform_destroy(self, instance):
goods = instance.goods
shopcar = ShopCar.objects.filter(pk=instance.pk).first()
goods.goods_num += shopcar.nums
shopcar.delete()
goods.save()
class OrderInfoView(viewsets.ModelViewSet):
queryset = OrderInfo.objects.all()
    permission_classes = (IsAuthenticated,)  # must be the authenticated user themselves
authentication_classes = [BasicAuthentication, JSONWebTokenAuthentication]
serializer_class = OrderInfoSerializer
def get_serializer_class(self):
if self.action == "retrieve":
return OrderDetailSerializer
return OrderInfoSerializer
def get_queryset(self):
return OrderInfo.objects.filter(user=self.request.user)
def perform_create(self, serializer):
order = serializer.save()
shops = ShopCar.objects.filter(user=self.request.user).all()
for shop in shops:
OrderGoods.objects.create(
order=order,
goods=shop.goods,
nums=shop.nums
)
shop.delete()
class OrderGoodsView(viewsets.ModelViewSet):
    permission_classes = (IsAuthenticated,)  # must be the authenticated user themselves
authentication_classes = [BasicAuthentication, JSONWebTokenAuthentication]
queryset = OrderGoods.objects.all()
def get_queryset(self):
return OrderInfo.objects.filter(user=self.request.user)
serializer_class = OrderGoodsSerializer
# from untitled7 import settings
# from alipay import AliPay
# import os
# ali_pay=AliPay(
# appid=settings.ALIPAY_APPID,
# app_notify_url = None,
# app_private_key_path = os.path.join(settings.BASE_DIR, 'keys/a'),
# alipay_public_key_path = os.path.join(settings.BASE_DIR, 'keys/pub'),
# debug = False,
# )
from rest_framework.views import APIView
from .util.aliPay import AliPay
from datetime import datetime
from rest_framework.response import Response
from django.shortcuts import HttpResponseRedirect
class AlipayView(APIView):
def get(self, request):
processed_dict = {}
        # pull the data out of the request
        for key, value in request.GET.items():
            processed_dict[key] = value
        # pop the sign off; the Alipay docs explain this
        sign = processed_dict.pop("sign", None)
        # create an AliPay object
        alipay = AliPay(
            appid="2016092000553304",
            app_notify_url="http://47.105.111.148:8000/alipay/return/",
            app_private_key_path='apps/trade/keys/a.txt',
            alipay_public_key_path='apps/trade/keys/zfb.txt',  # Alipay's public key, used to verify messages sent back by Alipay, not your own public key
            debug=True,  # defaults to False
return_url="http://47.105.111.148:8000/alipay/return/"
)
        # verify the signature
        verify_re = alipay.verify(processed_dict, sign)
        # if signature verification succeeded
        if verify_re is True:
            # the merchant site's unique order number
            order_sn = processed_dict.get('out_trade_no', None)
            # the Alipay transaction serial number
            trade_no = processed_dict.get('trade_no', None)
            # transaction status
            trade_status = processed_dict.get('trade_status', True)
            # look up the order records in the database (find the order by its order number)
existed_orders = OrderInfo.objects.filter(order_sn=order_sn)
for existed_order in existed_orders:
                # order line items
                order_goods = existed_order.goods.all()  # details of the order
                # increase each item's sales count by the quantity in the order
                for order_good in order_goods:
                    goods = order_good.goods  # the goods item
                    goods.sold_num += order_good.goods_num  # accumulate the sales count
                    goods.save()  # persist to the database
                # update the order status
                existed_order.pay_status = trade_status  # change the order's status
                existed_order.trade_no = trade_no  # Alipay serial number
                existed_order.pay_time = datetime.now()  # payment time
                existed_order.save()  # update the order info
            # a 'success' must be returned to Alipay; otherwise Alipay keeps resending the payment-success notification
return HttpResponseRedirect('http://47.105.128.181:8000/aa/')
else:
            return Response('Payment failed: signature verification unsuccessful')
    def post(self, request):
        """
        Handle Alipay's notify_url (this only works with a public IP).
        """
        # holds all of the data from the POST
processed_dict = {}
        # pull the data out of the POST
        for key, value in request.POST.items():
            processed_dict[key] = value
        # pop the sign off; the Alipay docs explain this
        sign = processed_dict.pop("sign", None)
        # create an AliPay object
        alipay = AliPay(
            appid="2016092000553304",
            app_notify_url="http://47.105.111.148:8000/alipay/return/",
            app_private_key_path='apps/trade/keys/a.txt',
            alipay_public_key_path='apps/trade/keys/zfb.txt',  # Alipay's public key, used to verify messages sent back by Alipay, not your own public key
            debug=True,  # defaults to False
return_url="http://47.105.111.148:8000/alipay/return/"
)
        # verify the signature
        verify_re = alipay.verify(processed_dict, sign)
        # if signature verification succeeded
        if verify_re is True:
            # the merchant site's unique order number
            order_sn = processed_dict.get('out_trade_no', None)
            # the Alipay transaction serial number
            trade_no = processed_dict.get('trade_no', None)
            # transaction status
            trade_status = processed_dict.get('trade_status', None)
            # look up the order records in the database
existed_orders = OrderInfo.objects.filter(order_sn=order_sn)
for existed_order in existed_orders:
                # order line items
                order_goods = existed_order.goods.all()
                # increase each item's sales count by the quantity in the order
                for order_good in order_goods:
                    goods = order_good.goods
                    goods.sold_num += order_good.goods_num
                    goods.save()
                # update the order status
                existed_order.pay_status = trade_status
                existed_order.trade_no = trade_no
                existed_order.pay_time = datetime.now()
                existed_order.save()
            # a 'success' must be returned to Alipay; otherwise Alipay keeps resending the payment-success notification
return Response("success")
| 35.606061
| 93
| 0.632827
|
fedc91091fb03e737d51b48a693d491a9be23b2a
| 2,129
|
py
|
Python
|
usaspending_api/search/tests/test_spending_over_time.py
|
truthiswill/usaspending-api
|
bd7d915442e2ec94cc830c480ceeffd4479be6c0
|
[
"CC0-1.0"
] | null | null | null |
usaspending_api/search/tests/test_spending_over_time.py
|
truthiswill/usaspending-api
|
bd7d915442e2ec94cc830c480ceeffd4479be6c0
|
[
"CC0-1.0"
] | 1
|
2021-11-15T17:53:27.000Z
|
2021-11-15T17:53:27.000Z
|
usaspending_api/search/tests/test_spending_over_time.py
|
truthiswill/usaspending-api
|
bd7d915442e2ec94cc830c480ceeffd4479be6c0
|
[
"CC0-1.0"
] | null | null | null |
import json
import pytest
from rest_framework import status
from usaspending_api.search.tests.test_mock_data_search import all_filters
@pytest.mark.django_db
def test_spending_over_time_success(client, refresh_matviews):
# test for needed filters
resp = client.post(
'/api/v2/search/spending_over_time',
content_type='application/json',
data=json.dumps({
"group": "fiscal_year",
"filters": {
"keywords": ["test", "testing"]
}
}))
assert resp.status_code == status.HTTP_200_OK
# test all filters
resp = client.post(
'/api/v2/search/spending_over_time',
content_type='application/json',
data=json.dumps({
"group": "quarter",
"filters": all_filters()
}))
assert resp.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_spending_over_time_failure(client, refresh_matviews):
"""Verify error on bad autocomplete request for budget function."""
resp = client.post(
'/api/v2/search/spending_over_time/',
content_type='application/json',
data=json.dumps({'group': 'fiscal_year'}))
assert resp.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_spending_over_time_subawards_success(client, refresh_matviews):
resp = client.post(
'/api/v2/search/spending_over_time',
content_type='application/json',
data=json.dumps({
"group": "quarter",
"filters": all_filters(),
"subawards": True
}))
assert resp.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_spending_over_time_subawards_failure(client, refresh_matviews):
"""Verify error on bad autocomplete request for budget function."""
resp = client.post(
'/api/v2/search/spending_over_time',
content_type='application/json',
data=json.dumps({
"group": "quarter",
"filters": all_filters(),
"subawards": "string"
}))
assert resp.status_code == status.HTTP_400_BAD_REQUEST
| 29.164384
| 74
| 0.646783
|
f6beda9b8c401a6860f8c037e7edbde5c86c1a4f
| 6,374
|
py
|
Python
|
salt/utils/hashutils.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | null | null | null |
salt/utils/hashutils.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | null | null | null |
salt/utils/hashutils.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
"""
A collection of hashing and encoding utils.
"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import base64
import hashlib
import hmac
import os
import random
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
# Import Salt libs
from salt.ext import six
from salt.utils.decorators.jinja import jinja_filter
@jinja_filter("base64_encode")
def base64_b64encode(instr):
"""
Encode a string as base64 using the "modern" Python interface.
Among other possible differences, the "modern" encoder does not include
newline ('\\n') characters in the encoded output.
"""
return salt.utils.stringutils.to_unicode(
base64.b64encode(salt.utils.stringutils.to_bytes(instr)),
encoding="utf8" if salt.utils.platform.is_windows() else None,
)
@jinja_filter("base64_decode")
def base64_b64decode(instr):
"""
Decode a base64-encoded string using the "modern" Python interface.
"""
decoded = base64.b64decode(salt.utils.stringutils.to_bytes(instr))
try:
return salt.utils.stringutils.to_unicode(
decoded, encoding="utf8" if salt.utils.platform.is_windows() else None
)
except UnicodeDecodeError:
return decoded
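# A quick round-trip sketch of the two helpers above:
#
#   encoded = base64_b64encode("hello")          # 'aGVsbG8=' (no trailing newline)
#   assert base64_b64decode(encoded) == "hello"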
def base64_encodestring(instr):
"""
Encode a byte-like object as base64 using the "modern" Python interface.
Among other possible differences, the "modern" encoder includes
a newline ('\\n') character after every 76 characters and always
at the end of the encoded string.
"""
# Handles PY2
if six.PY2:
return salt.utils.stringutils.to_unicode(
base64.encodestring(salt.utils.stringutils.to_bytes(instr)),
encoding="utf8" if salt.utils.platform.is_windows() else None,
)
# Handles PY3
return salt.utils.stringutils.to_unicode(
base64.encodebytes(salt.utils.stringutils.to_bytes(instr)),
encoding="utf8" if salt.utils.platform.is_windows() else None,
)
def base64_decodestring(instr):
"""
Decode a base64-encoded byte-like object using the "modern" Python interface.
"""
bvalue = salt.utils.stringutils.to_bytes(instr)
if six.PY3:
# Handle PY3
decoded = base64.decodebytes(bvalue)
else:
# Handle PY2
decoded = base64.decodestring(bvalue)
try:
return salt.utils.stringutils.to_unicode(
decoded, encoding="utf8" if salt.utils.platform.is_windows() else None
)
except UnicodeDecodeError:
return decoded
@jinja_filter("md5")
def md5_digest(instr):
"""
Generate an md5 hash of a given string.
"""
return salt.utils.stringutils.to_unicode(
hashlib.md5(salt.utils.stringutils.to_bytes(instr)).hexdigest()
)
@jinja_filter('sha1')
def sha1_digest(instr):
"""
Generate an sha1 hash of a given string.
"""
if six.PY3:
b = salt.utils.stringutils.to_bytes(instr)
return hashlib.sha1(b).hexdigest()
return hashlib.sha1(instr).hexdigest()
@jinja_filter("sha256")
def sha256_digest(instr):
"""
Generate a sha256 hash of a given string.
"""
return salt.utils.stringutils.to_unicode(
hashlib.sha256(salt.utils.stringutils.to_bytes(instr)).hexdigest()
)
@jinja_filter("sha512")
def sha512_digest(instr):
"""
Generate a sha512 hash of a given string
"""
return salt.utils.stringutils.to_unicode(
hashlib.sha512(salt.utils.stringutils.to_bytes(instr)).hexdigest()
)
@jinja_filter("hmac")
def hmac_signature(string, shared_secret, challenge_hmac):
"""
Verify a challenging hmac signature against a string / shared-secret
Returns a boolean if the verification succeeded or failed.
"""
msg = salt.utils.stringutils.to_bytes(string)
key = salt.utils.stringutils.to_bytes(shared_secret)
challenge = salt.utils.stringutils.to_bytes(challenge_hmac)
hmac_hash = hmac.new(key, msg, hashlib.sha256)
valid_hmac = base64.b64encode(hmac_hash.digest())
return valid_hmac == challenge
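# A minimal verification sketch (the challenge below is built the same way a peer
# holding the shared secret would build it):
#
#   import base64, hashlib, hmac
#   challenge = base64.b64encode(
#       hmac.new(b"shared-secret", b"payload", hashlib.sha256).digest())
#   assert hmac_signature("payload", "shared-secret", challenge)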
@jinja_filter('random_hash')
def random_hash(size=9999999999, hash_type=None):
"""
Return a hash of a randomized data from random.SystemRandom()
"""
if not hash_type:
hash_type = "md5"
hasher = getattr(hashlib, hash_type)
return hasher(
salt.utils.stringutils.to_bytes(
six.text_type(random.SystemRandom().randint(0, size))
)
).hexdigest()
@jinja_filter("file_hashsum")
def get_hash(path, form="sha256", chunk_size=65536):
"""
Get the hash sum of a file
This is better than ``get_sum`` for the following reasons:
- It does not read the entire file into memory.
- It does not return a string on error. The returned value of
``get_sum`` cannot really be trusted since it is vulnerable to
collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'``
"""
hash_type = hasattr(hashlib, form) and getattr(hashlib, form) or None
if hash_type is None:
raise ValueError("Invalid hash type: {0}".format(form))
with salt.utils.files.fopen(path, "rb") as ifile:
hash_obj = hash_type()
# read the file in in chunks, not the entire file
for chunk in iter(lambda: ifile.read(chunk_size), b""):
hash_obj.update(chunk)
return hash_obj.hexdigest()
class DigestCollector(object):
"""
Class to collect digest of the file tree.
"""
def __init__(self, form="sha256", buff=0x10000):
"""
Constructor of the class.
:param form:
"""
self.__digest = hasattr(hashlib, form) and getattr(hashlib, form)() or None
if self.__digest is None:
raise ValueError("Invalid hash type: {0}".format(form))
self.__buff = buff
def add(self, path):
"""
Update digest with the file content by path.
:param path:
:return:
"""
with salt.utils.files.fopen(path, "rb") as ifile:
for chunk in iter(lambda: ifile.read(self.__buff), b""):
self.__digest.update(chunk)
def digest(self):
"""
Get digest.
:return:
"""
return salt.utils.stringutils.to_str(self.__digest.hexdigest() + os.linesep)
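# A minimal usage sketch (file paths are hypothetical):
#
#   collector = DigestCollector(form="sha256")
#   for path in ("/etc/hosts", "/etc/hostname"):
#       collector.add(path)
#   print(collector.digest())       # one hex digest over the whole set of files
#
#   print(get_hash("/etc/hosts"))   # chunked sha256 of a single file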
| 28.841629
| 84
| 0.661908
|
dc7ae05f2092de1ce17c9678a77e6f88049b7c9c
| 9,525
|
py
|
Python
|
bloomon/utils/bouqet_manager.py
|
belushkin/bloomon
|
472dd48d297737335d114d770c27a6cac986c4e6
|
[
"MIT"
] | null | null | null |
bloomon/utils/bouqet_manager.py
|
belushkin/bloomon
|
472dd48d297737335d114d770c27a6cac986c4e6
|
[
"MIT"
] | null | null | null |
bloomon/utils/bouqet_manager.py
|
belushkin/bloomon
|
472dd48d297737335d114d770c27a6cac986c4e6
|
[
"MIT"
] | null | null | null |
import re
from bloomon.entities.bouqet_design import BouqetDesign
from collections import defaultdict
class BouqetManager(object):
"""
    A class used to process input data from the input stream.
    It takes an input line and decides whether it is a flower or a design, then stores flowers in an internal
    storage and creates bouqet design objects for later bouqet production. It also stores designs in a list.
    This class was designed under time pressure and can be optimized in several ways.
    First of all we have to decide whether we consume all flowers and then produce bouqets, or produce bouqets
    on the fly.
    I don't see at the moment how to optimize storage of the designs; we have to walk over the list and check
    whether we have enough flowers for every design. This is inefficient and can be optimized, but I don't see a
    clear solution at the moment. We could probably store designs in a trie or another tree-based
    implementation.
    Maintaining the total amount of flowers, both small and large, together with the distinction between design
    sizes, could also be improved, because the current implementation produces huge if conditions and
    looks ugly.
    Filling the remainder of the bouqet must be improved as well. I am thinking about maintaining a priority queue
    with flower species as keys and the amount of remaining flowers (large or small) as values. Keeping it sorted
    would reduce the amount of dict walking needed to fill the remaining flowers in the bouqet.
Attributes
----------
_designs : list
list of all designs we received from the input
_flowersS : dict
dictionary of small flower species together with amount we received from the input
_flowersL : dict
dictionary of large flower species together with amount we received from the input
_totalFlowersS : int
total amount of small flowers
_totalFlowersL : int
total amount of large flowers
Methods
-------
manage()
        Consumes a flower or design line from the input and decides whether to create a flower or a design
addBouqetDesign()
Creates object of bouqet design
addFlower()
Adds flower to the internal storage
getDesigns()
Returns existing designs
getSmallFlowers()
Returns dictionary with small flowers
getLargeFlowers()
Returns dictionary with large flowers
produceBouqet()
Check if we can create one of existing designs from the stream of incoming flowers.
Most time consuming and core function of the assessment
_getFlowers()
Private helper function for cutting flowers from the input string
_getTotalQuantityOfFlowers()
Private helper function for cutting total quantity of flowers from the input string
"""
    EXCEPTION_MESSAGE = 'Bouqet design {} does not have a quantity of flowers or it is less than 1'
def __init__(self):
self._designs = []
self._flowersS = defaultdict(int)
self._flowersL = defaultdict(int)
self._totalFlowersS = 0
self._totalFlowersL = 0
def manage(self, line):
""" void function consumes flower or design from the input and decide what to create flower or design
:return: void
"""
if not line:
return None
self.addFlower(line) if re.match('[a-z][L|S]', line) else self.addBouqetDesign(line)
def addBouqetDesign(self, line):
""" Creates new Bouqet design and store it in the list
:return: void
"""
quantity = self._getTotalQuantityOfFlowers(line)
design = BouqetDesign(
line[0],
line[1],
self._getFlowers(line[2:-int(len(str(quantity)))]),
quantity
)
self._designs.append(design)
def addFlower(self, line):
""" Adds flower to the internal storage and increase total amount of flowers
needed for checking if we can create bouqet or not
:return: void
"""
if line[1] == 'L':
self._flowersL[line[0]] += 1
self._totalFlowersL += 1
else:
self._flowersS[line[0]] += 1
self._totalFlowersS += 1
def getDesigns(self):
""" Returns existing designs
:return: list of existing designs
:rtype: list
"""
return self._designs
def getSmallFlowers(self):
""" Returns dictionary with small flowers
:return: small flowers
:rtype: dict
"""
return self._flowersS
def getLargeFlowers(self):
""" void function consumes flower or design from the input and decide what to create flower or design
:return: large flowers
:rtype: dict
"""
return self._flowersL
def _getFlowers(self, row):
""" Walk over the input string and cut flower specie and quantity then return it in dict
:return: flowers species and quantities
:rtype: dict
"""
result = {}
j = 0
for i, val in enumerate(row):
if not val.isdigit():
result[val] = int(row[j:i])
j = i + 1
return result
def _getTotalQuantityOfFlowers(self, row):
""" Cut total amount of flowers from the tail of the string
:raises: RuntimeError(EXCEPTION_MESSAGE)
If quantity of flowers is 0 or does not exists at all
:return: quantity of flowers
:rtype: int
"""
quantity = 0
if not row[-1].isdigit():
raise RuntimeError(BouqetManager.EXCEPTION_MESSAGE.format(row))
for index, _ in enumerate(row, 1):
if not row[-index].isdigit():
quantity = int(row[-index + 1:])
break
if quantity == 0:
raise RuntimeError(BouqetManager.EXCEPTION_MESSAGE.format(row))
return quantity
def produceBouqet(self):
""" Produce bouqet from existing flowers, checks if it has enough flowers to do it.
Walking over designs and checking can have bugs which has not been covered by tests, be careful
If you find issue please cover it by tests
:return: bouqet name
:rtype: str
"""
for design in self._designs:
designFlowers = design.getFlowers()
flowers = self._flowersL if design.getSize() == 'L' else self._flowersS
total = self._totalFlowersL if design.getSize() == 'L' else self._totalFlowersS
for key in designFlowers.keys():
if sum(designFlowers.values()) > total:
break
if key not in flowers:
break
if flowers[key] < designFlowers[key]:
break
else:
# Building main bouqet name
name = design.getName() + design.getSize()
flowersName = defaultdict(int)
for key in designFlowers.keys():
flowersName[key] += designFlowers[key]
if design.getSize() == 'L':
self._flowersL[key] -= designFlowers[key]
self._totalFlowersL -= designFlowers[key]
else:
self._flowersS[key] -= designFlowers[key]
self._totalFlowersS -= designFlowers[key]
# Building bouqet reminder
reminder = design.getFlowersQuantity() - sum(designFlowers.values())
for key in designFlowers.keys():
if reminder <= 0:
break
if design.getSize() == 'L':
if reminder > 0 and self._flowersL[key] > 0:
if self._flowersL[key] >= reminder:
flowersName[key] += reminder
self._flowersL[key] -= reminder
self._totalFlowersL -= reminder
reminder = 0
else:
flowersName[key] += self._flowersL[key]
self._totalFlowersL -= self._flowersL[key]
reminder -= self._flowersL[key]
self._flowersL[key] = 0
else:
if reminder > 0 and self._flowersS[key] > 0:
if self._flowersS[key] >= reminder:
flowersName[key] += reminder
self._flowersS[key] -= reminder
self._totalFlowersS -= reminder
reminder = 0
else:
flowersName[key] += self._flowersS[key]
self._totalFlowersS -= self._flowersS[key]
reminder -= self._flowersS[key]
self._flowersS[key] = 0
retName = ''.join('{}{}'.format(value, key) for key, value in flowersName.items())
return design.getName() + design.getSize(), name + retName
return None
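# A minimal usage sketch (the input format is assumed from the parsing code above:
# a design line is "<name><size><qty><specie>...<total>", a flower line is "<specie><size>"):
#
#   manager = BouqetManager()
#   manager.manage("AL2a1b4")              # design A, size L: 2 x 'a', 1 x 'b', 4 flowers total
#   for flower in ("aL", "aL", "bL", "bL"):
#       manager.manage(flower)
#   print(manager.produceBouqet())         # expected to yield roughly ("AL", "AL2a2b")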
| 38.253012
| 116
| 0.567454
|