blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42c7c20a57cfd7667a3ee3a05e5ad7b229d4de70 | 79e1d04867c4298b23c907f92c7119e4bea8ef02 | /allennlp/allennlp/training/learning_rate_schedulers.py | 487e8be55e48131c234a59bfe62e3adf9db87fe7 | [
"Apache-2.0"
] | permissive | ethanjperez/convince | 53db0bcd978831799c68fe63ecb0c91473ec40c4 | ccf60824b28f0ce8ceda44a7ce52a0d117669115 | refs/heads/master | 2023-01-08T09:12:16.722614 | 2021-11-03T18:50:30 | 2021-11-03T18:50:30 | 205,189,291 | 27 | 8 | Apache-2.0 | 2023-01-05T22:43:12 | 2019-08-29T15:03:34 | Python | UTF-8 | Python | false | false | 16,775 | py | """
AllenNLP uses most
`PyTorch learning rate schedulers <http://pytorch.org/docs/master/optim.html#how-to-adjust-learning-rate>`_,
with a thin wrapper to allow registering them and instantiating them ``from_params``.
The available learning rate schedulers from PyTorch are
* `"step" <http://pytorch.org/docs/master/optim.html#torch.optim.lr_scheduler.StepLR>`_
* `"multi_step" <http://pytorch.org/docs/master/optim.html#torch.optim.lr_scheduler.MultiStepLR>`_
* `"exponential" <http://pytorch.org/docs/master/optim.html#torch.optim.lr_scheduler.ExponentialLR>`_
* `"reduce_on_plateau" <http://pytorch.org/docs/master/optim.html#torch.optim.lr_scheduler.ReduceLROnPlateau>`_
In addition, AllenNLP also provides a Noam schedule and `cosine with restarts
<https://arxiv.org/abs/1608.03983>`_, which are registered as "noam" and "cosine", respectively.
"""
# During training using the AllenNLP `Trainer`, this is the API and calling
#sequence for `step` and `step_batch`. Also note that `step` is called
#once in `torch.optim.lr_scheduler._LRScheduler.__init__`.
#
# scheduler = ... # creates scheduler, calls self.step(last_epoch + 1) in __init__
#
# batch_num_total = 0
# for epoch in range(num_epochs):
# for batch in batchs_in_epoch:
# # compute loss, update parameters with current learning rates
# # call step_batch AFTER updating parameters
# batch_num_total += 1
# scheduler.step_batch(batch_num_total)
# # call step() at the END of each epoch
# scheduler.step(validation_metrics, epoch)
import logging
from typing import Optional, List
import numpy as np
import torch.optim.lr_scheduler
from overrides import overrides
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.common.registrable import Registrable
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class LearningRateScheduler(Registrable):
    """
    Thin ``Registrable`` wrapper around PyTorch :class:`LRSchedulers`.

    Concrete subclasses adapt schedulers that do / do not consume a
    validation metric to the single ``step`` / ``step_batch`` interface
    that the AllenNLP ``Trainer`` drives.
    """
    def __init__(self, lr_scheduler) -> None:
        # The wrapped PyTorch scheduler instance.
        self.lr_scheduler = lr_scheduler

    def step(self, metric: float, epoch: Optional[int] = None):
        # Per-epoch update; concrete wrapper subclasses must implement this.
        raise NotImplementedError

    def step_batch(self, batch_num_total: Optional[int]):
        # Per-batch update: forwarded only when the wrapped scheduler
        # supports batch-level stepping (e.g. NoamLR, SlantedTriangular).
        if batch_num_total is None:
            return
        if hasattr(self.lr_scheduler, 'step_batch'):
            self.lr_scheduler.step_batch(batch_num_total)

    # Requires custom from_params
    @classmethod
    def from_params(cls, optimizer: torch.optim.Optimizer, params: Params):  # type: ignore
        # pylint: disable=arguments-differ
        scheduler_type = params.pop_choice("type", LearningRateScheduler.list_available())
        scheduler = LearningRateScheduler.by_name(scheduler_type)(optimizer, **params.as_dict())  # type: ignore
        # ReduceLROnPlateau is the only scheduler that needs a metric at step time.
        if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            return LearningRateWithMetricsWrapper(scheduler)
        return LearningRateWithoutMetricsWrapper(scheduler)
class LearningRateWithoutMetricsWrapper(LearningRateScheduler):
    """
    Adapter for learning rate schedulers whose ``step`` takes no
    validation metric (i.e. everything except ``ReduceLROnPlateau``).
    """
    def __init__(self, lr_scheduler: torch.optim.lr_scheduler._LRScheduler) -> None:  # pylint: disable=protected-access
        super().__init__(lr_scheduler)
        self.lr_scheduler = lr_scheduler

    @overrides
    def step(self, metric: float, epoch: Optional[int] = None):
        # ``metric`` is accepted only to satisfy the common interface;
        # the wrapped scheduler's ``step`` takes just the epoch.
        self.lr_scheduler.step(epoch)
class LearningRateWithMetricsWrapper(LearningRateScheduler):
    """
    Adapter for learning rate schedulers that consume a validation
    metric at step time. ``ReduceLROnPlateau`` is currently the only
    scheduler of this kind.
    """
    def __init__(self, lr_scheduler: torch.optim.lr_scheduler.ReduceLROnPlateau) -> None:
        super().__init__(lr_scheduler)
        self.lr_scheduler = lr_scheduler

    @overrides
    def step(self, metric: float, epoch: Optional[int] = None):
        # A missing metric means there is no validation loop to drive
        # the plateau detection, which is a configuration error.
        if metric is None:
            message = ("The reduce_on_plateau learning rate scheduler requires "
                       "a validation metric to compute the schedule and therefore "
                       "must be used with a validation dataset.")
            raise ConfigurationError(message)
        self.lr_scheduler.step(metric, epoch)
class NoamLR(torch.optim.lr_scheduler._LRScheduler):  # pylint: disable=protected-access
    """
    Implements the Noam learning rate schedule: the rate grows linearly for
    the first ``warmup_steps`` batches and thereafter decays proportionally
    to the inverse square root of the step number, with the whole curve
    scaled by ``factor`` and by the inverse square root of ``model_size``.

    Parameters
    ----------
    model_size : ``int``, required.
        The hidden size parameter which dominates the number of parameters in your model.
    warmup_steps : ``int``, required.
        The number of steps to linearly increase the learning rate.
    factor : ``float``, optional (default = 1.0).
        The overall scale factor for the learning rate decay.
    """
    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 model_size: int,
                 warmup_steps: int,
                 factor: float = 1.0,
                 last_epoch: int = -1) -> None:
        self.warmup_steps = warmup_steps
        self.factor = factor
        self.model_size = model_size
        super().__init__(optimizer, last_epoch=last_epoch)

    def step(self, epoch=None):
        # Epoch-level stepping is a deliberate no-op: this schedule only
        # advances per batch via ``step_batch``.
        pass

    def step_batch(self, epoch=None):
        # Advance to the given batch number (or to the next one when omitted)
        # and push the freshly computed rate into every parameter group.
        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch
        for group, rate in zip(self.optimizer.param_groups, self.get_lr()):
            group['lr'] = rate

    def get_lr(self):
        step = max(1, self.last_epoch)
        warmup_term = step * self.warmup_steps ** (-1.5)
        decay_term = step ** (-0.5)
        scale = self.factor * self.model_size ** (-0.5) * min(decay_term, warmup_term)
        # Every parameter group receives the same absolute rate; ``base_lrs``
        # only determines how many groups there are.
        return [scale] * len(self.base_lrs)
class SlantedTriangular(torch.optim.lr_scheduler._LRScheduler):  # pylint: disable=protected-access
    """
    Implements the Slanted Triangular Learning Rate schedule with optional gradual
    unfreezing. The schedule corresponds to first linearly increasing the learning
    rate and annealing the learning based on a fixed ratio.

    If we gradually unfreeze, then in the first epoch of training, only the top
    layer is trained; in the second epoch, the top two layers are trained, etc.
    During freezing, the learning rate is increased and annealed over one epoch.
    After freezing finished, the learning rate is increased and annealed over
    the remaining training iterations.

    Note that with this schedule, early stopping should typically be avoided.

    Parameters
    ----------
    num_epochs : ``int``, required.
        The total number of epochs for which the model should be trained.
    num_steps_per_epoch: ``int``, required.
        The number of steps (updates, batches) per training epoch.
    cut_frac: ``float``, optional (default = 0.1).
        The fraction of the steps to increase the learning rate.
    ratio: ``float``, optional (default = 32).
        The ratio of the smallest to the (largest) base learning rate.
    gradual_unfreezing: ``bool``, optional (default = False).
        Whether gradual unfreezing should be used.
    discriminative_fine_tuning: ``bool``, optional (default = False).
        Whether discriminative fine-tuning (different learning rates per layer)
        are used.
    decay_factor: ``float``, optional (default = 0.38).
        The decay factor by which the learning rate is reduced with
        discriminative fine-tuning when going a layer deeper.
    """
    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 num_epochs: int,
                 num_steps_per_epoch: int,
                 cut_frac: float = 0.1,
                 ratio: int = 32,
                 last_epoch: int = -1,
                 gradual_unfreezing: bool = False,
                 discriminative_fine_tuning: bool = False,
                 decay_factor: float = 0.38) -> None:
        self.num_epochs = num_epochs
        self.num_steps_per_epoch = num_steps_per_epoch
        self.cut_frac = cut_frac
        self.ratio = ratio
        self.gradual_unfreezing = gradual_unfreezing
        # True while some layers are still frozen; get_lr() then restricts the
        # triangular schedule to the current epoch instead of all of training.
        self.freezing_current = self.gradual_unfreezing
        self.is_first_epoch = True
        # track the actual number of steps for each epoch
        self.batch_num_total_epoch_end: List[int] = []
        if self.gradual_unfreezing:
            # The last param_group is expected to be an empty "default" group
            # (parameters are assigned to per-layer groups by the optimizer config).
            assert not optimizer.param_groups[-1]["params"], \
                "The default group should be empty."
        if self.gradual_unfreezing or discriminative_fine_tuning:
            assert len(optimizer.param_groups) > 2, \
                "There should be at least 3 param_groups (2 + empty default group)" \
                " for gradual unfreezing / discriminative fine-tuning to make sense."
        super().__init__(optimizer, last_epoch=last_epoch)
        if discriminative_fine_tuning:
            # skip the last param_group if it is has no parameters
            # Walk the groups from last (top layer) to first, multiplying each
            # deeper non-empty layer's base rate by decay_factor ** exponent.
            exponent = 0
            for i in range(len(self.base_lrs)-1, -1, -1):
                param_group = optimizer.param_groups[i]
                if param_group['params']:
                    param_group['lr'] = self.base_lrs[i] * decay_factor ** exponent
                    self.base_lrs[i] = param_group['lr']
                    exponent += 1
        # set up for the first batch
        self.last_batch_num_total = -1
        self.step_batch(0)

    def step(self, epoch=None):
        # Record where this epoch ended so get_lr() can later recover the
        # actual (observed) number of batches per epoch.
        if len(self.batch_num_total_epoch_end) == 0:  # pylint: disable=len-as-condition
            self.batch_num_total_epoch_end.append(0)
        else:
            self.batch_num_total_epoch_end.append(self.last_batch_num_total)
        if self.gradual_unfreezing:
            # the method is called once when initialising before the
            # first epoch (epoch 0) and then always at the end of each
            # epoch; so the first time, with epoch id 0, we want to set
            # up for epoch #1; the second time, still with epoch id 0,
            # we want to set up for epoch #2, etc.
            # NOTE(review): this arithmetic assumes the trainer always passes
            # ``epoch``; ``epoch=None`` would raise a TypeError here — confirm.
            num_layers_to_unfreeze = epoch + 1 if self.is_first_epoch else epoch + 2
            if self.is_first_epoch:
                self.is_first_epoch = False
            if num_layers_to_unfreeze >= len(self.optimizer.param_groups)-1:
                logger.info('Gradual unfreezing finished. Training all layers.')
                self.freezing_current = False
            else:
                logger.info(f'Gradual unfreezing. Training only the top {num_layers_to_unfreeze} layers.')
            # Enumerating the reversed groups means i == 0 is the top layer
            # (last group); layers with index <= num_layers_to_unfreeze train.
            for i, param_group in enumerate(reversed(self.optimizer.param_groups)):
                for param in param_group["params"]:
                    # i = 0 is the default group; we care about i > 0
                    param.requires_grad = bool(i <= num_layers_to_unfreeze)

    def step_batch(self, batch_num_total=None):
        # Advance the batch counter and write freshly computed rates into
        # every parameter group.
        if batch_num_total is None:
            batch_num_total = self.last_batch_num_total + 1
        self.last_batch_num_total = batch_num_total
        for param_group, learning_rate in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = learning_rate

    def get_lr(self):
        # get the actual number of batches per epoch seen in training
        if len(self.batch_num_total_epoch_end) > 1:
            # have finished an epoch
            actual_num_steps_per_epoch = int(
                self.batch_num_total_epoch_end[-1] /
                (len(self.batch_num_total_epoch_end) - 1)
            )
        else:
            actual_num_steps_per_epoch = max(self.num_steps_per_epoch,
                                             self.last_batch_num_total)
        if self.freezing_current:
            # if we still freeze, we restrict the schedule to the current epoch
            num_steps = actual_num_steps_per_epoch
            step = min(self.last_batch_num_total - self.batch_num_total_epoch_end[-1],
                       num_steps)
        else:
            # otherwise we use the schedule for the rest of training
            if not self.gradual_unfreezing:
                frozen_steps = 0
            else:
                num_frozen_epochs = len(self.optimizer.param_groups) - 2
                frozen_steps = self.batch_num_total_epoch_end[num_frozen_epochs]
            num_steps = self.num_epochs * actual_num_steps_per_epoch - frozen_steps
            step = min(self.last_batch_num_total - frozen_steps,
                       num_steps)
        # Triangular shape: rise linearly for the first ``cut`` steps, then
        # decay linearly over the remainder.
        # NOTE(review): if num_steps == cut (e.g. cut_frac == 1) the decay
        # branch divides by zero — presumably never hit with sane configs.
        cut = int(num_steps * self.cut_frac)
        prop = step / cut if step < cut else 1 - (step - cut) / (num_steps - cut)
        return [lr * (1 + prop * (self.ratio - 1)) / self.ratio for lr in self.base_lrs]
class CosineWithRestarts(torch.optim.lr_scheduler._LRScheduler):  # pylint: disable=protected-access
    """
    Cosine annealing with warm restarts (SGDR), as described in
    https://arxiv.org/abs/1608.03983.

    Parameters
    ----------
    optimizer : ``torch.optim.Optimizer``
    t_initial : ``int``
        The number of iterations within the first cycle.
    t_mul : ``float``, optional (default=1)
        Each decay cycle lasts ``t_mul`` times as many iterations as the
        previous one.
    eta_min : ``float``, optional (default=0)
        The minimum learning rate.
    eta_mul : ``float``, optional (default=1)
        Each decay cycle starts from ``eta_mul`` times the previous cycle's
        initial learning rate.
    last_epoch : ``int``, optional (default=-1)
        The index of the last epoch. This is used when restarting.
    """
    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 t_initial: int,
                 t_mul: float = 1.,
                 eta_min: float = 0.,
                 eta_mul: float = 1.,
                 last_epoch: int = -1) -> None:
        assert t_initial > 0
        assert eta_min >= 0
        if t_initial == 1 and t_mul == 1 and eta_mul == 1:
            logger.warning("Cosine annealing scheduler will have no effect on the learning "
                           "rate since t_initial = t_mul = eta_mul = 1.")
        self.t_initial = t_initial
        self.t_mul = t_mul
        self.eta_min = eta_min
        self.eta_mul = eta_mul
        # Step index at which the current cycle started.
        self._last_restart: int = 0
        # Number of steps taken within the current cycle.
        self._cycle_counter: int = 0
        # Length (in steps) of the current cycle.
        self._cycle_len: int = t_initial
        # Number of restarts performed so far.
        self._n_restarts: int = 0
        self._initialized: bool = False
        super(CosineWithRestarts, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Get updated learning rate."""
        # HACK: the base ``_LRScheduler`` invokes ``get_lr()`` once from its
        # constructor, and on that very first call the learning rate must
        # remain unchanged, so we short-circuit with the base rates.
        if not self._initialized:
            self._initialized = True
            return self.base_lrs
        step = self.last_epoch + 1
        self._cycle_counter = step - self._last_restart
        if self._cycle_counter % self._cycle_len == 0:
            # Cycle boundary reached: restart the annealing.
            self._n_restarts += 1
            self._cycle_counter = 0
            self._last_restart = step
        decay = self.eta_mul ** self._n_restarts
        effective_base_lrs = [lr * decay for lr in self.base_lrs]
        self._cycle_len = int(self.t_initial * self.t_mul ** self._n_restarts)
        cosine_factor = np.cos(np.pi * (self._cycle_counter % self._cycle_len) / self._cycle_len) + 1
        return [
            self.eta_min + ((lr - self.eta_min) / 2) * cosine_factor
            for lr in effective_base_lrs
        ]
# We just use the Pytorch LRSchedulers, so here we force them into
# Registry._registry so we can build them from params.
# The PyTorch classes are registered unwrapped; ``LearningRateScheduler.from_params``
# instantiates the chosen entry and then wraps the result in the appropriate
# ``LearningRateWith(out)MetricsWrapper``.
Registrable._registry[LearningRateScheduler] = { # pylint: disable=protected-access
        "step": torch.optim.lr_scheduler.StepLR,
        "multi_step": torch.optim.lr_scheduler.MultiStepLR,
        "exponential": torch.optim.lr_scheduler.ExponentialLR,
        "reduce_on_plateau": torch.optim.lr_scheduler.ReduceLROnPlateau,
        # The following are defined in this module rather than in PyTorch.
        "cosine": CosineWithRestarts,
        "noam": NoamLR,
        "slanted_triangular": SlantedTriangular,
}
| [
"ethanperez18@gmail.com"
] | ethanperez18@gmail.com |
c12ed3555353b036fbee254e5955d663e05c5577 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /homeassistant/components/tcp/const.py | 3a42736c753ef8a6036d4568a0522b0ac67010f0 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 328 | py | """Constants for TCP platform."""
from __future__ import annotations
from typing import Final
# Configuration keys for the TCP sensor platform.
CONF_BUFFER_SIZE: Final = "buffer_size"
CONF_VALUE_ON: Final = "value_on"

# Defaults applied when the corresponding option is omitted.
DEFAULT_BUFFER_SIZE: Final = 1024  # presumably bytes per socket read — confirm in platform code
DEFAULT_NAME: Final = "TCP Sensor"
DEFAULT_TIMEOUT: Final = 10  # presumably seconds — confirm against socket timeout usage
DEFAULT_SSL: Final = False
DEFAULT_VERIFY_SSL: Final = True
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
566a0739dd637ac3f992fb3bde87f6e86a163245 | 98b1956594921aeef6e4b3c0f5b15703c3eee6a7 | /atom/nucleus/python/nucleus_api/models/notification_setting.py | 8a24b850138171e7f281df812ece026cb33b2857 | [
"Apache-2.0"
] | permissive | sumit4-ttn/SDK | d4db3dcac077e9c9508a8227010a2ab764c31023 | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | refs/heads/master | 2022-11-25T14:05:16.911068 | 2020-08-09T17:31:55 | 2020-08-09T17:31:55 | 286,413,715 | 0 | 0 | Apache-2.0 | 2020-08-10T08:03:04 | 2020-08-10T08:03:03 | null | UTF-8 | Python | false | false | 11,187 | py | # coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class NotificationSetting(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared swagger type of each model attribute; drives to_dict() serialization.
    swagger_types = {
        'client_id': 'str',
        'create_date': 'datetime',
        'frequency': 'int',
        'frequency_unit': 'str',
        'id': 'str',
        'is_receive': 'bool',
        'metadata': 'dict(str, str)',
        'notification_id': 'str',
        'secondary_id': 'str',
        'threshold_value': 'float',
        'update_date': 'datetime'
    }

    # Model attribute name -> JSON key in the API definition (identity mapping here).
    attribute_map = {
        'client_id': 'client_id',
        'create_date': 'create_date',
        'frequency': 'frequency',
        'frequency_unit': 'frequency_unit',
        'id': 'id',
        'is_receive': 'is_receive',
        'metadata': 'metadata',
        'notification_id': 'notification_id',
        'secondary_id': 'secondary_id',
        'threshold_value': 'threshold_value',
        'update_date': 'update_date'
    }

    def __init__(self, client_id=None, create_date=None, frequency=None, frequency_unit=None, id=None, is_receive=None, metadata=None, notification_id=None, secondary_id=None, threshold_value=None, update_date=None):  # noqa: E501
        """NotificationSetting - a model defined in Swagger"""  # noqa: E501
        self._client_id = None
        self._create_date = None
        self._frequency = None
        self._frequency_unit = None
        self._id = None
        self._is_receive = None
        self._metadata = None
        self._notification_id = None
        self._secondary_id = None
        self._threshold_value = None
        self._update_date = None
        self.discriminator = None
        # client_id and notification_id are required fields: they are assigned
        # unconditionally, and their property setters raise ValueError on None.
        self.client_id = client_id
        if create_date is not None:
            self.create_date = create_date
        if frequency is not None:
            self.frequency = frequency
        if frequency_unit is not None:
            self.frequency_unit = frequency_unit
        if id is not None:
            self.id = id
        if is_receive is not None:
            self.is_receive = is_receive
        if metadata is not None:
            self.metadata = metadata
        self.notification_id = notification_id
        if secondary_id is not None:
            self.secondary_id = secondary_id
        if threshold_value is not None:
            self.threshold_value = threshold_value
        if update_date is not None:
            self.update_date = update_date

    @property
    def client_id(self):
        """Gets the client_id of this NotificationSetting.  # noqa: E501

        client_id  # noqa: E501

        :return: The client_id of this NotificationSetting.  # noqa: E501
        :rtype: str
        """
        return self._client_id

    @client_id.setter
    def client_id(self, client_id):
        """Sets the client_id of this NotificationSetting.

        client_id  # noqa: E501

        :param client_id: The client_id of this NotificationSetting.  # noqa: E501
        :type: str
        """
        # Required field: reject None.
        if client_id is None:
            raise ValueError("Invalid value for `client_id`, must not be `None`")  # noqa: E501
        self._client_id = client_id

    @property
    def create_date(self):
        """Gets the create_date of this NotificationSetting.  # noqa: E501

        :return: The create_date of this NotificationSetting.  # noqa: E501
        :rtype: datetime
        """
        return self._create_date

    @create_date.setter
    def create_date(self, create_date):
        """Sets the create_date of this NotificationSetting.

        :param create_date: The create_date of this NotificationSetting.  # noqa: E501
        :type: datetime
        """
        self._create_date = create_date

    @property
    def frequency(self):
        """Gets the frequency of this NotificationSetting.  # noqa: E501

        frequency  # noqa: E501

        :return: The frequency of this NotificationSetting.  # noqa: E501
        :rtype: int
        """
        return self._frequency

    @frequency.setter
    def frequency(self, frequency):
        """Sets the frequency of this NotificationSetting.

        frequency  # noqa: E501

        :param frequency: The frequency of this NotificationSetting.  # noqa: E501
        :type: int
        """
        self._frequency = frequency

    @property
    def frequency_unit(self):
        """Gets the frequency_unit of this NotificationSetting.  # noqa: E501

        frequency_unit  # noqa: E501

        :return: The frequency_unit of this NotificationSetting.  # noqa: E501
        :rtype: str
        """
        return self._frequency_unit

    @frequency_unit.setter
    def frequency_unit(self, frequency_unit):
        """Sets the frequency_unit of this NotificationSetting.

        frequency_unit  # noqa: E501

        :param frequency_unit: The frequency_unit of this NotificationSetting.  # noqa: E501
        :type: str
        """
        self._frequency_unit = frequency_unit

    @property
    def id(self):
        """Gets the id of this NotificationSetting.  # noqa: E501

        :return: The id of this NotificationSetting.  # noqa: E501
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this NotificationSetting.

        :param id: The id of this NotificationSetting.  # noqa: E501
        :type: str
        """
        self._id = id

    @property
    def is_receive(self):
        """Gets the is_receive of this NotificationSetting.  # noqa: E501

        is_receive  # noqa: E501

        :return: The is_receive of this NotificationSetting.  # noqa: E501
        :rtype: bool
        """
        return self._is_receive

    @is_receive.setter
    def is_receive(self, is_receive):
        """Sets the is_receive of this NotificationSetting.

        is_receive  # noqa: E501

        :param is_receive: The is_receive of this NotificationSetting.  # noqa: E501
        :type: bool
        """
        self._is_receive = is_receive

    @property
    def metadata(self):
        """Gets the metadata of this NotificationSetting.  # noqa: E501

        metadata  # noqa: E501

        :return: The metadata of this NotificationSetting.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this NotificationSetting.

        metadata  # noqa: E501

        :param metadata: The metadata of this NotificationSetting.  # noqa: E501
        :type: dict(str, str)
        """
        self._metadata = metadata

    @property
    def notification_id(self):
        """Gets the notification_id of this NotificationSetting.  # noqa: E501

        notification_id  # noqa: E501

        :return: The notification_id of this NotificationSetting.  # noqa: E501
        :rtype: str
        """
        return self._notification_id

    @notification_id.setter
    def notification_id(self, notification_id):
        """Sets the notification_id of this NotificationSetting.

        notification_id  # noqa: E501

        :param notification_id: The notification_id of this NotificationSetting.  # noqa: E501
        :type: str
        """
        # Required field: reject None.
        if notification_id is None:
            raise ValueError("Invalid value for `notification_id`, must not be `None`")  # noqa: E501
        self._notification_id = notification_id

    @property
    def secondary_id(self):
        """Gets the secondary_id of this NotificationSetting.  # noqa: E501

        :return: The secondary_id of this NotificationSetting.  # noqa: E501
        :rtype: str
        """
        return self._secondary_id

    @secondary_id.setter
    def secondary_id(self, secondary_id):
        """Sets the secondary_id of this NotificationSetting.

        :param secondary_id: The secondary_id of this NotificationSetting.  # noqa: E501
        :type: str
        """
        self._secondary_id = secondary_id

    @property
    def threshold_value(self):
        """Gets the threshold_value of this NotificationSetting.  # noqa: E501

        :return: The threshold_value of this NotificationSetting.  # noqa: E501
        :rtype: float
        """
        return self._threshold_value

    @threshold_value.setter
    def threshold_value(self, threshold_value):
        """Sets the threshold_value of this NotificationSetting.

        :param threshold_value: The threshold_value of this NotificationSetting.  # noqa: E501
        :type: float
        """
        self._threshold_value = threshold_value

    @property
    def update_date(self):
        """Gets the update_date of this NotificationSetting.  # noqa: E501

        :return: The update_date of this NotificationSetting.  # noqa: E501
        :rtype: datetime
        """
        return self._update_date

    @update_date.setter
    def update_date(self, update_date):
        """Sets the update_date of this NotificationSetting.

        :param update_date: The update_date of this NotificationSetting.  # noqa: E501
        :type: datetime
        """
        self._update_date = update_date

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists of models and dicts of
        # models by calling their own ``to_dict`` where available.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(NotificationSetting, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NotificationSetting):
            return False
        # Compares the full attribute dicts (private backing fields included).
        # NOTE: defining __eq__ without __hash__ makes instances unhashable.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hydrogen@Hydrogens-MacBook-Pro.local"
] | hydrogen@Hydrogens-MacBook-Pro.local |
6ada1adbb78c2a1e0598d59d181a4cbf8af24c48 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/network/plugins/modules/network/cloudengine/ce_lldp_interface.py | 648d9118d9dc54468208c42c118adcbb5d4733a0 | [
"GPL-3.0-or-later",
"GPL-3.0-only",
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 70,008 | py | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: ce_lldp_interface
version_added: '0.2.0'
short_description: Manages INTERFACE LLDP configuration on HUAWEI CloudEngine switches.
description:
- Manages INTERFACE LLDP configuration on HUAWEI CloudEngine switches.
author: xuxiaowei0512 (@CloudEngine-Ansible)
notes:
- This module requires the netconf system service be enabled on the remote device being managed.
- Recommended connection is C(netconf).
- This module also works with C(local) connections for legacy playbooks.
options:
lldpenable:
description:
- Set global LLDP enable state.
type: str
choices: ['enabled', 'disabled']
function_lldp_interface_flag:
description:
- Used to distinguish between command line functions.
type: str
choices: ['disableINTERFACE','tlvdisableINTERFACE','tlvenableINTERFACE','intervalINTERFACE']
type_tlv_disable:
description:
- Used to distinguish between command line functions.
type: str
choices: ['basic_tlv', 'dot3_tlv']
type_tlv_enable:
description:
- Used to distinguish between command line functions.
type: str
choices: ['dot1_tlv','dcbx']
lldpadminstatus:
description:
- Set interface lldp enable state.
type: str
choices: ['txOnly', 'rxOnly', 'txAndRx', 'disabled']
ifname:
description:
- Interface name.
type: str
txinterval:
description:
- LLDP send message interval.
type: int
txprotocolvlanid:
description:
- Set tx protocol vlan id.
type: int
txvlannameid:
description:
- Set tx vlan name id.
type: int
vlannametxenable:
description:
- Set vlan name tx enable or not.
type: bool
manaddrtxenable:
description:
- Make it able to send management address TLV.
type: bool
portdesctxenable:
description:
- Enabling the ability to send a description of TLV.
type: bool
syscaptxenable:
description:
- Enable the ability to send system capabilities TLV.
type: bool
sysdesctxenable:
description:
- Enable the ability to send system description TLV.
type: bool
sysnametxenable:
description:
- Enable the ability to send system name TLV.
type: bool
portvlantxenable:
description:
- Enable port vlan tx.
type: bool
protovlantxenable:
description:
- Enable protocol vlan tx.
type: bool
protoidtxenable:
description:
- Enable the ability to send protocol identity TLV.
type: bool
macphytxenable:
description:
- Enable MAC/PHY configuration and state TLV to be sent.
type: bool
linkaggretxenable:
description:
- Enable the ability to send link aggregation TLV.
type: bool
maxframetxenable:
description:
- Enable the ability to send maximum frame length TLV.
type: bool
eee:
description:
- Enable the ability to send EEE TLV.
type: bool
dcbx:
description:
- Enable the ability to send DCBX TLV.
type: bool
state:
description:
- Manage the state of the resource.
type: str
default: present
choices: ['present', 'absent']
'''
# Fixes in the examples below: the first task previously referenced a
# non-existent module name ("ce_lldp_interface_interface", without the
# collection prefix), and three tasks used camel-cased parameter names
# (linkAggreTxEnable, macPhyTxEnable, maxFrameTxEnable) that do not match
# the module's declared options (linkaggretxenable, macphytxenable,
# maxframetxenable) and would be rejected at runtime.
EXAMPLES = '''
- name: "Configure global LLDP enable state"
  community.network.ce_lldp_interface:
    lldpenable: enabled

- name: "Configure interface lldp enable state"
  community.network.ce_lldp_interface:
    function_lldp_interface_flag: disableINTERFACE
    ifname: 10GE1/0/1
    lldpadminstatus: rxOnly

- name: "Configure LLDP transmit interval and ensure global LLDP state is already enabled"
  community.network.ce_lldp_interface:
    function_lldp_interface_flag: intervalINTERFACE
    ifname: 10GE1/0/1
    txinterval: 4

- name: "Configure basic-tlv: management-address TLV"
  community.network.ce_lldp_interface:
    function_lldp_interface_flag: tlvdisableINTERFACE
    type_tlv_disable: basic_tlv
    ifname: 10GE1/0/1
    manaddrtxenable: true

- name: "Configure basic-tlv: prot description TLV"
  community.network.ce_lldp_interface:
    function_lldp_interface_flag: tlvdisableINTERFACE
    type_tlv_disable: basic_tlv
    ifname: 10GE1/0/1
    portdesctxenable: true

- name: "Configure basic-tlv: system capabilities TLV"
  community.network.ce_lldp_interface:
    function_lldp_interface_flag: tlvdisableINTERFACE
    type_tlv_disable: basic_tlv
    ifname: 10GE1/0/1
    syscaptxenable: true

- name: "Configure basic-tlv: system description TLV"
  community.network.ce_lldp_interface:
    function_lldp_interface_flag: tlvdisableINTERFACE
    type_tlv_disable: basic_tlv
    ifname: 10GE1/0/1
    sysdesctxenable: true

- name: "Configure basic-tlv: system name TLV"
  community.network.ce_lldp_interface:
    function_lldp_interface_flag: tlvdisableINTERFACE
    type_tlv_disable: basic_tlv
    ifname: 10GE1/0/1
    sysnametxenable: true

- name: "TLV types that are forbidden to be published on the configuration interface, link aggregation TLV"
  community.network.ce_lldp_interface:
    function_lldp_interface_flag: tlvdisableINTERFACE
    type_tlv_disable: dot3_tlv
    ifname: 10GE1/0/1
    linkaggretxenable: true

- name: "TLV types that are forbidden to be published on the configuration interface, MAC/PHY configuration/status TLV"
  community.network.ce_lldp_interface:
    function_lldp_interface_flag: tlvdisableINTERFACE
    type_tlv_disable: dot3_tlv
    ifname: 10GE1/0/1
    macphytxenable: true

- name: "TLV types that are forbidden to be published on the configuration interface, maximum frame size TLV"
  community.network.ce_lldp_interface:
    function_lldp_interface_flag: tlvdisableINTERFACE
    type_tlv_disable: dot3_tlv
    ifname: 10GE1/0/1
    maxframetxenable: true

- name: "TLV types that are forbidden to be published on the configuration interface, EEE TLV"
  community.network.ce_lldp_interface:
    function_lldp_interface_flag: tlvdisableINTERFACE
    type_tlv_disable: dot3_tlv
    ifname: 10GE1/0/1
    eee: true

- name: "Configure the interface to publish an optional DCBX TLV type"
  community.network.ce_lldp_interface:
    function_lldp_interface_flag: tlvenableINTERFACE
    ifname: 10GE1/0/1
    type_tlv_enable: dcbx
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {
"lldpenable": "enabled",
"lldpadminstatus": "rxOnly",
"function_lldp_interface_flag": "tlvenableINTERFACE",
"type_tlv_enable": "dot1_tlv",
"ifname": "10GE1/0/1",
"state": "present"
}
existing:
description: k/v pairs of existing global LLDP configration
returned: always
type: dict
sample: {
"lldpenable": "disabled",
"ifname": "10GE1/0/1",
"lldpadminstatus": "txAndRx"
}
end_state:
description: k/v pairs of global DLDP configration after module execution
returned: always
type: dict
sample: {
"lldpenable": "enabled",
"lldpadminstatus": "rxOnly",
"function_lldp_interface_flag": "tlvenableINTERFACE",
"type_tlv_enable": "dot1_tlv",
"ifname": "10GE1/0/1"
}
updates:
description: command sent to the device
returned: always
type: list
sample: [
"lldp enable",
"interface 10ge 1/0/1",
"undo lldp disable",
"lldp tlv-enable dot1-tlv vlan-name 4",
]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import copy
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.network.plugins.module_utils.network.cloudengine.ce import set_nc_config, get_nc_config
# NETCONF filter: read the global LLDP enable switch.
CE_NC_GET_GLOBAL_LLDPENABLE_CONFIG = """
<filter type="subtree">
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpSys>
<lldpEnable></lldpEnable>
</lldpSys>
</lldp>
</filter>
"""
# NETCONF edit-config: merge the global LLDP enable switch (%s -> enabled/disabled).
CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG = """
<config>
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpSys operation="merge">
<lldpEnable>%s</lldpEnable>
</lldpSys>
</lldp>
</config>
"""
# NETCONF filter: read per-interface LLDP admin status.
CE_NC_GET_INTERFACE_LLDP_CONFIG = """
<filter type="subtree">
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpInterfaces>
<lldpInterface>
<ifName></ifName>
<lldpAdminStatus></lldpAdminStatus>
</lldpInterface>
</lldpInterfaces>
</lldp>
</filter>
"""
# NETCONF edit-config: merge per-interface LLDP admin status (%s = ifname, %s = status).
CE_NC_MERGE_INTERFACE_LLDP_CONFIG = """
<config>
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpInterfaces>
<lldpInterface operation="merge">
<ifName>%s</ifName>
<lldpAdminStatus>%s</lldpAdminStatus>
</lldpInterface>
</lldpInterfaces>
</lldp>
</config>
"""
# NETCONF filter: read per-interface LLDP transmit interval.
CE_NC_GET_INTERFACE_INTERVAl_CONFIG = """
<filter type="subtree">
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpInterfaces>
<lldpInterface>
<ifName></ifName>
<msgInterval>
<txInterval></txInterval>
</msgInterval>
</lldpInterface>
</lldpInterfaces>
</lldp>
</filter>
"""
# NETCONF edit-config: merge per-interface transmit interval (%s = ifname, %s = interval).
CE_NC_MERGE_INTERFACE_INTERVAl_CONFIG = """
<config>
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpInterfaces>
<lldpInterface>
<ifName>%s</ifName>
<msgInterval operation="merge">
<txInterval>%s</txInterval>
</msgInterval>
</lldpInterface>
</lldpInterfaces>
</lldp>
</config>
"""
# NETCONF filter: read TLV types that may be enabled per interface (dot1/dcbx leaves).
CE_NC_GET_INTERFACE_TLV_ENABLE_CONFIG = """
<filter type="subtree">
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpInterfaces>
<lldpInterface>
<ifName></ifName>
<tlvTxEnable>
<dcbx></dcbx>
<protoIdTxEnable></protoIdTxEnable>
</tlvTxEnable>
</lldpInterface>
</lldpInterfaces>
</lldp>
</filter>
"""
# NETCONF filter: read TLV types that may be disabled per interface (basic/dot3 leaves).
CE_NC_GET_INTERFACE_TLV_DISABLE_CONFIG = """
<filter type="subtree">
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpInterfaces>
<lldpInterface>
<ifName></ifName>
<tlvTxEnable>
<manAddrTxEnable></manAddrTxEnable>
<portDescTxEnable></portDescTxEnable>
<sysCapTxEnable></sysCapTxEnable>
<sysDescTxEnable></sysDescTxEnable>
<sysNameTxEnable></sysNameTxEnable>
<linkAggreTxEnable></linkAggreTxEnable>
<macPhyTxEnable></macPhyTxEnable>
<maxFrameTxEnable></maxFrameTxEnable>
<eee></eee>
</tlvTxEnable>
</lldpInterface>
</lldpInterfaces>
</lldp>
</filter>
"""
# The TLV merge request is assembled as HEADER % ifname + one or more
# field fragments (each "% value") + TAIL.
CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER = """
<config>
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpInterfaces>
<lldpInterface>
<ifName>%s</ifName>
<tlvTxEnable operation="merge">
"""
# Fragment: dot1 protocol-identity TLV enable flag.
CE_NC_MERGE_INTERFACE_TLV_CONFIG_ENABLE_PROTOIDTXENABLE = """
<protoIdTxEnable>%s</protoIdTxEnable>
"""
# Fragment: DCBX TLV enable flag.
CE_NC_MERGE_INTERFACE_TLV_CONFIG_ENABLE_DCBX = """
<dcbx>%s</dcbx>
"""
# Fragment: basic-TLV management-address flag.
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MANADDRTXENABLE = """
<manAddrTxEnable>%s</manAddrTxEnable>
"""
# Fragment: basic-TLV port-description flag.
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_PORTDESCTXENABLE = """
<portDescTxEnable>%s</portDescTxEnable>
"""
# Fragment: basic-TLV system-capability flag.
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSCAPTXENABLE = """
<sysCapTxEnable>%s</sysCapTxEnable>
"""
# Fragment: basic-TLV system-description flag.
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSDESCTXENABLE = """
<sysDescTxEnable>%s</sysDescTxEnable>
"""
# Fragment: basic-TLV system-name flag.
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSNAMETXENABLE = """
<sysNameTxEnable>%s</sysNameTxEnable>
"""
# Fragment: dot3-TLV link-aggregation flag.
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_LINKAGGRETXENABLE = """
<linkAggreTxEnable>%s</linkAggreTxEnable>
"""
# Fragment: dot3-TLV MAC/PHY configuration-status flag.
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MACPHYTXENABLE = """
<macPhyTxEnable>%s</macPhyTxEnable>
"""
# Fragment: dot3-TLV maximum-frame-size flag.
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MAXFRAMETXENABLE = """
<maxFrameTxEnable>%s</maxFrameTxEnable>
"""
# Fragment: dot3-TLV EEE flag.
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_EEE = """
<eee>%s</eee>
"""
# Closing tags matching CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER.
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL = """
</tlvTxEnable>
</lldpInterface>
</lldpInterfaces>
</lldp>
</config>
"""
# NOTE: a second, byte-identical definition of CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG
# previously appeared here; it was a redundant duplicate of the template defined
# earlier in this module and has been removed.
def get_interface_type(interface):
    """Return the lower-case type of *interface* (e.g. '10ge'), or None.

    The type is derived from the interface-name prefix; names that match
    no known prefix (and a None argument) yield None.
    """
    if interface is None:
        return None
    # Ordered (prefix, type) table; none of the prefixes is a prefix of
    # another's matching names, so first match is unambiguous.
    prefix_table = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('PORT-GROUP', 'stack-port'),
        ('NULL', 'null'),
    )
    upper_name = interface.upper()
    for prefix, iftype in prefix_table:
        if upper_name.startswith(prefix):
            return iftype
    return None
class Lldp_interface(object):
"""Manage global lldp enable configuration"""
    def __init__(self, argument_spec):
        """Read module parameters and initialise run-time state.

        NOTE: several attributes (lldpadminstatus, manaddrtxenable,
        protoidtxenable, dcbx, txinterval, ...) are only assigned when the
        matching function_lldp_interface_flag / type_tlv_* combination is
        selected, so other methods must only read them under the same
        flag checks.
        """
        self.spec = argument_spec
        self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)
        # Global on/off switch; a falsy value is normalised to None.
        self.lldpenable = self.module.params['lldpenable'] or None
        self.function_lldp_interface_flag = self.module.params['function_lldp_interface_flag']
        self.type_tlv_disable = self.module.params['type_tlv_disable']
        self.type_tlv_enable = self.module.params['type_tlv_enable']
        self.ifname = self.module.params['ifname']
        # Pull only the parameters that the selected function uses.
        if self.function_lldp_interface_flag == 'disableINTERFACE':
            self.ifname = self.module.params['ifname']
            self.lldpadminstatus = self.module.params['lldpadminstatus']
        elif self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
            if self.type_tlv_disable == 'basic_tlv':
                self.ifname = self.module.params['ifname']
                self.manaddrtxenable = self.module.params['manaddrtxenable']
                self.portdesctxenable = self.module.params['portdesctxenable']
                self.syscaptxenable = self.module.params['syscaptxenable']
                self.sysdesctxenable = self.module.params['sysdesctxenable']
                self.sysnametxenable = self.module.params['sysnametxenable']
            if self.type_tlv_disable == 'dot3_tlv':
                self.ifname = self.module.params['ifname']
                self.macphytxenable = self.module.params['macphytxenable']
                self.linkaggretxenable = self.module.params['linkaggretxenable']
                self.maxframetxenable = self.module.params['maxframetxenable']
                self.eee = self.module.params['eee']
        elif self.function_lldp_interface_flag == 'tlvenableINTERFACE':
            if self.type_tlv_enable == 'dot1_tlv':
                self.ifname = self.module.params['ifname']
                self.protoidtxenable = self.module.params['protoidtxenable']
            if self.type_tlv_enable == 'dcbx':
                self.ifname = self.module.params['ifname']
                self.dcbx = self.module.params['dcbx']
        elif self.function_lldp_interface_flag == 'intervalINTERFACE':
            self.ifname = self.module.params['ifname']
            self.txinterval = self.module.params['txinterval']
        self.state = self.module.params['state']
        self.lldp_conf = dict()
        # "exsit" flags (sic) record whether the requested value differs
        # from what the device currently reports, i.e. a change is needed.
        self.conf_disable_exsit = False
        self.conf_interface_lldp_disable_exsit = False
        self.conf_interval_exsit = False
        self.conf_tlv_disable_exsit = False
        self.conf_tlv_enable_exsit = False
        # Set to 1 once the device reports LLDP globally enabled.
        self.enable_flag = 0
        self.check_params()
        self.existing_state_value = dict()
        self.existing_end_state_value = dict()
        self.interface_lldp_info = list()
        # state
        self.changed = False
        self.proposed_changed = dict()
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()
def check_params(self):
"""Check all input params"""
if self.ifname:
intf_type = get_interface_type(self.ifname)
if not intf_type:
self.module.fail_json(msg='Error: ifname name of %s is error.' % self.ifname)
if (len(self.ifname) < 1) or (len(self.ifname) > 63):
self.module.fail_json(msg='Error: Ifname length is beetween 1 and 63.')
if self.function_lldp_interface_flag == 'intervalINTERFACE':
if self.txinterval:
if int(self.txinterval) < 1 or int(self.txinterval) > 32768:
self.module.fail_json(
msg='Error: The value of txinterval is out of [1 - 32768].')
if self.ifname:
intf_type = get_interface_type(self.ifname)
if not intf_type:
self.module.fail_json(
msg='Error: ifname name of %s '
'is error.' % self.ifname)
if (len(self.ifname) < 1) or (len(self.ifname) > 63):
self.module.fail_json(
msg='Error: Ifname length is beetween 1 and 63.')
if self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
if self.type_tlv_disable == 'dot1_tlv':
if self.ifname:
intf_type = get_interface_type(self.ifname)
if not intf_type:
self.module.fail_json(
msg='Error: ifname name of %s '
'is error.' % self.ifname)
if (len(self.ifname) < 1) or (len(self.ifname) > 63):
self.module.fail_json(
msg='Error: Ifname length is beetween 1 and 63.')
if self.function_lldp_interface_flag == 'tlvenableINTERFACE':
if self.type_tlv_enable == 'dot1_tlv':
if self.ifname:
intf_type = get_interface_type(self.ifname)
if not intf_type:
self.module.fail_json(
msg='Error: ifname name of %s '
'is error.' % self.ifname)
if (len(self.ifname) < 1) or (len(self.ifname) > 63):
self.module.fail_json(
msg='Error: Ifname length is beetween 1 and 63.')
def check_response(self, xml_str, xml_name):
"""Check if response message is already OK"""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def show_result(self):
"""Show result"""
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def get_lldp_enable_pre_config(self):
"""Get lldp enable configure"""
lldp_dict = dict()
lldp_config = list()
conf_enable_str = CE_NC_GET_GLOBAL_LLDPENABLE_CONFIG
conf_enable_obj = get_nc_config(self.module, conf_enable_str)
xml_enable_str = conf_enable_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get lldp enable config info
root_enable = ElementTree.fromstring(xml_enable_str)
ntpsite_enable = root_enable.findall("lldp/lldpSys")
for nexthop_enable in ntpsite_enable:
for ele_enable in nexthop_enable:
if ele_enable.tag in ["lldpEnable"]:
lldp_dict[ele_enable.tag] = ele_enable.text
if lldp_dict['lldpEnable'] == 'enabled':
self.enable_flag = 1
lldp_config.append(dict(lldpenable=lldp_dict['lldpEnable']))
return lldp_config
def get_interface_lldp_disable_pre_config(self):
"""Get interface undo lldp disable configure"""
lldp_dict = dict()
interface_lldp_disable_dict = dict()
if self.enable_flag == 1:
conf_enable_str = CE_NC_GET_INTERFACE_LLDP_CONFIG
conf_enable_obj = get_nc_config(self.module, conf_enable_str)
if "<data/>" in conf_enable_obj:
return
xml_enable_str = conf_enable_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_enable_str)
lldp_disable_enable = root.findall("lldp/lldpInterfaces/lldpInterface")
for nexthop_enable in lldp_disable_enable:
name = nexthop_enable.find("ifName")
status = nexthop_enable.find("lldpAdminStatus")
if name is not None and status is not None:
interface_lldp_disable_dict[name.text] = status.text
return interface_lldp_disable_dict
    def get_interface_lldp_disable_config(self):
        """Compare the requested per-interface admin status with the device.

        Side effect: sets self.conf_interface_lldp_disable_exsit to True when
        the device state differs from the requested lldpadminstatus (i.e. a
        change must be pushed).

        Returns a one-element list wrapping the {ifname: status} mapping read
        from the device (empty dict when state != 'present' or no ifname).
        """
        lldp_config = list()
        interface_lldp_disable_dict_tmp = dict()
        if self.state == "present":
            if self.ifname:
                interface_lldp_disable_dict_tmp = self.get_interface_lldp_disable_pre_config()
                key_list = interface_lldp_disable_dict_tmp.keys()
                if len(key_list) != 0:
                    for key in key_list:
                        if key == self.ifname:
                            # Device knows this interface: change needed only
                            # when the status differs from the requested one.
                            if interface_lldp_disable_dict_tmp[key] != self.lldpadminstatus:
                                self.conf_interface_lldp_disable_exsit = True
                            else:
                                self.conf_interface_lldp_disable_exsit = False
                        elif self.ifname not in key_list:
                            # NOTE(review): this branch re-evaluates the same
                            # membership for every non-matching key; the flag
                            # ends up True whenever ifname is absent entirely.
                            self.conf_interface_lldp_disable_exsit = True
                elif (len(key_list) == 0) and self.ifname and self.lldpadminstatus:
                    # Device reported nothing at all: treat as a needed change.
                    self.conf_interface_lldp_disable_exsit = True
        lldp_config.append(interface_lldp_disable_dict_tmp)
        return lldp_config
    def get_interface_tlv_disable_config(self):
        """Compare requested TLV-disable flags with the device configuration.

        Side effects: sets self.conf_tlv_disable_exsit (and self.changed for
        a value mismatch) when the requested basic_tlv / dot3_tlv flags for
        self.ifname differ from what the device reports.

        Returns a list of single-key dicts describing the current device
        values for the requested interface; returns early (possibly with a
        partial list) as soon as a difference is found.
        """
        lldp_config = list()
        lldp_dict = dict()  # NOTE(review): unused; kept to preserve the original code.
        cur_interface_mdn_cfg = dict()   # values read from the device
        exp_interface_mdn_cfg = dict()   # values requested by the task
        if self.enable_flag == 1:
            conf_str = CE_NC_GET_INTERFACE_TLV_DISABLE_CONFIG
            conf_obj = get_nc_config(self.module, conf_str)
            if "<data/>" in conf_obj:
                return lldp_config
            # Strip namespaces so plain path lookups work below.
            xml_str = conf_obj.replace('\r', '').replace('\n', '')
            xml_str = xml_str.replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "")
            xml_str = xml_str.replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
            root = ElementTree.fromstring(xml_str)
            lldp_tlvdisable_ifname = root.findall("lldp/lldpInterfaces/lldpInterface")
            for ele in lldp_tlvdisable_ifname:
                ifname_tmp = ele.find("ifName")
                manaddrtxenable_tmp = ele.find("tlvTxEnable/manAddrTxEnable")
                portdesctxenable_tmp = ele.find("tlvTxEnable/portDescTxEnable")
                syscaptxenable_tmp = ele.find("tlvTxEnable/sysCapTxEnable")
                sysdesctxenable_tmp = ele.find("tlvTxEnable/sysDescTxEnable")
                sysnametxenable_tmp = ele.find("tlvTxEnable/sysNameTxEnable")
                linkaggretxenable_tmp = ele.find("tlvTxEnable/linkAggreTxEnable")
                macphytxenable_tmp = ele.find("tlvTxEnable/macPhyTxEnable")
                maxframetxenable_tmp = ele.find("tlvTxEnable/maxFrameTxEnable")
                eee_tmp = ele.find("tlvTxEnable/eee")
                # Collect whatever leaves the device reported for this interface.
                # NOTE(review): cur_interface_mdn_cfg is shared across loop
                # iterations, so values from earlier interfaces persist.
                if ifname_tmp is not None:
                    if ifname_tmp.text is not None:
                        cur_interface_mdn_cfg["ifname"] = ifname_tmp.text
                if ifname_tmp is not None and manaddrtxenable_tmp is not None:
                    if manaddrtxenable_tmp.text is not None:
                        cur_interface_mdn_cfg["manaddrtxenable"] = manaddrtxenable_tmp.text
                if ifname_tmp is not None and portdesctxenable_tmp is not None:
                    if portdesctxenable_tmp.text is not None:
                        cur_interface_mdn_cfg['portdesctxenable'] = portdesctxenable_tmp.text
                if ifname_tmp is not None and syscaptxenable_tmp is not None:
                    if syscaptxenable_tmp.text is not None:
                        cur_interface_mdn_cfg['syscaptxenable'] = syscaptxenable_tmp.text
                if ifname_tmp is not None and sysdesctxenable_tmp is not None:
                    if sysdesctxenable_tmp.text is not None:
                        cur_interface_mdn_cfg['sysdesctxenable'] = sysdesctxenable_tmp.text
                if ifname_tmp is not None and sysnametxenable_tmp is not None:
                    if sysnametxenable_tmp.text is not None:
                        cur_interface_mdn_cfg['sysnametxenable'] = sysnametxenable_tmp.text
                if ifname_tmp is not None and linkaggretxenable_tmp is not None:
                    if linkaggretxenable_tmp.text is not None:
                        cur_interface_mdn_cfg['linkaggretxenable'] = linkaggretxenable_tmp.text
                if ifname_tmp is not None and macphytxenable_tmp is not None:
                    if macphytxenable_tmp.text is not None:
                        cur_interface_mdn_cfg['macphytxenable'] = macphytxenable_tmp.text
                if ifname_tmp is not None and maxframetxenable_tmp is not None:
                    if maxframetxenable_tmp.text is not None:
                        cur_interface_mdn_cfg['maxframetxenable'] = maxframetxenable_tmp.text
                if ifname_tmp is not None and eee_tmp is not None:
                    if eee_tmp.text is not None:
                        cur_interface_mdn_cfg['eee'] = eee_tmp.text
                if self.state == "present":
                    if self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
                        if self.type_tlv_disable == 'basic_tlv':
                            # Build the expected (requested) value set.
                            if self.ifname:
                                exp_interface_mdn_cfg['ifname'] = self.ifname
                                if self.manaddrtxenable:
                                    exp_interface_mdn_cfg['manaddrtxenable'] = self.manaddrtxenable
                                if self.portdesctxenable:
                                    exp_interface_mdn_cfg['portdesctxenable'] = self.portdesctxenable
                                if self.syscaptxenable:
                                    exp_interface_mdn_cfg['syscaptxenable'] = self.syscaptxenable
                                if self.sysdesctxenable:
                                    exp_interface_mdn_cfg['sysdesctxenable'] = self.sysdesctxenable
                                if self.sysnametxenable:
                                    exp_interface_mdn_cfg['sysnametxenable'] = self.sysnametxenable
                                if self.ifname == ifname_tmp.text:
                                    key_list = exp_interface_mdn_cfg.keys()
                                    key_list_cur = cur_interface_mdn_cfg.keys()
                                    if len(key_list) != 0:
                                        for key in key_list:
                                            # Report the device's current value for each requested leaf.
                                            if key == "ifname" and self.ifname == cur_interface_mdn_cfg['ifname']:
                                                lldp_config.append(dict(ifname=cur_interface_mdn_cfg['ifname']))
                                            if "manaddrtxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                                lldp_config.append(dict(manaddrtxenable=cur_interface_mdn_cfg['manaddrtxenable']))
                                            if "portdesctxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                                lldp_config.append(dict(portdesctxenable=cur_interface_mdn_cfg['portdesctxenable']))
                                            if "syscaptxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                                lldp_config.append(dict(syscaptxenable=cur_interface_mdn_cfg['syscaptxenable']))
                                            if "sysdesctxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                                lldp_config.append(dict(sysdesctxenable=cur_interface_mdn_cfg['sysdesctxenable']))
                                            if "sysnametxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                                lldp_config.append(dict(sysnametxenable=cur_interface_mdn_cfg['sysnametxenable']))
                                            if key in key_list_cur:
                                                # Requested value differs from device value: change needed.
                                                if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]):
                                                    self.conf_tlv_disable_exsit = True
                                                    self.changed = True
                                                    return lldp_config
                                            else:
                                                # Leaf not present on the device at all: change needed.
                                                self.conf_tlv_disable_exsit = True
                                                return lldp_config
                        if self.type_tlv_disable == 'dot3_tlv':
                            if self.ifname:
                                exp_interface_mdn_cfg['ifname'] = self.ifname
                                if self.linkaggretxenable:
                                    exp_interface_mdn_cfg['linkaggretxenable'] = self.linkaggretxenable
                                if self.macphytxenable:
                                    exp_interface_mdn_cfg['macphytxenable'] = self.macphytxenable
                                if self.maxframetxenable:
                                    exp_interface_mdn_cfg['maxframetxenable'] = self.maxframetxenable
                                if self.eee:
                                    exp_interface_mdn_cfg['eee'] = self.eee
                                if self.ifname == ifname_tmp.text:
                                    key_list = exp_interface_mdn_cfg.keys()
                                    key_list_cur = cur_interface_mdn_cfg.keys()
                                    if len(key_list) != 0:
                                        for key in key_list:
                                            if key == "ifname" and self.ifname == cur_interface_mdn_cfg['ifname']:
                                                lldp_config.append(dict(ifname=cur_interface_mdn_cfg['ifname']))
                                            if "linkaggretxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                                lldp_config.append(dict(linkaggretxenable=cur_interface_mdn_cfg['linkaggretxenable']))
                                            if "macphytxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                                lldp_config.append(dict(macphytxenable=cur_interface_mdn_cfg['macphytxenable']))
                                            if "maxframetxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                                lldp_config.append(dict(maxframetxenable=cur_interface_mdn_cfg['maxframetxenable']))
                                            if "eee" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                                lldp_config.append(dict(eee=cur_interface_mdn_cfg['eee']))
                                            if key in key_list_cur:
                                                if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]):
                                                    self.conf_tlv_disable_exsit = True
                                                    self.changed = True
                                                    return lldp_config
                                            else:
                                                self.conf_tlv_disable_exsit = True
                                                return lldp_config
        return lldp_config
    def get_interface_tlv_enable_config(self):
        """Compare requested TLV-enable flags (dot1/dcbx) with the device.

        Side effects: sets self.conf_tlv_enable_exsit (and self.changed for
        a value mismatch) when the requested protoidtxenable / dcbx value
        for self.ifname differs from the device configuration.

        Returns a list of single-key dicts with the device's current values;
        returns early as soon as a difference is found.
        """
        lldp_config = list()
        lldp_dict = dict()  # NOTE(review): unused; kept to preserve the original code.
        cur_interface_mdn_cfg = dict()   # values read from the device
        exp_interface_mdn_cfg = dict()   # values requested by the task
        if self.enable_flag == 1:
            conf_str = CE_NC_GET_INTERFACE_TLV_ENABLE_CONFIG
            conf_obj = get_nc_config(self.module, conf_str)
            if "<data/>" in conf_obj:
                return lldp_config
            # Strip namespaces so plain path lookups work below.
            xml_str = conf_obj.replace('\r', '').replace('\n', '')
            xml_str = xml_str.replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "")
            xml_str = xml_str.replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
            root = ElementTree.fromstring(xml_str)
            lldpenablesite = root.findall("lldp/lldpInterfaces/lldpInterface")
            for ele in lldpenablesite:
                ifname_tmp = ele.find("ifName")
                protoidtxenable_tmp = ele.find("tlvTxEnable/protoIdTxEnable")
                dcbx_tmp = ele.find("tlvTxEnable/dcbx")
                if ifname_tmp is not None:
                    if ifname_tmp.text is not None:
                        cur_interface_mdn_cfg["ifname"] = ifname_tmp.text
                if ifname_tmp is not None and protoidtxenable_tmp is not None:
                    if protoidtxenable_tmp.text is not None:
                        cur_interface_mdn_cfg["protoidtxenable"] = protoidtxenable_tmp.text
                if ifname_tmp is not None and dcbx_tmp is not None:
                    if dcbx_tmp.text is not None:
                        cur_interface_mdn_cfg['dcbx'] = dcbx_tmp.text
                if self.state == "present":
                    if self.function_lldp_interface_flag == 'tlvenableINTERFACE':
                        if self.type_tlv_enable == 'dot1_tlv':
                            if self.ifname:
                                exp_interface_mdn_cfg['ifname'] = self.ifname
                                if self.protoidtxenable:
                                    exp_interface_mdn_cfg['protoidtxenable'] = self.protoidtxenable
                                if self.ifname == ifname_tmp.text:
                                    key_list = exp_interface_mdn_cfg.keys()
                                    key_list_cur = cur_interface_mdn_cfg.keys()
                                    if len(key_list) != 0:
                                        for key in key_list:
                                            if "protoidtxenable" == str(key) and self.ifname == cur_interface_mdn_cfg['ifname']:
                                                lldp_config.append(dict(protoidtxenable=cur_interface_mdn_cfg['protoidtxenable']))
                                            if key in key_list_cur:
                                                # Requested value differs from device value: change needed.
                                                if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]):
                                                    self.conf_tlv_enable_exsit = True
                                                    self.changed = True
                                                    return lldp_config
                                            else:
                                                # Leaf not present on the device at all: change needed.
                                                self.conf_tlv_enable_exsit = True
                                                return lldp_config
                        if self.type_tlv_enable == 'dcbx':
                            if self.ifname:
                                exp_interface_mdn_cfg['ifname'] = self.ifname
                                if self.dcbx:
                                    exp_interface_mdn_cfg['dcbx'] = self.dcbx
                                if self.ifname == ifname_tmp.text:
                                    key_list = exp_interface_mdn_cfg.keys()
                                    key_list_cur = cur_interface_mdn_cfg.keys()
                                    if len(key_list) != 0:
                                        for key in key_list:
                                            if "dcbx" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                                lldp_config.append(dict(dcbx=cur_interface_mdn_cfg['dcbx']))
                                            if key in key_list_cur:
                                                if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]):
                                                    self.conf_tlv_enable_exsit = True
                                                    self.changed = True
                                                    return lldp_config
                                            else:
                                                self.conf_tlv_enable_exsit = True
                                                return lldp_config
        return lldp_config
    def get_interface_interval_config(self):
        """Compare the requested transmit interval with the device value.

        Side effect: sets self.conf_interval_exsit when the requested
        txinterval for self.ifname differs from (or is absent on) the device.

        Returns a list of dicts describing the current/requested interval;
        returns early once a difference is found.
        """
        lldp_config = list()
        lldp_dict = dict()  # NOTE(review): unused; kept to preserve the original code.
        cur_interface_mdn_cfg = dict()   # values read from the device
        exp_interface_mdn_cfg = dict()   # values requested by the task
        interface_lldp_disable_dict_tmp2 = self.get_interface_lldp_disable_pre_config()
        if self.enable_flag == 1:
            # The interval only applies when LLDP is not administratively
            # disabled on the interface.
            # NOTE(review): this indexing raises KeyError when the device did
            # not report self.ifname at all — confirm against device behaviour.
            if interface_lldp_disable_dict_tmp2[self.ifname] != 'disabled':
                conf_str = CE_NC_GET_INTERFACE_INTERVAl_CONFIG
                conf_obj = get_nc_config(self.module, conf_str)
                if "<data/>" in conf_obj:
                    return lldp_config
                # Strip namespaces so plain path lookups work below.
                xml_str = conf_obj.replace('\r', '').replace('\n', '')
                xml_str = xml_str.replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "")
                xml_str = xml_str.replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
                root = ElementTree.fromstring(xml_str)
                txintervalsite = root.findall("lldp/lldpInterfaces/lldpInterface")
                for ele in txintervalsite:
                    ifname_tmp = ele.find("ifName")
                    txinterval_tmp = ele.find("msgInterval/txInterval")
                    if ifname_tmp is not None:
                        if ifname_tmp.text is not None:
                            cur_interface_mdn_cfg["ifname"] = ifname_tmp.text
                    if txinterval_tmp is not None:
                        if txinterval_tmp.text is not None:
                            cur_interface_mdn_cfg["txinterval"] = txinterval_tmp.text
                    if self.state == "present":
                        if self.ifname:
                            exp_interface_mdn_cfg["ifname"] = self.ifname
                            if self.txinterval:
                                exp_interface_mdn_cfg["txinterval"] = self.txinterval
                            if self.ifname == ifname_tmp.text:
                                key_list = exp_interface_mdn_cfg.keys()
                                key_list_cur = cur_interface_mdn_cfg.keys()
                                if len(key_list) != 0:
                                    for key in key_list:
                                        if "txinterval" == str(key) and self.ifname == cur_interface_mdn_cfg['ifname']:
                                            lldp_config.append(dict(ifname=cur_interface_mdn_cfg['ifname'], txinterval=exp_interface_mdn_cfg['txinterval']))
                                        if key in key_list_cur:
                                            # Requested value differs from device value: change needed.
                                            if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]):
                                                self.conf_interval_exsit = True
                                                lldp_config.append(cur_interface_mdn_cfg)
                                                return lldp_config
                                        else:
                                            # Leaf not present on the device at all: change needed.
                                            self.conf_interval_exsit = True
                                            return lldp_config
        return lldp_config
def config_global_lldp_enable(self):
if self.state == 'present':
if self.enable_flag == 0 and self.lldpenable == 'enabled':
xml_str = CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG % self.lldpenable
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "LLDP_ENABLE_CONFIG")
self.changed = True
elif self.enable_flag == 1 and self.lldpenable == 'disabled':
xml_str = CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG % self.lldpenable
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "LLDP_ENABLE_CONFIG")
self.changed = True
def config_interface_lldp_disable_config(self):
if self.function_lldp_interface_flag == 'disableINTERFACE':
if self.enable_flag == 1 and self.conf_interface_lldp_disable_exsit:
if self.ifname:
xml_str = CE_NC_MERGE_INTERFACE_LLDP_CONFIG % (self.ifname, self.lldpadminstatus)
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "INTERFACE_LLDP_DISABLE_CONFIG")
self.changed = True
def config_interface_tlv_disable_config(self):
if self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
if self.enable_flag == 1 and self.conf_tlv_disable_exsit:
if self.type_tlv_disable == 'basic_tlv':
if self.ifname:
if self.portdesctxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_PORTDESCTXENABLE % self.portdesctxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_PORTDESCTXENABLE")
self.changed = True
if self.manaddrtxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MANADDRTXENABLE % self.manaddrtxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_MANADDRTXENABLE")
self.changed = True
if self.syscaptxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSCAPTXENABLE % self.syscaptxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_SYSCAPTXENABLE")
self.changed = True
if self.sysdesctxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSDESCTXENABLE % self.sysdesctxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_SYSDESCTXENABLE")
self.changed = True
if self.sysnametxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSNAMETXENABLE % self.sysnametxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_SYSNAMETXENABLE")
self.changed = True
if self.type_tlv_disable == 'dot3_tlv':
if self.ifname:
if self.linkaggretxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_LINKAGGRETXENABLE % self.linkaggretxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_LINKAGGRETXENABLE")
self.changed = True
if self.macphytxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MACPHYTXENABLE % self.macphytxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_MACPHYTXENABLE")
self.changed = True
if self.maxframetxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MAXFRAMETXENABLE % self.maxframetxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_MAXFRAMETXENABLE")
self.changed = True
if self.eee:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_EEE % self.eee) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_EEE")
self.changed = True
def config_interface_tlv_enable_config(self):
if self.function_lldp_interface_flag == 'tlvenableINTERFACE':
if self.enable_flag == 1 and self.conf_tlv_enable_exsit:
if self.type_tlv_enable == 'dot1_tlv':
if self.ifname:
if self.protoidtxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_ENABLE_PROTOIDTXENABLE % self.protoidtxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_ENABLE_DOT1_PORT_VLAN")
self.changed = True
if self.type_tlv_enable == 'dcbx':
if self.ifname:
if self.dcbx:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_ENABLE_DCBX % self.dcbx) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_ENABLE_DCBX_VLAN")
self.changed = True
def config_interface_interval_config(self):
if self.function_lldp_interface_flag == 'intervalINTERFACE':
tmp = self.get_interface_lldp_disable_pre_config()
if self.enable_flag == 1 and self.conf_interval_exsit and tmp[self.ifname] != 'disabled':
if self.ifname:
if self.txinterval:
xml_str = CE_NC_MERGE_INTERFACE_INTERVAl_CONFIG % (self.ifname, self.txinterval)
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "INTERFACE_INTERVAL_CONFIG")
self.changed = True
def get_existing(self):
"""get existing information"""
self.get_lldp_enable_pre_config()
if self.lldpenable:
self.existing['globalLLDPENABLE'] = self.get_lldp_enable_pre_config()
if self.function_lldp_interface_flag == 'disableINTERFACE':
self.existing['disableINTERFACE'] = self.get_interface_lldp_disable_config()
if self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
self.existing['tlvdisableINTERFACE'] = self.get_interface_tlv_disable_config()
if self.function_lldp_interface_flag == 'tlvenableINTERFACE':
self.existing['tlvenableINTERFACE'] = self.get_interface_tlv_enable_config()
if self.function_lldp_interface_flag == 'intervalINTERFACE':
self.existing['intervalINTERFACE'] = self.get_interface_interval_config()
    def get_proposed(self):
        """Build self.proposed from the requested parameters.

        NOTE: each matching branch REPLACES self.proposed wholesale, so when
        several TLV options are supplied only the last one checked ends up
        in the proposed dict.
        """
        if self.lldpenable:
            self.proposed = dict(lldpenable=self.lldpenable)
        if self.function_lldp_interface_flag == 'disableINTERFACE':
            if self.enable_flag == 1:
                self.proposed = dict(ifname=self.ifname, lldpadminstatus=self.lldpadminstatus)
        if self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
            if self.enable_flag == 1:
                if self.type_tlv_disable == 'basic_tlv':
                    if self.ifname:
                        if self.manaddrtxenable:
                            self.proposed = dict(ifname=self.ifname, manaddrtxenable=self.manaddrtxenable)
                        if self.portdesctxenable:
                            self.proposed = dict(ifname=self.ifname, portdesctxenable=self.portdesctxenable)
                        if self.syscaptxenable:
                            self.proposed = dict(ifname=self.ifname, syscaptxenable=self.syscaptxenable)
                        if self.sysdesctxenable:
                            self.proposed = dict(ifname=self.ifname, sysdesctxenable=self.sysdesctxenable)
                        if self.sysnametxenable:
                            self.proposed = dict(ifname=self.ifname, sysnametxenable=self.sysnametxenable)
                if self.type_tlv_disable == 'dot3_tlv':
                    if self.ifname:
                        if self.linkaggretxenable:
                            self.proposed = dict(ifname=self.ifname, linkaggretxenable=self.linkaggretxenable)
                        if self.macphytxenable:
                            self.proposed = dict(ifname=self.ifname, macphytxenable=self.macphytxenable)
                        if self.maxframetxenable:
                            self.proposed = dict(ifname=self.ifname, maxframetxenable=self.maxframetxenable)
                        if self.eee:
                            self.proposed = dict(ifname=self.ifname, eee=self.eee)
        if self.function_lldp_interface_flag == 'tlvenableINTERFACE':
            if self.enable_flag == 1:
                if self.type_tlv_enable == 'dot1_tlv':
                    if self.ifname:
                        if self.protoidtxenable:
                            self.proposed = dict(ifname=self.ifname, protoidtxenable=self.protoidtxenable)
                if self.type_tlv_enable == 'dcbx':
                    if self.ifname:
                        if self.dcbx:
                            self.proposed = dict(ifname=self.ifname, dcbx=self.dcbx)
        if self.function_lldp_interface_flag == 'intervalINTERFACE':
            # The interval is only proposed when LLDP is not administratively
            # disabled on the interface.
            # NOTE(review): tmp1[self.ifname] raises KeyError when the device
            # did not report self.ifname — confirm against device behaviour.
            tmp1 = self.get_interface_lldp_disable_pre_config()
            if self.enable_flag == 1 and tmp1[self.ifname] != 'disabled':
                self.proposed = dict(ifname=self.ifname, txinterval=self.txinterval)
def config_lldp_interface(self):
"""config lldp interface"""
if self.lldpenable:
self.config_global_lldp_enable()
if self.function_lldp_interface_flag == 'disableINTERFACE':
self.config_interface_lldp_disable_config()
elif self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
self.config_interface_tlv_disable_config()
elif self.function_lldp_interface_flag == 'tlvenableINTERFACE':
self.config_interface_tlv_enable_config()
elif self.function_lldp_interface_flag == 'intervalINTERFACE':
self.config_interface_interval_config()
def get_end_state(self):
"""get end_state information"""
self.get_lldp_enable_pre_config()
if self.lldpenable:
self.end_state['globalLLDPENABLE'] = self.get_lldp_enable_pre_config()
if self.function_lldp_interface_flag == 'disableINTERFACE':
self.end_state['disableINTERFACE'] = self.get_interface_lldp_disable_config()
if self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
self.end_state['tlvdisableINTERFACE'] = self.get_interface_tlv_disable_config()
if self.function_lldp_interface_flag == 'tlvenableINTERFACE':
self.end_state['tlvenableINTERFACE'] = self.get_interface_tlv_enable_config()
if self.function_lldp_interface_flag == 'intervalINTERFACE':
self.end_state['intervalINTERFACE'] = self.get_interface_interval_config()
def get_update_cmd(self):
"""Get updated commands"""
cmds = []
if self.state == "present":
if self.lldpenable == "enabled":
cmds.append("lldp enable")
if self.function_lldp_interface_flag == 'disableINTERFACE':
if self.ifname:
cmds.append("%s %s" % ("interface", self.ifname))
if self.lldpadminstatus == 'disabled':
cmds.append("lldp disable")
else:
cmds.append("undo lldp disable")
elif self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
if self.type_tlv_disable == 'basic_tlv':
if self.ifname:
cmds.append("%s %s" % ("interface", self.ifname))
if self.manaddrtxenable:
if self.manaddrtxenable == "false":
cmds.append("lldp tlv-disable basic-tlv management-address")
if self.manaddrtxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv management-address")
if self.portdesctxenable:
if self.portdesctxenable == "false":
cmds.append("lldp tlv-disable basic-tlv port-description")
if self.portdesctxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv port-description")
if self.syscaptxenable:
if self.syscaptxenable == "false":
cmds.append("lldp tlv-disable basic-tlv system-capability")
if self.syscaptxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv system-capability")
if self.sysdesctxenable:
if self.sysdesctxenable == "false":
cmds.append("lldp tlv-disable basic-tlv system-description")
if self.sysdesctxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv system-description")
if self.sysnametxenable:
if self.sysnametxenable == "false":
cmds.append("lldp tlv-disable basic-tlv system-name")
if self.sysnametxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv system-name")
if self.type_tlv_disable == 'dot3_tlv':
if self.ifname:
cmds.append("%s %s" % ("interface", self.ifname))
if self.linkaggretxenable:
if self.linkaggretxenable == "false":
cmds.append("lldp tlv-disable dot3-tlv link-aggregation")
if self.linkaggretxenable == "true":
cmds.append("undo lldp tlv-disable dot3-tlv link-aggregation")
if self.macphytxenable:
if self.macphytxenable == "false":
cmds.append("lldp tlv-disable dot3-tlv mac-physic")
if self.macphytxenable == "true":
cmds.append("undo lldp tlv-disable dot3-tlv mac-physic")
if self.maxframetxenable:
if self.maxframetxenable == "false":
cmds.append("lldp tlv-disable dot3-tlv max-frame-size")
if self.maxframetxenable == "true":
cmds.append("undo lldp tlv-disable dot3-tlv max-frame-size")
if self.eee:
if self.eee == "false":
cmds.append("lldp tlv-disable dot3-tlv eee")
if self.eee == "true":
cmds.append("undo lldp tlv-disable dot3-tlv eee")
elif self.function_lldp_interface_flag == 'tlvenableINTERFACE':
if self.type_tlv_enable == 'dot1_tlv':
if self.ifname:
cmds.append("%s %s" % ("interface", self.ifname))
if self.protoidtxenable:
if self.protoidtxenable == "false":
cmds.append("undo lldp tlv-enable dot1-tlv protocol-identity")
if self.protoidtxenable == "true":
cmds.append("lldp tlv-enable dot1-tlv protocol-identity")
if self.type_tlv_enable == 'dcbx':
if self.ifname:
cmds.append("%s %s" % ("interface", self.ifname))
if self.dcbx:
if self.dcbx == "false":
cmds.append("undo lldp tlv-enable dcbx")
if self.dcbx == "true":
cmds.append("lldp tlv-enable dcbx")
elif self.function_lldp_interface_flag == 'intervalINTERFACE':
if self.ifname:
cmds.append("%s %s" % ("interface", self.ifname))
if self.txinterval:
cmds.append("lldp transmit fast-mode interval %s" % self.txinterval)
elif self.lldpenable == "disabled":
cmds.append("undo lldp enable")
else:
if self.enable_flag == 1:
if self.function_lldp_interface_flag == 'disableINTERFACE':
if self.ifname:
cmds.append("interface %s" % self.ifname)
if self.lldpadminstatus == 'disabled':
cmds.append("lldp disable")
else:
cmds.append("undo lldp disable")
elif self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
if self.type_tlv_disable == 'basic_tlv':
if self.ifname:
cmds.append("interface %s" % self.ifname)
if self.manaddrtxenable:
if self.manaddrtxenable == "false":
cmds.append("lldp tlv-disable basic-tlv management-address")
if self.manaddrtxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv management-address")
if self.portdesctxenable:
if self.portdesctxenable == "false":
cmds.append("lldp tlv-disable basic-tlv port-description")
if self.portdesctxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv port-description")
if self.syscaptxenable:
if self.syscaptxenable == "false":
cmds.append("lldp tlv-disable basic-tlv system-capability")
if self.syscaptxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv system-capability")
if self.sysdesctxenable:
if self.sysdesctxenable == "false":
cmds.append("lldp tlv-disable basic-tlv system-description")
if self.sysdesctxenable == "true":
cli_str = "%s %s\n" % (cli_str, "undo lldp tlv-disable basic-tlv system-description")
if self.sysnametxenable:
if self.sysnametxenable == "false":
cmds.append("lldp tlv-disable basic-tlv system-name")
if self.sysnametxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv system-name")
if self.type_tlv_disable == 'dot3_tlv':
if self.ifname:
cmds.append("interface %s" % self.ifname)
if self.linkaggretxenable:
if self.linkaggretxenable == "false":
cmds.append("lldp tlv-disable dot3-tlv link-aggregation")
if self.linkaggretxenable == "true":
cmds.append("undo lldp tlv-disable dot3-tlv link-aggregation")
if self.macphytxenable:
if self.macphytxenable == "false":
cmds.append("lldp tlv-disable dot3-tlv mac-physic")
if self.macphytxenable == "true":
cli_str = "%s %s\n" % (cli_str, "undo lldp tlv-disable dot3-tlv mac-physic")
if self.maxframetxenable:
if self.maxframetxenable == "false":
cmds.append("lldp tlv-disable dot3-tlv max-frame-size")
if self.maxframetxenable == "true":
cmds.append("undo lldp tlv-disable dot3-tlv max-frame-size")
if self.eee:
if self.eee == "false":
cmds.append("lldp tlv-disable dot3-tlv eee")
if self.eee == "true":
cmds.append("undo lldp tlv-disable dot3-tlv eee")
elif self.function_lldp_interface_flag == 'tlvenableINTERFACE':
if self.type_tlv_enable == 'dot1_tlv':
if self.ifname:
cmds.append("interface %s" % self.ifname)
if self.protoidtxenable:
if self.protoidtxenable == "false":
cmds.append("undo lldp tlv-enable dot1-tlv protocol-identity")
if self.protoidtxenable == "true":
cmds.append("lldp tlv-enable dot1-tlv protocol-identity")
if self.type_tlv_enable == 'dcbx':
if self.ifname:
cmds.append("interface %s" % self.ifname)
if self.dcbx:
if self.dcbx == "false":
cmds.append("undo lldp tlv-enable dcbx")
if self.dcbx == "true":
cmds.append("lldp tlv-enable dcbx")
elif self.function_lldp_interface_flag == 'intervalINTERFACE':
if self.ifname:
cmds.append("interface %s" % self.ifname)
if self.txinterval:
cmds.append("lldp transmit fast-mode interval %s" % self.txinterval)
self.updates_cmd = cmds
def work(self):
"""Execute task"""
self.check_params()
self.get_existing()
self.get_proposed()
self.config_lldp_interface()
self.get_update_cmd()
self.get_end_state()
self.show_result()
def main():
"""Main function"""
argument_spec = dict(
lldpenable=dict(choices=['enabled', 'disabled']),
function_lldp_interface_flag=dict(choices=['disableINTERFACE', 'tlvdisableINTERFACE', 'tlvenableINTERFACE', 'intervalINTERFACE'], type='str'),
type_tlv_disable=dict(choices=['basic_tlv', 'dot3_tlv'], type='str'),
type_tlv_enable=dict(choices=['dot1_tlv', 'dcbx'], type='str'),
ifname=dict(type='str'),
lldpadminstatus=dict(choices=['txOnly', 'rxOnly', 'txAndRx', 'disabled'], type='str'),
manaddrtxenable=dict(type='bool'),
portdesctxenable=dict(type='bool'),
syscaptxenable=dict(type='bool'),
sysdesctxenable=dict(type='bool'),
sysnametxenable=dict(type='bool'),
portvlantxenable=dict(type='bool'),
protovlantxenable=dict(type='bool'),
txprotocolvlanid=dict(type='int'),
vlannametxenable=dict(type='bool'),
txvlannameid=dict(type='int'),
txinterval=dict(type='int'),
protoidtxenable=dict(type='bool'),
macphytxenable=dict(type='bool'),
linkaggretxenable=dict(type='bool'),
maxframetxenable=dict(type='bool'),
eee=dict(type='bool'),
dcbx=dict(type='bool'),
state=dict(type='str', choices=['absent', 'present'], default='present'),
)
lldp_interface_obj = Lldp_interface(argument_spec)
lldp_interface_obj.work()
if __name__ == '__main__':
main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
0571fd0e83d3c8cedd7d3130d18e0298230f8218 | 2c4763aa544344a3a615f9a65d1ded7d0f59ae50 | /playground/test_fail/wscript | 95c282a2af9a13a02b6175cb035208d83d474246 | [] | no_license | afeldman/waf | 572bf95d6b11571bbb2941ba0fe463402b1e39f3 | 4c489b38fe1520ec1bc0fa7e1521f7129c20f8b6 | refs/heads/master | 2021-05-09T18:18:16.598191 | 2019-03-05T06:33:42 | 2019-03-05T06:33:42 | 58,713,085 | 0 | 0 | null | 2016-05-13T07:34:33 | 2016-05-13T07:34:33 | null | UTF-8 | Python | false | false | 1,163 | #! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2011 (ita)
"""
Map a compilation failure to a success status. People playing with C++ templates
might need this.
"""
top = '.'
out = 'build'
def options(opt):
opt.load('compiler_cxx')
def configure(conf):
conf.load('compiler_cxx')
def build(bld):
bld.objects(source='success.cpp', target='ok')
bld.objects(source='fail.cpp', target='fail', features='fail')
##################################################################
# the feature 'fail' is defined below
from waflib.Tools.cxx import cxx
# our task class
class cxxfail(cxx):
def run(self):
ret = super(cxxfail, self).run()
self.outputs[0].write('just a simulation')
return not ret
# @extension would apply this to all through TaskGen.mappings
def one_more_mapping(self, node):
return self.create_compiled_task('cxxfail', node)
from waflib.TaskGen import feature, before
@before('process_source')
@feature('fail')
def remap_failure_to_success(self):
# override
self.mappings = dict(self.mappings)
# then change the extension processing
self.mappings['.cpp'] = one_more_mapping
| [
"anton.feldmann@outlook.de"
] | anton.feldmann@outlook.de | |
792f0e26fb0531faa14be43065ca915945d46398 | f771e83756436594a145bd7b80e5e5d8bca53268 | /test_app/migrations/twitter/0002_auto_20180530_0935.py | 7f522ac7dda3d0cc45602484943d733bdcf6df26 | [
"MIT"
] | permissive | bnzk/djangocms-baseplugins | b76ed75460fbeacb62366935824d2bcfac52b25e | 98e390482aa4facc35efe2412ff1603d85e2c8ba | refs/heads/develop | 2023-06-17T23:55:41.574828 | 2023-06-09T09:22:01 | 2023-06-09T09:22:01 | 68,296,521 | 2 | 0 | MIT | 2023-04-17T09:18:11 | 2016-09-15T13:32:05 | Python | UTF-8 | Python | false | false | 2,319 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-30 09:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('twitter', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tweetembed',
name='anchor_de',
field=models.SlugField(blank=True, default='', null=True, verbose_name='Anchor'),
),
migrations.AddField(
model_name='tweetembed',
name='anchor_en',
field=models.SlugField(blank=True, default='', null=True, verbose_name='Anchor'),
),
migrations.AddField(
model_name='tweetembed',
name='anchor_fr',
field=models.SlugField(blank=True, default='', null=True, verbose_name='Anchor'),
),
migrations.AddField(
model_name='tweetembed',
name='title_de',
field=models.CharField(blank=True, default='', max_length=256, null=True, verbose_name='Title'),
),
migrations.AddField(
model_name='tweetembed',
name='title_en',
field=models.CharField(blank=True, default='', max_length=256, null=True, verbose_name='Title'),
),
migrations.AddField(
model_name='tweetembed',
name='title_fr',
field=models.CharField(blank=True, default='', max_length=256, null=True, verbose_name='Title'),
),
migrations.AddField(
model_name='tweetembed',
name='tweet_url_de',
field=models.URLField(help_text='Example: https://twitter.com/MdDoomFest/status/795834590481018880', null=True, verbose_name='Tweet URL'),
),
migrations.AddField(
model_name='tweetembed',
name='tweet_url_en',
field=models.URLField(help_text='Example: https://twitter.com/MdDoomFest/status/795834590481018880', null=True, verbose_name='Tweet URL'),
),
migrations.AddField(
model_name='tweetembed',
name='tweet_url_fr',
field=models.URLField(help_text='Example: https://twitter.com/MdDoomFest/status/795834590481018880', null=True, verbose_name='Tweet URL'),
),
]
| [
"bnzk@bnzk.ch"
] | bnzk@bnzk.ch |
f847203df4ede5b0fbb299394fd26184af8cdc8a | 88ed6ed99589f7fb8e49aeb6c15bf0d51fe14a01 | /049_group-anagrams.py | be39e43dcb820a52855a524f2c1fe18e6cb730a2 | [] | no_license | ryeLearnMore/LeetCode | 3e97becb06ca2cf4ec15c43f77447b6ac2a061c6 | 04ec1eb720474a87a2995938743f05e7ad5e66e3 | refs/heads/master | 2020-04-07T19:02:43.171691 | 2019-06-23T15:09:19 | 2019-06-23T15:09:19 | 158,634,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#@author: rye
#@time: 2019/3/13
'''
总结:
思路正解差不多,但是过不了,原因在于如果strs里有重复的字符串就无法处理。
还没想好怎么解决。
'''
class Solution(object):
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
arr = []
have = []
for i in range(len(strs)):
temp = []
if strs[i] not in have:
temp.append(strs[i])
for j in range(i + 1, len(strs)):
if sorted(strs[i]) == sorted(strs[j]):
if strs[j] not in have:
temp.append(strs[j])
have.append(strs[j])
if temp != []:
arr.append(temp)
return arr
# 大佬的做法
'''
用字典的方式,感觉确实开拓了思路。
'''
class Solution1(object):
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
mapx = {}
for i in strs:
tmp = ''.join(sorted(list(i)))
if tmp in mapx:
mapx[tmp].append(i) # 注意这步,自己写的时候可能想不到还可以这样添加。即:一个key对应多个value,并用list表示
else:
mapx[tmp] = [i]
return mapx.values()
if __name__ == '__main__':
strs1 = ["eat", "tea", "tan", "ate", "nat", "bat"]
strs2 = ["","",""]
print(Solution1().groupAnagrams(strs1)) | [
"noreply@github.com"
] | ryeLearnMore.noreply@github.com |
5aa4056b5a3b3b7562859e1199fa04a338390c39 | 5730110af5e4f0abe538ed7825ddd62c79bc3704 | /pacu/pacu/core/model/analysis_fixture.py | 392c070eabf6f44a81418eea1b412337f5febeca | [] | no_license | jzeitoun/pacu-v2 | bdbb81def96a2d87171ca20b89c878b2f66975e7 | 0ccb254a658263b4fe8c80ea623f860cb7dc1428 | refs/heads/master | 2021-06-03T18:50:50.890399 | 2020-04-27T16:31:59 | 2020-04-27T16:31:59 | 110,889,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | # one = dict(
# type=u'i3d',
# title=u'First transplantation',
# user=u'Sunil',
# desc=u'The brain has the remarkable capacity to rewire its connections and thereby reorganize its function. In the juvenile brain, the plasticity of neuronal connections mediates the fine-tuning of a wide range of behaviors, from visual perception to language acquisition to social recognition.',
# host=u'Scanbox',
# src=u'JZ_003'
# )
# two = dict(
# type=u'i3d',
# title=u'The dummy session',
# user=u'HT',
# desc=u'What mechanism regulates the plasticity of connections in the young brain? How might we manipulate neural circuits to reactivate this plasticity?',
# host=u'Scanbox',
# src=u'JZ_006'
# )
def get(Model=None):
if not Model:
from .analysis import AnalysisV1 as Model
return dict(
# one = Model(**one),
# two = Model(**two),
)
def dump(session):
session.add_all(get().values())
session.commit()
| [
"jzeitoun@uci.edu"
] | jzeitoun@uci.edu |
c32eb96335d89570632c54e7cfe7bbea03eb18aa | b22205aa21ac51c7b14dfaab556eea1f8902a922 | /bin/foamOutputDirs.py | e6882006845de0e8a6ce636f6f9e54ca763c15b1 | [] | no_license | ewquon/pylib | a1c6a64a0127c5078e19f190ec252ccd00b5035e | c34afb2a13fc0075f95a43bac99219b25b3984a2 | refs/heads/master | 2023-07-12T11:32:31.671093 | 2023-06-21T15:59:15 | 2023-06-21T15:59:15 | 41,262,844 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | #!/usr/bin/env python
import sys
import os
dirs = [ dname for dname in os.listdir('.') if os.path.isdir(dname) ]
dirlist = []
numlist = []
for d in dirs:
try:
step = float(d)
numlist.append(step)
dirlist.append(d)
except ValueError: pass
# sort list of floats
indices = [i[0] for i in sorted(enumerate(numlist), key=lambda x:x[1])]
if len(sys.argv) > 1:
sep = sys.argv[1]
else: sep = ' '
#print(' '.join(dirlist))
#print(' '.join([dirlist[i] for i in indices]))
#print(' '.join([dirlist[i] for i in indices]).strip())
print(sep.join([dirlist[i] for i in indices]).strip())
| [
"eliot.quon@nrel.gov"
] | eliot.quon@nrel.gov |
609a9ef66b72016bf583dc87d491d71c0fe4395e | 18a0e8f672359f8f0e0e1b8a356e87627399be87 | /testproject/settings.py | 62082bdfc9780bf034cf882de53e64e6c9ab93ca | [
"BSD-2-Clause"
] | permissive | ptim/formulation | 69ee3cf24981ded2552ef47f1c8ba999820e038d | 2351cc85cd189c7029a35801a8f95e7450b175d3 | refs/heads/master | 2021-01-18T05:47:46.960270 | 2014-07-23T14:04:59 | 2014-07-23T14:04:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # test project django settings
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'name': ':memory:',
},
}
ROOT_URLCONF = 'testproject.urls'
SECRET_KEY = 's3cr3t'
INSTALLED_APPS = (
'formulation',
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'testproject', 'templates'),
)
try:
from django.test.runner import DiscoverRunner
except:
TEST_RUNNER = 'discover_runner.DiscoverRunner'
| [
"curtis@tinbrain.net"
] | curtis@tinbrain.net |
3938f608cebd2d4198512b2979b0290982d04b86 | 6dfb7fe44b6c5bfb7feb5a101656e3d3402a621f | /simp_py_examples/course/SM1801/t012.py | a660e43c7dba6f39eeb873127d7ed0732ab05063 | [
"MIT"
] | permissive | kcfkwok2003/Simp_py | 11d6813fac83ab6309eb8efc22fcd8edde5b19b8 | f75e66da01b45dc8688dda602f8b33d4258f0c31 | refs/heads/master | 2021-05-11T00:36:36.872754 | 2018-12-19T01:41:15 | 2018-12-19T01:41:15 | 118,306,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | # t012.py
from machine import Pin
led=Pin(26,Pin.OUT)
while True:
led.value(0) # 0V
time.sleep(1)
led.value(1) # 3.3V
time.sleep(1)
| [
"kcfkwok@gmail.com"
] | kcfkwok@gmail.com |
627a2e28b59cabee99cdbfa37daee24e496a77f5 | 561a032be5f4f37f40e49ed70740d167e3a12d56 | /django_movie/movies/migrations/0002_auto_20210820_0202.py | cfcce499af7dc2452be06bb99ba4325e264a6a22 | [] | no_license | Mazev/django_movie | 6f3e0cfbd4e46431f03bd900a86cae4dca9f27f3 | af5a194d5fb5a08a944358ba2226a2e1db2e137b | refs/heads/main | 2023-07-12T21:11:07.410313 | 2021-08-21T16:38:58 | 2021-08-21T16:38:58 | 397,938,816 | 0 | 0 | null | 2021-08-21T16:38:58 | 2021-08-19T12:40:24 | JavaScript | UTF-8 | Python | false | false | 1,270 | py | # Generated by Django 3.2.6 on 2021-08-19 23:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('movies', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='fees_in_world',
field=models.PositiveIntegerField(default=0, help_text='бюджета е в долари', verbose_name='Приходи по цял свят'),
),
migrations.CreateModel(
name='Reviews',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('name', models.CharField(max_length=100, verbose_name='Име')),
('text', models.TextField(max_length=5000, verbose_name='Съобщение')),
('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movies.movie', verbose_name='филм')),
],
options={
'verbose_name': 'Коментар',
'verbose_name_plural': 'Коментари',
},
),
]
| [
"77510376+Mazev@users.noreply.github.com"
] | 77510376+Mazev@users.noreply.github.com |
be33e0ee19c20030f9a4e0d0e74e372a872cd5e7 | 784a030b7afb119b5b7024339117a33549db4d74 | /taravel/locations/migrations/0002_auto_20160409_0001.py | 526d2deaffc258d532fb5a657c28a7d49a7b052a | [
"MIT"
] | permissive | ad-m/taravel | bb136d789cf3a22ffe3744fe3cc273edd5c74640 | 4697ee51eec48ed8bb57d7b4a00f352f47e40ba0 | refs/heads/master | 2020-12-29T02:32:04.320280 | 2017-04-09T18:05:28 | 2017-04-09T18:05:28 | 55,809,856 | 0 | 0 | null | 2016-07-21T14:54:38 | 2016-04-08T21:10:20 | JavaScript | UTF-8 | Python | false | false | 473 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-09 00:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('locations', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='country',
name='name',
field=models.CharField(max_length=255, verbose_name='Name of country'),
),
]
| [
"naczelnik@jawnosc.tk"
] | naczelnik@jawnosc.tk |
2d7fb17440dde49931c5efae141b324e970191e8 | eab1756b01717e81537133400f36aea4d7a0876f | /yuxin_numpy/variable_fetch_bug_report.py | 5e9afe5df43ac413eecfdf31779316e614e6e14c | [] | no_license | bearpelican/cluster | d677fe392ac1196b77e3f8fb79e530ec8371080f | 2e316cf1def0b72b47f79a864ed3aa778c297b95 | refs/heads/master | 2020-03-21T06:52:57.514901 | 2018-08-10T10:20:26 | 2018-08-10T22:33:05 | 138,246,892 | 3 | 1 | null | 2018-06-22T02:51:07 | 2018-06-22T02:51:07 | null | UTF-8 | Python | false | false | 5,162 | py | # Run D2H and H2D benchmark with synthetic workload with feed-fetch step
import tensorflow as tf
import argparse
import numpy as np
import time
import ray
import os
import portpicker
import subprocess
import sys
import tensorflow as tf
import threading
import time
import pickle
from collections import OrderedDict
parser = argparse.ArgumentParser()
parser.add_argument("--dim", default=25*1000*1000, type=int,
help="The number of parameters.")
parser.add_argument("--align", default='none', type=str,
help="none/cpu/gpu/ray")
parser.add_argument("--target", default='cpu', type=str,
help="where target tensor lives (cpu or gpu)")
args = parser.parse_args()
global_timeit_dict = OrderedDict()
class timeit:
"""Decorator to measure length of time spent in the block in millis and log
it to TensorBoard."""
def __init__(self, tag=""):
self.tag = tag
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *args):
self.end = time.perf_counter()
interval_ms = 1000*(self.end - self.start)
global_timeit_dict.setdefault(self.tag, []).append(interval_ms)
# print("%20s %10.2f"%(self.tag, interval_ms))
def summarize_time(tag, time_list_ms):
# delete first large interval if exists
# if time_list_ms and time_list_ms[0]>3600*10:
del time_list_ms[0]
if len(time_list_ms)>0:
min = np.min(time_list_ms)
mean = np.mean(time_list_ms)
median = np.median(time_list_ms)
data_size_gb = args.dim*4/1e9
time_sec = min/1000
bw = data_size_gb/time_sec
formatted = ["%.2f"%(d,) for d in time_list_ms[:10]]
print("%-20s: %.1f GB/sec, min: %.2f, median: %.2f, mean: %.2f"%(tag, bw, min, median, mean))
else:
print("Times: <empty>")
timeline_counter = 0
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE, output_partition_graphs=True)
def sessrun(*args, **kwargs):
"""Runs fetches, dumps timeline files in current directory."""
global timeline_counter
run_metadata = tf.RunMetadata()
log_fn = "%s"%(timeline_counter,)
sess = tf.get_default_session()
root = os.getcwd()+"/data"
os.system('mkdir -p '+root)
from tensorflow.python.client import timeline
kwargs['options'] = run_options
kwargs['run_metadata'] = run_metadata
results = sess.run(*args, **kwargs)
tl = timeline.Timeline(step_stats=run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=True,
show_dataflow=False)
open(root+"/timeline_%s.json"%(log_fn,), "w").write(ctf)
open(root+"/stepstats_%s.pbtxt"%(log_fn,), "w").write(str(
run_metadata.step_stats))
timeline_counter+=1
return results
def fetch_cpu_variable():
data = np.ones((args.dim,), dtype=np.float32)
with tf.device('/cpu:0'):
params = tf.Variable(initial_value=data)
sess.run(tf.global_variables_initializer())
for i in range(20):
with timeit('fetch_cpu_variable'):
sess.run(params)
def fetch_cpu_variable_add():
data = np.ones((args.dim,), dtype=np.float32)
with tf.device('/cpu:0'):
params = tf.Variable(initial_value=data)
params = params+0.1
params_first = params[0]
params_sum = tf.reduce_sum(params)
sess.run(tf.global_variables_initializer())
for i in range(20):
with timeit('fetch_cpu_variable_add'):
# sess.run(params)
result = sess.run(params)
def fetch_cpu_variable_concat():
data = np.ones((args.dim,), dtype=np.float32)
with tf.device('/cpu:0'):
params = tf.Variable(initial_value=data)
params = tf.concat([params, tf.fill([1],1.0)], axis=0)
sess.run(tf.global_variables_initializer())
for i in range(20):
with timeit('fetch_cpu_variable_concat'):
sess.run(params)
def main():
global grad_cached_const
import gc
gc.disable()
params0 = np.ones((args.dim,), dtype=np.float32)/(np.sqrt(args.dim))
if args.align == 'none':
pass
elif args.align == 'cpu':
params0 = align_numpy_cpu(params0)
elif args.align == 'gpu':
params0 = align_numpy_gpu(params0)
loss, params, grad_cached, grad_assign_op = create_net('net1', params0)
sess.run(tf.global_variables_initializer())
lr = 0.01
for i in range(10):
loss0 = loss.eval()
print(loss0)
with timeit('step'):
pass
# sess.run(grad_assign_op)
with timeit('fetch'):
# grad0 = sess.run(grad_cached)
grad0 = sess.run(grad_cached_const)
# takes 75ms, 33ms is on allocation, 16ms on multiplication
with timeit('add'):
params0-=grad0*lr
with timeit('feed'):
# params.load(params0)
sess.run(params.initializer, feed_dict={params.initial_value:params0})
for key, times in global_timeit_dict.items():
summarize_time(key, times)
assert abs(loss0-0.69513524)<0.01
print('test passed')
if __name__ == '__main__':
import gc
gc.disable()
sess = tf.InteractiveSession()
fetch_cpu_variable()
fetch_cpu_variable_add()
fetch_cpu_variable_concat()
for key, times in global_timeit_dict.items():
summarize_time(key, times)
| [
"yaroslavvb@gmail.com"
] | yaroslavvb@gmail.com |
66ea90b48965b0961d8c7d2105f84ad9a958f42f | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/stmt_for_list-48.py | 50e53e7e7d2f5a49593f26d6b5dfdfb9e4cb9c7e | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | x:int = 0
z:[int] = None
z = [1, 2, 3]
for x in z:
print($Parameters)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
28a011661f24c1229e4e973964de433c47835416 | fd0328f6a5f78cfa80d61094517fa0f32943bb9e | /superlists/urls.py | 902f87670539d653d465801868e9194a9774418b | [] | no_license | liangsongyou/superlists | 3eee6ae492e89a13a54aec55f4b94c78c1fa049a | fd1704a14d18fe9fa7dc1074a172d9b0708ba1f3 | refs/heads/master | 2020-03-16T21:50:41.382819 | 2018-05-31T07:44:14 | 2018-05-31T07:44:14 | 111,548,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | """superlists URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
# from django.contrib import admin
from lists import views as list_views
from lists import urls as list_urls
from accounts import urls as account_urls
urlpatterns = [
# url(r'^admin/', admin.site.urls),
url(r'^$', list_views.home_page, name='home'),
url(r'^lists/', include(list_urls)),
url(r'^accounts/', include(account_urls)),
]
| [
"yuebei58@gmail.com"
] | yuebei58@gmail.com |
b92f292c218ba6dc7a54c573b10dc237a4ac6bff | 7a0070b15636653f404c2b2b85d300e949db1fb2 | /muglaSepetiApp/migrationsex/0033_auto_20200921_0209.py | 8d9ec33af59cb6b3643d7afcc29b60c2de0cea8c | [] | no_license | furkankykc/MuglaSepeti | 8d0f29faf8a868b159ca0d158cdb2e312784c626 | 58a650e68fd283baeaa0ae6716c8ea316b996c16 | refs/heads/master | 2023-01-08T22:15:00.878505 | 2020-11-09T21:18:13 | 2020-11-09T21:18:13 | 287,108,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | # Generated by Django 3.0.8 on 2020-09-20 23:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('muglaSepetiApp', '0032_auto_20200921_0208'),
]
operations = [
migrations.AlterField(
model_name='config',
name='aboutus_title',
field=models.CharField(blank=True, max_length=50, null=True),
),
]
| [
"furkanfbr@gmail.com"
] | furkanfbr@gmail.com |
b26759a3ad30279cdb6e5e3d1504992e161eee56 | 9cbab916088192af67a19aaee25fe7d6e5d27a31 | /file/create.py | 255728651113829e6fdd6bb10971222ec7e3638c | [] | no_license | ddayzzz/Pythonlearning | 806c75304d7d954f2c935031d4d7516be7ce7300 | 54e92aa5282da97b6d4bd2355a668a16c272ee68 | refs/heads/master | 2020-12-30T12:44:49.465356 | 2017-05-25T15:12:53 | 2017-05-25T15:12:53 | 91,356,527 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 361 | py | #create py
#coding=gb2312
import os,shelve,pprint
selffile='createfile'
selfptr=shelve.open(selffile)
dic=[{'name':'Ãû×Ö'},{'USSR':'former soviet'},{'China':{'1':'china','2':'Chinese'}}]
selfptr['dic']=dic
crepy='create1.py'
fptr=open(crepy,'a')
fptr.write('#coding=gb2312\nimport pprint,shelve\nprint(pprint.pformat(shelve.open(\'createfile\')[\'dic\']))') | [
"wangshu214@live.cn"
] | wangshu214@live.cn |
9124499c826b46e8fc759077b08027aae9b2d2d4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02709/s723829299.py | 01a4d49022c1c656def13e678e9cfeee8f882cc1 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | n=int(input())
a=list(map(int,input().split()))
a=list(zip(range(n),a))#[index,a]の配列
a.sort(key=lambda x:x[1])#aの昇順で並べる
dp=[[0 for i in range(n+1)]for j in range(n+1)] #dp[x][y]:=左にx、右にy人並べたときの最大値
ans=0
for k in range(n):# k人目まで終了、k+1人目に対して
i,ai=a.pop()#aの大きいものから取り出す
dp[k+1][0]=dp[k][0]+ai*(i-k)
dp[0][k+1]=dp[0][k]+ai*(n-k-1-i)
for l in range(k):#右にl+1人並べたとき
dp[k-l][l+1]=max(dp[k-l-1][l+1]+ai*(i-k+l+1),dp[k-l][l]+ai*(n-l-1-i))
for k in range(n+1):
ans=max(ans,dp[k][n-k])
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4c8ab6bc0205d4424c71cea52b683546ac62f73b | c4313edda0f14795490080af1ba400b826611be8 | /lib/Crypto.lin.x64/Crypto/SelfTest/Hash/test_SHA3_224.py | 69128d9f74da2ba9b5b697ea56e7343ea0daab7a | [
"MIT"
] | permissive | tosher/Mediawiker | 821a4eab9f812e820bab3a8f4d3f3d542d3aeafa | 89c25d4fa6c6224edbaf5f06794a03594bcccad0 | refs/heads/master | 2023-07-06T02:45:05.924541 | 2023-07-01T18:32:09 | 2023-07-01T18:32:09 | 6,582,157 | 104 | 22 | NOASSERTION | 2023-09-14T18:00:30 | 2012-11-07T16:18:25 | Python | UTF-8 | Python | false | false | 2,874 | py | # -*- coding: utf-8 -*-
#
# SelfTest/Hash/test_SHA3_224.py: Self-test for the SHA-3/224 hash function
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Hash.SHA3_224"""
import unittest
from binascii import hexlify
from Crypto.SelfTest.loader import load_tests
from Crypto.SelfTest.st_common import list_test_cases
from io import StringIO
from Crypto.Hash import SHA3_224 as SHA3
from Crypto.Util.py3compat import b
class APITest(unittest.TestCase):
    """API-level behavior tests for SHA3-224 (update() after digest())."""

    def test_update_after_digest(self):
        msg = b("rrrrttt")

        # Normally, update() cannot be done after digest()
        h = SHA3.new(data=msg[:4])
        dig1 = h.digest()
        self.assertRaises(TypeError, h.update, msg[4:])
        dig2 = SHA3.new(data=msg).digest()

        # With the proper flag, it is allowed
        h = SHA3.new(data=msg[:4], update_after_digest=True)
        # assertEquals is a deprecated alias, removed in Python 3.12;
        # use assertEqual instead.
        self.assertEqual(h.digest(), dig1)

        # ... and the subsequent digest applies to the entire message
        # up to that point
        h.update(msg[4:])
        self.assertEqual(h.digest(), dig2)
def get_tests(config={}):
    """Assemble the SHA3-224 suite: on-disk KAT vectors plus the API tests."""
    from .common import make_hash_tests

    test_vectors = load_tests(("Crypto", "SelfTest", "Hash", "test_vectors", "SHA3"),
                              "ShortMsgKAT_SHA3-224.txt",
                              "KAT SHA-3 224",
                              {"len": lambda x: int(x)})

    test_data = []
    for tv in test_vectors:
        if tv.len == 0:
            # Normalize the zero-length vector's message to empty bytes.
            tv.msg = b("")
        test_data.append((hexlify(tv.md), tv.msg, tv.desc))

    tests = list(make_hash_tests(SHA3, "SHA3_224", test_data,
                                 digest_size=SHA3.digest_size,
                                 oid="2.16.840.1.101.3.4.2.7"))
    tests.extend(list_test_cases(APITest))
    return tests
if __name__ == '__main__':
    # Allow running this test module directly: build a suite factory and
    # hand it to unittest's own runner (re-import is harmless, just local).
    import unittest
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
| [
"to.tosher@gmail.com"
] | to.tosher@gmail.com |
19c10d17f3d2fb83cd470c70b8a4a7eaa4e2d4c5 | 5b5145ce47a6e14f342f21ba3752ab8823d8043a | /panoptes_aggregation/tests/reducer_tests/test_shape_reducer_fan.py | 1e05d18634e45d2e3d228b12da528f8c96a1801f | [
"Apache-2.0"
] | permissive | isabella232/aggregation-for-caesar | 335f40c801e2cd18e807b6f10d8228e9c659df97 | 9ce7616b60ab32b13791868ace1637801ea937e9 | refs/heads/master | 2023-03-20T08:21:44.889957 | 2020-12-14T11:54:55 | 2020-12-14T14:17:28 | 322,817,491 | 0 | 0 | Apache-2.0 | 2021-02-23T23:28:33 | 2020-12-19T10:06:10 | null | UTF-8 | Python | false | false | 6,281 | py | from panoptes_aggregation.reducers.shape_reducer_dbscan import process_data as process_data_dbscan, shape_reducer_dbscan
from panoptes_aggregation.reducers.shape_reducer_hdbscan import process_data as process_data_hdbscan, shape_reducer_hdbscan
from .base_test_class import ReducerTest
import copy
extracted_data = [
{
'frame0': {
'T0_tool0_x': [0.0, 100.0],
'T0_tool0_y': [0.0, 100.0],
'T0_tool0_radius': [50.0, 10.0],
'T0_tool0_spread': [60.0, 20.0],
'T0_tool0_rotation': [1.0, 359.0]
},
'frame1': {
'T0_tool1_x': [50.0],
'T0_tool1_y': [50.0],
'T0_tool1_radius': [50.0],
'T0_tool1_spread': [50.0],
'T0_tool1_rotation': [50.0]
}
},
{
'frame0': {
'T0_tool0_x': [0.0, 100.0],
'T0_tool0_y': [0.0, 100.0],
'T0_tool0_radius': [50.0, 10.0],
'T0_tool0_spread': [60.0, 20.0],
'T0_tool0_rotation': [359.0, 1.0],
'T0_tool1_x': [0.0, 100.0],
'T0_tool1_y': [100.0, 0.0],
'T0_tool1_radius': [10.0, 50.0],
'T0_tool1_spread': [50.0, 10.0],
'T0_tool1_rotation': [1.0, 359.0]
}
},
{
'frame1': {
'T0_tool1_x': [50.0],
'T0_tool1_y': [50.0],
'T0_tool1_radius': [50.0],
'T0_tool1_spread': [50.0],
'T0_tool1_rotation': [50.0]
}
},
{
'frame0': {
'T0_tool1_x': [0.0, 100.0],
'T0_tool1_y': [100.0, 0.0],
'T0_tool1_radius': [10.0, 50.0],
'T0_tool1_spread': [50.0, 10.0],
'T0_tool1_rotation': [359.0, 1.0]
},
'frame1': {
'T0_tool0_x': [20.0],
'T0_tool0_y': [20.0],
'T0_tool0_radius': [20.0],
'T0_tool0_spread': [20.0],
'T0_tool0_rotation': [20.0]
}
},
{}
]
kwargs_extra_data = {
'user_id': [
1,
2,
3,
4,
5
]
}
processed_data = {
'shape': 'fan',
'symmetric': False,
'frame0': {
'T0_tool0': [
(0.0, 0.0, 50.0, 60.0, 1.0),
(100.0, 100.0, 10.0, 20.0, 359.0),
(0.0, 0.0, 50.0, 60.0, 359.0),
(100.0, 100.0, 10.0, 20.0, 1.0)
],
'T0_tool1': [
(0.0, 100.0, 10.0, 50.0, 1.0),
(100.0, 0.0, 50.0, 10.0, 359.0),
(0.0, 100.0, 10.0, 50.0, 359.0),
(100.0, 0.0, 50.0, 10.0, 1.0)
]
},
'frame1': {
'T0_tool0': [
(20.0, 20.0, 20.0, 20.0, 20.0)
],
'T0_tool1': [
(50.0, 50.0, 50.0, 50.0, 50.0),
(50.0, 50.0, 50.0, 50.0, 50.0)
]
}
}
reduced_data = {
'frame0': {
'T0_tool0_fan_x': [0.0, 100.0, 0.0, 100.0],
'T0_tool0_fan_y': [0.0, 100.0, 0.0, 100.0],
'T0_tool0_fan_radius': [50.0, 10.0, 50.0, 10.0],
'T0_tool0_fan_spread': [60.0, 20.0, 60.0, 20.0],
'T0_tool0_fan_rotation': [1.0, 359.0, 359.0, 1.0],
'T0_tool0_cluster_labels': [0, 1, 0, 1],
'T0_tool0_clusters_count': [2, 2],
'T0_tool0_clusters_x': [0.0, 100.0],
'T0_tool0_clusters_y': [0.0, 100.0],
'T0_tool0_clusters_radius': [50.0, 10.0],
'T0_tool0_clusters_spread': [60.0, 20.0],
'T0_tool0_clusters_rotation': [0.0, 0.0],
'T0_tool1_fan_x': [0.0, 100.0, 0.0, 100.0],
'T0_tool1_fan_y': [100.0, 0.0, 100.0, 0.0],
'T0_tool1_fan_radius': [10.0, 50.0, 10.0, 50.0],
'T0_tool1_fan_spread': [50.0, 10.0, 50.0, 10.0],
'T0_tool1_fan_rotation': [1.0, 359.0, 359.0, 1.0],
'T0_tool1_cluster_labels': [0, 1, 0, 1],
'T0_tool1_clusters_count': [2, 2],
'T0_tool1_clusters_x': [0.0, 100.0],
'T0_tool1_clusters_y': [100.0, 0.0],
'T0_tool1_clusters_radius': [10.0, 50.0],
'T0_tool1_clusters_spread': [50.0, 10.0],
'T0_tool1_clusters_rotation': [0.0, 0.0]
},
'frame1': {
'T0_tool0_fan_x': [20.0],
'T0_tool0_fan_y': [20.0],
'T0_tool0_fan_radius': [20.0],
'T0_tool0_fan_spread': [20.0],
'T0_tool0_fan_rotation': [20.0],
'T0_tool0_cluster_labels': [-1],
'T0_tool1_fan_x': [50.0, 50.0],
'T0_tool1_fan_y': [50.0, 50.0],
'T0_tool1_fan_radius': [50.0, 50.0],
'T0_tool1_fan_spread': [50.0, 50.0],
'T0_tool1_fan_rotation': [50.0, 50.0],
'T0_tool1_cluster_labels': [0, 0],
'T0_tool1_clusters_count': [2],
'T0_tool1_clusters_x': [50.0],
'T0_tool1_clusters_y': [50.0],
'T0_tool1_clusters_radius': [50.0],
'T0_tool1_clusters_spread': [50.0],
'T0_tool1_clusters_rotation': [50.0]
}
}
TestShapeReducerFan = ReducerTest(
shape_reducer_dbscan,
process_data_dbscan,
extracted_data,
processed_data,
reduced_data,
'Test shape fan reducer with DBSCAN',
network_kwargs=kwargs_extra_data,
pkwargs={'shape': 'fan'},
kwargs={
'eps': 5,
'min_samples': 2
},
test_name='TestShapeReducerFan'
)
reduced_data_hdbscan = copy.deepcopy(reduced_data)
reduced_data_hdbscan['frame0']['T0_tool0_cluster_probabilities'] = [1.0, 1.0, 1.0, 1.0]
reduced_data_hdbscan['frame0']['T0_tool0_clusters_persistance'] = [0.9868693567140278, 0.9868693567140278]
reduced_data_hdbscan['frame0']['T0_tool1_cluster_probabilities'] = [1.0, 1.0, 1.0, 1.0]
reduced_data_hdbscan['frame0']['T0_tool1_clusters_persistance'] = [0.9868693567140278, 0.9868693567140278]
reduced_data_hdbscan['frame1']['T0_tool0_cluster_probabilities'] = [0.0]
reduced_data_hdbscan['frame1']['T0_tool1_cluster_probabilities'] = [1.0, 1.0]
reduced_data_hdbscan['frame1']['T0_tool1_clusters_persistance'] = [1.0]
TestShapeReducerFanHdbscan = ReducerTest(
shape_reducer_hdbscan,
process_data_hdbscan,
extracted_data,
processed_data,
reduced_data_hdbscan,
'Test shape fan reducer with HDBSCAN',
network_kwargs=kwargs_extra_data,
pkwargs={'shape': 'fan'},
kwargs={
'min_cluster_size': 2,
'min_samples': 1,
'allow_single_cluster': True
},
test_name='TestShapeReducerFanHdbscan'
)
| [
"coleman.krawczyk@gmail.com"
] | coleman.krawczyk@gmail.com |
d3c4d7c5e6c299881eefffeaddcf06d19bd78463 | 4ccaf5252f5936414638f254ca5932ad922cd582 | /ex034 - Aumentos multiplos.py | 66bc546db7f4f138303f01e794b214d8ad0051a7 | [] | no_license | carlosaugus1o/Python-Exercicios | b0a525436d7cf24e3fc9ccfd046278ad383eb01c | 6216430dac9d5fc6fe3b75ae9625063d4971e419 | refs/heads/main | 2023-07-03T22:59:31.913494 | 2021-07-28T03:05:50 | 2021-07-28T03:05:50 | 390,193,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | salário = float(input('Informe o salário do funcionário: R$ '))
# Tiered raise on the salary read just above ("salário"):
# below R$ 1250.00 the raise is 15%, otherwise 10%.
if salário < 1250:
    # Below the threshold: 15% raise.
    aumento = salário * 1.15
else:
    # At or above the threshold: 10% raise.
    aumento = salário * 1.1
print('O novo salário do funcionário será de {:.2f}'.format(aumento))
| [
"noreply@github.com"
] | carlosaugus1o.noreply@github.com |
6f9bb16e2b0de1dc1aaabf69938acdcbe66f7819 | b7d0f003cfb0ec6fa25f99d9f7b544dc38ae6aa8 | /concept/greedy/fractionalKnapsack.py | a04a8c99034d44bba0bd5bf14a0d3aa537b69b91 | [] | no_license | Kimyechan/dataStructureAndArgorithm | 43c2cfa0d12a5c729f687d786ef6dde23bf193a7 | c9f8f614621aee9e236ffef20e5e563b37bab0b3 | refs/heads/master | 2021-07-09T03:31:31.405725 | 2021-03-09T13:18:55 | 2021-03-09T13:18:55 | 231,402,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | data_list = [(10, 10), (15, 12), (20, 10), (25, 8), (30, 5)]
def getMaxValue(dataList, capacity):
    """Solve the fractional knapsack problem greedily.

    Args:
        dataList: list of (weight, value) pairs.
        capacity: maximum total weight the knapsack can hold.

    Returns:
        (value, detail): the best achievable total value, and the list of
        (weight, value) pairs taken (fully, or fractionally for the last
        one), in selection order.
    """
    # Greedy: consider items by descending value-per-unit-weight ratio.
    items = sorted(dataList, key=lambda item: item[1] / item[0], reverse=True)
    detail = []
    value = 0
    for weight, worth in items:
        if capacity >= weight:
            # The whole item fits: take all of it.
            value += worth
            capacity -= weight
            detail.append((weight, worth))
        else:
            # Only a fraction fits.  Fix: when capacity is already 0 the
            # original still appended the item to `detail` even though a
            # zero fraction of it was taken; skip it instead.
            if capacity > 0:
                value += worth * (capacity / weight)
                detail.append((weight, worth))
            break
    return value, detail
print(getMaxValue(data_list, 30)) | [
"vlvkcjswo7@naver.com"
] | vlvkcjswo7@naver.com |
eba37ec8db396d0c781197f2c79e8b1305138bf5 | 537e30c108e7a575cec1b7a9332439e1abac811d | /venv/bin/list_instances | c49900bc82f96c01a5302e64f08b84b988637fd1 | [] | no_license | kangqiwang/extractOntology | d5401bb27603e94264d84ec41fc9e994ece92278 | 89b3d7bc1cf08ea8856cb9221448e988d4d2f992 | refs/heads/master | 2020-08-13T01:44:19.399402 | 2019-11-18T18:52:28 | 2019-11-18T18:52:28 | 214,881,034 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,136 | #!/home/kang/Project/myGit/extractOntology/venv/bin/python
import sys
from operator import attrgetter
from optparse import OptionParser
import boto
from boto.ec2 import regions
# Column registry for the output table: maps a header name to how the value
# is extracted from a boto EC2 instance ('get') and the column width used by
# the fixed-width format string ('length').  The special 'T:' entry only
# supplies a width; concrete 'T:<tagname>' columns are resolved dynamically
# in get_column() below.
HEADERS = {
    'ID': {'get': attrgetter('id'), 'length':15},
    'Zone': {'get': attrgetter('placement'), 'length':15},
    'Groups': {'get': attrgetter('groups'), 'length':30},
    'Hostname': {'get': attrgetter('public_dns_name'), 'length':50},
    'PrivateHostname': {'get': attrgetter('private_dns_name'), 'length':50},
    'State': {'get': attrgetter('state'), 'length':15},
    'Image': {'get': attrgetter('image_id'), 'length':15},
    'Type': {'get': attrgetter('instance_type'), 'length':15},
    'IP': {'get': attrgetter('ip_address'), 'length':16},
    'PrivateIP': {'get': attrgetter('private_ip_address'), 'length':16},
    'Key': {'get': attrgetter('key_name'), 'length':25},
    'T:': {'length': 30},
}
def get_column(name, instance=None):
    """Return the display value of column *name* for *instance*.

    Names of the form 'T:<tag>' are resolved from the instance's tags
    (empty string when the tag is absent); any other name is looked up
    through the HEADERS registry.
    """
    if not name.startswith('T:'):
        return HEADERS[name]['get'](instance)
    tag = name.split(':', 1)[1]
    return instance.tags.get(tag, '')
def main():
    """List EC2 instances in a region as a fixed-width (or tab-separated) table.

    Connects via boto using the -r/--region option, optionally filters the
    DescribeInstances call (-f), and prints one row per instance with the
    columns chosen by -H/--headers.
    """
    parser = OptionParser()
    parser.add_option("-r", "--region", help="Region (default us-east-1)", dest="region", default="us-east-1")
    parser.add_option("-H", "--headers", help="Set headers (use 'T:tagname' for including tags)", default=None, action="store", dest="headers", metavar="ID,Zone,Groups,Hostname,State,T:Name")
    parser.add_option("-t", "--tab", help="Tab delimited, skip header - useful in shell scripts", action="store_true", default=False)
    parser.add_option("-f", "--filter", help="Filter option sent to DescribeInstances API call, format is key1=value1,key2=value2,...", default=None)
    (options, args) = parser.parse_args()
    # Connect the region
    # (for/else: the else fires only when no region name matched)
    for r in regions():
        if r.name == options.region:
            region = r
            break
    else:
        print("Region %s not found." % options.region)
        sys.exit(1)
    ec2 = boto.connect_ec2(region=region)
    # Read headers
    if options.headers:
        headers = tuple(options.headers.split(','))
    else:
        headers = ("ID", 'Zone', "Groups", "Hostname")
    # Create format string
    # Each column becomes a left-aligned "%-<width>s" field; tag columns
    # all share the generic 'T:' width from HEADERS.
    format_string = ""
    for h in headers:
        if h.startswith('T:'):
            format_string += "%%-%ds" % HEADERS['T:']['length']
        else:
            format_string += "%%-%ds" % HEADERS[h]['length']
    # Parse filters (if any)
    # "k1=v1,k2=v2" -> {'k1': 'v1', 'k2': 'v2'} passed straight to boto.
    if options.filter:
        filters = dict([entry.split('=') for entry in options.filter.split(',')])
    else:
        filters = {}
    # List and print
    if not options.tab:
        print(format_string % headers)
        print("-" * len(format_string % headers))
    for r in ec2.get_all_reservations(filters=filters):
        groups = [g.name for g in r.groups]
        for i in r.instances:
            # Flatten the reservation's group names onto the instance so
            # the 'Groups' column can render them.
            i.groups = ','.join(groups)
            if options.tab:
                print("\t".join(tuple(get_column(h, i) for h in headers)))
            else:
                print(format_string % tuple(get_column(h, i) for h in headers))
main()
| [
"kangqiwang@outlook.com"
] | kangqiwang@outlook.com | |
0b001c9312d96ede3a0cf7bf4d0ba79d5db06435 | 13fbdd8f19c98c0b0295ae0c8d5dccd8c57761f5 | /chapter_4_modeling/train_w2vclassify.py | ebf853a144074da1c4f30939c1c04e22e05362f1 | [
"Apache-2.0"
] | permissive | gridl/voicebook | 13a6d2ba7d9f0dd1786bb6fdabfefb14cc5b4924 | 1a889b443e81e56c48f5a5b4146370ea3d9f7bb1 | refs/heads/master | 2020-03-30T00:47:10.145422 | 2018-09-24T16:13:51 | 2018-09-24T16:13:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,158 | py | '''
train w2vclassify.py
Author: @Jim Schwoebel
Title: Train Word2Vec
This script takes in two lists of sentences, which are then broken down to their
respective words. Word2vec embedding models are then created for each of these
lists in numpy arrays.
Now, we have two word2vec models we can use for machine learning tasks. This is useful
for vocabulary-sensitive classificaion tasks.
For example,
--> we could apply model A on A and model B on A for a feature representation of A (200 features)
--> and a model A on B and model B on B for a feature representation of B (200 features)
#This would make it useful to train both typical classification models (SVM) as well as
#higher-order RNN/CNN models for deep learning.
#This embedding also suits itself to add other similar dimensional feature vectors for images and/or audio as well
#to increase model accuracy into the future.
(C) 2018, NeuroLex Laboratories
'''
import speech_recognition as sr
from gensim.models import Word2Vec
import numpy as np
import random, os, json, getpass
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
from sklearn import svm
from sklearn import metrics
from textblob import TextBlob
from operator import itemgetter
import getpass
import numpy as np
import pickle
import datetime
import time
#INITIALIZE FUNCTIONS
#####################################################################
#function to make a word2vec model from a series of sentences
def w2v(textlist,size,modelname):
    """Train a gensim Word2Vec model on *textlist* and return it.

    Each entry of *textlist* is a sentence string, tokenized here by
    whitespace; empty sentences are dropped.  The model is saved to
    *modelname* in the current working directory unless a file of that
    name is already present there.  *size* is the embedding dimension.
    """
    sentences=list()
    #split into individual word embeddings
    for i in range(len(textlist)):
        if len(textlist[i].split())==0:
            pass
        else:
            sentences.append(textlist[i].split())
    #test (for small samples)
    #print(sentences)
    model = Word2Vec(sentences, size=size, window=5, min_count=1, workers=4)
    # NOTE(review): this checks the *current* directory, which callers
    # change before invoking -- confirm cwd is the intended model dir.
    if modelname in os.listdir():
        #do not save if already file in folder with same name
        pass
    else:
        print('saving %s to disk...'%(modelname))
        model.save(modelname)
    return model
#function to get output embedding given a test set and a word2vec model
def w2v_embedding(test_set, size, modelname):
    """Return the per-word embedding vectors for every word in *test_set*.

    *test_set* is a list of sentence strings; each is whitespace-tokenized
    and looked up in the Word2Vec model stored at *modelname*.  Words not
    in the model vocabulary are skipped so they do not distort later
    averages.  *size* is unused here but kept for interface compatibility.
    """
    model = Word2Vec.load(modelname)
    embeddings = []
    for sentence in test_set:
        for word in sentence.split():
            try:
                embeddings.append(model[word])
            except KeyError:
                # Out-of-vocabulary word: skip it.  (Was a bare `except:`,
                # which also swallowed KeyboardInterrupt/SystemExit.)
                pass
    return embeddings
def sentence_embedding(sentence, size, modelname):
    """Embed a single *sentence* as one fixed-length numpy vector.

    Each whitespace token is looked up in the Word2Vec model stored at
    *modelname*; out-of-vocabulary words are skipped.  The word vectors
    are summed and scaled by 1/size.

    NOTE(review): scaling by 1/size (the vector dimensionality) rather
    than by the number of words found is unusual for a mean embedding,
    but it is preserved here because models trained downstream depend on
    this exact feature scale -- confirm before changing.
    """
    model = Word2Vec.load(modelname)
    word_vectors = []
    for word in sentence.split():
        try:
            word_vectors.append(model[word])
        except KeyError:
            # Out-of-vocabulary word: skip it.  (Was a bare `except:`,
            # which also swallowed KeyboardInterrupt/SystemExit.)
            pass
    out_embed = np.zeros(size)
    for vec in word_vectors:
        out_embed = out_embed + vec
    out_embed = (1 / size) * out_embed
    return out_embed
def transcribe(wavfile):
    """Transcribe *wavfile* (a .wav path) offline with CMU PocketSphinx.

    Prints the transcript as a side effect and returns it.
    """
    r = sr.Recognizer()
    # use wavfile as the audio source (must be .wav file)
    with sr.AudioFile(wavfile) as source:
        #extract audio data from the file
        audio = r.record(source)
        transcript=r.recognize_sphinx(audio)
        print(transcript)
        return transcript
def optimizemodel_sc(train_set2,labels_train_set2,test_set2,labels_test_set2,modelname,classes,testing_set,min_num,selectedfeature,training_data):
    """Train a suite of scikit-learn classifiers and keep the best one.

    Each candidate is fit on the training split and scored with 5-fold
    cross-validation on the held-out split; candidates that raise during
    fitting/scoring score 0 and are never selected.  The winner is refit
    on *all* data (train + test), pickled to <modelname>.pickle, and
    returned together with a human-readable summary.

    Returns:
        [classifier, [modeltype, mean_accuracy, std], summary_text, data_dict]
    """
    filename = modelname
    start = time.time()

    def evaluate(label, factory):
        # Fit on the training split, cross-validate on the held-out split.
        # Any failure scores as (0, 0) so the model is simply never chosen
        # -- this mirrors the ten per-model try/except blocks it replaces.
        try:
            model = factory()
            model.fit(train_set2, labels_train_set2)
            scores = cross_val_score(model, test_set2, labels_test_set2, cv=5)
            print('%s accuracy (+/-) %s' % (label, str(scores.std())))
            print(scores.mean())
            return scores.mean(), scores.std()
        except Exception:
            return 0, 0

    def make_voting():
        # VotingClassifier clones and refits its estimators, so fresh
        # (unfitted) sub-estimators are equivalent to the pre-fitted ones
        # the original code constructed.
        return VotingClassifier(estimators=[
            ('gradboost', GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)),
            ('logit', LogisticRegression(random_state=1)),
            ('adaboost', AdaBoostClassifier(n_estimators=100))],
            voting='hard')

    # (summary name, progress label, factory), in the original priority
    # order; ties on accuracy resolve to the earlier entry, as before.
    # The dead naive-bayes / max-entropy branches (which referenced the
    # undefined classifier1/classifier5 and would have crashed if ever
    # selected) are removed.
    candidates = [
        ('decision-tree', 'Decision tree', lambda: DecisionTreeClassifier(random_state=0)),
        ('gaussian-nb', 'Gaussian NB', GaussianNB),
        ('sk', 'SKlearn classifier', SVC),
        ('adaboost', 'Adaboost classifier', lambda: AdaBoostClassifier(n_estimators=100)),
        ('gradient boosting', 'Gradient boosting', lambda: GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)),
        ('logistic regression', 'Logistic regression', lambda: LogisticRegression(random_state=1)),
        ('hard voting', 'Hard voting', make_voting),
        ('knn', 'K Nearest Neighbors', lambda: KNeighborsClassifier(n_neighbors=7)),
        ('random forest', 'Random forest', lambda: RandomForestClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0)),
        ('svm', 'svm', lambda: svm.SVC(kernel='linear', C=1.0)),
    ]

    accuracym = []
    accuracys = []
    for _, label, factory in candidates:
        mean_acc, std_acc = evaluate(label, factory)
        accuracym.append(mean_acc)
        accuracys.append(std_acc)

    # First index holding the maximum accuracy wins (same tie-breaking as
    # the original if/elif chain).
    best = max(range(len(candidates)), key=lambda i: accuracym[i])
    # NOTE: message normalized -- the original concatenations produced
    # typos like "most accuracate ... classifierwith <features>".
    print('most accurate classifier is %s with %s' % (candidates[best][1], selectedfeature))

    # Refit the winning model on the full data set before saving.
    classifier = candidates[best][2]()
    classifier.fit(train_set2 + test_set2, labels_train_set2 + labels_test_set2)

    model_accuracy = [[candidates[i][0], accuracym[i], accuracys[i]] for i in range(len(candidates))]
    model_accuracy.sort(key=itemgetter(1))
    endlen = len(model_accuracy)

    print('saving classifier to disk')
    f = open(modelname + '.pickle', 'wb')
    pickle.dump(classifier, f)
    f.close()

    end = time.time()
    execution = end - start
    print('summarizing session...')
    accstring = ''
    for modeltype, mean_acc, std_acc in model_accuracy:
        accstring = accstring + '%s: %s (+/- %s)\n' % (str(modeltype), str(mean_acc), str(std_acc))
    # Summary text kept byte-compatible with the original format.
    summary = 'SUMMARY OF MODEL SELECTION \n\n' + 'WINNING MODEL: \n\n' + '%s: %s (+/- %s) \n\n' % (str(model_accuracy[len(model_accuracy) - 1][0]), str(model_accuracy[len(model_accuracy) - 1][1]), str(model_accuracy[len(model_accuracy) - 1][2])) + 'MODEL FILE NAME: \n\n %s.pickle' % (filename) + '\n\n' + 'DATE CREATED: \n\n %s' % (datetime.datetime.now()) + '\n\n' + 'EXECUTION TIME: \n\n %s\n\n' % (str(execution)) + 'GROUPS: \n\n' + str(classes) + '\n' + '(' + str(min_num) + ' in each class, ' + str(int(testing_set * 100)) + '% used for testing)' + '\n\n' + 'TRAINING SUMMARY:' + '\n\n' + training_data + 'FEATURES: \n\n %s' % (selectedfeature) + '\n\n' + 'MODELS, ACCURACIES, AND STANDARD DEVIATIONS: \n\n' + accstring + '\n\n' + '(C) 2018, NeuroLex Laboratories'
    data = {
        'model': modelname,
        'modeltype': model_accuracy[len(model_accuracy) - 1][0],
        'accuracy': model_accuracy[len(model_accuracy) - 1][1],
        'deviation': model_accuracy[len(model_accuracy) - 1][2]
    }
    return [classifier, model_accuracy[endlen - 1], summary, data]
#LOAD AND BALANCE DATASETS
#####################################################################
#size of each embedding vector
#(100 is used as default to reduce dimensionality)
size=100
classnum=input('how many classes are you training?')
folderlist=list()
a=0
while a != int(classnum):
folderlist.append(input('what is the folder name for class %s?'%(str(a+1))))
a=a+1
name=''
for i in range(len(folderlist)):
if i==0:
name=name+folderlist[i]
else:
name=name+'_'+folderlist[i]
start=time.time()
#modelname=input('what is the name of your classifier?')
modelname=name+'_sc_w2v'
jsonfilename=name+'_w2v.json'
dir3=os.getcwd()+'/train-diseases/spreadsheets/'
model_dir=os.getcwd()+'/models/'
try:
os.chdir(model_dir)
except:
os.mkdir(model_dir)
cur_dir=dir3
testing_set=0.33
try:
os.chdir(dir3)
except:
os.mkdir(dir3)
os.chdir(dir3)
if jsonfilename not in os.listdir():
features_list=list()
transcripts_list=list()
filenames_list=list()
for i in range(len(folderlist)):
name=folderlist[i]
dir_=cur_dir+name
g='error'
while g == 'error':
try:
g='noterror'
print('changing directory: '.upper()+dir_)
os.chdir(dir_)
except:
g='error'
print('directory not recognized')
dir_=input('input directory %s path'%(str(i+1)))
#now go through each directory and featurize the samples and save them as .json files
try:
os.chdir(dir_)
except:
os.mkdir(dir_)
os.chdir(dir_)
# remove any prior featurization
dirlist=os.listdir()
for j in range(len(dirlist)):
if dirlist[j][-5:]=='.json':
os.remove(dirlist[j])
dirlist=os.listdir()
transcripts=list()
filenames=list()
for j in range(len(dirlist)):
try:
filename=dirlist[j][0:-4]+'.json'
if dirlist[j][-4:]=='.wav' and dirlist[j][0:-4]+'.json' not in dirlist:
#transcribe and save as .json data
print(name.upper()+' - transcribing %s'%(dirlist[j]))
transcript=transcribe(dirlist[j])
print(transcript)
transcripts.append(transcript)
data={
'filename':filename,
'transcript':transcript,
}
jsonfile=open(dirlist[j][0:-4]+'.json','w')
json.dump(data,jsonfile)
jsonfile.close()
filenames.append(filename)
else:
#load file if already in directory
if dirlist[j][0:-4]+'.json' in dirlist:
transcripts.append(json.load(open(dirlist[j][0:-4]+'.json'))['transcript'])
filenames.append(filename)
except:
pass
# now train word2vec model
os.chdir(model_dir)
print('training w2v models')
w2v(transcripts,size,modelname+'_%s.pickle'%(str(i+1)))
filenames_list.append(filenames)
transcripts_list.append(transcripts)
# for testing only
# print(transcripts_list[0])
# print(transcripts_list[1])
# randomly shuffle lists
feature_lengths=list()
feature_list2=list()
for i in range(len(transcripts_list)):
one=transcripts_list[i]
random.shuffle(one)
feature_list2.append(one)
feature_lengths.append(len(one))
min_num=np.amin(feature_lengths)
#make sure they are the same length (For later) - this avoid errors
while min_num*len(folderlist) != np.sum(feature_lengths):
for i in range(len(folderlist)):
while len(feature_list2[i])>min_num:
print('%s is %s more than %s, balancing...'%(folderlist[i].upper(),str(len(feature_list2[i])-int(min_num)),'min value'))
feature_list2[i].pop()
feature_lengths=list()
for i in range(len(feature_list2)):
one=feature_list2[i]
feature_lengths.append(len(one))
#rename intermediate varibale back into list
transcript_list=feature_list2
# FEATURIZE
# now that we have an equal number of transcripts in each feature array,
# we can now build an actual feature set from each representative transcript
labels=list()
for i in range(len(folderlist)):
name=folderlist[i]
dir_=cur_dir+name
transcripts=transcript_list[i]
random.shuffle(transcripts)
for j in range(len(transcripts)):
try:
#get numpy array features
sentence=transcripts[j]
embedding=np.array([])
os.chdir(model_dir)
for k in range(len(folderlist)-1):
if k==0:
output=sentence_embedding(sentence,size,modelname+'_%s.pickle'%(str(k+1)))
output2=sentence_embedding(sentence,size,modelname+'_%s.pickle'%(str(k+2)))
embedding_temp=np.append(output,output2)
embedding=np.append(embedding,embedding_temp)
else:
embedding_temp=sentence_embedding(sentence,size,modelname+'_%s.pickle'%(str(k+2)))
embedding=np.append(embedding,embedding_temp)
print(embedding)
os.chdir(dir_)
features=embedding.tolist()
features_list.append(features)
labels.append(name)
except:
pass
os.chdir(cur_dir)
data={
'features':features_list,
'labels':labels,
}
jsonfile=open(jsonfilename,'w')
json.dump(data,jsonfile)
jsonfile.close()
else:
g=json.load(open(jsonfilename))
features_list=g['features']
labels=g['labels']
# TRAIN AND TEST SET GENERATION
#################################################################
# note that this assumes a classification problem based on total number of classes
os.chdir(cur_dir)
#load data - can do this through loading .txt or .json files
#json file must have 'message' field
data=json.loads(open(jsonfilename).read())
classes=list()
for i in range(len(labels)):
if labels[i] not in classes:
classes.append(labels[i])
train_set, test_set, train_labels, test_labels = train_test_split(features_list,
labels,
test_size=testing_set,
random_state=42)
try:
os.chdir(model_dir)
except:
os.mkdir(model_dir)
os.chdir(model_dir)
g=open(modelname+'_training_data.txt','w')
g.write('train labels'+'\n\n'+str(train_labels)+'\n\n')
g.write('test labels'+'\n\n'+str(test_labels)+'\n\n')
g.close()
training_data=open(modelname+'_training_data.txt').read()
# MODEL OPTIMIZATION / SAVE TO DISK
#################################################################
selectedfeature='w2v features from all vocabulary of each class.'
min_num=int(len(features_list)/len(classes))
[w2v_model, w2v_acc, w2v_summary, data]=optimizemodel_sc(train_set,train_labels,test_set,test_labels,modelname,classes,testing_set,min_num,selectedfeature,training_data)
g=open(modelname+'.txt','w')
g.write(w2v_summary)
g.close()
g2=open(modelname+'.json','w')
json.dump(data,g2)
g2.close()
print(w2v_model)
print(w2v_acc)
| [
"noreply@github.com"
] | gridl.noreply@github.com |
e8d97fc8a462b5ec994919115a9da2beb1399e1e | f8967772b9c67b3917285e5c14ff0c26440c75ad | /p_code/for.py | f4319f51509e38e57cfff177e4c91bdcb0e10193 | [] | no_license | mdAshrafuddin/python | 9302b5285f5f83ee4e03c001b2fa1234a54affbf | ae84e5c677a649c9916af38e60d91491b498a36f | refs/heads/main | 2023-02-17T13:56:19.855635 | 2021-01-13T16:45:17 | 2021-01-13T16:45:17 | 329,363,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | # # for target_list in expression_list: suite
# # [else:suite]
# # n = [1, 3, 5, 6]
# for i in range(10):
# print(i)
# if i == 5:
# break
# # # string, tuple or list
# # persion = ["AShraf", 'tanjil', 'tamim', 'asj']
# # for p in persion:
# # print(p, len(p))
# phone_book = {
# "Mom": "123-44-55-665",
# "Dad": "233-33-55"
# }
# print(phone_book['Mom'])
# # users = {
# # "Ashraf": "inactive",
# # "Tanjil": "active",
# # "tamim" : "active"
# # }
# # for user, status in users.copy().items():
# # if status == 'inactive':
# # del users[user]
# # print("After deleting users")
# # for user, status in users.items():
# # print(user, status)
# # users = {
# # "John": "inactive",
# # "Helen": "active",
# # "James": "active", # and so on...
# # }
# # for user, status in users.copy().items():
# # if status == 'inactive':
# # del users[user]
# # print('After deleting users')
# # for user, status in users.copy().items():
# # print(user, status)
# users = {
# "John": "inactive",
# "Helen": "active",
# "James": "active", # and so on...
# }
# # active_users = {}
# # for user, status in users.items():
# # if status == 'active':
# # active_users[user] = status
# # for user, status in active_users.items():
# # print(user, status)
# for user, status in users.copy().items():
# if status == 'inactive':
# del users[user]
# for user, status in users.copy().items():
# print(user, status)
# print(sum(range(1, 10, 5)))
list = ['Ashraf', 'Tanjil', 'Tamim Chowdhury']
for i in list:
print(i, len(i))
a = ['Mary', 'had', 'a', 'little', 'lamb']
for i, v in enumerate(a):
print(i, v)
dic = {'name':'Ashrav', 'age':20}
for k, v in dic.items():
print(k,v)
| [
"mdtanjil01753511918@gmail.com"
] | mdtanjil01753511918@gmail.com |
5f43704fc9571b6699a66ef7258a37a6851a5d9f | d4adf8e72bfc767bb6ad32e81a2d24a0309d90b7 | /Clients/Python/DBSAPI/UserExamples/dbsUpdateLumiSection.py | e7cdc9a58acf08ed3e61d61816c2246ec9745685 | [] | no_license | dmwm/DBSAPI | 5c55dd10161a1bb1fb6f646ca92dd3c4f263a256 | 3117ac30672a1932cef4606fbf7693ce4952b79d | refs/heads/master | 2021-01-21T19:28:22.329112 | 2013-10-30T12:07:54 | 2013-10-30T12:07:54 | 13,984,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | #!/usr/bin/env python
#
# Revision: 1.3 $"
# Id: DBSXMLParser.java,v 1.3 2006/10/26 18:26:04 afaq Exp $"
#
#
import sys
from DBSAPI.dbsApi import DbsApi
from DBSAPI.dbsException import *
from DBSAPI.dbsApiException import *
from DBSAPI.dbsLumiSection import DbsLumiSection
from DBSAPI.dbsOptions import DbsOptionParser
optManager = DbsOptionParser()
(opts,args) = optManager.getOpt()
api = DbsApi(opts.__dict__)
lumi = DbsLumiSection (
LumiSectionNumber=99,
StartEventNumber=333,
EndEventNumber=777,
LumiStartTime=8888,
LumiEndTime=999999,
RunNumber=1,
)
print "updating the lumi section"
try:
api.updateLumiSection (lumi)
print "Result: %s" % lumi
except DbsApiException, ex:
print "Caught API Exception %s: %s " % (ex.getClassName(), ex.getErrorMessage() )
if ex.getErrorCode() not in (None, ""):
print "DBS Exception Error Code: ", ex.getErrorCode()
print "Done"
| [
"giulio.eulisse@cern.ch"
] | giulio.eulisse@cern.ch |
0f71e7d1375a7e6b6ec28cf3c3c93b812af90331 | 33af6185b48bd76f97f0a74390a3a812ee216c78 | /angr-doc/examples/mma_howtouse/solve.py | 590cbe00ab75741931603ffef1f242a6c027b2ee | [
"BSD-2-Clause"
] | permissive | Ruide/angr-dev | dab0cabd907fce47ac698f890c3f3a8b80ab7e2a | 964dc80c758e25c698c2cbcc454ef5954c5fa0a0 | refs/heads/master | 2022-11-10T11:27:13.355024 | 2017-10-07T14:29:09 | 2017-10-07T14:29:09 | 104,417,044 | 0 | 1 | BSD-2-Clause | 2022-10-16T04:48:10 | 2017-09-22T01:35:12 | C | UTF-8 | Python | false | false | 1,329 | py | #!/usr/bin/env python
#
# This binary, from the MMA CTF, was a simple reversing challenge. THe biggest
# challenge was actually *running* this library in Windows. Luckily, with angr,
# we can avoid having to do this!
#
# The approach here is to use angr as a concrete execution engine to call the
# `howtouse` function 45 times, as the array of function pointers in that
# function has 45 entries. The result turned out to be the flag.
#
import angr
import claripy
def main():
# Load the binary. Base addresses are weird when loading binaries directly, so
# we specify it explicitly.
p = angr.Project('howtouse.dll', load_options={'main_opts': {'custom_base_addr': 0x10000000}})
# A "Callable" is angr's FFI-equivalent. It allows you to call binary functions
# from Python. Here, we use it to call the `howtouse` function.
howtouse = p.factory.callable(0x10001130)
# In this binary, the result is a concrete char, so we don't need a symbolic
# state or a solver to get its value.
getch = lambda i: chr(claripy.backends.concrete.convert(howtouse(i)).value)
# Let's call this 45 times, and that's the result!
return ''.join(getch(i) for i in xrange(45))
def test():
assert main() == 'MMA{fc7d90ca001fc8712497d88d9ee7efa9e9b32ed8}'
if __name__ == '__main__':
print main()
| [
"rd.cheung.bupt.sms@gmail.com"
] | rd.cheung.bupt.sms@gmail.com |
fdf6a8987be76bdec8c219e115f22fc45208bdc9 | c41bbe6374d896c6bb36fe6c94a83f474388214e | /train.py | 079bc9699d85b1dd9a766040f43a84bc48c4fd4a | [] | no_license | DableUTeeF/sift_rep | dce8f0d15d80a7d41b7f97480eb13ef744806888 | 922109478bd33e7e3de74e352a99023ef479bcbf | refs/heads/master | 2020-04-24T16:28:24.124287 | 2019-02-22T17:38:40 | 2019-02-22T17:38:40 | 172,108,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | import natthaphon
import models
import datagen
from torch import nn, optim
import json
import os
def lrstep(epoch):
if epoch < 150:
a = 0.05
elif 150 < epoch < 225:
a = 0.005
else:
a = 0.0005
print(f'Epoch: {epoch+1} - returning learning rate {a}')
return a
if __name__ == '__main__':
model = natthaphon.Model(models.ResCift((3, 3, 3)))
rprop = optim.SGD(model.model.parameters(), lr=0.01, momentum=0.9)
model.compile(optimizer=rprop,
loss=nn.MSELoss(),
device='cuda'
)
if os.path.isdir('/home/palm/Pictures'):
train_datagen = datagen.SiftGenerator('/home/palm/Pictures/phuket')
val_datagen = datagen.SiftGenerator('/home/palm/Pictures/phuket')
else:
train_datagen = datagen.SiftGenerator('/root/palm/DATA/mscoco/images/train2017')
val_datagen = datagen.SiftGenerator('/root/palm/DATA/mscoco/images/val2017')
trainloader = natthaphon.Loader(train_datagen, shuffle=True, num_workers=4)
testloader = natthaphon.Loader(val_datagen, shuffle=False, num_workers=4)
schedule = natthaphon.LambdaLR(rprop, lrstep)
history = model.fit_generator(trainloader, 300, validation_data=testloader, schedule=schedule)
with open('logs/ResCift333-1.json', 'w') as wr:
json.dump(history, wr)
| [
"palm22180@gmail.com"
] | palm22180@gmail.com |
129fa6a2abc715f1b035273b3ccc5bae8d183bf9 | fe3bc38d2a9f80a6b258e2c61dbe4557323a1d71 | /corehq/apps/smsbillables/management/commands/add_moz_zero_charge.py | 3d49a8b4d97eb67199c33df40d022eb1d8355668 | [] | no_license | ekush/commcare-hq | 077eb3f525ffb7d1acca0848b9c7678baf776832 | 97a1f55f24f79224724b2ecdc7d5cea87d42f65b | refs/heads/master | 2021-01-17T22:25:09.734898 | 2015-08-25T23:07:49 | 2015-08-25T23:07:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,981 | py | from decimal import Decimal
import logging
from couchdbkit import ResourceNotFound
from django.core.management.base import LabelCommand
from corehq.apps.accounting.models import Currency
from corehq.apps.sms.backend.http_api import HttpBackend
from corehq.apps.sms.models import INCOMING, OUTGOING
from corehq.apps.smsbillables.models import SmsGatewayFee, SmsGatewayFeeCriteria
logger = logging.getLogger('accounting')
def add_moz_zero_charge(orm):
mzn, _ = (orm['accounting.Currency'] if orm else Currency).objects.get_or_create(code='MZN')
sms_gateway_fee_class = orm['smsbillables.SmsGatewayFee'] if orm else SmsGatewayFee
sms_gateway_fee_criteria_class = orm['smsbillables.SmsGatewayFeeCriteria'] if orm else SmsGatewayFeeCriteria
SmsGatewayFee.create_new(
'SISLOG',
INCOMING,
Decimal('0'),
country_code=None,
prefix='',
currency=mzn,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
backend_id = '7ddf3301c093b793c6020ebf755adb6f'
try:
backend = HttpBackend.get(backend_id)
SmsGatewayFee.create_new(
backend.get_api_id(),
OUTGOING,
Decimal('0'),
backend_instance=backend._id,
country_code=None,
prefix='',
currency=mzn,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
logger.info("Updated Moz gateway default fees.")
except ResourceNotFound:
logger.error("[SMS-BILLING] Could not find HttpBackend %s - did not create outgoing Moz gateway default fees."
" Please look into if this is on production, otherwise ignore." % backend_id)
class Command(LabelCommand):
help = "bootstrap MOZ global SMS backend gateway default fees"
args = ""
label = ""
def handle(self, *args, **options):
add_moz_zero_charge(None)
| [
"npellegrino@dimagi.com"
] | npellegrino@dimagi.com |
36012a75fd7fc8f9abd7cf667e21753bedc9c361 | c8362b6beb84577a89b90fa729beec35c094cf96 | /generate_json.py | 9e7aece6f258377d56e8c121de372110e0547509 | [] | no_license | akx/twitter-swagger-api-defs | 651f4117341fb5476e586a93940b5f663ec9e4f5 | 0fbb55527a990df35ebe659d7adcfbcab4ab123a | refs/heads/master | 2016-09-05T22:19:51.440050 | 2013-06-04T16:31:23 | 2013-06-04T16:31:23 | 10,478,163 | 1 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,929 | py | import os
import re
import urlparse
from lxml.html import HTMLParser
from lxml import etree
url_param_re = re.compile(r":([a-z_]+)", re.I)
def inner_text(el):
if not el:
return None
if isinstance(el, list):
el = el[0]
return etree.tostring(el, method="text", encoding="UTF-8").decode("UTF-8").strip().replace("\n", " ")
def parse_tree(tree):
title = inner_text(tree.cssselect("#title"))
if not ("GET" in title or "POST" in title):
return None
is_post = title.startswith("POST")
endpoint = inner_text(tree.cssselect(".field-doc-resource-url div")).replace("format", "{format}")
description = inner_text(tree.cssselect(".doc-updated+div>p"))
url_params = set()
def fix_url_param(m):
var = m.group(1)
url_params.add(var)
return "{%s}" % var
endpoint = url_param_re.sub(fix_url_param, endpoint)
parameters = []
for param in tree.cssselect("div.parameter"):
p_name_raw = inner_text(param.cssselect(".param"))
try:
p_name, required = p_name_raw.rsplit(" ", 1)
except ValueError:
p_name = p_name_raw
required = "required"
p_desc = inner_text(param.cssselect("p"))
parameters.append({
"name": p_name,
"description": p_desc,
"required": (required == "required"),
"dataType": "string", # Can't assume anything else,
"paramType": ("path" if p_name in url_params else ("form" if is_post else "query")),
})
return {
"path": urlparse.urlparse(endpoint).path,
"description": "",
"operations": [{
"httpMethod": "POST" if is_post else "GET",
"nickname": title.lower().replace("/", "_").replace(" ", "_"),
"responseClass": "complex",
"parameters": parameters,
"summary": description,
}]
}
def parse_file(fn):
parser = HTMLParser()
tree = etree.parse(fn, parser=parser).getroot()
return parse_tree(tree)
def parse_from_string(s):
parser = HTMLParser()
tree = etree.fromstring(s, parser=parser)
return parse_tree(tree)
def parse_from_zip():
import zipfile
apis = []
zf = zipfile.ZipFile("apidocs.zip")
for fileinfo in zf.infolist():
if fileinfo.file_size > 0:
apis.append(parse_from_string(zf.read(fileinfo)))
return apis
def main():
from json import dumps
apis = dict((api["path"], api) for api in parse_from_zip() if api).values()
print "%d unique API definitions parsed." % len(apis)
spec = {
"apiVersion": "1.1",
"swaggerVersion": "1.1",
"basePath": "https://api.twitter.com",
"description": u"Twitter",
"apis": sorted(apis, key=lambda api:api["path"]),
}
file("twitter_api.json", "wb").write(dumps(spec, indent=4))
if __name__ == "__main__":
main()
| [
"akx@iki.fi"
] | akx@iki.fi |
5393132b63d05209be58fd2eecf5fdb158d8cfab | 8cf9c32fcad16c4109809447a530b435d290aa25 | /desktop/libs/notebook/src/notebook/connectors/altus.py | c5b7c4646ffa65ca93d8617617fa8b39dfbde67d | [
"Apache-2.0"
] | permissive | veritascl/hue | 38618e923d43bc167be6dd15c9d4b084758655d3 | ceb267982049638d306aff975bf0c9572db1560d | refs/heads/master | 2020-03-26T13:57:57.673750 | 2018-08-15T16:27:06 | 2018-08-15T16:54:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,082 | py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
from datetime import datetime, timedelta
from django.urls import reverse
from django.utils.translation import ugettext as _
from metadata.conf import ALTUS
from navoptapi.api_lib import ApiLib
from desktop.lib.exceptions_renderable import PopupException
LOG = logging.getLogger(__name__)
DATE_FORMAT = "%Y-%m-%d"
def _exec(service, command, parameters=None):
if parameters is None:
parameters = {}
if service == 'analyticdb':
hostname = ALTUS.HOSTNAME_ANALYTICDB.get()
elif service == 'dataeng':
hostname = ALTUS.HOSTNAME_DATAENG.get()
elif service == 'wa':
hostname = ALTUS.HOSTNAME_WA.get()
else:
hostname = ALTUS.HOSTNAME.get()
try:
api = ApiLib(service, hostname, ALTUS.AUTH_KEY_ID.get(), ALTUS.AUTH_KEY_SECRET.get().replace('\\n', '\n'))
LOG.debug('%s : %s' % (command, parameters))
resp = api.call_api(command, parameters)
LOG.info(resp)
json_resp = resp.json()
LOG.debug(json_resp )
return json_resp
except Exception, e:
raise PopupException(e, title=_('Error accessing'))
class IAMApi(): pass
# altus iam list-user-assigned-roles --user=crn:altus:ia
class SdxApi():
def __init__(self, user): pass
def list_namespaces(self):
"""
e.g. returns
[{
u'status': u'CREATED',
u'namespaceName': u'cca-5150-ns',
u'creationDate': u'2018-06-03T23:24:46.125000+00:00',
u'crn': u'crn:altus:sdx:us-west-1:12a0079b-1591-4ca0-b721-a446bda74e67:namespace:cca-5150-ns/f54461af-b241-4f1d-a521-ab3624e841c4'},
...
]
"""
return _exec('sdx', 'listNamespaces')['namespaces']
class DataEngApi():
def __init__(self, user): pass
def list_jobs(self, submitter_crns=None, page_size=None, starting_token=None, job_statuses=None, job_ids=None, job_types=None, creation_date_before=None,
creation_date_after=None, cluster_crn=None, order=None):
args = {}
if creation_date_after is None:
creation_date_after = (datetime.today() - timedelta(days=7)).strftime(DATE_FORMAT)
if submitter_crns:
args['submitterCrns'] = submitter_crns
if page_size is not None:
args['pageSize'] = str(page_size)
if starting_token:
args['startingToken'] = starting_token
if job_statuses:
args['jobStatuses'] = job_statuses
if job_ids:
args['jobIds'] = job_ids
if job_types:
args['jobTypes'] = job_types
if creation_date_before:
args['creationDateBefore'] = creation_date_before
if creation_date_after:
args['creationDateAfter'] = creation_date_after
if cluster_crn:
args['clusterCrn'] = cluster_crn
if order:
args['order'] = order
return _exec('dataeng', 'listJobs', args)
def describe_job(self, job_id):
return _exec('dataeng', 'describeJob', {'jobId', job_id})
def submit_hive_job(self, cluster_name, script, params=None, job_xml=None):
job = {'script': script}
if params:
job['params'] = params
if job_xml:
job['jobXml'] = job_xml
return self.submit_jobs(cluster_name, [{'hiveJob': job}])
def submit_spark_job(self, cluster_name, jars=None, main_class=None, arguments=None, spark_arguments=None, properties_file=None):
job = {
"jars": jars if jars else [],
"applicationArguments": arguments if arguments else [],
#"sparkArguments": "string",
#"propertiesFile": "string"
}
if main_class:
job["mainClass"] = main_class
return self.submit_jobs(cluster_name, [{'sparkJob': job, 'name': None, 'failureAction': 'NONE'}]),
def submit_yarn_job(self):
return _exec('dataeng', 'submitJobs')
def submit_jobs(self, cluster_name, jobs):
return _exec('dataeng', 'submitJobs', {'clusterName': cluster_name, 'jobs': jobs})
def terminate_job(self, job_id):
return _exec('dataeng', 'terminateJob', {'jobId': job_id})
def list_clusters(self, names=None, page_size=None, starting_token=None):
args = {}
if names:
args['clusterNames'] = names
if page_size is not None:
args['pageSize'] = str(page_size)
if starting_token:
args['startingToken'] = starting_token
return _exec('dataeng', 'listClusters', args)
def create_cluster(self):
return _exec('dataeng', 'createCluster')
def delete_cluster(self):
return _exec('dataeng', 'deleteCluster')
def describe_clusters(self):
return _exec('dataeng', 'describeCluster')
class AnalyticDbApi():
def __init__(self, user): pass
def create_cluster(self, cloud_provider, cluster_name, cdh_version, public_key, instance_type, environment_name, workers_group_size=3, namespace_name=None):
# [--cloudera-manager-username <value>]
# [--cloudera-manager-password <value>]
params = { # cloud_provider: AWS, Azure...
'clusterName': cluster_name,
'cdhVersion': cdh_version,
'publicKey': public_key,
'instanceType': instance_type,
'environmentName': environment_name,
'workersGroupSize': workers_group_size
}
if namespace_name:
params['namespaceName'] = namespace_name
return _exec('analyticdb', 'createAWSCluster', params)
def list_clusters(self):
"""
e.g. returns
[{
"status": "CREATED",
"namespaceCrn": "crn:altus:sdx:us-west-1:12a0079b-1591-4ca0-b721-a446bda74e67:namespace:spot-ns/7bdb225f-a7a1-408e-8503-1b3a422cc039",
"workersGroupSize": 4,
"clusterName": "spot",
"environmentType": "AWS",
"secured": false,
"environmentCrn": "crn:altus:environments:us-west-1:12a0079b-1591-4ca0-b721-a446bda74e67:environment:Spot-AWS-dev2/5a6d0ced-c8af-4fa3-9b24-8c5a3ea11cf8",
"securityConfiguration": {
"enabled": false
},
"creationDate": "2018-06-01T13:14:43.530000+00:00",
"crn": "crn:altus:analyticdb:us-west-1:12a0079b-1591-4ca0-b721-a446bda74e67:cluster:spot/70595482-6a46-4a9d-b395-56fcabe079e4",
"instanceType": "r4.4xlarge",
"cdhVersion": "CDH514"
},
...
]
"""
return _exec('analyticdb', 'listClusters')
def submit_hue_query(self, cluster_crn, payload):
return _exec('analyticdb', 'submitHueQuery', {'clusterCrn': cluster_crn, 'payload': payload})
| [
"romain@cloudera.com"
] | romain@cloudera.com |
6b48c0a42b044f51c67f1fc12bca48691e1a126d | f594c17d04a882d80d0cc2bbcb54163fbeca0aa8 | /geotagging/fixes/gis/admin/options.py | f98e796036a2cbe5a280338ce47b51b5a5287cea | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] | permissive | AlphaRomeo13/django-geotagging | 9f3752c16260d4eab3592975ac9930ebe87f9244 | c846bdb82556a64e16ba08730c85921c66ca01eb | refs/heads/master | 2021-01-12T09:22:11.384120 | 2016-12-11T18:23:59 | 2016-12-11T18:23:59 | 76,152,012 | 1 | 0 | null | 2016-12-11T03:30:44 | 2016-12-11T03:30:44 | null | UTF-8 | Python | false | false | 6,161 | py | from django.contrib.admin.options import BaseModelAdmin, InlineModelAdmin, \
StackedInline, TabularInline
from geotagging.fixes.gis.admin.widgets import OpenLayersWidgetFixed as OpenLayersWidget
from django.contrib.gis.gdal import OGRGeomType
from django.contrib.gis.db import models
from django.contrib.contenttypes.generic import GenericInlineModelAdmin, \
GenericStackedInline, GenericTabularInline
class GeoBaseModelAdmin(BaseModelAdmin):
"""
The administration options class for Geographic models. Map settings
may be overloaded from their defaults to create custom maps.
"""
# The default map settings that may be overloaded -- still subject
# to API changes.
default_lon = 0
default_lat = 0
default_zoom = 4
display_wkt = False
display_srid = False
extra_js = []
num_zoom = 18
max_zoom = False
min_zoom = False
units = False
max_resolution = False
max_extent = False
modifiable = True
mouse_position = True
scale_text = True
layerswitcher = True
scrollable = True
map_width = 600
map_height = 400
map_srid = 4326
map_template = 'gis/admin/openlayers.html'
openlayers_url = 'http://openlayers.org/api/2.8/OpenLayers.js'
wms_url = 'http://labs.metacarta.com/wms/vmap0'
wms_layer = 'basic'
wms_name = 'OpenLayers WMS'
debug = False
widget = OpenLayersWidget
# inject Open Street map if GDAL works
from django.contrib.gis import gdal
if gdal.HAS_GDAL:
map_template = 'gis/admin/osm.html'
extra_js = ['http://openstreetmap.org/openlayers/OpenStreetMap.js']
num_zoom = 20
map_srid = 900913
max_extent = '-20037508,-20037508,20037508,20037508'
max_resolution = 156543.0339
units = 'm'
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Overloaded from ModelAdmin so that an OpenLayersWidget is used
for viewing/editing GeometryFields.
"""
if isinstance(db_field, models.GeometryField):
# Setting the widget with the newly defined widget.
kwargs['widget'] = self.get_map_widget(db_field)
return db_field.formfield(**kwargs)
else:
return super(GeoBaseModelAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def get_map_widget(self, db_field):
"""
Returns a subclass of the OpenLayersWidget (or whatever was specified
in the `widget` attribute) using the settings from the attributes set
in this class.
"""
is_collection = db_field._geom in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
if is_collection:
if db_field._geom == 'GEOMETRYCOLLECTION': collection_type = 'Any'
else: collection_type = OGRGeomType(db_field._geom.replace('MULTI', ''))
else:
collection_type = 'None'
class OLMap(self.widget):
template = self.map_template
geom_type = db_field._geom
params = {'default_lon' : self.default_lon,
'default_lat' : self.default_lat,
'default_zoom' : self.default_zoom,
'display_wkt' : self.debug or self.display_wkt,
'geom_type' : OGRGeomType(db_field._geom),
'field_name' : db_field.name,
'is_collection' : is_collection,
'scrollable' : self.scrollable,
'layerswitcher' : self.layerswitcher,
'collection_type' : collection_type,
'is_linestring' : db_field._geom in ('LINESTRING', 'MULTILINESTRING'),
'is_polygon' : db_field._geom in ('POLYGON', 'MULTIPOLYGON'),
'is_point' : db_field._geom in ('POINT', 'MULTIPOINT'),
'num_zoom' : self.num_zoom,
'max_zoom' : self.max_zoom,
'min_zoom' : self.min_zoom,
'units' : self.units, #likely shoud get from object
'max_resolution' : self.max_resolution,
'max_extent' : self.max_extent,
'modifiable' : self.modifiable,
'mouse_position' : self.mouse_position,
'scale_text' : self.scale_text,
'map_width' : self.map_width,
'map_height' : self.map_height,
'srid' : self.map_srid,
'display_srid' : self.display_srid,
'wms_url' : self.wms_url,
'wms_layer' : self.wms_layer,
'wms_name' : self.wms_name,
'debug' : self.debug,
}
return OLMap
# Using the Beta OSM in the admin requires the following:
# (1) The Google Maps Mercator projection needs to be added
# to your `spatial_ref_sys` table. You'll need at least GDAL 1.5:
# >>> from django.contrib.gis.gdal import SpatialReference
# >>> from django.contrib.gis.utils import add_postgis_srs
# >>> add_postgis_srs(SpatialReference(900913)) # Adding the Google Projection
#inlines
class GeoInlineModelAdmin(InlineModelAdmin, GeoBaseModelAdmin):
def _media(self):
"Injects OpenLayers JavaScript into the admin."
media = super(GeoInlineModelAdmin, self)._media()
media.add_js([self.openlayers_url])
media.add_js(self.extra_js)
return media
media = property(_media)
class GeoStackedInline(StackedInline, GeoInlineModelAdmin):
pass
class GeoTabularInline(TabularInline, GeoInlineModelAdmin):
map_width = 300
map_height = 200
#generic inlines
class GeoGenericInlineModelAdmin(GenericInlineModelAdmin, GeoInlineModelAdmin):
pass
class GeoGenericStackedInline(GenericStackedInline, GeoGenericInlineModelAdmin):
pass
class GeoGenericTablularInline(GenericTabularInline, GeoGenericInlineModelAdmin):
map_width = 300
map_height = 200
| [
"pete@lincolnloop.com"
] | pete@lincolnloop.com |
047ee7629aacfb019abe87491bacab106a583b03 | efb7180c05964aee07756dbd4f9982f81559d7e3 | /TradeBot/tradebotapp/admin.py | d151aa6b4efce4984dc6473207db707c1514746f | [] | no_license | ShunnoSaiful/Trade-Bot | 920ba75225d921f54530fc9f0d10a8eb9eabdaaf | d07489dea5fcf1d1d51a918a3127f620682107f2 | refs/heads/master | 2022-11-24T08:22:00.946773 | 2019-10-29T05:20:08 | 2019-10-29T05:20:08 | 218,207,062 | 0 | 0 | null | 2022-11-22T04:18:04 | 2019-10-29T04:54:41 | JavaScript | UTF-8 | Python | false | false | 372 | py | from django.contrib import admin
# Register your models here.
from .models import Question, Answer, Plan, Description, Download, Section, FeatureCategory
admin.site.register(Question)
admin.site.register(Answer)
admin.site.register(Plan)
admin.site.register(Description)
admin.site.register(Download)
admin.site.register(FeatureCategory)
admin.site.register(Section)
| [
"sunnosaiful@gmail.com"
] | sunnosaiful@gmail.com |
f8fbeb582f0bc1475af2524ec4a330871d14c9f0 | 98efe1aee73bd9fbec640132e6fb2e54ff444904 | /loldib/getratings/models/NA/na_maokai/__init__.py | d2c95d6f119c057f7496b2ef8fedde90603c8eda | [
"Apache-2.0"
] | permissive | koliupy/loldib | be4a1702c26546d6ae1b4a14943a416f73171718 | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | refs/heads/master | 2021-07-04T03:34:43.615423 | 2017-09-21T15:44:10 | 2017-09-21T15:44:10 | 104,359,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from .na_maokai_top import *
from .na_maokai_jng import *
from .na_maokai_mid import *
from .na_maokai_bot import *
from .na_maokai_sup import *
| [
"noreply@github.com"
] | koliupy.noreply@github.com |
c2c263958c80fabd4c1e349cb2fe676374cc2a1d | 36fc492ad0d36e9d4a4452007d57c733a84ccaac | /python_programming_drive/mixed/list/List funtion.py | 4b832abee986c92c6691bdb575dd49dc49ed18bb | [] | no_license | mahmudgithub/Core-python | 864568994490f857ba89e2c66fbf10a65a4aea98 | c37cb4218fe1e216a4e3e80544cae262582cf4b5 | refs/heads/master | 2023-04-29T02:14:47.106443 | 2021-05-14T12:36:26 | 2021-05-14T12:36:26 | 346,885,590 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py | # creat a simple list method
list=[2,3,4,5,6,7,8,9]
print(list)
mh=[2,3,4,5,6,7,8,9]
print(mh)
#list into list or sub list
list=[2,[3,[4,4,[5,5,5],4],3],3,4,5,6,7,8,9]
print(list)
# find similar cherecter in a list
list=[2,3,4,5,6,7,85,5,5,5,9]
x=list.count(5)
print("similar cherecter in list is:",x)
#find index number in a list
list=[2,3,4,5,6,7,8,9,"mahmud"]
x=list.index("mahmud") # here show mahmud stand position 8
print(x)
#list shorting means small to bigger list
list=[2,3,4,5,6,7,3,8,3,9]
list.sort()
print(list)
list=['a','d','h','m','a','a']
list.sort()
print(list)
list=["mah","ham","mah","azwy"] # for string sorting ,string letter is countable
list.sort()
print(list)
#remove a charecter from list
list=[1,2,3,4,555,6,"mahmud"]
list.remove(list[6]) # here 6 is index number
print(list)
list=[1,2,3,4,555,6,"mahmud"]
list.remove(555) # here 555 is item position
print(list)
#delet last item from list use pop funtion
list=[1,2,3,4,555,6,"mahmud"]
list.pop() # here pop delet last itm
print(list)
#added external element in a list
list=[1,2,3,4,555,6,"mahmud"]
list.insert(7,9) # here 7 is index number and 9 is adding number
print(list)
# 2D list or Matrix
mh=[[1, 2, 3, 4, 5],
[4, 5, 6, 7, 8],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, 0]
]
for x in mh:
for y in x:
print(y,end=' ') # here end use for new row
print()
mh=[[1, 2, 3, 4, 5],
[4, 5, 6, 7, 8],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, 0]
]
for x in mh:
for y in x:
print(y) # if we do not use end statement
print()
| [
"mahmudhossain838@gmail.com"
] | mahmudhossain838@gmail.com |
75b5118bb015d733e8457babdce95cce98f80cd9 | f177033785079d752502a6f91035e645b52d6416 | /env/lib/python3.7/operator.py | d2a83d688f25048acd75c31699bafd9fd6b13203 | [] | no_license | ousamasama/Django-Exercise-14 | 18d00d563e29df7804664ac33cd1de5966a10126 | ecd023c2915e5c3b85614d857c916cd6a2b36316 | refs/heads/master | 2020-04-17T19:40:05.540532 | 2019-01-24T21:22:32 | 2019-01-24T21:22:32 | 166,873,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | /Users/sama/.pyenv/versions/3.7.1/lib/python3.7/operator.py | [
"ousamasama@gmail.com"
] | ousamasama@gmail.com |
d4b595bf7439bdd8d1befa931a7f24802be5c9ee | 4e02eefa71196aac8d62a61e3d698b1d1257a523 | /豆瓣电影词云/爬取豆瓣影评生成词云.py | 378f32ac017fa8828f836d40976b8a828b7d8472 | [] | no_license | onism7/spider | e7723f9cc8727184b0edf468c8821b57a80af501 | 5a0fe16f367876ab5f63aa7737a9e0a0efdb3b09 | refs/heads/爬虫学习 | 2023-04-04T23:59:02.385924 | 2020-07-05T15:10:08 | 2020-07-05T15:10:08 | 268,724,369 | 1 | 0 | null | 2021-03-30T12:10:27 | 2020-06-02T06:54:24 | null | UTF-8 | Python | false | false | 2,951 | py | import jieba.analyse
import re
from urllib import request
from bs4 import BeautifulSoup
from wordcloud import WordCloud
import matplotlib.pyplot as plt
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'}
# 分析网页函数
def getNowPlayingMovie_list():
res = request.Request("https://movie.douban.com/nowplaying/hangzhou/", headers=headers)
resp = request.urlopen(res)
html_data = resp.read().decode("utf-8")
# 解析网页
soup = BeautifulSoup(html_data, "html.parser")
nowplaying = soup.find_all("div", id="nowplaying")
nowplaying_movie_list = nowplaying[0].find_all("li", class_="list-item")
movie_list = list()
for item in nowplaying_movie_list:
movie_dict = {} # 以字典形式存储每部电影的ID和名称
movie_dict["id"] = item["data-subject"]
for tag_img_item in item.find_all("img"):
movie_dict["name"] = tag_img_item["alt"]
movie_list.append(movie_dict)
return movie_list
# 抓取电影评论函数
def getCommentById(movieId, page_num):
    """Fetch one page (20 entries) of short comments for a Douban movie.

    Args:
        movieId (str): Douban subject id of the movie.
        page_num (int): 1-based page number.

    Returns:
        list of comment strings, or ``False`` when ``page_num`` is not
        positive (kept for backward compatibility with callers).
    """
    if page_num <= 0:
        return False
    offset = (page_num - 1) * 20
    page_url = ("https://movie.douban.com/subject/" + movieId +
                "/comments?start=" + str(offset) + "&limit=20")
    reply = request.urlopen(request.Request(page_url, headers=headers))
    soup = BeautifulSoup(reply.read().decode("utf-8"), "html.parser")
    comments = []
    for block in soup.find_all("div", class_="comment"):
        text = block.find_all("p")[0].find("span").string
        if text is not None:
            comments.append(text)
    return comments
if __name__ == '__main__':
    # Collect the first 10 comment pages of the third now-playing movie.
    commentList = list()
    movie_list = getNowPlayingMovie_list()
    for i in range(10):  # first 10 pages
        num = i + 1
        commentList_temp = getCommentById(movie_list[2]["id"], num)
        commentList.append(commentList_temp)
    # Flatten the list-of-lists into one string.
    # NOTE(review): str() of a whole list includes brackets/quotes; this only
    # works because the regex below keeps Chinese characters exclusively.
    comments = ""
    for k in range(len(commentList)):
        comments = comments + (str(commentList[k])).strip()
    # Keep only CJK characters (drops punctuation and list syntax).
    pattern = re.compile(r"[\u4e00-\u9fa5]+")
    filterdata = re.findall(pattern, comments)
    cleaned_comments = "".join(filterdata)
    # TextRank keyword extraction: top 50 keywords with weights.
    results = jieba.analyse.textrank(cleaned_comments, topK=50, withWeight=True)
    keyword = dict()
    for i in results:
        keyword[i[0]] = i[1]
    print("删除停用词前:", keyword)
    # Render the keyword/weight dict as a word cloud.
    # NOTE(review): requires simhei.ttf in the working directory — confirm.
    wordcloud = WordCloud(font_path="simhei.ttf", background_color="white", max_font_size=80)
    word_frequence = keyword
    myword = wordcloud.fit_words(word_frequence)
    plt.imshow(myword)
    plt.axis("off")
    plt.show()
| [
"1125699801@qq.com"
] | 1125699801@qq.com |
6296f20f61cae4491766f0d1526cc1ef1f53687d | 26be744685a62eb921e4d27f5c98cd4dd795a5b8 | /start_gui_for_nodes.py | 31dd718d98096eecc4a2f0c238c00731633603ff | [] | no_license | AlexDobrushskiy/python_layer | 51c7b736eb8997ede2d24899020b565ca11c4153 | 5ed47f2eacf920cfb285d0763a9170abfe5f5e95 | refs/heads/master | 2023-01-11T19:57:39.854724 | 2019-03-06T11:29:03 | 2019-03-06T11:29:03 | 173,755,304 | 0 | 0 | null | 2023-01-01T05:00:06 | 2019-03-04T14:00:08 | Python | UTF-8 | Python | false | false | 1,234 | py | # -*- coding: utf-8 -*-
import multiprocessing
import logging
import sys
from core_modules.blackbox_modules.keys import id_keypair_generation_func
from core_modules.masternode_discovery import discover_nodes
from client_prototype.cefpython.cefpython import start_cefpython
def initlogging():
    """Create (or fetch) this module's logger with a DEBUG console handler.

    Returns:
        logging.Logger: logger named after this module, emitting DEBUG and
        above to stderr with a timestamped format.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    # Guard against duplicate handlers: logging.getLogger returns the same
    # object on every call, so the previous version stacked one more
    # StreamHandler per call, duplicating every log line.
    if not logger.handlers:
        formatter = logging.Formatter(' %(asctime)s - ' + __name__ + ' - %(levelname)s - %(message)s')
        consolehandler = logging.StreamHandler()
        consolehandler.setFormatter(formatter)
        logger.addHandler(consolehandler)
    return logger
if __name__ == "__main__":
    # Base directory holding the masternode configurations (first CLI arg).
    basedir = sys.argv[1]
    # discover nodes
    logger = initlogging()
    # NOTE(review): privkey/pubkey are generated but never used below — confirm
    # whether this side effect is still required.
    privkey, pubkey = id_keypair_generation_func()
    # Launch one browser process (CEF tab) per discovered masternode.
    browsers = []
    for settings in discover_nodes(basedir):
        # Embed RPC credentials in the URL: http://user:pass@ip:port
        url = "http://%s:%s@%s:%s" % (settings["rpcuser"], settings["rpcpassword"], settings["ip"], settings["pyhttpadmin"])
        p = multiprocessing.Process(target=start_cefpython, args=(settings["nodename"], url))
        p.start()
        browsers.append(p)
    # Block until the operator asks to shut everything down.
    input("Press ENTER to stop browsers")
    for browser in browsers:
        browser.terminate()
| [
"a.dobrushskiy@gmail.com"
] | a.dobrushskiy@gmail.com |
696f5774d6985530bf8dfb8702b191a2694bb9eb | 8000eb88e4ebc6f4357632a58d3b2f4c67a5aa46 | /flask测试/wenjian/测试类2.py | dbbcc24aaa89cca305b6e0d4c19e1f366a3492e2 | [] | no_license | cao-xing/project | 0f38b86716bce7c8512283ee6203dff54bc17504 | 26db564c1bdb903796ae2c1213376239ae39e4c2 | refs/heads/master | 2021-09-05T01:03:45.385542 | 2018-01-23T07:58:45 | 2018-01-23T07:58:45 | 114,438,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def helloworld():
    """Print a short Chinese demo message identifying test module 2."""
    message = "这是测试类2"
    print(message)
"38168722@qq.com"
] | 38168722@qq.com |
983aa700f8495ac6e419b73ea7de4a40ea515472 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_95/925.py | 469e68f78efb9387f7b68d65ff2ee906763fbb7b | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | raw_alphabet = "yhesocvxduiglbkrztnwjpfmaq"
# Substitution-cipher table: plaintext letter ('a'..'z') -> output letter
# taken positionally from raw_alphabet.
alphabet = {}
for i in xrange(len(raw_alphabet)):
    alphabet[chr(ord('a') + i)] = raw_alphabet[i]
def decrypt(text):
    """Map each letter of *text* through the substitution ``alphabet``.

    Letters present in ``alphabet`` are replaced by their mapping, newlines
    are dropped, and every other character passes through unchanged.
    """
    pieces = []
    for c in text:
        # `c in alphabet` replaces dict.has_key(): identical semantics in
        # Python 2 and also valid in Python 3 (has_key was removed there).
        if c in alphabet:
            pieces.append(alphabet[c])
        elif c != "\n":
            pieces.append(c)
    # join once instead of repeated string += (quadratic on long lines)
    return "".join(pieces)
# Python 2 script body (xrange, print statement).
fin = open("tongues.txt", "r")
# NOTE(review): rebinding `fin` to the line list drops the file handle
# without closing it — harmless for a one-shot contest script.
fin = fin.readlines()
inputs = int(fin[0])
fin.pop(0)
for i in xrange(inputs):
    output = decrypt(fin[i])
    print ("Case #%d: " % (i+1)) + output
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
5a5dfeb589e1fc91b46155e11a6c53bb8daeb886 | a04b5e853fda50a1af5d6d0102624770958898c4 | /piecash/core/transaction.py | 906e37a6ba0974188460364bc506004c3f3913eb | [
"MIT"
] | permissive | drummonds/piecash | 7f7aa841d6d172a8aa6e82aa2f8a377fa2cb43d1 | 82b8e5dad79a749136c9b148c35477a06b1c7692 | refs/heads/master | 2020-12-25T22:36:41.616805 | 2016-03-19T10:38:27 | 2016-03-19T10:38:27 | 53,066,050 | 0 | 0 | null | 2016-03-03T16:36:18 | 2016-03-03T16:36:18 | null | UTF-8 | Python | false | false | 19,144 | py | import datetime
import uuid
from collections import defaultdict
from decimal import Decimal
from sqlalchemy import Column, VARCHAR, ForeignKey, BIGINT, INTEGER
from sqlalchemy.orm import relation, validates, foreign
from sqlalchemy.orm.base import NEVER_SET
from .._common import CallableList, GncImbalanceError
from .._common import GncValidationError, hybrid_property_gncnumeric, Recurrence
from .._declbase import DeclarativeBaseGuid
from ..sa_extra import _Date, _DateTime, mapped_to_slot_property, pure_slot_property
class Split(DeclarativeBaseGuid):
    """
    A GnuCash Split.

    .. note::

        A split used in a scheduled transaction has its main attributes in form of slots.

    Attributes:
        transaction(:class:`piecash.core.transaction.Transaction`): transaction of the split
        account(:class:`piecash.core.account.Account`): account of the split
        lot(:class:`piecash.business.Lot`): lot to which the split pertains
        memo(str): memo of the split
        value(:class:`decimal.Decimal`): amount express in the currency of the transaction of the split
        quantity(:class:`decimal.Decimal`): amount express in the commodity of the account of the split
        reconcile_state(str): 'n', 'c' or 'y'
        reconcile_date(:class:`datetime.datetime`): time
        action(str): describe the type of action behind the split (free form string but with dropdown in the GUI
    """
    __tablename__ = 'splits'
    __table_args__ = {}

    # column definitions
    # the transaction_guid is not mandatory at construction time because it can be set through a
    # tr.splits.append(...) operation; however, in the validation of the object, we raise an error
    # if there is no transaction set at that time
    transaction_guid = Column('tx_guid', VARCHAR(length=32), ForeignKey('transactions.guid'), index=True)
    account_guid = Column('account_guid', VARCHAR(length=32), ForeignKey('accounts.guid'), nullable=False, index=True)
    memo = Column('memo', VARCHAR(length=2048), nullable=False)
    action = Column('action', VARCHAR(length=2048), nullable=False)
    reconcile_state = Column('reconcile_state', VARCHAR(length=1), nullable=False)
    reconcile_date = Column('reconcile_date', _DateTime())

    # value is stored as a num/denom pair and exposed as a Decimal
    _value_num = Column('value_num', BIGINT(), nullable=False)
    _value_denom = Column('value_denom', BIGINT(), nullable=False)
    _value_denom_basis = None
    value = hybrid_property_gncnumeric(_value_num, _value_denom)
    _quantity_num = Column('quantity_num', BIGINT(), nullable=False)
    _quantity_denom = Column('quantity_denom', BIGINT(), nullable=False)
    _quantity_denom_basis = None
    quantity = hybrid_property_gncnumeric(_quantity_num, _quantity_denom)

    lot_guid = Column('lot_guid', VARCHAR(length=32), ForeignKey('lots.guid'))

    # relation definitions
    account = relation('Account', back_populates='splits')
    lot = relation('Lot', back_populates='splits')
    transaction = relation('Transaction', back_populates='splits')

    def __init__(self,
                 account,
                 value,
                 quantity=None,
                 transaction=None,
                 memo="",
                 action="",
                 reconcile_date=None,
                 reconcile_state="n",
                 lot=None,
                 ):
        self.transaction = transaction
        self.account = account
        self.value = value
        # when no quantity is given, assume a same-commodity split
        self.quantity = value if quantity is None else quantity
        self.memo = memo
        self.action = action
        self.reconcile_date = reconcile_date
        self.reconcile_state = reconcile_state
        self.lot = lot

    def __unirepr__(self):
        try:
            cur = self.transaction.currency.mnemonic
            acc = self.account
            com = acc.commodity.mnemonic
            if com == "template":
                # case of template split from scheduled transaction
                sched_xaction = self["sched-xaction"]
                credit = sched_xaction["credit-formula"].value
                debit = sched_xaction["debit-formula"].value
                return u"SplitTemplate<{} {} {}>".format(sched_xaction["account"].value,
                                                         "credit={}".format(credit) if credit else "",
                                                         "debit={}".format(debit) if debit else "",
                                                         )
            elif cur == com:
                # case of same currency split
                return u"Split<{} {} {}>".format(acc,
                                                 self.value, cur)
            else:
                # case of non currency split
                return u"Split<{} {} {} [{} {}]>".format(acc,
                                                         self.value, cur,
                                                         self.quantity, com)
        except AttributeError:
            # relations not yet wired (e.g. transaction not set)
            return u"Split<{}>".format(self.account)

    def object_to_validate(self, change):
        """Yield the objects whose validate() must run when this split changes."""
        yield self
        if self.transaction:
            yield self.transaction
        if self.lot:
            yield self.lot

    def validate(self):
        """Check split consistency and normalise value/quantity precisions."""
        old = self.get_all_changes()

        if old["STATE_CHANGES"][-1] == "deleted":
            return

        if '_quantity_num' in old or '_value_num' in old:
            # amounts changed => ask the transaction to re-check its balance
            self.transaction._recalculate_balance = True

        if self.transaction_guid is None:
            raise GncValidationError("The split is not linked to a transaction")

        if self.transaction.currency == self.account.commodity:
            if self.quantity != self.value:
                # fix: error message used to read "diffeerent"
                raise GncValidationError("The split has a quantity different from value "
                                         "while the transaction currency and the account commodity is the same")
        else:
            if self.quantity is None:
                raise GncValidationError("The split quantity is not defined while the split is on a commodity different from the transaction")
            if self.quantity.is_signed() != self.value.is_signed():
                raise GncValidationError("The split quantity has not the same sign as the split value")

        # everything is fine, let us normalise the value with respect to the currency/commodity precisions
        self._quantity_denom_basis = self.account.commodity_scu
        self._value_denom_basis = self.transaction.currency.fraction

        if self.transaction.currency != self.account.commodity:
            # let us also add a Price
            # TODO: check if a price already exists at that time
            from piecash import Price

            value = (self.value / self.quantity).quantize(Decimal("0.000001"))
            try:
                # find existing price if any
                pr = self.book.prices(commodity=self.account.commodity,
                                      currency=self.transaction.currency,
                                      date=self.transaction.post_date,
                                      type="transaction",
                                      source="user:split-register")
                pr.value = value
            except KeyError:
                pr = Price(commodity=self.account.commodity,
                           currency=self.transaction.currency,
                           date=self.transaction.post_date,
                           value=value,
                           type="transaction",
                           source="user:split-register")

            # and an action if not yet defined
            if self.action == "":
                self.action = "Sell" if self.quantity.is_signed() else "Buy"
class Transaction(DeclarativeBaseGuid):
    """
    A GnuCash Transaction.

    Attributes:
        currency (:class:`piecash.core.commodity.Commodity`): currency of the transaction. This attribute is
            write-once (i.e. one cannot change it after being set)
        description (str): description of the transaction
        enter_date (:class:`datetime.datetime`): time at which transaction is entered
        post_date (:class:`datetime.datetime`): day on which transaction is posted
        num (str): user provided transaction number
        splits (list of :class:`Split`): list of the splits of the transaction
        scheduled_transaction (:class:`ScheduledTransaction`): scheduled transaction behind the transaction
        notes (str): notes on the transaction (provided via a slot)
    """
    __tablename__ = 'transactions'
    __table_args__ = {}

    # column definitions
    currency_guid = Column('currency_guid', VARCHAR(length=32), ForeignKey('commodities.guid'), nullable=False)
    num = Column('num', VARCHAR(length=2048), nullable=False)
    _post_date = Column('post_date', _DateTime, index=True)
    # post_date is mirrored into the "date-posted" slot (date part only)
    post_date = mapped_to_slot_property(_post_date,
                                        slot_name="date-posted",
                                        slot_transform=lambda x: x.date() if x else None)
    enter_date = Column('enter_date', _DateTime)
    description = Column('description', VARCHAR(length=2048))
    notes = pure_slot_property('notes')
    scheduled_transaction = pure_slot_property('from-sched-xaction')

    # relation definitions
    currency = relation('Commodity',
                        back_populates='transactions',
                        )
    splits = relation('Split',
                      back_populates="transaction",
                      cascade='all, delete-orphan',
                      collection_class=CallableList,
                      )

    def __init__(self,
                 currency,
                 description="",
                 notes=None,
                 splits=None,
                 enter_date=None,
                 post_date=None,
                 num="",
                 ):
        assert enter_date is None or isinstance(enter_date, datetime.datetime), "enter_date should be a datetime object"
        assert post_date is None or isinstance(post_date, datetime.datetime), "post_date should be a datetime object"
        self.currency = currency
        self.description = description
        # default dates: now (without microseconds) / today at midnight
        self.enter_date = enter_date if enter_date else datetime.datetime.today().replace(microsecond=0)
        self.post_date = post_date if post_date \
            else datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
        self.num = num
        if notes is not None:
            self.notes = notes
        if splits:
            self.splits = splits

    def __unirepr__(self):
        return u"Transaction<[{}] '{}' on {:%Y-%m-%d}{}>".format(self.currency.mnemonic,
                                                                 self.description,
                                                                 self.post_date,
                                                                 " (from sch tx)" if self.scheduled_transaction else "")

    def object_to_validate(self, change):
        # only the transaction itself needs validation (splits validate themselves)
        yield self

    def validate(self):
        """Validate currency, placeholder accounts and split balance."""
        old = self.get_all_changes()

        if old["STATE_CHANGES"][-1] == "deleted":
            return

        if self.currency.namespace != "CURRENCY":
            raise GncValidationError("You are assigning a non currency commodity to a transaction")

        # check all accounts related to the splits of the transaction are not placeholder(=frozen)
        for sp in self.splits:
            if sp.account.placeholder != 0:
                raise GncValidationError("Account '{}' used in the transaction is a placeholder".format(sp.account))

        # check same currency
        if "currency" in old and old["currency"] is not NEVER_SET:
            raise GncValidationError("You cannot change the currency of a transaction once it has been set")

        # validate the splits (flag is set by Split.validate when amounts change)
        if hasattr(self, "_recalculate_balance"):
            del self._recalculate_balance
            value_imbalance, quantity_imbalances = self.calculate_imbalances()
            if value_imbalance:
                # raise exception instead of creating an imbalance entry as probably an error
                # (in the gnucash GUI, another decision taken because need of "save unfinished transaction")
                raise GncImbalanceError("The transaction {} is not balanced on its value".format(self))
            if any(quantity_imbalances.values()) and self.book.use_trading_accounts:
                self.normalize_trading_accounts()

    def calculate_imbalances(self):
        """Calculate value and quantity imbalances of a transaction"""
        value_imbalance = Decimal(0)  # hold imbalance on split.value
        quantity_imbalances = defaultdict(Decimal)  # hold imbalance on split.quantity per cdty

        # collect imbalance information
        for sp in self.splits:
            value_imbalance += sp.value
            quantity_imbalances[sp.account.commodity] += sp.quantity

        return value_imbalance, quantity_imbalances

    def normalize_trading_accounts(self):
        """Rebalance per-commodity quantities through TRADING accounts."""
        # collect imbalance information
        classic_splits = defaultdict(list)
        trading_splits = defaultdict(list)
        trading_target_value = defaultdict(Decimal)
        trading_target_quantity = defaultdict(Decimal)
        for sp in self.splits:
            cdty = sp.account.commodity
            if sp.account.type == "TRADING":
                trading_splits[cdty].append(sp)
            else:
                classic_splits[cdty].append(sp)
                trading_target_value[cdty] += sp.value
                trading_target_quantity[cdty] += sp.quantity

        # NOTE(review): `root` appears unused below — confirm before removing
        root = self.book.root_account
        # imbalance in quantities to be settled using trading accounts
        for cdty, v in trading_target_value.items():
            q = trading_target_quantity[cdty]

            # if commodity is balanced, do not do anything
            if (v == q == 0): continue

            # otherwise, look if there is some trading imbalance (ie a split with the trading account already exists!)
            if cdty in trading_splits:
                # and adjust the related split to rebalance
                sp, = trading_splits[cdty]
                sp.value -= v
                sp.quantity -= q
            else:
                # otherwise, we must create the split related to the trading account
                # assume trading account exists
                t_acc = self.book.trading_account(cdty)
                sp = Split(account=t_acc,
                           value=-v,
                           quantity=-q,
                           transaction=self,
                           )
class ScheduledTransaction(DeclarativeBaseGuid):
    """
    A GnuCash Scheduled Transaction.

    Attributes:
        adv_creation (int) : days to create in advance (0 if disabled)
        adv_notify (int) : days to notify in advance (0 if disabled)
        auto_create (bool) : create the transaction automatically when due
        auto_notify (bool) : notify the user on automatic creation
        enabled (bool) : whether the schedule is active
        start_date (:class:`datetime.datetime`) : date to start the scheduled transaction
        last_occur (:class:`datetime.datetime`) : date of last occurrence of the scheduled transaction
        end_date (:class:`datetime.datetime`) : date to end the scheduled transaction (num/rem_occur should be 0)
        instance_count (int) : number of instances created so far
        name (str) : name of the scheduled transaction
        num_occur (int) : number of occurrences in total (end_date should be null)
        rem_occur (int) : number of remaining occurrences (end_date should be null)
        template_account (:class:`piecash.core.account.Account`): template account of the transaction
    """
    __tablename__ = 'schedxactions'
    __table_args__ = {}

    # column definitions
    # guid is declared explicitly here (instead of inherited) to attach the default
    guid = Column('guid', VARCHAR(length=32), primary_key=True, nullable=False, default=lambda: uuid.uuid4().hex)
    name = Column('name', VARCHAR(length=2048))
    enabled = Column('enabled', INTEGER(), nullable=False)
    start_date = Column('start_date', _Date())
    end_date = Column('end_date', _Date())
    last_occur = Column('last_occur', _Date())
    num_occur = Column('num_occur', INTEGER(), nullable=False)
    rem_occur = Column('rem_occur', INTEGER(), nullable=False)
    auto_create = Column('auto_create', INTEGER(), nullable=False)
    auto_notify = Column('auto_notify', INTEGER(), nullable=False)
    adv_creation = Column('adv_creation', INTEGER(), nullable=False)
    adv_notify = Column('adv_notify', INTEGER(), nullable=False)
    instance_count = Column('instance_count', INTEGER(), nullable=False)
    template_act_guid = Column('template_act_guid', VARCHAR(length=32), ForeignKey('accounts.guid'), nullable=False)

    # relation definitions
    template_account = relation('Account')
    # the Recurrence row is keyed on this schedxaction's guid (obj_guid)
    recurrence = relation('Recurrence',
                          primaryjoin=guid == foreign(Recurrence.obj_guid),
                          cascade='all, delete-orphan',
                          uselist=False,
                          )

    def __unirepr__(self):
        return u"ScheduledTransaction<'{}' {}>".format(self.name, self.recurrence)
class Lot(DeclarativeBaseGuid):
    """
    A GnuCash Lot. Each lot is linked to an account. Splits in this account can be associated to a Lot. Whenever
    the balance of the splits goes to 0, the Lot is closed (otherwise it is opened)

    Attributes:
        is_closed (int) : 1 if lot is closed, 0 otherwise
        account (:class:`piecash.core.account.Account`): account of the Lot
        splits (:class:`piecash.core.transaction.Split`): splits associated to the Lot
    """
    __tablename__ = 'lots'
    __table_args__ = {}

    # column definitions
    account_guid = Column('account_guid', VARCHAR(length=32), ForeignKey('accounts.guid'))
    is_closed = Column('is_closed', INTEGER(), nullable=False)

    title = pure_slot_property('title')
    notes = pure_slot_property('notes')

    # relation definitions
    account = relation('Account', back_populates='lots', )
    splits = relation('Split',
                      back_populates='lot',
                      collection_class=CallableList,
                      )

    def __init__(self,
                 title,
                 account,
                 notes="",
                 splits=None,
                 is_closed=0):
        self.title = title
        self.account = account
        self.notes = notes
        if splits:
            self.splits[:] = splits
        self.is_closed = is_closed

    @validates("splits", "account")
    def check_no_change_if_lot_is_close(self, key, value):
        # A closed lot is frozen: reject reassigning its account or
        # adding/removing splits.
        # fix: closing parenthesis was missing from the message
        if self.is_closed:
            raise ValueError("Lot is closed and cannot be changed (adding splits or changing account)")
        return value

    def object_to_validate(self, change):
        yield self

    def validate(self):
        """Check that all splits of the lot belong to the lot's account."""
        # fix: message used to say "not in the same commodity" although the
        # check compares accounts
        for sp in self.splits:
            if sp.account != self.account:
                raise ValueError("Split {} is not in the same account as the lot {}".format(sp, self))

    def __unirepr__(self):
        return u"Lot<'{}' on {}>".format(self.title, self.account.name)
| [
"sdementen@gmail.com"
] | sdementen@gmail.com |
783baa95357f68bfd54d8de1f7a78e0eb040e477 | d2c80cd70f3220165c7add7ed9a103c0ed1ab871 | /python/HOMEWORK/3rd-4th_Session/Answers/Class/1/T-1.py | 613da30acc5698289383cf02ced274be749c1997 | [] | no_license | nervaishere/DashTeam | 2a786af8a871200d7facfa3701a07f97230b706e | a57b34a601f74b06a7be59f2bfe503cbd2a6c15f | refs/heads/master | 2023-08-24T12:24:18.081164 | 2021-10-09T21:10:54 | 2021-10-09T21:10:54 | 393,689,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | x=input("Enter first Number:")
print(int(float(x)))
| [
"athenajafari34@gmail.com"
] | athenajafari34@gmail.com |
cf68c743a79485c7d70e3f054c624e33b93dcc81 | c67627680973129e8f3cefa8778c12366a11621d | /test/unit/test_cli_utils.py | 66c58c5a02653b51c83d59eb27445f65f8f79dd7 | [
"Apache-2.0"
] | permissive | eprtvea/curator | f3fb0d5a02f3ed33c23da5153f8e192609b75f62 | f6d25bca20d66437d367956c812988c6d88431c4 | refs/heads/master | 2020-12-01T01:08:35.279466 | 2016-02-11T00:34:22 | 2016-02-11T00:34:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,565 | py | from datetime import datetime, timedelta
from unittest import TestCase
from mock import Mock
import sys
import click
from click import testing as clicktest
import logging
logger = logging.getLogger(__name__)
import curator
# ---------------------------------------------------------------------------
# Shared fixtures for the curator CLI unit tests: canned Elasticsearch
# responses (alias lookups, repository listings, snapshot status payloads)
# and request bodies used when mocking client calls.
# ---------------------------------------------------------------------------
named_indices = [ "index1", "index2" ]
named_alias = 'alias_name'
alias_retval = { "pre_aliased_index": { "aliases" : { named_alias : { }}}}
aliases_retval = {
    "index1": { "aliases" : { named_alias : { } } },
    "index2": { "aliases" : { named_alias : { } } },
    }
fake_fail = Exception('Simulated Failure')
repo_name = 'repo_name'
test_repo = {repo_name: {'type': 'fs', 'settings': {'compress': 'true', 'location': '/tmp/repos/repo_name'}}}
# NOTE(review): '/rmp/...' below looks like a typo for '/tmp/...'; harmless
# for mocked tests since the path is never touched — confirm before fixing.
test_repos = {'TESTING': {'type': 'fs', 'settings': {'compress': 'true', 'location': '/tmp/repos/TESTING'}},
              repo_name: {'type': 'fs', 'settings': {'compress': 'true', 'location': '/rmp/repos/repo_name'}}}
snap_name = 'snap_name'
# single successful snapshot covering named_indices
snapshot = { 'snapshots': [
    {
        'duration_in_millis': 60000, 'start_time': '2015-01-01T00:00:00.000Z',
        'shards': {'successful': 4, 'failed': 0, 'total': 4},
        'end_time_in_millis': 0, 'state': 'SUCCESS',
        'snapshot': snap_name, 'end_time': '2015-01-01T00:00:01.000Z',
        'indices': named_indices,
        'failures': [], 'start_time_in_millis': 0
    }]}
# two successful snapshots (snap_name + 'snapshot2')
snapshots = { 'snapshots': [
    {
        'duration_in_millis': 60000, 'start_time': '2015-01-01T00:00:00.000Z',
        'shards': {'successful': 4, 'failed': 0, 'total': 4},
        'end_time_in_millis': 0, 'state': 'SUCCESS',
        'snapshot': snap_name, 'end_time': '2015-01-01T00:00:01.000Z',
        'indices': named_indices,
        'failures': [], 'start_time_in_millis': 0
    },
    {
        'duration_in_millis': 60000, 'start_time': '2015-01-01T00:00:02.000Z',
        'shards': {'successful': 4, 'failed': 0, 'total': 4},
        'end_time_in_millis': 0, 'state': 'SUCCESS',
        'snapshot': 'snapshot2', 'end_time': '2015-01-01T00:00:03.000Z',
        'indices': named_indices,
        'failures': [], 'start_time_in_millis': 0
    }]}
# snapshot creation request bodies: all indices vs. an explicit list
snap_body_all = {
    "ignore_unavailable": False,
    "include_global_state": True,
    "partial": False,
    "indices" : "_all"
}
snap_body = {
    "ignore_unavailable": False,
    "include_global_state": True,
    "partial": False,
    "indices" : "index1,index2"
}
class TestExitMsg(TestCase):
    """curator.exit_msg exits with status 0 on success and 1 on failure."""

    def test_exit_msg_positive(self):
        """A truthy argument exits with code 0."""
        with self.assertRaises(SystemExit) as caught:
            curator.exit_msg(True)
        self.assertEqual(caught.exception.code, 0)

    def test_exit_msg_negative(self):
        """A falsy argument exits with code 1."""
        with self.assertRaises(SystemExit) as caught:
            curator.exit_msg(False)
        self.assertEqual(caught.exception.code, 1)
class TestCheckVersion(TestCase):
    """curator.check_version accepts supported ES versions and exits otherwise."""

    @staticmethod
    def _client_reporting(version):
        """Build a mocked client whose info() reports *version*."""
        es = Mock()
        es.info.return_value = {'version': {'number': version}}
        return es

    def test_check_version_positive(self):
        self.assertIsNone(curator.check_version(self._client_reporting('1.1.1')))

    def test_check_version_less_than(self):
        with self.assertRaises(SystemExit) as caught:
            curator.check_version(self._client_reporting('0.90.3'))
        self.assertEqual(caught.exception.code, 1)

    def test_check_version_greater_than(self):
        with self.assertRaises(SystemExit) as caught:
            curator.check_version(self._client_reporting('3.0.1'))
        self.assertEqual(caught.exception.code, 1)
class TestCheckMaster(TestCase):
    """check_master(master_only=True) proceeds on the elected master node only."""

    @staticmethod
    def _cluster(local_nodes, elected_master):
        """Mocked client: *local_nodes* dict from nodes.info, given master."""
        es = Mock()
        es.nodes.info.return_value = {'nodes': local_nodes}
        es.cluster.state.return_value = {"master_node": elected_master}
        return es

    def test_check_master_positive(self):
        """Local node is the elected master: returns None (no exit)."""
        es = self._cluster({"foo": "bar"}, "foo")
        self.assertIsNone(curator.check_master(es, master_only=True))

    def test_check_master_negative(self):
        """Local node is not the master: exits quietly with code 0."""
        es = self._cluster({"bad": "mojo"}, "foo")
        with self.assertRaises(SystemExit) as caught:
            curator.check_master(es, master_only=True)
        self.assertEqual(caught.exception.code, 0)
class TestInList(TestCase):
    """curator.in_list keeps only values present in the second list."""

    def test_in_list_positive(self):
        """All requested values exist: returned unchanged."""
        wanted = ['a', 'b']
        available = ['a', 'b', 'c', 'd']
        self.assertEqual(wanted, curator.in_list(wanted, available))

    def test_in_list_negative(self):
        """Missing values ('q') are silently dropped."""
        wanted = ['a', 'b', 'q']
        available = ['a', 'b', 'c', 'd']
        self.assertEqual(['a', 'b'], curator.in_list(wanted, available))
class TestGetClient(TestCase):
    """get_client must fail when SSL is requested with a certificate but no server."""

    def test_certificate_logic(self):
        unused_client = Mock()
        with self.assertRaises(SystemExit) as caught:
            curator.get_client(use_ssl=True, certificate='mycert.pem')
        # NOTE(review): reading sys.stdout.getvalue() relies on the runner
        # replacing stdout with a buffered stream (nose-style capture).
        self.assertEqual(sys.stdout.getvalue(), 'ERROR: Connection failure.\n')
        self.assertEqual(caught.exception.code, 1)
| [
"aaron@mildensteins.com"
] | aaron@mildensteins.com |
d4179f61ac0365aa5d0843892018d942a29ed243 | e972dc486e62152981177f85b5f9cff919ac0867 | /sams/tmp/category_db.py | 744d848a0aa3b864e1139359886c0c5aed4d9f96 | [] | no_license | yeongsunpark/cute | d81b9b03f747f65bed742b10b2f9a59f69efea96 | d69f918f9a1f1d6db70bc62272fc0ce582d7bf50 | refs/heads/master | 2020-03-27T12:43:41.728918 | 2019-04-29T04:41:47 | 2019-04-29T04:41:47 | 146,564,948 | 0 | 2 | null | 2018-11-06T07:45:59 | 2018-08-29T07:52:20 | Python | UTF-8 | Python | false | false | 5,664 | py | import os, sys
import pymysql
import logging
import ys.cute.sams.ys_logger as ys_logger
import concurrent.futures
sys.path.append(os.path.abspath('..'))
logger = logging.getLogger('root')
logger.setLevel("INFO")
logger.addHandler(ys_logger.MyHandler())
logger.info("Finish setting logger")
class SquadDb():
    """Export random news rows from the cmdb.NEWS MySQL table to c_change.txt,
    remapping the fine-grained Korean category labels to 5 coarse ones."""

    def __init__(self):
        # WARNING(review): hard-coded DB host/credentials — move to config.
        self.db_cnf_dict = {"host": '10.122.64.71', "usr": "root", "pwd": "root",
                            "db": "cmdb", "encoding": "utf8"}
        self.con = None  # pymysql connection, set by connect_db()
        self.cur = None  # cursor on self.con
        self.connect_db()
        # output file: one tab-separated row per exported article
        self.f2 = open("c_change.txt", "w", newline="\n")
        # self.f3 = open("/home/msl/ys/cute/sams/tmp/180824_17년11월1일이후기사랜덤3만개(다씀)_id만.txt","r")
        self.q_id_1 = ""
        self.q_id_2 = ""
        self.question_1 = ""
        self.question_2 = ""
        self.result_list = []
        # when True, change() limits the SELECT to 10 rows for quick runs
        self.test = False

    def easy_mysql(self, cfg_dict, encoding='utf8', autocommit=False):
        """Open a pymysql connection/cursor from a config dict."""
        self.con = pymysql.connect(host=cfg_dict['host'], user=cfg_dict['usr'],
                                   passwd=cfg_dict['pwd'], db=cfg_dict['db'], charset=encoding)
        self.cur = self.con.cursor()
        if autocommit is True:
            self.con.autocommit(True)

    def connect_db(self):
        """Connect to the project DB; silently ignores connection failures."""
        try:  # try to connect to project db
            cfg_dict = dict(host=self.db_cnf_dict['host'], usr=self.db_cnf_dict['usr'],
                            pwd=self.db_cnf_dict['pwd'], db=self.db_cnf_dict['db'])
            self.easy_mysql(cfg_dict, encoding=self.db_cnf_dict['encoding'],
                            autocommit=True)  # turn-on autocommit, be careful!
            self.cur.execute("SET NAMES utf8")
        except Exception as e:
            # NOTE(review): failures are swallowed; later methods will crash
            # on self.cur being None — consider logging/re-raising here.
            pass

    def change(self):
        """Select random 2018+ articles (excluding 연예/entertainment) and write
        REG_DATE/TITLE/CONTENT/coarse-category/ID rows to c_change.txt."""
        logger.info("Start Selection")
        if self.test is True:
            select_sql = 'select REG_DATE, TITLE, CONTENT, CATEGORY, ID from NEWS '\
                         'where REG_DATE >20180000000000 AND CATEGORY != "연예" '\
                         'order by rand() limit 10'
        else:
            select_sql = 'select REG_DATE, TITLE, CONTENT, CATEGORY, ID from NEWS '\
                         'where REG_DATE >20180000000000 AND CATEGORY != "연예" '\
                         'order by rand() limit 20000'
        self.cur.execute(select_sql)
        logger.info("Selected")
        select_data = self.cur.fetchall()
        logger.info(len(select_data))
        # fine-grained label -> coarse bucket membership lists
        economy = ["금융기타", "대출", "증권", "취업"]
        science = ["IT기타", "게임", "과학기타", "날씨", "모바일", "비만성장", "생활가전", "성형외과", "소프트웨어", "수송기기", "영상음향가전", "의료기타", "자동차",
                   "제약", "피부과", "하드웨어", "항공"]
        society = ["결혼", "교육", "사회기타", "생활용품", "육아", "종교"]
        sports = ["경마", "골프", "동계스포츠", "레저스포츠", "스포츠기타", "야구", "축구"]
        normal = ["국방", "기호식품", "복권", "부동산", "쇼핑", "숙박", "식품기타", "애완", "여행기타", "연금보험", "인테리어", "재해", "정치", "탈모", "패션",
                  "화장품", "공연", "영화", "예술"]
        for sd in select_data:
            # sd = (REG_DATE, TITLE, CONTENT, CATEGORY, ID)
            cate = sd[3]
            if cate in economy:
                c = "경제"
            elif cate in science:
                c = "과학"
            elif cate in society:
                c = "사회"
            elif cate in sports:
                c = "스포츠"
            elif cate in normal:
                c = "일반"
            elif cate == "":
                c = "null"
            else:
                c = "error"
            # print (sd)
            # keep only articles with a body of 300..3000 characters
            if 300 <= len(sd[2]) <= 3000:
                self.f2.write("\t".join([sd[0], sd[1], sd[2], c, sd[4]]))
                self.f2.write("\n")
        self.f2.close()
        # self.f3.close()
        logger.info("Finish")

    def count_data(self):
        """Log the total row count of NEWS; errors are only logged."""
        logger.info("count start")
        try:
            count_sql = 'select count(*) from NEWS'
            self.cur.execute(count_sql)
            select_count_row = self.cur.fetchall()
            logger.info(select_count_row)
            self.con.commit()
        except:
            # NOTE(review): bare except hides the real error (and the message
            # below contains a typo) — narrow and log the exception instead.
            logger.info("cannnot user_information")
def split():
    """Partition the rows of c_change.txt into one file per coarse category.

    Reads the tab-separated file produced by SquadDb.change() and writes each
    well-formed row (exactly 5 fields) to science/society/sports/general/
    economy/etc ``.txt`` according to the category label in column 4.
    Malformed rows are skipped entirely (as before).
    """
    # category label (column 4) -> bucket of matching rows
    buckets = {
        "과학": [],    # -> science.txt
        "사회": [],    # -> society.txt
        "스포츠": [],  # -> sports.txt
        "일반": [],    # -> general.txt
        "경제": [],    # -> economy.txt
    }
    etc = []  # rows whose category matches none of the above -> etc.txt

    # use a context manager: the input handle used to be leaked (never closed)
    with open("c_change.txt", "r") as source:
        for row in source:
            fields = row.split("\t")
            if len(fields) != 5:
                continue  # skip malformed rows
            buckets.get(fields[3], etc).append(row)

    outputs = [
        ("science", buckets["과학"]),
        ("society", buckets["사회"]),
        ("sports", buckets["스포츠"]),
        ("general", buckets["일반"]),
        ("economy", buckets["경제"]),
        ("etc", etc),
    ]
    for name, rows in outputs:
        with open("{}.txt".format(name), "w") as target:
            # rows already end with their original newline
            target.writelines(rows)
if __name__ == "__main__":
    # Export step is disabled; only re-split an existing c_change.txt.
    # j = SquadDb()
    # j.connect_db()
    # j.change()
    split()
"ylunar@naver.com"
] | ylunar@naver.com |
54f3b6909a21adf8a488b8156ea3dd3eff4e9bce | f2428051b3f7d77dc4cb2d61ee18cc31fe5eaa67 | /tiddlywebplugins/docs.py | 2afab067e1c13c5fcb7bb688dd7bb7e5300c03c6 | [
"BSD-3-Clause"
] | permissive | tiddlyweb/tiddlywebplugins.docs | b306ab081957c81c0f172fd2a11752f3f3252842 | af5df2369c794fea6f44ff8823f9ab0958909a80 | refs/heads/master | 2021-01-18T23:48:12.217948 | 2011-11-10T17:18:35 | 2011-11-10T17:18:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,898 | py | """
Autogenerate TiddlyWeb API docs via a serialization.
"""
from tiddlyweb.serializer import (Serializer, NoSerializationError,
TiddlerFormatError, BagFormatError, RecipeFormatError)
from tiddlyweb.serializations import SerializationInterface
from tiddlywebplugins.templates import get_template
import urllib
__version__ = '0.5'
# URL extension -> mime type handled by this plugin
EXTENSION_TYPES = {
    'x-doc': 'text/x-tiddlyweb-docs'
}

# mime type -> (serializer module, output content type)
SERIALIZERS = {
    'text/x-tiddlyweb-docs': ['tiddlywebplugins.docs',
        'text/html; charset=UTF-8'],
}


def init(config):
    """Register the docs extension and serializer on a web (selector) config.

    Does nothing when 'selector' is absent (i.e. non-web startup).
    """
    if 'selector' not in config:
        return
    config['extension_types'].update(EXTENSION_TYPES)
    config['serializers'].update(SERIALIZERS)
class Serialization(SerializationInterface):
    def __init__(self, environ=None):
        """Collect the configured serializations to document."""
        SerializationInterface.__init__(self, environ)
        # mime type -> url extension, filled by _build_serializers
        self.extensions = {}
        # list of (extension, serialization instance) pairs for every
        # configured serializer except this module itself
        self.serializations = []
        self._build_serializers()
    # XXX surely I can dry this up?
    # Every entity serializer entry point renders the same docs page; the
    # optional second argument names the deserialization method documented
    # when the matched route also accepts PUT (see _all_info).
    def recipe_as(self, recipe):
        return self._all_info('recipe_as', 'as_recipe')

    def bag_as(self, bag):
        return self._all_info('bag_as', 'as_bag')

    def tiddler_as(self, tiddler):
        return self._all_info('tiddler_as', 'as_tiddler')

    def list_recipes(self, recipes):
        return self._all_info('list_recipes')

    def list_bags(self, bags):
        return self._all_info('list_bags')

    def list_tiddlers(self, tiddlers):
        return self._all_info('list_tiddlers')
def _build_serializers(self):
for extension, mime in (self.environ['tiddlyweb.config']
['extension_types'].iteritems()):
self.extensions[mime] = extension
for mime, outputter in (self.environ['tiddlyweb.config']
['serializers'].iteritems()):
module, _ = outputter
if module == __name__ or mime == 'default':
continue
try:
self.serializations.append((self.extensions[mime],
Serializer(module, self.environ).serialization))
except KeyError:
# we got a mime type for which there is not an
# extension so let's skip it
pass
def _matches(self, method):
matches = []
for serialization in self.serializations:
if hasattr(serialization[1], method):
matches.append(serialization)
return matches
def _all_info(self, out_method, in_method=None):
method_info = self._method_info()
out_serialization_info = self._serialization_info(out_method)
if in_method and 'PUT' in method_info['method']:
in_serialization_info = self._serialization_info(in_method)
else:
in_serialization_info = {}
# Disable HTMLPresenter if it is in the stack.
if 'tiddlyweb.title' in self.environ:
del self.environ['tiddlyweb.title']
template = get_template(self.environ, 'tiddlywebdocs.html')
return template.generate({'outserialization': out_serialization_info,
'inserialization': in_serialization_info,
'method': method_info})
def _serialization_info(self, method):
serializers = self._matches(method)
info = {}
for serializer in serializers:
try:
try:
getattr(serializer[1], method)([])
except TypeError:
getattr(serializer[1], method)('', '')
except NoSerializationError:
continue
except (AttributeError, TiddlerFormatError, BagFormatError,
RecipeFormatError):
pass # wow!
info[serializer[1].__module__] = {
'doc': getattr(serializer[1], method).__doc__,
'ext': serializer[0]}
return info
def _method_info(self):
methods = self.environ.get('selector.methods', [])
path = self.environ.get('SCRIPT_NAME', 'Unavailable')
matched_path = self.environ.get('selector.matches', [path])[0]
selector = self.environ['tiddlyweb.config'].get('selector', None)
if '.x-doc' in path:
cleanpath = path.rsplit('.x-doc')[0]
else:
cleanpath = path
query_string = self.environ.get('QUERY_STRING', '')
if query_string:
query_string = '?%s' % query_string
info = {'path': path.decode('utf-8'),
'cleanpath': urllib.quote(cleanpath),
'method': {},
'query': query_string}
if selector:
for method in sorted(methods):
handler = selector.select(matched_path, method)[0]
info['method'][method] = ('%s:%s' % (handler.__module__,
handler.__name__), '%s' % handler.__doc__)
return info
| [
"chris.dent@gmail.com"
] | chris.dent@gmail.com |
44b136ab3b9b77bb0ad55b20ac6da575b4601836 | 4fca17a3dbc3e74ba7e46bd7869eb6d138e4c422 | /_1725_Number_Of_Rectangles_That_Can_Form_The_Largest_Square.py | ae7a0dd9651d89effd3109888fcae48736d25659 | [] | no_license | mingweihe/leetcode | a2cfee0e004627b817a3c0321bb9c74128f8c1a7 | edff905f63ab95cdd40447b27a9c449c9cefec37 | refs/heads/master | 2021-06-19T07:46:46.897952 | 2021-05-02T05:13:17 | 2021-05-02T05:13:17 | 205,740,338 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | from collections import Counter
class Solution(object):
    def countGoodRectangles(self, rectangles):
        """
        :type rectangles: List[List[int]]
        :rtype: int

        The largest square cut from an l x w rectangle has side min(l, w);
        return how many rectangles achieve the overall maximum side.
        """
        sides = [min(rect) for rect in rectangles]
        best = max(sides)
        return sides.count(best)
| [
"hemingwei2017@gmail.com"
] | hemingwei2017@gmail.com |
8ca7c327e6dcb11955cc3b53b864c85fe94c2207 | f250ee8189a91b9cc12d57665dfb09a34c343d38 | /setup.py | 16512f4d5054ee5e87cb774f96dd7d384736f418 | [] | no_license | duckworthd/optim | 4abb64b74c8df32175580b70d450963dbd099865 | 510e8fb81342fb145e140194dad0957724d124f7 | refs/heads/master | 2021-01-25T08:38:39.295589 | 2014-07-21T05:22:18 | 2014-07-21T05:22:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | from setuptools import setup, find_packages
import os
def version(name):
    """Extract ``__version__`` from ``<name>/_version.py``.

    The file is executed in an isolated namespace rather than imported,
    so setup.py works before the package's dependencies are installed.

    Bug fix: the original used ``execfile``, which was removed in
    Python 3; ``compile`` + ``exec`` behaves identically on 2 and 3.
    """
    fname = os.path.join(name, '_version.py')
    environ = {}
    with open(fname) as handle:
        exec(compile(handle.read(), fname, 'exec'), environ)
    return environ['__version__']
if __name__ == '__main__':
    # setuptools metadata for the `optim` package; the version string is
    # read from optim/_version.py via version() above.
    NAME = 'optim'
    setup(
        name = NAME,
        version = version(NAME),
        author = 'Daniel Duckworth',
        author_email = 'duckworthd@gmail.com',
        description = 'Reference implementations of optimization algorithms',
        license = 'BSD',
        keywords = 'optimization',
        url = 'http://github.com/duckworthd/optim',
        packages = find_packages(),
        classifiers = [
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: BSD License',
            'Operating System :: OS Independent',
            'Programming Language :: Python',
        ],
        # Runtime dependencies.
        install_requires = [
            'matplotlib',
            'numpy',
        ],
        # Test-only dependencies (used by `setup.py test` era tooling).
        tests_require = [
            'nose',
        ]
    )
| [
"duckworthd@gmail.com"
] | duckworthd@gmail.com |
99216479731815d1597e49465789a7c49782de16 | 345fdc5971db81240722901cbd1fef619b271676 | /chapter8/snippets/views.py | 4690d46532cbffce84863775e9a821a8b80a8d38 | [] | no_license | hisakin/practical-django | 093c449f42fe428320f5dce80db5b6708619e45d | a25e00d6283e1eb7a79728bbb99141f13c0f4692 | refs/heads/main | 2023-07-30T15:37:26.492059 | 2021-09-16T14:49:57 | 2021-09-16T14:49:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from snippets.forms import SnippetForm
from snippets.models import Snippet
def top(request):
    """Render the top page listing every snippet."""
    return render(request, "snippets/top.html",
                  {"snippets": Snippet.objects.all()})
@login_required
def snippet_new(request):
    """Create a new snippet owned by the logged-in user.

    GET shows an empty form; a valid POST saves the snippet and
    redirects to its detail page; an invalid POST re-renders the
    bound form with errors.
    """
    if request.method != 'POST':
        return render(request, "snippets/snippet_new.html",
                      {'form': SnippetForm()})
    form = SnippetForm(request.POST)
    if form.is_valid():
        snippet = form.save(commit=False)
        snippet.created_by = request.user
        snippet.save()
        return redirect(snippet_detail, snippet_id=snippet.pk)
    return render(request, "snippets/snippet_new.html", {'form': form})
@login_required
def snippet_edit(request, snippet_id):
    """Edit an existing snippet; only its creator is allowed.

    Returns 403 for any other authenticated user; 404 if the snippet
    does not exist.
    """
    snippet = get_object_or_404(Snippet, pk=snippet_id)
    if snippet.created_by_id != request.user.id:
        # Bug fix: the original returned HttpResponseForbidden, which is
        # never imported in this module (NameError at runtime).  An
        # explicit 403 via the already-imported HttpResponse is equivalent.
        return HttpResponse(
            "このスニペットの編集は許可されていません。", status=403)
    if request.method == "POST":
        form = SnippetForm(request.POST, instance=snippet)
        if form.is_valid():
            form.save()
            return redirect('snippet_detail', snippet_id=snippet_id)
    else:
        form = SnippetForm(instance=snippet)
    return render(request, 'snippets/snippet_edit.html', {'form': form})
def snippet_detail(request, snippet_id):
    """Show a single snippet; 404 when the id does not exist."""
    snippet = get_object_or_404(Snippet, pk=snippet_id)
    context = {'snippet': snippet}
    return render(request, 'snippets/snippet_detail.html', context)
| [
"contact@c-bata.link"
] | contact@c-bata.link |
9819935bdeb231eee38a6eb8298167a33dd66791 | a9b24a31f27afc42736d923b7ba4df300e13a8cf | /qidian_book/start.spec | 06a4f3fef319e84fc683ec0a381d53cd51f3024f | [] | no_license | 520wsl/python-scrapy-test | 8dcb216a91c8f03266ae29d3b9590b124088eb67 | 2f93e161c849aabfe9efb90e719906c9ae5bee1c | refs/heads/master | 2022-12-13T00:47:44.177505 | 2020-05-23T05:05:57 | 2020-05-23T05:05:57 | 184,033,077 | 2 | 0 | null | 2022-12-08T05:25:05 | 2019-04-29T08:40:13 | HTML | UTF-8 | Python | false | false | 823 | spec | # -*- mode: python -*-
# PyInstaller build spec: bundles start.py into a single self-contained
# console executable.  Analysis/PYZ/EXE are injected into the namespace
# by PyInstaller when it executes this file.
block_cipher = None
a = Analysis(['start.py'],
             pathex=['E:\\GIT\\python-scrapy-test\\qidian_book'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# One-file build: all binaries/data are packed into the EXE itself.
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='start',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          runtime_tmpdir=None,
          console=True )
| [
"395548460@qq.com"
] | 395548460@qq.com |
3bf2cae52d2734325db33a0646b8d648195c5ee6 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc054/A/4891080.py | da5c0c65d36312ef415307fd75b5c38f13c0880a | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | A, B = map(int, input().split())
if (A == B):
print('Draw')
elif (A == 1):
print('Alice')
elif (B == 1):
print('Bob')
elif (A > B):
print('Alice')
else:
print('Bob') | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
b2b7dee8ab41cc55ea64d657f49a4921db71a32e | ffbc91f86fcb9331f124dfc3d02caa44c1c93a3a | /06_train/src/tf_bert_reviews.py | d3667fecb8be0301d3d9f8094de0f3fae0060251 | [] | no_license | Alberto2K/workshop | 69c2713712d9d042ca790bde8b919265c5716394 | ac001b604510845d45adbd97cc87a2dc702529b7 | refs/heads/master | 2021-05-25T17:47:17.506596 | 2020-04-07T05:22:02 | 2020-04-07T05:22:02 | 253,852,888 | 1 | 0 | null | 2020-04-07T16:36:34 | 2020-04-07T16:36:33 | null | UTF-8 | Python | false | false | 15,260 | py | import time
import random
import pandas as pd
from glob import glob
import argparse
import json
import subprocess
import sys
import os
import tensorflow as tf
#subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'tensorflow==2.0.0'])
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'transformers'])
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'sagemaker-tensorflow==2.0.0.1.1.0'])
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'smdebug==0.7.2'])
from transformers import DistilBertTokenizer
from transformers import TFDistilBertForSequenceClassification
from transformers import TextClassificationPipeline
from transformers.configuration_distilbert import DistilBertConfig
CLASSES = [1, 2, 3, 4, 5]
def select_data_and_label_from_record(record):
    """Split a parsed TFRecord example into (features, label).

    Returns a dict with the three BERT inputs expected by the model and
    the label ids.  Used as a tf.data ``map`` function.

    Bug fix: removed a leftover debug ``print(y)`` that ran inside the
    tf.data pipeline.
    """
    x = {
        'input_ids': record['input_ids'],
        'input_mask': record['input_mask'],
        'segment_ids': record['segment_ids']
    }
    y = record['label_ids']
    return (x, y)
def file_based_input_dataset_builder(channel,
                                     input_filenames,
                                     pipe_mode,
                                     is_training,
                                     drop_remainder,
                                     batch_size,
                                     epochs,
                                     steps_per_epoch,
                                     max_seq_length):
    """Build a batched tf.data.Dataset of parsed BERT TFRecord examples.

    When ``pipe_mode`` is set the data is streamed from the SageMaker
    channel; otherwise it is read from local TFRecord files.  Training
    datasets are additionally shuffled.
    """
    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    if pipe_mode:
        print('***** Using pipe_mode with channel {}'.format(channel))
        from sagemaker_tensorflow import PipeModeDataset
        dataset = PipeModeDataset(channel=channel,
                                  record_format='TFRecord')
    else:
        print('***** Using input_filenames {}'.format(input_filenames))
        dataset = tf.data.TFRecordDataset(input_filenames)
    dataset = dataset.repeat(epochs * steps_per_epoch)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    # Fixed-length feature spec matching the records written at
    # preprocessing time.
    name_to_features = {
        "input_ids": tf.io.FixedLenFeature([max_seq_length], tf.int64),
        "input_mask": tf.io.FixedLenFeature([max_seq_length], tf.int64),
        "segment_ids": tf.io.FixedLenFeature([max_seq_length], tf.int64),
        "label_ids": tf.io.FixedLenFeature([], tf.int64),
        "is_real_example": tf.io.FixedLenFeature([], tf.int64),
    }
    def _decode_record(record, name_to_features):
        """Decodes a record to a TensorFlow example."""
        return tf.io.parse_single_example(record, name_to_features)
    dataset = dataset.apply(
        tf.data.experimental.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder,
            num_parallel_calls=tf.data.experimental.AUTOTUNE))
    # Bug fix: Dataset.cache() returns a *new* dataset; the original call
    # discarded the result, so nothing was ever cached.
    dataset = dataset.cache()
    if is_training:
        dataset = dataset.shuffle(seed=42,
                                  buffer_size=1000,
                                  reshuffle_each_iteration=True)
    return dataset
if __name__ == '__main__':
    # ---- CLI / SageMaker arguments.  Data channels, model dirs and host
    # topology default to the SM_* environment variables injected by the
    # SageMaker training container.
    parser = argparse.ArgumentParser()
    parser.add_argument('--train-data',
                        type=str,
                        default=os.environ['SM_CHANNEL_TRAIN'])
    parser.add_argument('--validation-data',
                        type=str,
                        default=os.environ['SM_CHANNEL_VALIDATION'])
    parser.add_argument('--test-data',
                        type=str,
                        default=os.environ['SM_CHANNEL_TEST'])
    parser.add_argument('--model-dir',
                        type=str,
                        default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--output-data-dir',
                        type=str,
                        default=os.environ['SM_OUTPUT_DATA_DIR'])
    parser.add_argument('--hosts',
                        type=list,
                        default=json.loads(os.environ['SM_HOSTS']))
    parser.add_argument('--current-host',
                        type=str,
                        default=os.environ['SM_CURRENT_HOST'])
    parser.add_argument('--num-gpus',
                        type=int,
                        default=os.environ['SM_NUM_GPUS'])
    # NOTE(review): type=bool on argparse flags is misleading -- any
    # non-empty string (including "False") parses as True.
    parser.add_argument('--use-xla',
                        type=bool,
                        default=False)
    parser.add_argument('--use-amp',
                        type=bool,
                        default=False)
    parser.add_argument('--max-seq-length',
                        type=int,
                        default=128)
    parser.add_argument('--train-batch-size',
                        type=int,
                        default=128)
    parser.add_argument('--validation-batch-size',
                        type=int,
                        default=256)
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=256)
    parser.add_argument('--epochs',
                        type=int,
                        default=2)
    parser.add_argument('--train-steps-per-epoch',
                        type=int,
                        default=1000)
    parser.add_argument('--validation-steps',
                        type=int,
                        default=1000)
    parser.add_argument('--test-steps',
                        type=int,
                        default=1000)
    parser.add_argument('--freeze-bert-layer',
                        type=bool,
                        default=False)
    parser.add_argument('--enable-sagemaker-debugger',
                        type=bool,
                        default=False)
    args, _ = parser.parse_known_args()
    print(args)
    # ---- Unpack parsed arguments into locals.
    train_data = args.train_data
    validation_data = args.validation_data
    test_data = args.test_data
    model_dir = args.model_dir
    output_data_dir = args.output_data_dir
    hosts = args.hosts
    current_host = args.current_host
    num_gpus = args.num_gpus
    use_xla = args.use_xla
    use_amp = args.use_amp
    max_seq_length = args.max_seq_length
    train_batch_size = args.train_batch_size
    validation_batch_size = args.validation_batch_size
    test_batch_size = args.test_batch_size
    epochs = args.epochs
    train_steps_per_epoch = args.train_steps_per_epoch
    validation_steps = args.validation_steps
    test_steps = args.test_steps
    freeze_bert_layer = args.freeze_bert_layer
    enable_sagemaker_debugger = args.enable_sagemaker_debugger
    # Determine if PipeMode is enabled
    pipe_mode_str = os.environ.get('SM_INPUT_DATA_CONFIG', '')
    print('pipe_mode_str {}'.format(pipe_mode_str))
    pipe_mode = (pipe_mode_str.find('Pipe') >= 0)
    print('pipe_mode {}'.format(pipe_mode))
    # Model Output
    transformer_pretrained_model_path = os.path.join(model_dir, 'transformer/pretrained')
    os.makedirs(transformer_pretrained_model_path, exist_ok=True)
    # SavedModel Output
    tensorflow_saved_model_path = os.path.join(model_dir, 'saved_model/0')
    os.makedirs(tensorflow_saved_model_path, exist_ok=True)
    # Tensorboard Logs
    tensorboard_logs_path = os.path.join(output_data_dir, 'tensorboard')
    os.makedirs(tensorboard_logs_path, exist_ok=True)
    # ---- Build the three tf.data pipelines under the multi-GPU strategy.
    distributed_strategy = tf.distribute.MirroredStrategy()
    with distributed_strategy.scope():
        tf.config.optimizer.set_jit(use_xla)
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": use_amp})
        train_data_filenames = glob(os.path.join(train_data, '*.tfrecord'))
        print('train_data_filenames {}'.format(train_data_filenames))
        train_dataset = file_based_input_dataset_builder(
            channel='train',
            input_filenames=train_data_filenames,
            pipe_mode=pipe_mode,
            is_training=True,
            drop_remainder=False,
            batch_size=train_batch_size,
            epochs=epochs,
            steps_per_epoch=train_steps_per_epoch,
            max_seq_length=max_seq_length).map(select_data_and_label_from_record)
        validation_data_filenames = glob(os.path.join(validation_data, '*.tfrecord'))
        print('validation_data_filenames {}'.format(validation_data_filenames))
        validation_dataset = file_based_input_dataset_builder(
            channel='validation',
            input_filenames=validation_data_filenames,
            pipe_mode=pipe_mode,
            is_training=False,
            drop_remainder=False,
            batch_size=validation_batch_size,
            epochs=epochs,
            steps_per_epoch=validation_steps,
            max_seq_length=max_seq_length).map(select_data_and_label_from_record)
        test_data_filenames = glob(os.path.join(test_data, '*.tfrecord'))
        print('test_data_filenames {}'.format(test_data_filenames))
        test_dataset = file_based_input_dataset_builder(
            channel='test',
            input_filenames=test_data_filenames,
            pipe_mode=pipe_mode,
            is_training=False,
            drop_remainder=False,
            batch_size=test_batch_size,
            epochs=epochs,
            steps_per_epoch=test_steps,
            max_seq_length=max_seq_length).map(select_data_and_label_from_record)
    tf.config.optimizer.set_jit(use_xla)
    tf.config.optimizer.set_experimental_options({'auto_mixed_precision': use_amp})
    # ---- Download tokenizer/config/model from the HuggingFace hub with
    # retries (concurrent instances can be throttled).
    tokenizer = None
    config = None
    model = None
    # This is required when launching many instances at once... the urllib request seems to get denied periodically
    successful_download = False
    retries = 0
    while (retries < 5 and not successful_download):
        try:
            tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
            config = DistilBertConfig.from_pretrained('distilbert-base-uncased',
                                                      num_labels=len(CLASSES))
            model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased',
                                                                          config=config)
            successful_download = True
            print('Sucessfully downloaded after {} retries.'.format(retries))
        # NOTE(review): bare except hides KeyboardInterrupt/SystemExit;
        # narrowing to Exception would be safer.
        except:
            retries = retries + 1
            random_sleep = random.randint(1, 30)
            print('Retry #{}. Sleeping for {} seconds'.format(retries, random_sleep))
            time.sleep(random_sleep)
    if not tokenizer or not model or not config:
        print('Not properly initialized...')
    # ---- Optimizer, callbacks (SageMaker Debugger or plain TensorBoard).
    optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08)
    if use_amp:
        # loss scaling is currently required when using mixed precision
        optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(optimizer, 'dynamic')
    callbacks = []
    if enable_sagemaker_debugger:
        import smdebug.tensorflow as smd
        callback = smd.KerasHook(out_dir=output_data_dir,
                                 export_tensorboard=True,
                                 tensorboard_dir=tensorboard_logs_path,
                                 save_config=smd.SaveConfig(save_interval=100),
                                 # save_all=True,
                                 include_collections=['metrics',
                                                      'losses',
                                                      'sm_metrics'],
                                 include_workers='all')
        callbacks.append(callback)
        # Lightweight wrapper on the original optimizer
        optimizer = callback.wrap_optimizer(optimizer)
    else:
        callback = tf.keras.callbacks.TensorBoard(log_dir=tensorboard_logs_path)
        callbacks.append(callback)
    # ---- Compile and train.
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
    model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
    train_and_validation_history = model.fit(train_dataset,
                                             shuffle=True,
                                             epochs=epochs,
                                             steps_per_epoch=train_steps_per_epoch,
                                             validation_data=validation_dataset,
                                             validation_steps=validation_steps,
                                             callbacks=callbacks)
    print(train_and_validation_history)
    print('Trained model {}'.format(model))
    # test_history = model.evaluate(test_dataset,
    #                               steps=test_steps,
    #                               callbacks=callbacks)
    # print(test_history)
    # Save the Model
    model.save_pretrained(transformer_pretrained_model_path)
    model.save(tensorflow_saved_model_path, save_format='tf')
    # ---- Reload the saved model with 1..5 star label mappings and smoke-test
    # inference through a text-classification pipeline.
    loaded_model = TFDistilBertForSequenceClassification.from_pretrained(transformer_pretrained_model_path,
                                                                        id2label={
                                                                            0: 1,
                                                                            1: 2,
                                                                            2: 3,
                                                                            3: 4,
                                                                            4: 5
                                                                        },
                                                                        label2id={
                                                                            1: 0,
                                                                            2: 1,
                                                                            3: 2,
                                                                            4: 3,
                                                                            5: 4
                                                                        })
    tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
    if num_gpus >= 1:
        inference_device = 0 # GPU 0
    else:
        inference_device = -1 # CPU
    print('inference_device {}'.format(inference_device))
    inference_pipeline = TextClassificationPipeline(model=loaded_model,
                                                    tokenizer=tokenizer,
                                                    framework='tf',
                                                    device=inference_device)
    print("""I loved it! I will recommend this to everyone.""", inference_pipeline("""I loved it! I will recommend this to everyone."""))
    print("""It's OK.""", inference_pipeline("""It's OK."""))
    print("""Really bad. I hope they don't make this anymore.""", inference_pipeline("""Really bad. I hope they don't make this anymore."""))
| [
"chris@fregly.com"
] | chris@fregly.com |
b1fa11cb1e9e3e99ea81b0ac5ea8466267d71a9a | f4ff27b8a5ab314659925eaf4be83151a1846bb5 | /cachetools_ext/fs.py | 744ff797e789a18e0a734d98e990346eb2afa9bd | [
"MIT"
] | permissive | thanakijwanavit/cachetools_ext | 0e9934ad9d264f3eb157d20973ed1c2751501e57 | 4355393d660c047ef3a286411f17795905476c91 | refs/heads/master | 2023-02-09T04:42:43.904587 | 2021-01-06T01:45:57 | 2021-01-06T01:45:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,429 | py | import datetime
import os
import pickle
import shutil
from collections.abc import MutableMapping
from pathlib import Path
from typing import Any, Optional, Union
class FSLRUCache(MutableMapping):
    """Filesystem-backed LRU cache with an optional TTL.

    Each entry is pickled to ``<path>/<key>.pkl``.  Least-recently-used
    eviction is driven by the files' access times and TTL expiry by
    their change times, so the cache survives process restarts.

    Fixes vs. the original:
    * removed a leftover debug ``print`` in the expiry sweep;
    * ``path_to_key`` used ``str.strip(".pkl")``, which strips the
      *characters* '.', 'p', 'k', 'l' and corrupts keys such as "pk";
    * the ``ttl`` attribute annotation now matches the accepted types.

    NOTE(review): LRU order relies on ``st_atime_ns`` being refreshed on
    read, which filesystems mounted with ``noatime`` do not guarantee.
    """

    def __init__(
        self,
        maxsize: int,
        path: Optional[Union[Path, str]] = None,
        ttl: Optional[Union[int, float]] = None,
        clear_on_start=False,
    ):
        """Create the cache directory (default ``./cache``) and drop any
        already-expired entries.

        :param maxsize: maximum number of entries before LRU eviction
        :param path: cache directory; created if missing
        :param ttl: entry lifetime in seconds, or None for no expiry
        :param clear_on_start: wipe any persisted entries on construction
        :raises TypeError: on invalid argument types
        """
        if not ((path is None) or isinstance(path, (str, Path))):
            raise TypeError("path must be str or None")
        if not ((ttl is None) or isinstance(ttl, (int, float))):
            raise TypeError("ttl must be int, float or None")
        if not isinstance(maxsize, int):
            raise TypeError("maxsize must be int or None")
        # Absolute path to the cache; create it if it does not exist.
        path = Path(path).absolute() if path else Path(".") / "cache"
        path.mkdir(parents=True, exist_ok=True)
        self.path = path
        # Entry lifetime in seconds (may be fractional); None disables expiry.
        self.ttl: Optional[Union[int, float]] = ttl
        self.maxsize = maxsize
        self.clear_on_start = clear_on_start
        if clear_on_start:
            # Wipe any entries persisted by a previous run.
            shutil.rmtree(self.path)
            path.mkdir(parents=True, exist_ok=True)
        # Delete any existing expired entries.
        self.__delete_expired_entries()

    def key_to_path(self, key) -> Path:
        """Map a cache key to its backing pickle file."""
        return self.path / f"{key}.pkl"

    def path_to_key(self, path) -> str:
        """Inverse of :meth:`key_to_path` (drop the ``.pkl`` suffix)."""
        return path.name[:-len(".pkl")]

    def __getitem__(self, key):
        """Return the cached value, or raise KeyError when the entry is
        missing, expired, or unreadable on disk."""
        self.__delete_expired_entries()
        value_path = self.key_to_path(key)
        try:
            return pickle.loads(value_path.read_bytes())
        except Exception:
            # Missing or corrupt entry: treat as a cache miss.
            pass
        return self.__missing__(key)

    def __missing__(self, key):
        raise KeyError(key)

    def __setitem__(self, key: Any, value: Any) -> None:
        """Store *value*, evicting LRU entries to stay within ``maxsize``."""
        self.__delete_expired_entries()
        value_size = 1  # every entry occupies one slot
        current_size = len(self)
        if value_size > self.maxsize:
            raise ValueError("value too large")
        while current_size + value_size > self.maxsize:
            self.popitem()
            current_size = len(self)
        self.key_to_path(key).write_bytes(pickle.dumps(value))

    def __delitem__(self, key):
        """Remove *key*; a missing entry is silently ignored."""
        try:
            self.key_to_path(key).unlink()
        except Exception:
            pass

    def __contains__(self, key) -> bool:
        self.__delete_expired_entries()
        return self.key_to_path(key).is_file()

    def __len__(self):
        self.__delete_expired_entries()
        return sum(1 for _ in self.path.glob("*"))

    def __iter__(self):
        self.__delete_expired_entries()
        for entry in self.path.glob("*"):
            yield self.path_to_key(entry)

    def items(self):
        """Yield ``(key, value)`` pairs, skipping entries that vanish or
        fail to load between listing and reading."""
        self.__delete_expired_entries()
        for key in self.__iter__():
            try:
                yield key, self[key]
            except KeyError:
                continue

    def keys(self):
        for key in self:
            yield key

    def values(self):
        for _, value in self.items():
            yield value

    def popitem(self):
        """Remove and return the `(key, value)` pair least recently used."""
        file_to_ts = {p: os.stat(p).st_atime_ns for p in self.path.glob("*")}
        for entry, _ts in sorted(file_to_ts.items(), key=lambda item: item[1]):
            try:
                key = self.path_to_key(entry)
                return (key, self.pop(key))
            except KeyError:
                # Entry disappeared concurrently; try the next oldest.
                pass
        raise KeyError("Cache is empty")

    def __delete_expired_entries(self):
        """Delete entries older than ``self.ttl`` seconds (no-op without
        a TTL).  A leftover debug ``print`` was removed here."""
        if self.ttl is None:
            return
        now = datetime.datetime.now().timestamp()
        for entry in self.path.glob("*"):
            try:
                created_ts = os.stat(entry).st_ctime
            except FileNotFoundError:
                continue
            if now - created_ts > self.ttl:
                try:
                    entry.unlink()
                except FileNotFoundError:
                    continue
| [
"github@oliverrice.com"
] | github@oliverrice.com |
10a042b54434cd65b7b1f5ac8d959ac31181fc38 | a3e626f9893982c549d1f8d98237e9601c2ddfef | /importXPZcurve.py | 9ca5f8b16b7eab9faf2fd57411761780b9f48b31 | [] | no_license | richstoner/connectivity-blend | e515ef19e57179cdd30c98aa235f3feb586095c0 | dae27b67e6de58a33354b200b34cf045d37fa035 | refs/heads/master | 2020-04-25T22:20:33.640843 | 2013-09-24T18:22:36 | 2013-09-24T18:22:36 | 8,784,866 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,333 | py | print("Hello")
from mathutils import Vector
import bpy
import glob
import csv
import math
import struct
import os
import urllib.request
w=1
def clearAllCurves():
    """Delete every CURVE object in the scene, then purge mesh datablocks."""
    # gather list of items of interest.
    candidate_list = [item.name for item in bpy.data.objects if item.type == "CURVE"]
    # select them only.
    for object_name in candidate_list:
        bpy.data.objects[object_name].select = True
    # remove all selected.
    bpy.ops.object.delete()
    # remove the meshes, they have no users anymore.
    # NOTE(review): this purges ALL mesh datablocks even though only curve
    # objects were deleted above -- looks copy-pasted from clearAllMeshes;
    # confirm meshes should really be removed here.
    for item in bpy.data.meshes:
        bpy.data.meshes.remove(item)
    print('Cleared curves')
def clearAllMeshes():
    """Delete every MESH object in the scene and purge the orphaned
    mesh datablocks left behind."""
    # gather list of items of interest.
    candidate_list = [item.name for item in bpy.data.objects if item.type == "MESH"]
    # select them only.
    for object_name in candidate_list:
        bpy.data.objects[object_name].select = True
    # remove all selected.
    bpy.ops.object.delete()
    # remove the meshes, they have no users anymore.
    for item in bpy.data.meshes:
        bpy.data.meshes.remove(item)
    print('Cleared meshes')
def makeMaterial(name, diffuse, specular, alpha):
    """Create and return a new Blender material.

    Uses Lambert diffuse and Cook-Torrance specular shading with the
    given diffuse/specular RGB tuples and alpha.
    """
    material = bpy.data.materials.new(name)
    material.diffuse_color = diffuse
    material.diffuse_shader = 'LAMBERT'
    material.diffuse_intensity = 1.0
    material.specular_color = specular
    material.specular_shader = 'COOKTORR'
    material.specular_intensity = 0.5
    material.alpha = alpha
    material.ambient = 1
    return material
def setMaterial(ob, mat):
    """Append *mat* to the object's data material slots."""
    ob.data.materials.append(mat)
def printSummary():
    """Print camera and lamp locations for quick scene inspection."""
    for obj in bpy.data.objects:
        if obj.type == 'CAMERA':
            print('Camera location: ' + str(obj.location))
        if obj.type == 'LAMP':
            print('Sun location: ' + str(obj.location))
def setSun():
    """Position and orient the 'Sun' lamp over the volume centre with
    constant falloff.

    NOTE(review): the 133x81 centre matches the volume dimensions used
    by the cube below -- confirm against the source data dimensions.
    """
    rx = 0
    ry = 180
    rz = 180
    pi = 3.14159265  # truncated pi kept to preserve the exact rotations
    sun = bpy.data.objects['Sun']
    sun.location = [math.floor(133/2), math.floor(81/2), 0]
    sun.rotation_mode = 'XYZ'
    sun.rotation_euler[0] = rx*(pi/180.0)
    sun.rotation_euler[1] = ry*(pi/180.0)
    sun.rotation_euler[2] = rz*(pi/180.0)
    sun.data.distance = 500
    sun.data.falloff_type = 'CONSTANT'
def moveSag():
    """Move the camera to an orthographic view along -Z ("sagittal")."""
    cam = bpy.data.objects['Camera']
    cam.location = [math.floor(133/2), math.floor(81/2), -150]
    rx = 0
    ry = 180
    rz = 180
    pi = 3.14159265  # truncated pi kept to preserve the exact rotations
    cam.rotation_mode = 'XYZ'
    cam.rotation_euler[0] = rx*(pi/180.0)
    cam.rotation_euler[1] = ry*(pi/180.0)
    cam.rotation_euler[2] = rz*(pi/180.0)
    #cam.data.type = 'PERSP'
    cam.data.type = 'ORTHO'
    cam.data.ortho_scale = 250
def moveCoronal():
    """Move the camera to an orthographic view along -X ("coronal")."""
    cam = bpy.data.objects['Camera']
    cam.location = [-150, math.floor(81/2), math.floor(115/2)]
    rx = 0
    ry = 90
    rz = 180
    pi = 3.14159265  # truncated pi kept to preserve the exact rotations
    cam.rotation_mode = 'XYZ'
    cam.rotation_euler[0] = rx*(pi/180.0)
    cam.rotation_euler[1] = ry*(pi/180.0)
    cam.rotation_euler[2] = rz*(pi/180.0)
    #cam.data.type = 'PERSP'
    cam.data.type = 'ORTHO'
    cam.data.ortho_scale = 250
def moveAxial():
    """Move the camera to an orthographic view along -Y ("axial")."""
    cam = bpy.data.objects['Camera']
    cam.location = [math.floor(133/2), -115, math.floor(115/2)]
    rx = 90
    ry = 0
    rz = 0
    pi = 3.14159265  # truncated pi kept to preserve the exact rotations
    cam.rotation_mode = 'XYZ'
    cam.rotation_euler[0] = rx*(pi/180.0)
    cam.rotation_euler[1] = ry*(pi/180.0)
    cam.rotation_euler[2] = rz*(pi/180.0)
    #cam.data.type = 'PERSP'
    cam.data.type = 'ORTHO'
    cam.data.ortho_scale = 250
def addRootGroup(list_of_roots):
    """Import curve geometry for a slice of the fiber root groups.

    For each group: fetch its RGB colour from the local series service,
    build a material, load every 12-byte-per-vertex binary polyline file
    for the group, join the polylines into a single curve object and
    assign the material.

    Fixes vs. the original: the bounds check used ``>`` (IndexError when
    the index reached ``len(list_of_roots)``); the binary files were
    never closed; a redundant per-iteration ``import os`` was removed;
    the nested MakePolyLine helper was hoisted out of the inner loop.

    NOTE(review): the processed slice is hard-coded to indices 50-99 of
    *list_of_roots*; widen ``group_indexes`` to handle other groups.
    """
    def MakePolyLine(objname, curvename, cList):
        """Create a bevelled 3D POLY curve object from a list of Vectors."""
        curvedata = bpy.data.curves.new(name=curvename, type='CURVE')
        curvedata.dimensions = '3D'
        curvedata.bevel_depth = 0.025
        objectdata = bpy.data.objects.new(objname, curvedata)
        objectdata.location = (0,0,0) #object origin
        bpy.context.scene.objects.link(objectdata)
        polyline = curvedata.splines.new('POLY')
        polyline.points.add(len(cList)-1)
        for num in range(len(cList)):
            x, y, z = cList[num]
            polyline.points[num].co = (x, y, z, w)

    group_indexes = range(50, 100)
    print("there are %d roots" % len(list_of_roots))
    for group_in in group_indexes:
        # Bug fix: the original tested 'group_in > len(list_of_roots)',
        # so group_in == len(...) fell through to an IndexError below.
        if group_in >= len(list_of_roots):
            break
        rootgroup = list_of_roots[group_in].split('\\')[1]
        urlstring = 'http://localhost:8888/series/%s' % rootgroup
        url = urllib.request.urlopen(urlstring)
        mybytes = url.read()
        colorstring = mybytes.decode("utf8")
        url.close()
        print(colorstring)
        csplit = colorstring.split(',')
        r = float(csplit[0])
        g = float(csplit[1])
        b = float(csplit[2])
        mtlname = rootgroup + '.mtl'
        red = makeMaterial(mtlname, (r, g, b), (1, 1, 1), 1)
        i = 0
        print(rootgroup)
        group_list = glob.glob(binary_location + '/' + rootgroup + '*')
        for mes in group_list:
            if i % 100 == 0: print(i)
            vec_list = []
            # Each file is a flat sequence of little 3-float (x, y, z) records.
            with open(mes, 'rb') as f:   # bug fix: handle was never closed
                filesize = os.fstat(f.fileno()).st_size
                for k in range(0, int(filesize / 12)):
                    vals = struct.unpack('fff', f.read(12))
                    vec_list.append(Vector(vals))
            MakePolyLine("%s-%04d" % (rootgroup, i), "%s-%04d" % (rootgroup, i), vec_list)
            ob = bpy.data.objects.get("%s-%04d" % (rootgroup, i))
            ob.select = True
            i += 1
        # Join all selected polylines into the group's first curve object.
        bpy.context.scene.objects.active = bpy.data.objects[("%s-0000" % (rootgroup))]
        bpy.ops.object.join()
        bpy.data.curves[("%s-0000" % (rootgroup))].bevel_depth = 0.025
        setMaterial(bpy.context.active_object, red)
        bpy.ops.object.select_all(action='DESELECT')
# ---- Main script: discover root groups from the binary mesh files,
# import their curves, optionally add a bounding cube, and set up the
# camera and sun for rendering.
add_cube = bpy.ops.mesh.primitive_cube_add
bpy.ops.object.select_all( action='DESELECT' )
binary_location = '/Users/Administrator/connectivity-blend/bindata'
raw_location = '/Users/Administrator/connectivity-blend/rawdata'
list_of_binmesh = glob.glob(binary_location + '/*')
# Root group name = filename up to the first '-' (deduplicated).
list_of_roots = []
for mes in list_of_binmesh:
    ms = mes.split('/')[-1].split('.')[0].split('-')[0]
    if ms not in list_of_roots:
        list_of_roots.append(ms)
print(list_of_roots[0])
#clearAllMeshes()
#clearAllCurves()
addRootGroup(list_of_roots)
# Place new objects on layer 0 only.
layerList = [False]*20
layerList[0] = True
import math
# Toggle to add a reference cube matching the 133x81x115 volume bounds.
shouldAddCube = 0
if shouldAddCube:
    add_cube(location=(0, 0, 0,), layers=layerList)
    ob = bpy.data.objects['Cube']
    print(ob.location)
    space = [133, 81, 115]
    ob.scale = [133/2, 81/2, 115/2]
    ob.location = [math.floor(133/2), math.floor(81/2), math.floor(115/2)]
printSummary()
#moveCoronal()
moveSag()
#moveAxial()
setSun()
"stonerri@gmail.com"
] | stonerri@gmail.com |
59afa20535e5200ed4863696e830c84019f868a0 | 67f86bb3d09cbc86cac698b3f0abaf01457a966a | /master/nameko-master/nameko-master/test/standalone/test_event_dispatcher.py | 52657eb8d8a0c8264c82b40e327629c03d339992 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | tied/DevArtifacts | efba1ccea5f0d832d4227c9fe1a040cb93b9ad4f | 931aabb8cbf27656151c54856eb2ea7d1153203a | refs/heads/master | 2020-06-06T01:48:32.149972 | 2018-12-08T15:26:16 | 2018-12-08T15:26:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,942 | py | import pytest
from amqp.exceptions import NotFound
from mock import Mock, patch
from six.moves import queue
from nameko.amqp import UndeliverableMessage
from nameko.events import event_handler
from nameko.standalone.events import event_dispatcher, get_event_exchange
from nameko.testing.services import entrypoint_waiter
handler_called = Mock()
class Service(object):
    """Minimal destination service used by the dispatch tests."""
    name = 'destservice'
    @event_handler('srcservice', 'testevent')
    def handler(self, msg):
        # Record the payload on the module-level mock so tests can
        # assert on delivery.
        handler_called(msg)
def test_dispatch(container_factory, rabbit_config):
    """End-to-end: a standalone dispatcher's event reaches a running service."""
    config = rabbit_config
    container = container_factory(Service, config)
    container.start()
    msg = "msg"
    dispatch = event_dispatcher(config)
    # Block until the handler entrypoint has fired (or time out).
    with entrypoint_waiter(container, 'handler', timeout=1):
        dispatch('srcservice', 'testevent', msg)
    handler_called.assert_called_once_with(msg)
class TestMandatoryDelivery(object):
    """ Test and demonstrate mandatory delivery.
    Dispatching an event should raise an exception when mandatory delivery
    is requested and there is no destination queue, as long as publish-confirms
    are enabled.
    """
    @pytest.fixture(autouse=True)
    def event_exchange(self, container_factory, rabbit_config):
        """Ensure the srcservice event exchange exists before each test."""
        # use a service-based dispatcher to declare an event exchange
        container = container_factory(Service, rabbit_config)
        container.start()
    def test_default(self, rabbit_config):
        """Without mandatory delivery an unroutable event is dropped silently."""
        # events are not mandatory by default;
        # no error when routing to a non-existent handler
        dispatch = event_dispatcher(rabbit_config)
        dispatch("srcservice", "bogus", "payload")
    def test_mandatory_delivery(self, rabbit_config):
        """Mandatory delivery with no bound queue raises UndeliverableMessage."""
        # requesting mandatory delivery will result in an exception
        # if there is no bound queue to receive the message
        dispatch = event_dispatcher(rabbit_config, mandatory=True)
        with pytest.raises(UndeliverableMessage):
            dispatch("srcservice", "bogus", "payload")
    def test_mandatory_delivery_no_exchange(self, rabbit_config):
        """Mandatory delivery to a missing exchange raises amqp NotFound."""
        # requesting mandatory delivery will result in an exception
        # if the exchange does not exist
        dispatch = event_dispatcher(rabbit_config, mandatory=True)
        with pytest.raises(NotFound):
            dispatch("bogus", "bogus", "payload")
    @patch('nameko.amqp.publish.warnings')
    def test_confirms_disabled(self, warnings, rabbit_config):
        """Disabling publish-confirms downgrades the failure to a warning."""
        # no exception will be raised if confirms are disabled,
        # even when mandatory delivery is requested,
        # but there will be a warning raised
        dispatch = event_dispatcher(
            rabbit_config, mandatory=True, use_confirms=False
        )
        dispatch("srcservice", "bogus", "payload")
        assert warnings.warn.called
class TestConfigurability(object):
    """
    Test and demonstrate configuration options for the standalone dispatcher
    """

    @pytest.yield_fixture
    def get_producer(self):
        with patch('nameko.amqp.publish.get_producer') as get_producer:
            yield get_producer

    @pytest.fixture
    def producer(self, get_producer):
        producer = get_producer().__enter__.return_value
        # make sure we don't raise UndeliverableMessage if mandatory is True
        producer.channel.returned_messages.get_nowait.side_effect = queue.Empty
        return producer

    @pytest.mark.parametrize("parameter", [
        # delivery options
        'delivery_mode', 'mandatory', 'priority', 'expiration',
        # message options
        'serializer', 'compression',
        # retry policy
        'retry', 'retry_policy',
        # other arbitrary publish kwargs
        'correlation_id', 'user_id', 'bogus_param'
    ])
    def test_regular_parameters(
        self, parameter, mock_container, producer
    ):
        """ Verify that most parameters can be specified at instantiation time.
        """
        config = {'AMQP_URI': 'memory://localhost'}
        expected = Mock()

        dispatch = event_dispatcher(config, **{parameter: expected})
        dispatch("service-name", "event-type", "event-data")

        assert producer.publish.call_args[1][parameter] == expected

    def test_restricted_parameters(
        self, mock_container, producer
    ):
        """ Verify that providing routing parameters at instantiation
        time has no effect.
        """
        config = {'AMQP_URI': 'memory://localhost'}
        bogus_exchange = Mock()
        bogus_routing_key = Mock()

        dispatch = event_dispatcher(
            config, exchange=bogus_exchange, routing_key=bogus_routing_key
        )

        service_name = "service-name"
        event_type = "event-type"
        expected_exchange = get_event_exchange(service_name)

        dispatch(service_name, event_type, "event-data")

        # The dispatcher must always route via the service's event exchange,
        # ignoring whatever was passed at construction time.
        assert producer.publish.call_args[1]['exchange'] == expected_exchange
        assert producer.publish.call_args[1]['routing_key'] == event_type
| [
"alexander.rogalsky@yandex.ru"
] | alexander.rogalsky@yandex.ru |
949312734036c00781d898b942bfafcd063a4d23 | a3fba5e8ecc502ff262b737d05f5b719e1cd4148 | /SlackWorkflows.py | 87db380e13a643f85a2702bae0537fe51abd266a | [] | no_license | cthacker-udel/Python-Slack-API | 6eccfbd97d564c4d8d4325fba22fab4db721a146 | 1bee3d77c4bf3179a348e83d760284bab3c13d24 | refs/heads/master | 2023-06-16T15:23:46.224602 | 2021-07-15T06:13:51 | 2021-07-15T06:13:51 | 378,105,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | from SlackClient import SlackClient
class SlackWorkflows(SlackClient):
    """Builds request bodies for Slack workflow-step API calls.

    Each optional field starts out as ``None`` and is only included in the
    body produced by :meth:`generate_queries` once it has been assigned.
    """

    # Attribute names double as the keys used in the generated request body,
    # in the order the original implementation emitted them.
    _FIELDS = (
        'workflow_step_execute_id',
        'outputs',
        'inputs',
        'step_image_url',
        'step_name',
    )

    def __init__(self):
        # Delegate to clear_queries so construction and reset share one path.
        self.clear_queries()

    def generate_queries(self):
        """Return a dict containing every field that is currently set.

        Uses ``is not None`` (PEP 8) rather than the original ``!= None``.
        """
        return {
            name: getattr(self, name)
            for name in self._FIELDS
            if getattr(self, name) is not None
        }

    def clear_queries(self):
        """Reset every field back to ``None``."""
        for name in self._FIELDS:
            setattr(self, name, None)
"cthacker@udel.edu"
] | cthacker@udel.edu |
eb8e8ca7dac3dc3988a7a6219e8a6e4e15e4a9af | 0cba5529e387ba0f077b4e8ddeb96f914004f5df | /setup-gpu.py | 3bc6c47fe699a7d4a30bdddecd315bf0b0a3e42e | [
"MIT"
] | permissive | AsyrafAzlan/Malaya | dc78398ee6880578f40c5646a48882a5913217ae | 3d5166173cf74881f7a56fffaaf391813c55d4f1 | refs/heads/master | 2021-05-21T22:47:41.863857 | 2020-04-03T15:00:21 | 2020-04-03T15:00:21 | 252,841,526 | 1 | 0 | MIT | 2020-04-03T21:04:44 | 2020-04-03T21:04:44 | null | UTF-8 | Python | false | false | 1,367 | py | import setuptools
# Distribution name for the GPU build of Malaya (the CPU build ships
# separately as "malaya").
__packagename__ = 'malaya-gpu'

setuptools.setup(
    name = __packagename__,
    packages = setuptools.find_packages(),
    version = '3.4',
    python_requires = '>=3.6.*',
    description = 'Natural-Language-Toolkit for bahasa Malaysia, powered by Deep Learning Tensorflow. GPU Version',
    author = 'huseinzol05',
    author_email = 'husein.zol05@gmail.com',
    url = 'https://github.com/huseinzol05/Malaya',
    download_url = 'https://github.com/huseinzol05/Malaya/archive/master.zip',
    keywords = ['nlp', 'bm'],
    # NOTE(review): both 'sklearn' (deprecated meta-package) and
    # 'scikit-learn' are listed -- one of them is redundant; confirm before
    # removing either.
    install_requires = [
        'dateparser',
        'sklearn',
        'scikit-learn',
        'requests',
        'unidecode',
        'tensorflow-gpu>=1.15.2',
        'numpy',
        'scipy',
        'PySastrawi',
        'ftfy',
        'networkx',
        'sentencepiece',
        'bert-tensorflow',
        'tqdm',
        'herpetologist',
        'youtokentome',
    ],
    license = 'MIT',
    classifiers = [
        'Programming Language :: Python :: 3.6',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: Text Processing',
    ],
    # Bundle the static web-interface assets with the package.
    package_data = {
        'malaya': [
            '_utils/web/*.html',
            '_utils/web/static/*.js',
            '_utils/web/static/*.css',
        ]
    },
)
| [
"husein.zol05@gmail.com"
] | husein.zol05@gmail.com |
92eaf915624d637170e3361fa06c58d5ed27e110 | 0c8a267966edd260177106beb04daad8622ba07f | /enron_dataset_project/finance_regression.py | 9419f85914fa454b4f6b9321f0eee48201439ef3 | [] | no_license | BrianSipple/Machine_Learning | 9665cec5ba9cc94e0dc06db346ddf18cff19d6a6 | f0848183dba64000ff26a32ec45e97531d2bc758 | refs/heads/master | 2021-01-19T00:44:21.123062 | 2015-02-09T04:17:40 | 2015-02-09T04:17:40 | 29,520,532 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,377 | py | #!/usr/bin/python
"""
Loads up/formats a modified version of the dataset
(modified, as in "outlier-free" for this particular exercise)
Dcatterplot of the training/testing data
"""
import numpy as np
import math
import sys
import pickle
from sklearn.cross_validation import train_test_split
sys.path.append("../tools/")
DICTIONARY = pickle.load(open('data/enron_dataset.pkl', 'r'))
DICTIONARY.pop('TOTAL', 0)
def percentile(N, percent, key=lambda x: x):
    """
    Find the percentile of a list of values.

    @parameter N - a list of values. Note N MUST BE already sorted.
    @parameter percent - a float value from 0.0 to 1.0.
    @parameter key - optional key function to compute value from each element of N.

    @return - the percentile of the values (None for an empty list)
    """
    if not N:
        return None
    rank = (len(N) - 1) * percent
    lower = math.floor(rank)
    upper = math.ceil(rank)
    if lower == upper:
        # Rank lands exactly on an element -- no interpolation needed.
        return key(N[int(rank)])
    # Linearly interpolate between the two bracketing elements.
    below = key(N[int(lower)]) * (upper - rank)
    above = key(N[int(upper)]) * (rank - lower)
    return below + above
def create_regression_for_feature_outcome_pair(feature, outcome):
    """Extract (outcome, feature) pairs from the dataset and split them.

    feature -- name of the predictor field in each person's record
    outcome -- name of the field the regression should predict (the label)

    Returns (feature_train, feature_test, label_train, label_test) from a
    50/50 split with a fixed random seed for reproducibility.
    """
    from feature_format import featureFormat, labelFeatureSplit
    #DICTIONARY = pickle.load( open("data/enron_dataset.pkl", "r") )
    ### list the features you want to look at--first item in the
    ### list will be the "label" feature
    #data = featureFormat( DICTIONARY, [outcome, feature], remove_any_zeroes=True)#, "long_term_incentive"], remove_any_zeroes=True )
    data = featureFormat(DICTIONARY, [outcome, feature])
    label, features = labelFeatureSplit( data )
    ### training-testing split needed in regression, just like classification
    feature_train, feature_test, label_train, label_test = train_test_split(features, label, test_size=0.5, random_state=42)
    return feature_train, feature_test, label_train, label_test
def classify(feature_train, label_train):
    """Fit and return an ordinary least-squares regression model."""
    from sklearn import linear_model

    model = linear_model.LinearRegression()
    model.fit(feature_train, label_train)
    return model
def make_plot(feature_train, label_train, feature_test, label_test, classifier, x_label, y_label):
    """
    draw the scatterplot, with color-coded training and testing points
    """
    train_color = "#00fffd"
    test_color = "#6600ff"
    import matplotlib.pyplot as plt
    # Plot the raw points: test set first, then training points on top.
    for feature, label in zip(feature_test, label_test):
        plt.scatter( feature, label, color=test_color )
    for feature, label in zip(feature_train, label_train):
        plt.scatter( feature, label, color=train_color )
    ### labels for the legend
    # NOTE(review): both legend entries are drawn at the same test-set point;
    # the "train" entry borrows a test point purely to get its color into
    # the legend -- it is not an actual training sample.
    plt.scatter(feature_test[0], label_test[0], color=test_color, label="test")
    plt.scatter(feature_test[0], label_test[0], color=train_color, label="train")
    # Overlay the fitted regression line; the NameError guard keeps the
    # scatterplot usable when no classifier is in scope.
    try:
        plt.plot( feature_test, classifier.predict(feature_test) )
    except NameError:
        pass
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.legend()
    plt.show()
def find_largest_outliers_names(outliers, errors, feature_attr_name, label_attr_name):
    """
    As we're computing and cleaning outliers,
    we can still gain valuable insights by processing
    those values in various ways

    outliers        -- list of (feature, label, error) tuples
    errors          -- residual errors aligned with `outliers`
    label_attr_name -- dataset field whose value identifies the person

    Returns the names of the people responsible for the outliers, ordered
    from largest error to smallest.
    """
    outlier_names = []
    # Keep only the first two columns of the tuples.
    # (Python 2: zip() returns a list, so slicing works here.)
    feature_outliers, label_outliers = zip(*outliers)[0:2]
    # Create a list of the outliers' errors and the values that caused them...
    # sorted from largest to smallest
    outlier_label_error_pairs = [(outlier_label, outlier_error) for outlier_label, outlier_error in zip(label_outliers, errors)]
    outlier_label_error_pairs.sort(key=lambda tup: -1 * tup[1])
    # Map each outlier label value back to the person whose record holds it.
    # NOTE(review): assumes label values are unique per person -- a duplicate
    # value would attribute the error to every matching record.
    for i in range(len(outlier_label_error_pairs)):
        label_val_responsible_for_error = outlier_label_error_pairs[i][0]
        for person_name in DICTIONARY:
            if DICTIONARY[person_name].get(label_attr_name) == label_val_responsible_for_error:
                outlier_names.append(person_name)
    #print outlier_names
    return outlier_names
def find_largest_outlier_entry(outliers, errors, feature_attr_name, label_attr_name):
    """
    Finds the entry corresponding to the outlier with the highest-valued
    label.

    outliers        -- list of (feature, label, error) tuples
    errors          -- residual errors aligned with `outliers`
    label_attr_name -- dataset field searched for the outlier's label value

    Returns {person_name: record} for the first matching person, or None
    if no record carries that label value.
    """
    # BUGFIX: the original wrapped this search in a redundant
    # `for i in range(len(label_outliers))` loop that repeated the identical
    # inner scan on every iteration; one pass is sufficient. The unused
    # `feature_outliers` unpack was dropped as well.
    label_outliers = [outlier[1] for outlier in outliers]
    largest_outlier_label_pos = np.where(errors == max(errors))[0][0]
    largest_outlier_val = label_outliers[largest_outlier_label_pos]

    for person_name in DICTIONARY:
        if DICTIONARY[person_name].get(label_attr_name) == largest_outlier_val:
            return { person_name: DICTIONARY[person_name] }
def clean_outliers(predictions, feature_values, label_values, feature_attr_name, label_attr_name):
    """
    Drop the training points carrying the top 10% of residual error.

    predictions       -- regression predictions aligned with feature_values
    feature_values    -- predictor values
    label_values      -- true outcome values
    feature_attr_name -- dataset field name of the feature (for reporting)
    label_attr_name   -- dataset field name of the label (for reporting)

    Returns a list of (feature, label, error) tuples sorted by error with
    the worst 10% removed.
    """
    # Residual error per point. (Unused `x_y_pairs` / `pred_outcome_pairs`
    # locals from the original were removed.)
    errors = abs(predictions - label_values)

    cleaned_data = zip(feature_values, label_values, errors)
    ### sort the (still uncleaned) data by error, smallest first
    cleaned_data.sort(key=lambda tup: tup[2])
    errors.sort()

    ## Remove values with top 10% of errors
    cutoff = int(math.floor(len(cleaned_data) * .90))
    outliers = cleaned_data[cutoff:]
    outlier_errors = errors[cutoff:]

    # Computed for inspection/debugging only -- neither result feeds into
    # the return value.
    outlier_names = find_largest_outliers_names(
        outliers,
        outlier_errors,
        feature_attr_name,
        label_attr_name
    )
    largest_outlier_entry = find_largest_outlier_entry(
        outliers,
        outlier_errors,
        feature_attr_name,
        label_attr_name
    )
    #print largest_outlier_entry

    return cleaned_data[:cutoff]
if __name__ == "__main__":
################
### Set the feature along with the outcome that it will predict
#feature = "long_term_incentive"
feature = "salary"
outcome = "bonus"
################
feature_train, feature_test, label_train, label_test = create_regression_for_feature_outcome_pair(feature, outcome)
reg = classify(feature_train, label_train)
slope = reg.coef_[0]
intercept = reg.intercept_
# X_train = np.reshape(np.array(feature_train), (len(feature_train), 1))
# Y_train = np.reshape(np.array(label_train), (len(label_train), 1))
#
# X_test = np.reshape(np.array(feature_test), (len(feature_test), 1))
# Y_test = np.reshape(np.array(label_test), (len(label_test), 1))
train_score = reg.score(feature_train, label_train)
test_score = reg.score(feature_test, label_test)
print "Slope: {}".format(slope)
print "Intercept: {}".format(intercept)
#print "Mean Squared Error: {}".format(mse)
print "Prediction Score on training data: {}".format(train_score)
print "Prediction Score on testing data: {}".format(test_score)
#make_plot(feature_train, label_train, feature_test, label_test, reg, feature, outcome)
### Now, to attempt to account for outliters,
### we can remove the training items with the top 10% of residual error,
### and retrain.
cleaned_data = clean_outliers(
reg.predict(feature_train),
feature_train,
label_train,
feature_attr_name=feature,
label_attr_name=outcome
)
if len(cleaned_data) >= 0:
new_feature_data, new_label_data, errors = zip(*cleaned_data)
new_feature_data = np.reshape(np.array(new_feature_data), (len(new_feature_data), 1))
new_label_data = np.reshape(np.array(new_label_data), (len(new_label_data), 1))
new_feature_train, new_feature_test, new_label_train, new_label_test = train_test_split(
new_feature_data,
new_label_data,
test_size=0.5,
random_state=42
)
reg = classify(new_feature_train, new_label_train)
slope = reg.coef_[0]
intercept = reg.intercept_
train_score = reg.score(new_feature_train, new_label_train)
test_score = reg.score(new_feature_test, new_label_test)
print "Slope, after cleaning outliers: {}".format(slope)
print "Intercept, after cleaning outliers: {}".format(intercept)
#print "Mean Squared Error: {}".format(mse)
print "Prediction Score on training data, after cleaning outliers: {}".format(train_score)
print "Prediction Score on testing data, after cleaning outliers: {}".format(test_score)
#make_plot(new_feature_train, new_label_train, new_feature_test, new_label_test, reg, feature, outcome)
| [
"Bsipple57@gmail.com"
] | Bsipple57@gmail.com |
e8d1c7aff054a58b187c129b972d475533373d3d | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/if9.py | eee16a6339bdc8ad546573f44e5ee7a5852583ba | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Prints the text of a quoted argument list. `lineRemaining` is a list
    # of whitespace-split tokens; a well-formed argument starts and ends
    # with a standalone double-quote token.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print: strip the surrounding quote tokens
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Just the two quote tokens -- print an empty line.
            print
def main(fileName):
    # Interpret the file line by line: each line must begin with the
    # keyword 'iF9'; anything else aborts with an error message.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'iF9':
                # Pass the remaining tokens to the print handler.
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
a34fa47c41a776883e5322039c8a4ea490ae734f | f2604a924b5cc6638dba6e246a6aea38d335f3b1 | /gym_splendor_code/envs/mechanics/splendor_observation_space.py | fab06fe86b21933bdca0b151313882be48691a6b | [
"MIT"
] | permissive | StanczakDominik/gym-splendor | 379507c066dc8756f5514c3760000bed6bf28020 | b7d6b0356d96ad1c528371f52412b81687a0ecc6 | refs/heads/master | 2020-08-28T00:38:06.076814 | 2019-10-25T13:09:17 | 2019-10-25T13:09:17 | 217,536,093 | 0 | 0 | MIT | 2019-10-25T13:09:40 | 2019-10-25T13:09:39 | null | UTF-8 | Python | false | false | 2,426 | py | from gym.spaces import Space
class SplendorObservationSpace(Space):
    """This class contains all information we want to share with the agents playing Splendor. The difference between
    SplendorObservationSpace and State is that State contains all information about the state of game (including list
    of cards that are not yet revealed and class SplendorObservationSpace contains only some part of it that is
    accessible by the player. By modifying this class we can change what agent knows about the state of the game."""
    def __init__(self):
        super().__init__()

    def state_to_observation(self, state):
        # Expose only player-visible information: card names (never the
        # hidden deck), the shared gem pool, the active player, and each
        # player's hand. Gem collections are copied so the observation
        # cannot mutate the underlying game state.
        cards_on_board_names = {card.name for card in state.board.cards_on_board}
        gems_on_board = state.board.gems_on_board.__copy__()
        active_player_id = state.active_player_id
        players_hands = [{'cards_possessed_names': {card.name for card in players_hand.cards_possessed},
                          'cards_reserved_names' : {card.name for card in players_hand.cards_reserved},
                          'gems_possessed_names' : players_hand.gems_possessed.__copy__()} for players_hand in state.list_of_players_hands]
        return {'cards_on_board_names' : cards_on_board_names, 'gems_on_board' : gems_on_board,
                'active_player_id' : active_player_id, 'players_hands' : players_hands}

    def __repr__(self):
        # Human-readable description of the observation structure.
        return 'Observation space in Splendor. It contains all information accessible to one player (so for example in \n' \
               'a default setting in does not contain the list of hidden cards. One observation has the following structure: \n' \
               'It is a dictionary with keys: \n' \
               '1) cards_on_board_names - a set of names of card lying on the board \n' \
               '2) gems_on_board - a collection of gems on board \n ' \
               '3) active_player_id - a number that indicates which player is active in the current state \n' \
               '4) players_hands - a list of dictionaries refering to consective players hands. Each dictionary in this \n' \
               'list contains the following keys:' \
               'a) cards_possessed_names - set of names of cards possesed by the players hand \n'\
               'b) cards_reserved_names - set of names of cards reserved by the players hand \n' \
               'c) gems_possessed - collection of gems possessed by the players hand'
"tomeko314@gmail.com"
] | tomeko314@gmail.com |
35a57323cabffcc1db0210600ddf6b6b8f538a2c | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba2727.pngMap.py | 54977ad5cb30d2621c02168724fee4cc2592bd28 | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba2727.pngMap = [
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000011111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000001011100010000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000001001111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000011111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000011111111111000000000000000000000000000000000000000000000000001111110000000000000000000000000000',
'00000000000000000000000000000000111111111111000000000000000000000000000000000000000000000000001111100000000000000000000000000000',
'00000000000000000000000000000001111111111111000000000000000000000000000000000000000001111011111111111100000000000000000000000000',
'00000000000000000000000000000011111111111111000000000000000000000000000000000000000000111111111111111000000000000000000000000000',
'00000000000000000000000000000000111111111110000000000000000000000000000000000000000000000111111111110000000000000000000000000000',
'00000000000000000000000000000000111111111110000000000000000000000000000000000000000000001111111111110000000000000000000000000000',
'00000000000000000000000000000000001111111111000000000000000000000000000000000000000000001111111111100000000000000000000000000000',
'00000000000000000000000000000000001111111110000000000000000000000000000000000000000000001111111111100000000000000000000000000000',
'00000000000000000000000000000000011111111111000000000000000000000000000000000000000000001111111111110000000000000000000000000000',
'00000000000000000000000000000000111111111111000000000000000000000000000000000000000000001111111000011000000000000000000000000000',
'00000000000000000000000000000001111111111111000000000000000000000000000000000000000000000111111100000000000000000000000000000000',
'00000000000000000000000000000011111111111110000000000000000000000000000000000000000000001111111000000000000000000000000000000000',
'00000000000000000000000000000111111111111100000000000000000000000000000000000000000000111111111000000000000000000000000000000000',
'00000000000000000000000000000011111111111100000000000000000000000000000000000000000000111111111000000000000000000000000000000000',
'00000000000000000000000000000011111111111100000000000000000000000000000000000000000100111111111100000000000000000000000000000000',
'00000000000000000000000000000111111111111100000000000000000000000000000000000000000000111111111100000000000000000000000000000000',
'00000000000000000000000000000111111111111111000000000000000000000000000000000000001010111111110011000000000000000000000000000000',
'00000000000000000000000000000011111111111111000000000000000000000000000000000000001001111111110011000000000000000000000000000000',
'00000000000000000000000000001111111111111111100000000000000000000000000000000000001100111111100011000000000000000000000000000000',
'00000000000000000000000000001111111111111111000000000000000000000000000000000000001111111111100011100000000000000000000000000000',
'00000000000000000000000000001111111111111111000000000000000000000000000000000000001111111111111111000000000000000000000000000000',
'00000000000000000000000000001111111111111111010000000000000000000000000000000000001111111111111111000000000000000000000000000000',
'00000000000000000000000000001111111111111111110000000000000000000000000000000000000011111111111100000000000000000000000000000000',
'00000000000000000000000000001111111111111111110000000000000000000000000000000000000011111111111100000000000000000000000000000000',
'00000000000000000000000000011111111111111111100000000000000000000000000000000000000001111111110000000000000000000000000000000000',
'00000000000000000000000000001111111111111111111000000000000000000000000000000000000011111111111000000000000000000000000000000000',
'00000000000000000000000000001111111111111111110000000000000000000000000000000000000011111111111000000000000000000000000000000000',
'00000000000000000000000000111111111111111111111000000000000000000000000000000000000011111111111000000000000000000000000000000000',
'00000000000000000000000000111111111111111111110000000000000000000000000000000000000011111111111100000000000000000000000000000000',
'00000000000000000000000000111111111111111111110000000000000000000000000000000000000011111111111100000000000000000000000000000000',
'00000000000000000000000000111111111111111111111000000000000000000000000000000000000011111111111100000000000000000000000000000000',
'00000000000000000000000000111111111111111111111000000000000000000000000000000000001011111111111100000000000000000000000000000000',
'00000000000000000000000000111111111111111111111000000000000000000000000000000000000011111111111110000000000000000000000000000000',
'00000000000000000000000000111111111111111111111000000000000000000000000000000000000001111111111110000000000000000000000000000000',
'00000000000000000000000000111111111111111111111000000000000000000000000000000000000000110000111100000000000000000000000000000000',
'00000000000000000000000000111111111111111111111000000000000000000000000000000000000000110000110000000000000000000000000000000000',
'00000000000000000000000000111111111111111111111110000000000000000000000000000000000000110000110000000000000000000000000000000000',
'00000000000000000000000011111111111111111111111110000000000000000000000000000000000001110001110000000000000000000000000000000000',
]
| [
"bili33@87ouo.top"
] | bili33@87ouo.top |
ac4dad4eb9fe62c08acc70ad3022a95b11ea530d | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /aptamers_mlpd/simulation/utils.py | addb9740cbb0a633a07ab031f48e0ace1eddf5c8 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 3,197 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for aptamer simulations.
"""
import numpy
# numpy.random.RandomState accepts uint32 seeds, so seeds wrap at 2**32.
RANDOM_SEED_MAX = 2 ** 32


def random_seed_stream(random_seed=None):
    """Yield an infinite stream of numbers for seeding random number generators.

    The first seed is drawn pseudo-randomly from ``random_seed``; each
    subsequent seed is the previous one plus one (mod 2**32). Incrementing
    instead of redrawing avoids birthday-paradox collisions, which would
    otherwise become likely after only ~2**16 samples.

    Example:
      >>> seed_gen = random_seed_stream(42)
      >>> next(seed_gen)
      1608637542

    Args:
      random_seed: optional integer used to seed this stream of random seeds.

    Yields:
      Integer seeds suitable for use in numpy.random.RandomState. Each seed
      is derived deterministically from the `random_seed` argument.
    """
    current = numpy.random.RandomState(random_seed).randint(RANDOM_SEED_MAX)
    while True:
        yield current
        current = (current + 1) % RANDOM_SEED_MAX
def target_occupancy(target_affinity,
                     serum_affinity,
                     target_concentration,
                     serum_concentration):
    """Calculate target site occupancy in the presence of serum.

    Assumes the amounts of target and serum are very large compared to the
    amount of aptamers, so both concentrations can be treated as fixed.
    TODO(mdimon): Validate this assumption.

    All arguments must share the same units. Follows the competitive
    inhibition model (Equation 7):
    https://en.wikipedia.org/wiki/Competitive_inhibition#Derivation

    Args:
      target_affinity: number or ndarray-like giving affinity for the target site.
      serum_affinity: number or ndarray-like giving serum affinity.
      target_concentration: number or ndarray-like giving target concentration.
      serum_concentration: number or ndarray-like giving serum concentration.

    Returns:
      Number or ndarray-like giving the fraction of bound target sites.
    """
    bound = serum_affinity * target_concentration
    total = (target_affinity * serum_affinity
             + serum_affinity * target_concentration
             + target_affinity * serum_concentration)
    return bound / total
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
c98ff5d1a2f8fc0e63b88fcd46ef26943ecb54f6 | 419e095e38b0c9cf9591d0333a89de1781bf8edf | /CodeUp/6051 : [기초-비교연산] 정수 2개 입력받아 비교하기4(설명).py | 7ab36ee4b3c703e907114c32a9e3a02ec6f9214e | [] | no_license | mjxong/CodingTest | bc6b7f9e48b9ccb9ae16b6f12d611a805e978b28 | 5fc9e5f1dec6214a16658f0ff69d2d6b807dc489 | refs/heads/master | 2023-03-28T23:16:29.270628 | 2021-04-04T11:20:50 | 2021-04-04T11:20:50 | 352,362,494 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | a, b = map(int, input().split())
print(a!=b) | [
"replituser@example.com"
] | replituser@example.com |
a7d3deb5e20d6442018c8bb519bec6820cd71cf0 | e3eead40e93fdf5186269536edefab4f08e9a5a2 | /LeetCode/393-utf8_validation.py | 9d69670d9f142e05f0ee5a6645188ff7b8527595 | [] | no_license | davll/practical-algorithms | bbc930b42363cae00ce39e8a686854c19131d334 | 0e35e4cc87bd41144b8e34302aafe776fec1b356 | refs/heads/master | 2021-08-22T13:12:34.555074 | 2020-03-28T08:56:13 | 2020-03-28T08:56:13 | 147,224,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | def validate_utf8(data):
n = len(data)
i = 0
while i < n:
nb = _nsucc_bits(data[i])
if nb < 0:
return False
i += 1
for _ in range(nb):
if i < n:
if not _follow_check(data[i]):
return False
i += 1
else:
return False
return True
_HEADER_MASK = [
int('10000000', base=2),
int('11100000', base=2),
int('11110000', base=2),
int('11111000', base=2)
]
_HEADER_VALUE = list(map(lambda x: (x << 1) & 0xFF, _HEADER_MASK))
_FOLLOW_MASK = int('11000000', base=2)
_FOLLOW_VALUE = int('10000000', base=2)
#print(', '.join(map(bin, _HEADER_MASK)))
#print(', '.join(map(bin, _HEADER_VALUE)))
def _nsucc_bits(x):
for i, (m, v) in enumerate(zip(_HEADER_MASK, _HEADER_VALUE)):
if (x & m) == v:
return i
return -1
def _follow_check(x):
return (x & _FOLLOW_MASK) == _FOLLOW_VALUE
class Solution:
    # LeetCode 393 adapter: delegates to the module-level validate_utf8.
    def validUtf8(self, data):
        """
        :type data: List[int]
        :rtype: bool
        """
        return validate_utf8(data)
| [
"davll.xc@gmail.com"
] | davll.xc@gmail.com |
e88aae5ad5a2fd54f63aeec3e9e2ec2f17efeae8 | de33d709be6667a1972322fcd514edca80cfa6a0 | /snipps/check_mode.py | b20227c7ee97409be989525f4c0593c4f51b97ad | [
"MIT"
] | permissive | akshaynagpal/number_recognition | 8ecbc6affc970a9e9ffeb70cc290db9a4ed43489 | 363606205ccfe4a43320c2452c0ae0dd4e026ec2 | refs/heads/master | 2020-05-09T16:19:08.562084 | 2015-10-03T10:01:14 | 2015-10-03T10:01:14 | 30,975,567 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | import PIL
from PIL import Image
image_name = raw_input("enter name of image to open!")
imgfile=Image.open(image_name)
print imgfile.mode
raw_input()
| [
"akshay2626@gmail.com"
] | akshay2626@gmail.com |
819d61a9de591bf744f7ebad7563c9fd8559d4dc | e8dba002d8916a468e559a52f254c0d92532d6b2 | /homeassistant/components/airnow/config_flow.py | 67bce66e1673ab24e42ba7ed70d4ad657b614040 | [
"Apache-2.0"
] | permissive | thomasgermain/home-assistant | 32b0f4d888220f4ce49dc85e506d0db39445c6c0 | 9673b93842ddcecc7e6a6d65e6d4f5b8a1089c43 | refs/heads/vaillant | 2023-08-21T23:50:24.679456 | 2020-05-20T21:01:18 | 2023-08-03T07:11:35 | 197,781,893 | 8 | 4 | Apache-2.0 | 2023-02-10T06:56:47 | 2019-07-19T13:57:53 | Python | UTF-8 | Python | false | false | 3,692 | py | """Config flow for AirNow integration."""
import logging
from pyairnow import WebServiceAPI
from pyairnow.errors import AirNowError, EmptyResponseError, InvalidKeyError
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
session = async_get_clientsession(hass)
client = WebServiceAPI(data[CONF_API_KEY], session=session)
lat = data[CONF_LATITUDE]
lng = data[CONF_LONGITUDE]
distance = data[CONF_RADIUS]
# Check that the provided latitude/longitude provide a response
try:
test_data = await client.observations.latLong(lat, lng, distance=distance)
except InvalidKeyError as exc:
raise InvalidAuth from exc
except AirNowError as exc:
raise CannotConnect from exc
except EmptyResponseError as exc:
raise InvalidLocation from exc
if not test_data:
raise InvalidLocation
# Validation Succeeded
return True
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for AirNow."""
VERSION = 1
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
# Set a unique id based on latitude/longitude
await self.async_set_unique_id(
f"{user_input[CONF_LATITUDE]}-{user_input[CONF_LONGITUDE]}"
)
self._abort_if_unique_id_configured()
try:
# Validate inputs
await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except InvalidLocation:
errors["base"] = "invalid_location"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
# Create Entry
return self.async_create_entry(
title=(
f"AirNow Sensor at {user_input[CONF_LATITUDE]},"
f" {user_input[CONF_LONGITUDE]}"
),
data=user_input,
)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_API_KEY): str,
vol.Optional(
CONF_LATITUDE, default=self.hass.config.latitude
): cv.latitude,
vol.Optional(
CONF_LONGITUDE, default=self.hass.config.longitude
): cv.longitude,
vol.Optional(CONF_RADIUS, default=150): int,
}
),
errors=errors,
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
class InvalidLocation(exceptions.HomeAssistantError):
"""Error to indicate the location is invalid."""
| [
"noreply@github.com"
] | thomasgermain.noreply@github.com |
c6526829039453f2cd279cccdd18b6d5e6844b8a | 1ebe5a07e7f6260c2c2ceb6ca00dcf2a0341e544 | /op_impl/built-in/ai_core/tbe/impl/dynamic/slice.py | d61346ea6f4566518203eebc550f23fe6dc7f588 | [] | no_license | gekowa/ascend-opp | f5e09905336d85f9974d555d03d37a75cb8185c1 | 5c28a2faf9d2a117ea6f0923efe35fcd53904dd2 | refs/heads/master | 2023-04-09T12:14:40.337104 | 2021-04-19T23:00:59 | 2021-04-19T23:00:59 | 359,620,865 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,539 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0.You may not use
this file except in compliance with the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Apache License for more details at
http://www.apache.org/licenses/LICENSE-2.0
strided slice
"""
from __future__ import absolute_import
import te.lang.dynamic
from topi.cce import util
from impl import common_util
from te.utils.op_utils import *
from .strided_slice import StridedSlice
# pylint: disable=locally-disabled,too-many-arguments,
# pylint: unused-argument,too-many-locals
@te.op.register_operator("Slice")
@check_op_params(REQUIRED_INPUT, REQUIRED_INPUT,
REQUIRED_INPUT, REQUIRED_OUTPUT, KERNEL_NAME)
def slice(x, offsets, size, y, kernel_name="slice"):
"""
algorithm: slice
calculating: this operation extracts a slice of size size
from a tensor input
starting at the location specified by begin.
Parameters
----------
x: dict
contains shape and dtype information of input tensor
y: dict
contains shape and dtype information of output tensor
offsets: dict
represents the index of the first value to select
size: dict
represents the shape of output tensor
kernel_name: str
cce kernel name, default value is "slice".
Returns
-------
tik instance
"""
# dynamic slice does not use offsets, end params.
strided_slice_instance = StridedSlice(x, None, 0, 0, 0, 0, 0, kernel_name)
strided_slice_instance.strided_slice()
inst = strided_slice_instance.tik_instance
opt_config = {"out_of_bound_sync_check": True}
inst.BuildCCE(kernel_name=strided_slice_instance.kernel_name,
inputs=(strided_slice_instance.input_gm,
strided_slice_instance.begin_gm,
strided_slice_instance.end_gm),
outputs=(strided_slice_instance.output_gm,),
flowtable=[strided_slice_instance.tiling_param.tiling_gm],
config=opt_config,
enable_l2=False)
te.op.add_compile_info("vars", {"block_dim": strided_slice_instance.aicore_num})
return inst
| [
"gekowa@gmail.com"
] | gekowa@gmail.com |
51e2085c83cb053c92ea24c9e86320bb8b126d03 | 8e3a3c845ca3320483b233e8a0db4081aa3b8664 | /clases/migrations/0005_auto_20160623_0039.py | bee89b25480c363feacc339deffa53fc94c63a41 | [] | no_license | sofide/loiprocesos | 7d56398395e6f3302f4d9ec3627ed1b4c24bc17a | 4047fa02d0cfbcf744c80d59e3402215f8b294d3 | refs/heads/master | 2021-07-08T03:26:55.171459 | 2020-08-04T03:23:10 | 2020-08-04T03:23:10 | 61,167,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-23 03:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('grupos', '0001_initial'),
('clases', '0004_auto_20160623_0015'),
]
operations = [
migrations.AddField(
model_name='exposicion',
name='grupo',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='grupos.Grupo'),
),
migrations.AddField(
model_name='pregunta',
name='grupo',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='grupos.Grupo'),
),
]
| [
"sofi.denner@gmail.com"
] | sofi.denner@gmail.com |
c99be8ab1fbd55dd291d94871960eb9885eac72f | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/communication/azure-communication-email/samples/send_email_to_multiple_recipients_sample.py | 4009aaf0f805e47a8f8b3d1e2f8df2a6da58972b | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 3,050 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: send_email_to_multiple_recipient_sample.py
DESCRIPTION:
This sample demonstrates sending an email to multiple recipients. The Email client is
authenticated using a connection string.
USAGE:
python send_email_to_single_recipient_sample.py
Set the environment variable with your own value before running the sample:
1) COMMUNICATION_CONNECTION_STRING - the connection string in your ACS resource
2) SENDER_ADDRESS - the address found in the linked domain that will send the email
3) RECIPIENT_ADDRESS - the address that will receive the email
4) SECOND_RECIPIENT_ADDRESS - the second address that will receive the email
"""
import os
import sys
from azure.core.exceptions import HttpResponseError
from azure.communication.email import EmailClient
sys.path.append("..")
class EmailMultipleRecipientSample(object):
connection_string = os.getenv("COMMUNICATION_CONNECTION_STRING_EMAIL")
sender_address = os.getenv("SENDER_ADDRESS")
recipient_address = os.getenv("RECIPIENT_ADDRESS")
second_recipient_address = os.getenv("SECOND_RECIPIENT_ADDRESS")
def send_email_to_multiple_recipients(self):
# creating the email client
email_client = EmailClient.from_connection_string(self.connection_string)
# creating the email message
message = {
"content": {
"subject": "This is the subject",
"plainText": "This is the body",
"html": "html><h1>This is the body</h1></html>"
},
"recipients": {
"to": [
{"address": self.recipient_address, "displayName": "Customer Name"},
{"address": self.second_recipient_address, "displayName": "Customer Name 2"}
],
"cc": [
{"address": self.recipient_address, "displayName": "Customer Name"},
{"address": self.second_recipient_address, "displayName": "Customer Name 2"}
],
"bcc": [
{"address": self.recipient_address, "displayName": "Customer Name"},
{"address": self.second_recipient_address, "displayName": "Customer Name 2"}
]
},
"senderAddress": self.sender_address
}
try:
# sending the email message
poller = email_client.begin_send(message)
response = poller.result()
print("Operation ID: " + response['id'])
except HttpResponseError as ex:
print(ex)
pass
if __name__ == '__main__':
sample = EmailMultipleRecipientSample()
sample.send_email_to_multiple_recipients()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
77ba68c5ea388336d8038d5e9e64d2d07a52abb5 | 5342c87436c514f6428524d8f9fca33f6745a791 | /ros2cli/ros2param/ros2param/verb/get.py | 1a7c3f1943742ebee688e9b2ed5beb9385c2aec3 | [
"Apache-2.0"
] | permissive | hfz-Nick/ROS | 9d64cb220539a29c65fb6ae8ae0f5e42c5ad955b | 1c8909c9709a0cbaed7f3084557ee4c3fb1ff380 | refs/heads/main | 2022-12-28T05:01:44.255695 | 2020-10-10T01:24:43 | 2020-10-10T01:24:43 | 302,788,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,227 | py | # Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rcl_interfaces.msg import ParameterType
from ros2cli.node.direct import DirectNode
from ros2cli.node.strategy import add_arguments
from ros2cli.node.strategy import NodeStrategy
from ros2node.api import get_node_names
from ros2node.api import NodeNameCompleter
from ros2param.api import call_get_parameters
from ros2param.verb import VerbExtension
class GetVerb(VerbExtension):
"""Get parameter."""
def add_arguments(self, parser, cli_name): # noqa: D102
add_arguments(parser)
arg = parser.add_argument(
'node_name', help='Name of the ROS node')
arg.completer = NodeNameCompleter(
include_hidden_nodes_key='include_hidden_nodes')
parser.add_argument(
'--include-hidden-nodes', action='store_true',
help='Consider hidden nodes as well')
parser.add_argument(
'name', help='Name of the parameter')
parser.add_argument(
'--hide-type', action='store_true',
help='Hide the type information')
def main(self, *, args): # noqa: D102
with NodeStrategy(args) as node:
node_names = get_node_names(
node=node, include_hidden_nodes=args.include_hidden_nodes)
if args.node_name not in node_names:
return 'Node not found'
with DirectNode(args) as node:
response = call_get_parameters(
node=node, node_name=args.node_name,
parameter_names=[args.name])
assert len(response.values) <= 1
# requested parameter not set
if not response.values:
return 'Parameter not set'
# extract type specific value
pvalue = response.values[0]
if pvalue.type == ParameterType.PARAMETER_BOOL:
label = 'Boolean value is:'
value = pvalue.bool_value
elif pvalue.type == ParameterType.PARAMETER_INTEGER:
label = 'Integer value is:'
value = pvalue.integer_value
elif pvalue.type == ParameterType.PARAMETER_DOUBLE:
label = 'Double value is:'
value = pvalue.double_value
elif pvalue.type == ParameterType.PARAMETER_STRING:
label = 'String value is:'
value = pvalue.string_value
elif pvalue.type == ParameterType.PARAMETER_BYTE_ARRAY:
label = 'Byte values are:'
value = pvalue.byte_array_value
elif pvalue.type == ParameterType.PARAMETER_BOOL_ARRAY:
label = 'Boolean values are:'
value = pvalue.bool_array_value
elif pvalue.type == ParameterType.PARAMETER_INTEGER_ARRAY:
label = 'Integer values are:'
value = pvalue.integer_array_value
elif pvalue.type == ParameterType.PARAMETER_DOUBLE_ARRAY:
label = 'Double values are:'
value = pvalue.double_array_value
elif pvalue.type == ParameterType.PARAMETER_STRING_ARRAY:
label = 'String values are:'
value = pvalue.string_array_value
elif pvalue.type == ParameterType.PARAMETER_NOT_SET:
label = 'Parameter not set.'
value = None
else:
return "Unknown parameter type '{pvalue.type}'" \
.format_map(locals())
# output response
if not args.hide_type:
print(label, value) if value is not None else print(label)
else:
print(value)
| [
"you@example.com"
] | you@example.com |
681cb012173559ca0073167310544329505a424b | a7266a2c39e309bdc0fdd4c771942412465d0fb5 | /McCoy Group Code Academy/Exercises/LegendreDVR.py | 308ba21907ec13ef8c94b648285571a964cb0c65 | [] | no_license | McCoyGroup/References | 68a930280e865d3efd4d7d29d7a961126258494d | 7bcf80bebfed92f7967135cc909e7280b2365680 | refs/heads/gh-pages | 2023-04-28T11:24:37.881869 | 2022-08-18T15:38:31 | 2022-08-18T15:38:31 | 127,345,918 | 1 | 6 | null | 2023-04-12T05:21:47 | 2018-03-29T20:49:01 | Mathematica | UTF-8 | Python | false | false | 534 | py | # THIS IS A PLACEHOLDER: AS WE ADD THINGS WE'LL FILL THIS OUT
"""
Goal: ...
Fundamentals: ... (Comma-separated list)
Related Exercises: ... (Comma-separated list)
"""
## Imports: put all import statments here
## Exports: put all the names things we might want to use in other scripts here
__all__ = [
]
## Objects: put all the classes we're defining here
...
## Functions: put all the functions we're defining here
...
## Run Script: put the script we'd want to run from the command line here
if __name__ == '__main__':
... | [
"b3m2a1@gmail.com"
] | b3m2a1@gmail.com |
ddab472e1041d209d6ee9169203e5b8e135d0abe | bd3a1843e2b0dc15837628c77f73e95a9bb1264f | /PyFunceble/helpers/file.py | 9f4d1b0cfbd3975feb10c42d58ddc346c3e4026d | [
"Apache-2.0"
] | permissive | funilrys/PyFunceble | 404c64d1b281d4ae06a939b54f4088d63e12b828 | 214a57d0eca3df7c4ed3421937aaff9998452ba6 | refs/heads/dev | 2023-06-24T18:39:29.372775 | 2023-06-18T13:15:39 | 2023-06-18T13:15:39 | 106,995,518 | 267 | 62 | Apache-2.0 | 2023-08-25T16:05:17 | 2017-10-15T08:25:14 | Python | UTF-8 | Python | false | false | 6,582 | py | """
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides the file helpers.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/#/special-thanks
Contributors:
https://pyfunceble.github.io/#/contributors
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2022, 2023 Nissar Chababy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import shutil
from typing import Any, Optional
from PyFunceble.helpers.directory import DirectoryHelper
class FileHelper:
"""
Simplify the file manipulations.
:param str path: The file path to work with.
"""
_path: Optional[str] = None
def __init__(self, path: Optional[str] = None):
if path:
self.path = path
@property
def path(self) -> Optional[str]:
"""
Provides the current state of the :code:`_path` attribute.
"""
return self._path
@path.setter
def path(self, value: str) -> None:
"""
Sets the path to work with.
:param value:
The path to work with.
:raise TypeError:
When :code:`value` is a :py:class:`value`.
"""
if not isinstance(value, str):
raise TypeError(f"<value> should be {str}, {type(value)} is given.")
self._path = value
def set_path(self, value: str) -> "FileHelper":
"""
Sets the path to work with.
:param value:
The path to work with.
"""
self.path = value
return self
def join_path(self, *args) -> str:
"""
Joins the given arguments with the given path.
"""
return os.path.join(self.path, *args)
def exists(self) -> bool:
"""
Checks if the given file path exists.
"""
return os.path.isfile(self.path)
def get_size(self) -> int:
"""
Provides the size (in bytes) of the
given file path.
"""
return os.stat(self.path).st_size
def is_empty(self) -> bool:
"""
Checks if the given file path is empty.
"""
return self.get_size() <= 0
def delete(self) -> "FileHelper":
"""
Deletes the given file path if it exists.
"""
if self.exists():
os.remove(self.path)
return self
def write(
self, data: Any, *, overwrite: bool = False, encoding: str = "utf-8"
) -> "FileHelper":
"""
Write the given data into the given file path.
:param data: The data to write.
:param encoding: The encoding to use while opening the file.
"""
if overwrite or not self.exists():
DirectoryHelper(os.path.dirname(self.path)).create()
with self.open("w", encoding=encoding) as file_stream:
file_stream.write(data)
else:
with self.open("a", encoding=encoding) as file_stream:
file_stream.write(data)
return self
def read(self, *, encoding: str = "utf-8") -> Optional[str]:
"""
Read the given file path and return it's content.
:param str encoding: The encoding to use.
"""
data = None
if self.exists():
with self.open("r", encoding=encoding) as file_stream:
data = file_stream.read()
return data
def read_bytes(self) -> Optional[bytes]:
"""
Read the given file ath and returns it's bytes contetn.
"""
data = None
if self.exists():
with self.open("rb") as file_stream:
data = file_stream.read()
return data
def open(self, *args, **kwargs) -> "open":
"""
A wrapper for the built-in :py:class:`open` function.
"""
return open(self.path, *args, **kwargs) # pylint: disable=unspecified-encoding
def copy(self, destination: str) -> "FileHelper":
"""
Copy the globaly given file path to the given destination.
:param str destination: The destination of the copy.
"""
if self.exists():
shutil.copy(self.path, destination)
return self
def move(self, destination) -> "FileHelper":
"""
Move the globally given file path to the given destination.
:param str destination: The destination of the file.
"""
if self.exists():
shutil.move(self.path, destination)
return self
| [
"contact@funilrys.com"
] | contact@funilrys.com |
82ce73e6415e6d017e3700546ee09e7625280e80 | 3c114c083af073421fc0becfa4b4471ba1d77de5 | /google/sparse_matrix.py | 0dd73b7847553e67b60991e24b0e2ed383dac254 | [] | no_license | alonsovidales/interview_questions | 99f757c7e35c5ede450be25d3bebd54a18b1312b | 5e63e238950c2f6bdfd3ff48311d6c69a676d382 | refs/heads/master | 2021-01-17T12:06:48.419891 | 2018-03-25T08:44:14 | 2018-03-25T08:44:14 | 30,909,319 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | """
Given a sparse matrix, implement below two methods:
void set(int row, int col, int val) /*Update value at given row and col*/
int sum(int row, int col) /*give sum from top left corner to given row, col sub-matrix*/
"""
class SortedArray(object):
def __init__(self):
self._dirty = False
self._arr = []
self._elems = {}
def set(self, val, score):
self._elems[score] = val
self._arr.append((val, score))
self._dirty = True
def get_by_score(self, score):
return self._elems.get(score)
def get_to_score(self, score):
if self._dirty:
self._arr = sorted(self._arr, key=lambda x: x[1])
self._dirty = False
result = []
for i in xrange(len(self._arr)):
if self._arr[i][1] > score:
return result
result.append(self._arr[i][0])
return result
class SparseMatrix(object):
def __init__(self):
self._rows = SortedArray()
def set(self, row, col, v):
cols = self._rows.get_by_score(row)
if cols is None:
cols = SortedArray()
self._rows.set(cols, row)
cols.set(v, col)
def sum(self, row, col):
total = 0
for cols in self._rows.get_to_score(row):
for value in cols.get_to_score(col):
total += value
return total
import unittest
class TestSparseMatrix(unittest.TestCase):
def test_set_sum(self):
sm = SparseMatrix()
sm.set(1, 2, 1)
sm.set(3, 2, 2)
sm.set(9, 1, 3)
sm.set(3, 8, 4)
self.assertEqual(sm.sum(1, 2), 1)
self.assertEqual(sm.sum(9, 9), 10)
self.assertEqual(sm.sum(3, 2), 3)
if __name__ == '__main__':
unittest.main()
| [
"alonso.vidales@tras2.es"
] | alonso.vidales@tras2.es |
9a13592fb7c388eae0315d097c09293ad3beca18 | 238c16de19b2b5928eeba6ca35abffdbfaa961e4 | /tests/conftest.py | d730fbb8a3dfcce7244fa8d564c373d6d0aec9b9 | [] | no_license | multiscripter/random-phrase-fastapi | dd7637d25ea7326659d8dfb7925697ab37d14c8f | 5cba715f898309530fa393a4cf434d45725ba6ed | refs/heads/master | 2022-12-28T23:35:08.435193 | 2020-09-28T21:00:18 | 2020-09-28T21:00:18 | 299,430,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | import pytest
from Database import Database
from db import PhraseInput
@pytest.hookimpl()
def pytest_sessionstart(session):
"""Actions before all tests."""
db = Database()
for a in range(1, 4):
data = {
'author': f'test-author-{a}',
'text': f'test-text-{a}'
}
phrase = PhraseInput(**data)
db.add(phrase)
print('created:')
print(list(db.items.keys()))
@pytest.hookimpl()
def pytest_sessionfinish(session, exitstatus):
"""Actions after all tests."""
db = Database()
for key in db.items.scan_iter(f'phrase*'):
db.items.delete(key)
print('deletion completed')
print(list(db.items.keys()))
| [
"ILL-JAH@yandex.ru"
] | ILL-JAH@yandex.ru |
20b01ef20b8f2d6b57de84d0d28e2bc0e71557c9 | e707164df1aa8edb5d276179538bd1eb1805f759 | /CODE/fedora_application/env/lib/python2.7/site-packages/rube/core/__init__.py | fcfa0e415c10c22ed928fa448f8bd7ebcccd9801 | [] | no_license | beckastar/cleaner_markov | af5816c14c94a8cb7924728179470e7db9ed2bc0 | a6de3fd87db77c0d80789cbce0ff409c222b4e67 | refs/heads/master | 2021-01-02T22:52:08.989862 | 2013-11-10T04:51:04 | 2013-11-10T04:51:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,358 | py | # -*- coding: utf-8 -*-
# This file is part of Rube.
#
# Rube is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rube is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rube. If not, see <http://www.gnu.org/licenses/>.
import logging
import unittest
import selenium.webdriver.support.ui as ui
from selenium.webdriver.support.expected_conditions import title_is
from pyvirtualdisplay import Display
from selenium import webdriver
from testconfig import config
from utils import (
prompt_for_auth,
expects_zmqmsg,
tolerant,
skip_logout,
collect_har,
ensures_after,
)
selenium_logger = logging.getLogger("selenium.webdriver")
selenium_logger.setLevel(logging.INFO)
display = None
driver = None
proxy = None
def get_driver_and_proxy():
global display
global driver
global proxy
if not driver:
if int(config.get('browsermob', {}).get('collect-har', 0)):
from browsermobproxy import Server
server = Server(config['browsermob']['path'])
server.start()
proxy = server.create_proxy()
if int(config.get('xconfig', {}).get('headless', 0)):
display = Display(visible=0, size=(800, 600))
display.start()
profile = webdriver.FirefoxProfile()
if proxy:
profile.set_proxy(proxy.selenium_proxy())
driver = webdriver.Firefox(firefox_profile=profile)
driver.implicitly_wait(60)
return driver, proxy
def tearDown():
global display
global driver
global proxy
if driver:
driver.close()
if display:
display.stop()
if proxy:
proxy.close()
class RubeTest(unittest.TestCase):
base = None
title = None
logout_url = None
timeout = 20000
# If you subclass and set this to True, then we won't prompt you for auth.
no_auth = False
# Change this in your subclass to use a different realm in the keyring.
realm = None
# Internally used to skip logout and whatnot during teardown
_no_teardown = []
def setUp(self):
self.driver, self.proxy = get_driver_and_proxy()
self.driver.delete_all_cookies()
# not no_auth ~= yes auth
if not self.no_auth and self.realm:
self.auth = prompt_for_auth(self.realm)
def tearDown(self):
if self._testMethodName in self._no_teardown:
return # skip the teardown
if not self.no_auth and self.logout_url:
self.driver.get(self.logout_url)
def wait_for(self, target):
wait = ui.WebDriverWait(self.driver, self.timeout)
wait.until(lambda d: target in d.page_source)
@collect_har()
@tolerant()
def test_title(self):
self.driver.get(self.base)
assert title_is(self.title), self.driver.title
__all__ = [
'RubeTest',
'expects_zmqmsg',
'tolerant',
'get_driver',
'skip_logout'
]
| [
"rebecca.robbins.et@gmail.com"
] | rebecca.robbins.et@gmail.com |
e170d3ca2867026517e00e298e216461dc4fc6e2 | a9dc42e9f54b549fcdd695817e347abfd8f2869f | /old/bin/clt_ar5_model_data_preprocess.py | 55d61322a3a3fb5d7b12531f576a4c10c6755c4c | [
"MIT"
] | permissive | yusheng-wang/downscale | 2e77d070115ead3034c154d29f1c533976228f13 | 3fe8ea1774cf82149d19561ce5f19b25e6cba6fb | refs/heads/master | 2023-04-10T03:25:08.806859 | 2019-09-21T17:34:35 | 2019-09-21T17:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,238 | py | #!/usr/bin/python2
# #
# pre-processing of raw downloaded CMIP5 data from the PCMDI portal to something that is standardized for
# use in later downscaling to ALFRESCO AK/Canada extent and resolution needs.
# # # # #
def group_input_filenames( prefix, root_dir ):
import fnmatch, functools, itertools, os, glob
import pandas as pd
''' function that wraps some ugliness regarding returning the files we want to process '''
def find_files( dir_path, patterns ):
"""
Returns a generator yielding files matching the given patterns
:type dir_path: [str]
:type patterns: [str]
:rtype : [str]
:param dir_path: Directory to search for files/directories under. Defaults to current dir.
:param patterns: Patterns of files to search for. Defaults to ["*"]. Example: ["*.json", "*.xml"]
"""
import itertools, functools
path = dir_path
if not patterns:
path_patterns = [ "*" ]
else:
path_patterns = patterns
for root_dir, dir_names, file_names in os.walk( path ):
filter_partial = functools.partial(fnmatch.filter, file_names)
for file_name in itertools.chain( *map( filter_partial, path_patterns ) ):
yield os.path.join( root_dir, file_name )
def version_grouper( x ):
''' groupby function for grouping by filenames '''
dir_path = os.path.dirname( x )
fn, _ = os.path.splitext( os.path.basename( x ) )
# remove dates from filename -- they have a hyphen
fn_base = '_'.join([ i for i in fn.split( '_' ) if '-' not in i ])
# return the path element that startswith 'v' this is the version attribute
version = [ x for x in dir_path.split( os.path.sep ) if x.startswith( 'v' ) ]
return '_'.join([ fn_base, version[0] ])
def drop_old_versions( df ):
rows,cols = df.shape
if rows > 1 & rows < 3:
version_nums = df[ df.columns[-1] ].apply( lambda x : int( x.replace( 'v', '' ) ) )
# max( version_nums )
return df.drop( df[ df[ df.columns[-1] ] != 'v' + str( max( version_nums ) )].index )
elif rows > 3:
# potentially unnecessary
None
else:
return df
# [ !ML CHANGED! ]
# get all matches with prefix
matches = pd.Series([ match for match in find_files( root_dir, [ prefix ] ) ])
input_paths = matches.apply( os.path.dirname )
# # group by version
# grouped = dict([ group for group in matches.groupby( matches.apply( version_grouper ) )])
# group keys to DataFrame
fn_list = matches.apply( lambda x: os.path.splitext( os.path.basename( x ) )[0] ).apply( lambda x: x.split( '_' )[3] )
grouped = matches.groupby( fn_list )
final_out = { group:files for group,files in grouped }
# keys_df = pd.DataFrame({ key:key.split( '_' ) for key in grouped.keys() }).T
# parse the keys / values and keep only latest versions
# keys_df_grouped = pd.concat([ drop_old_versions(i[1]) for i in keys_df.groupby( keys_df.columns[-3] ) ])
# # make a new dictionary holding the filenames grouped the way we want
# final_out = { k:v for k,v in grouped.iteritems() if k in keys_df_grouped.index.tolist() }
return final_out
def get_file_years( filename ):
path, fn = os.path.split( filename )
fn, ext = os.path.splitext( fn )
split = fn.split( '_' )
dates = split[ len( split ) - 1 ] # grab last element
begin, end = dates.split( '-' )
return [begin, end]
def get_modelname( filename ):
path, fn = os.path.split( filename )
return [ i for i in path.split( '/' ) if i in models ][0]
def concat_to_nc( filelist, output_filename, dim='time', begin_time=None, end_time=None, nc_format='NETCDF4', **kwargs ):
    '''
    take list of consecutive netcdf files (made for CMIP5 data) and stack them into a
    single larger netcdf file. This was necessary to overcome some bugginess in how
    MFDataset is dealing with different calendar units on different files. This is
    technically valid CF-Compliant metadata, but is tricky to work with. This hack allows
    us to get around some of this unpredictable behavior.

    PARAMETERS:
    -----------
    filelist = [list] list of string file paths to the sorted netcdf files to stack together
    output_filename = [str] path to and name of the output file to be generated (.nc extension)
    dim = [str] dimension to stack on -- default is 'time'
    begin_time = [str] PANDAS style datetime string syntax -- used in xray
    end_time = [str] PANDAS style datetime string syntax -- used in xray
    nc_format = [str] output NetCDF format desired. valid strings are:
                    'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', 'NETCDF3_CLASSIC'
                    default is 'NETCDF4'
    **kwargs -- potential future arguments or overloaded args to pass through (none implemented)

    RETURNS:
    --------
    output_filename as string, with the important side-effect of writing data to disk
    '''
    import xray
    # NOTE(review): xray.concat() is used here as a context manager -- confirm
    # the installed xray/xarray version actually supports `with` on its result.
    with xray.concat([ xray.open_dataset( i ).load() for i in filelist ], dim ) as ds:
        # time slicer condition: only subset when BOTH bounds were supplied
        if begin_time != None and end_time != None:
            ds = ds.loc[ { dim:slice( begin_time, end_time ) } ]
        # overwrite any pre-existing output file
        if os.path.exists( output_filename ):
            os.remove( output_filename )
        ds.to_netcdf( output_filename, mode='w', format=nc_format )
    return output_filename
def year_greater_yearlimit_workaround( xray_dataset, desired_year_begin, desired_year_end, file_year_begin, file_year_end ):
    '''
    Subset a monthly dataset by YEAR using integer positions instead of
    datetime slicing.

    This sidesteps PANDAS' 64-bit-nanosecond datetime range (usable only up
    to roughly year 2262), which breaks string-based time slicing for model
    outputs reaching e.g. 2300. The file is assumed to hold 12 months for
    every year in [file_year_begin, file_year_end].

    PARAMETERS:
    -----------
    xray_dataset = dataset supporting dict-based positional indexing on 'time'
    desired_year_begin / desired_year_end = [int] 4-digit target year range
    file_year_begin / file_year_end = [int] 4-digit year range stored in the file

    RETURNS:
    --------
    the dataset subset to the requested years via integer time indexing
    '''
    # one entry per month: each year in the file repeated 12 times
    month_years = np.repeat( range( file_year_begin, file_year_end + 1 ), 12 )
    earliest = min( month_years )
    if desired_year_begin < earliest:
        # requested begin predates the file; mirror the original offset logic
        first_idx = ( earliest - desired_year_begin )
    else:
        first_idx = np.min( np.where( month_years == desired_year_begin ) )
    last_idx = np.max( np.where( month_years == desired_year_end ) )
    return xray_dataset[ dict( time=range( first_idx, last_idx + 1 ) ) ]
if __name__ == '__main__':
    # NOTE(review): the bare `print` statements below make this script
    # Python-2 only; it also depends on the long-dead `xray` library.
    import os, sys, re, glob, xray
    import numpy as np
    import pandas as pd
    import argparse

    # parse the commandline arguments
    parser = argparse.ArgumentParser( description='preprocess cmip5 input netcdf files to a common type and single files' )
    parser.add_argument( "-p", "--base_path", action='store', dest='base_path', type=str, help="path to parent directory with a subdirector(ies)y storing the data" )
    parser.add_argument( "-m", "--model", action='store', dest='model', type=str, help="string of the model name to use in the filename search i.e 'GISS-E2-R', 'IPSL-CM5A-LR',..." )
    parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="string of the variable name to use in the filename search i.e. 'tas', 'hur',..." )

    # parse and unpack args
    args = parser.parse_args()
    model = args.model
    variable = args.variable
    base_path = args.base_path

    # start an error log for any problem files
    output_base_path = os.path.join( base_path, 'prepped' )
    if not os.path.exists( output_base_path ):
        os.makedirs( output_base_path )
    problem_files_log = open( os.path.join( output_base_path, 'error_files.txt' ), mode='w' )

    # make some filters and filter the files we want into groups
    fn_prefix_filter = variable + '_*' + model + '*'
    file_groups = group_input_filenames( fn_prefix_filter, base_path ) # [ !ML CHANGED! ]

    # process each group of consecutive files into a single stacked netcdf
    for files in file_groups.values():
        try:
            files = sorted( files ) # [ !ML CHANGED! ]
            fn = files[ 0 ]
            # model = get_modelname( fn )
            output_path = os.path.join( output_base_path, model, variable )
            begin_year ='185001' # hardwired
            end_year = '210012' # hardwired

            # a handler for the historical (1850-2005) and the modeled (2006-2100) naming
            if 'historical' in os.path.basename( fn ):
                begin_year_fnout = '185001' # hardwired
                end_year_fnout = '200512' # hardwired
            else:
                begin_year_fnout = '200601' # hardwired
                end_year_fnout = '210012' # hardwired

            # this logic can be fine tuned to subset the data down to only the files we need
            # for this project it is 1850-2100.
            df = pd.DataFrame([ get_file_years(fn) for fn in files ])
            # this is the way to interrogate that dataframe for the values we want
            df = df.astype( int )
            # index of the file whose begin/end year is closest to the target range
            begin_idx = (np.abs(df[0] - int( begin_year ) ) ).argmin()
            end_idx = (np.abs(df[1] - int( end_year ) ) ).argmin()

            # return the files between the desired date ranges
            if begin_idx == end_idx:
                files = [ files[ begin_idx ] ]
            else:
                files = files[ begin_idx:end_idx + 1 ]

            print files
            print '\n'

            # NOTE(review): DataFrame.ix is long-deprecated -- use .loc/.iloc in any port.
            begin_year_in = str(df.ix[ begin_idx ][0])
            end_year_in = str(df.ix[ end_idx ][1])

            # set up some vars for the output naming standardization
            cmor_table = os.path.splitext( os.path.basename( fn ) )[ 0 ].split( '_' )[ 1 ]
            experiment = scenario = os.path.splitext( os.path.basename( fn ) )[ 0 ].split( '_' )[ -2 ]
            scenario = os.path.splitext( os.path.basename( fn ) )[ 0 ].split( '_' )[ -3 ]

            if not os.path.exists( output_path ):
                os.makedirs( output_path )

            # run the concatenation and the output to a new netcdf file
            # --> and we are writing in a hack to get around the darn issue with GFDL-CM3
            # we could just run them all with the reduce workaround, but I will keep both
            # in hopes that the library improves.
            if 'GFDL' in model:
                ds = reduce( lambda x,y: xray.concat( [x,y], 'time'), (xray.open_dataset( i ) for i in files) )
            else:
                ds = xray.concat([ xray.open_dataset( i ).load() for i in files ], 'time' )

            # integer-index subset to sidestep PANDAS' datetime year limits
            new_ds = year_greater_yearlimit_workaround( ds, int( begin_year_fnout[:4] ), int( end_year_fnout[:4] ), int(str(begin_year_in)[:4]), int(str(end_year_in)[:4]) )
            begin_year_fnout = str(int(begin_year_fnout[:4]) + (int(begin_year_in[:4]) - int(begin_year_fnout[:4]) )) + '01' # to update the output naming

            # output name generation
            new_fn_base = '_'.join([ variable, cmor_table, model, scenario, experiment, begin_year_fnout, end_year_fnout ]) + '.nc'
            output_filename = os.path.join( output_path, new_fn_base )
            new_ds.to_netcdf( output_filename, mode='w' )

            # cleanup
            ds.close()
            ds = None
            new_ds.close()
            new_ds = None

            # legacy version of file concatenation that is not ready for prime-time due to the quirky nature of the xray library
            # in its young state.
            # concat_to_nc( files, output_filename, dim='time', begin_time=begin_year[:4], end_time=end_year[:4] )
        except:
            # NOTE(review): bare except hides real bugs (even KeyboardInterrupt);
            # narrow this to the expected IO/parse exceptions where possible.
            print '\n--> ERROR !!!\n\n%s\n\n' % files
            problem_files_log.writelines( files )
            pass

    problem_files_log.close()
# EXAMPLE OF USE:
# some setup
# import os
# models = [ 'GISS-E2-R', 'IPSL-CM5A-LR', 'MRI-CGCM3', 'CCSM4', 'GFDL-CM3' ]
# variables = [ 'tas', 'hur' ]
# base_path = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data'
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs' )
# for model in models:
# for variable in variables:
# os.system( ' '.join(['python','hur_ar5_model_data_preprocess.py','-p', base_path, '-m', model, '-v', variable]) )
# # # # # # #
# --> special clt prep due to ESGF being down for the past months.
# import os
# models = [ 'GISS-E2-R', 'IPSL-CM5A-LR', 'MRI-CGCM3', 'CCSM4', 'GFDL-CM3' ]
# variables = [ 'clt' ]
# base_path = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/cmip5_clt_nonstandard'
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# for model in models:
# for variable in variables:
# os.system( ' '.join(['python','clt_ar5_model_data_preprocess.py','-p', base_path, '-m', model, '-v', variable]) )
| [
"lindgren.mike@gmail.com"
] | lindgren.mike@gmail.com |
6a39bf88ed26fd334ca58f0d9fa50928ff9d0a6a | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_1/208.py | 3cff053a0d8355a115d24f870f7da1280b84ee66 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | #!/usr/bin/python
import sys
def handle_case(case_no, engiens, words):
    """Solve one test case and print its result line.

    Counts how many times the window of distinct seen words must be flushed:
    a flush happens when a new (unseen) word arrives while the window already
    holds len(engiens) - 1 distinct entries.

    case_no -- 1-based case number used in the output line
    engiens -- list of engine names (only its length matters here)
    words   -- the query words, in order

    Fixes: the Python-2-only `print` statement is replaced by the
    parenthesized form (valid in both 2 and 3), and the O(n) `saw.keys()`
    membership tests are replaced by direct O(1) dict membership.
    """
    seen = {}
    answer = 0
    for word in words:
        if word not in seen:
            if len(seen) == (len(engiens) - 1):
                # window is full: count a switch and start a fresh window
                answer += 1
                seen = {}
            seen[word] = 1
    print("Case #%d: %s" % (case_no, answer))
def main():
    """Read a Code Jam input file (path in argv[1]) and run every case.

    Expected layout: first line is the number of cases; each case then lists
    the engine count, the engine names (one per line), the word count, and
    the words (one per line).

    Fix: the file handle was previously closed manually and would leak if any
    case raised; a `with` block now guarantees closure.
    """
    filename = sys.argv[1]
    with open(filename, "r") as fsock:
        size = int(fsock.readline())
        for case in range(1, size + 1):
            engiens_no = int(fsock.readline())
            engiens = []
            for _ in range(engiens_no):
                engiens.append(fsock.readline().rstrip("\n"))
            words_no = int(fsock.readline())
            words = []
            for _ in range(words_no):
                words.append(fsock.readline().rstrip("\n"))
            handle_case(case, engiens, words)

if __name__ == "__main__":
    main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
aec1b46ee90e6ad349ec1ef3880aafad45dd2339 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_17926.py | 3f65a0747b3ebe817fa6bb84dae8bed99654f41d | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | # Django-Models: TypeError: coercing to Unicode: need string or buffer, User found
def __unicode__(self):
    """Human-readable representation: the string form of the related user."""
    user_repr = '%s' % (self.user)
    return user_repr
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
9558aff2a7a109b33beb176497844e5998bf15cd | d88397be1c6a31985bc2283280e743fd3b988dd1 | /beta/examples/tensorflow/common/optimizer.py | ebf3e1b4079c43f388b1f9256596c3a7c9103cec | [
"Apache-2.0"
] | permissive | sshyran/openvino-nncf-pytorch | f5e09066a216fa786927937a91a0e6742f347660 | fd02652950cd803a36f5283f5a5df999bb45433b | refs/heads/develop | 2023-04-18T06:58:54.646669 | 2021-03-12T15:41:39 | 2021-03-12T15:41:39 | 347,374,166 | 0 | 0 | Apache-2.0 | 2023-04-03T23:52:21 | 2021-03-13T13:11:32 | null | UTF-8 | Python | false | false | 3,837 | py | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tensorflow as tf
import tensorflow_addons as tfa
from beta.examples.tensorflow.common.logger import logger
def build_optimizer(config, scheduler):
    """Construct a tf.keras optimizer from the experiment `config`.

    Reads config['optimizer'] with keys 'type' (default 'adam') and
    'optimizer_params'; after building the base optimizer, optionally wraps
    it with a moving-average wrapper and/or a Lookahead wrapper.
    """
    opt_cfg = config.get('optimizer', {})
    opt_type = opt_cfg.get('type', 'adam').lower()
    params = opt_cfg.get("optimizer_params", {})
    logger.info('Building %s optimizer with params %s', opt_type, params)

    if opt_type == 'sgd':
        logger.info('Using SGD optimizer')
        optimizer = tf.keras.optimizers.SGD(
            learning_rate=scheduler,
            nesterov=params.get('nesterov', False))
    elif opt_type == 'momentum':
        logger.info('Using momentum optimizer')
        optimizer = tf.keras.optimizers.SGD(
            learning_rate=scheduler,
            momentum=params.get('momentum', 0.9),
            nesterov=params.get('nesterov', False))
    elif opt_type == 'rmsprop':
        logger.info('Using RMSProp')
        optimizer = tf.keras.optimizers.RMSprop(
            learning_rate=scheduler,
            rho=params.get('rho', 0.9),
            momentum=params.get('momentum', 0.9),
            epsilon=params.get('epsilon', 1e-07))
    elif opt_type == 'adam':
        logger.info('Using Adam')
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=scheduler,
            beta_1=params.get('beta_1', 0.9),
            beta_2=params.get('beta_2', 0.999),
            epsilon=params.get('epsilon', 1e-07))
    elif opt_type == 'adamw':
        logger.info('Using AdamW')
        optimizer = tfa.optimizers.AdamW(
            weight_decay=params.get('weight_decay', 0.01),
            learning_rate=scheduler,
            beta_1=params.get('beta_1', 0.9),
            beta_2=params.get('beta_2', 0.999),
            epsilon=params.get('epsilon', 1e-07))
    else:
        raise ValueError('Unknown optimizer %s' % opt_type)

    # Optional exponential moving average of the weights.
    if params.get('moving_average_decay', 0.) > 0.:
        logger.info('Including moving average decay.')
        optimizer = tfa.optimizers.MovingAverage(
            optimizer,
            average_decay=params.get('moving_average_decay', 0.),
            num_updates=None)
    # Optional Lookahead wrapper around whichever optimizer was built.
    if params.get('lookahead', None):
        logger.info('Using lookahead optimizer.')
        optimizer = tfa.optimizers.Lookahead(optimizer)
    return optimizer
| [
"noreply@github.com"
] | sshyran.noreply@github.com |
234c2b61255e3d404e666a25ecc316deb34ed85f | 148072ce210ca4754ea4a37d83057e2cf2fdc5a1 | /src/core/w3af/w3af/core/data/kb/vuln_templates/tests/test_base_template.py | 8108d8d84b9422931d1e1c5e55f63f073e4fecaa | [] | no_license | ycc1746582381/webfuzzer | 8d42fceb55c8682d6c18416b8e7b23f5e430c45f | 0d9aa35c3218dc58f81c429cae0196e4c8b7d51b | refs/heads/master | 2021-06-14T18:46:59.470232 | 2017-03-14T08:49:27 | 2017-03-14T08:49:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,586 | py | """
test_base_template.py
Copyright 2012 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
from mock import Mock
from w3af.core.data.kb.vuln_templates.base_template import BaseTemplate
class BaseTemplateTest(unittest.TestCase):
    """Exercises BaseTemplate option handling and the vuln-id counter."""

    def test_basic(self):
        template = BaseTemplate()
        options = template.get_options()

        # Fill in the minimal set of options describing one vulnerability.
        values = {'name': 'SQL injection',
                  'url': 'http://host.tld/foo.php',
                  'data': 'id=3',
                  'method': 'GET',
                  'vulnerable_parameter': 'id'}
        for option_name, option_value in values.items():
            options[option_name].set_value(option_value)

        template.get_vulnerability_name = Mock(return_value='unittest')
        template.set_options(options)

        # Consecutive vuln ids must be sequential integers.
        first = template.get_vuln_id()
        second = template.get_vuln_id()
        self.assertEqual(first + 1, second)
| [
"everping@outlook.com"
] | everping@outlook.com |
7eff3f83f034b7bb0330f386c24b489e1bcf3e28 | 45ddd3d0d568b3d28c25c2023839933496753d52 | /Bai34-multitaskLearning/predict.py | d2f931a016260ca73f857a1c73eacd52fe4fb4e6 | [] | no_license | phamdinhkhanh/khanhBlogTurtorial | 7d65259066b1eb6f48a2c7ef78840005d61c9fdb | 0685ef989f72057581b0268bd6c9e01981833549 | refs/heads/master | 2022-12-13T08:30:18.866016 | 2020-05-05T01:26:44 | 2020-05-05T01:26:44 | 248,902,988 | 21 | 20 | null | 2022-12-12T11:57:04 | 2020-03-21T03:57:22 | Jupyter Notebook | UTF-8 | Python | false | false | 2,173 | py | # USAGE
# python predict.py --model fashion_multitask_learning.h5 --labelbin mlb.pkl --image examples/example_01.jpg
# import the necessary packages
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import numpy as np
import argparse
import pickle
import cv2
import os
import requests
import matplotlib.pyplot as plt
# Spatial size (and channel count) every image is resized to before inference.
# NOTE(review): a channel count of 2 looks odd for RGB input -- confirm this
# matches the dimensions the model was trained with.
IMAGE_DIMS = (96, 96, 2)

# Build the command-line argument parser
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True,
    help="path to trained model model")
ap.add_argument("-l", "--labelbin", required=True,
    help="path to label binarizer")
ap.add_argument("-i", "--image", required=True,
    help="url link to input image")
args = vars(ap.parse_args())

# Load the trained network and the pickled multi-label binarizer
print("[INFO] loading network...")
model = load_model(args["model"])
mlb = pickle.loads(open(args["labelbin"], "rb").read())
# read image
def _downloadImage(url):
    """Read the image at `url` and return it converted from BGR to RGB.

    NOTE(review): despite the name, cv2.imread only reads local paths --
    confirm whether fetching remote URLs was ever intended.
    """
    bgr = cv2.imread(url)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
# dự báo image
def _predict_image(image, model, mlb):
    """Resize `image`, run `model`, and display the top-2 predicted labels.

    image -- RGB image array
    model -- trained Keras model accepting IMAGE_DIMS-sized inputs
    mlb   -- fitted label binarizer whose classes_ name the model outputs
    """
    # Image dimensions. NOTE(review): numpy image shapes are (rows, cols,
    # channels), so `w` here is actually the height -- the names look
    # swapped; confirm the 400px resize lands on the intended axis.
    (w, h, c) = image.shape
    # If the resized width is 400, the matching (aspect-preserving) height is:
    height_rz = int(h*400/w)
    # Resize a copy of the image for display
    output = cv2.resize(image, (height_rz, 400))
    # Resize and rescale the copy used for prediction
    image = cv2.resize(image, IMAGE_DIMS[:2])/255.0
    # Predict class probabilities for the image
    prob = model.predict(np.expand_dims(image, axis=0))[0]
    # Take the two highest-probability class indices
    argmax = np.argsort(prob)[::-1][:2]
    # Draw each class name and its probability onto the display image
    for (i, j) in enumerate(argmax):
        # overlay the label and predicted probability
        label = "{}: {:.2f}%".format(mlb.classes_[j], prob[j] * 100)
        cv2.putText(output, label, (5, (i * 20) + 15),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (225, 0, 0), 2)
    # show the output image
    output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
    # plt.imshow(output)
    # cv2.imwrite('predict.jpg', output)
    cv2.imshow("Output", output)
    cv2.waitKey(0)
image = _downloadImage(args['image'])
_predict_image(image, model, mlb)
| [
"phamdinhkhanh.tkt53.neu@gmail.com"
] | phamdinhkhanh.tkt53.neu@gmail.com |
b53181ff1835a17047d6c94d41b850630c4e82a4 | a08d85552ed0db1a906c3b31ed99f56bae857c60 | /PythonCourse/d2e2.py | fd961bf13b6f7ec843e3d610fd509829890c3ca1 | [] | no_license | MagdalenaZZ/Python_ditties | 90866e53f9aafa603f05735e2ceb094cf5518a18 | 757d8de1df0e53d38d4ba9854b092eabe6ec6570 | refs/heads/master | 2023-02-20T12:23:09.778092 | 2023-02-07T10:06:55 | 2023-02-07T10:06:55 | 136,293,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py |
def maximum (x, y):
    """Return the larger of two comparable values (y when they are equal)."""
    return x if x > y else y
| [
"magz@MacBook-Air.local"
] | magz@MacBook-Air.local |
2d0464c5c1df613a7ff2b22103ad294d1b865050 | dc3f29ca73e30f449cb991fa48575a8420f75099 | /homeassistant/components/energy/data.py | e8c62da0c3c8dad4e0a9c5c9cfb718c24951c1c3 | [
"Apache-2.0"
] | permissive | EverythingSmartHome/core | 16951da19d63f0eec80715ec47e2ba7bad329c0b | 2169d839ce24c6e2b9fa215c5cabbd48b8a4022f | refs/heads/dev | 2023-01-07T14:52:07.206460 | 2022-07-13T09:08:23 | 2022-07-13T09:08:23 | 513,561,926 | 2 | 0 | Apache-2.0 | 2022-07-13T14:51:08 | 2022-07-13T14:51:07 | null | UTF-8 | Python | false | false | 9,493 | py | """Energy data."""
from __future__ import annotations
import asyncio
from collections import Counter
from collections.abc import Awaitable, Callable
from typing import Literal, TypedDict, Union
import voluptuous as vol
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv, singleton, storage
from .const import DOMAIN
STORAGE_VERSION = 1
STORAGE_KEY = DOMAIN
@singleton.singleton(f"{DOMAIN}_manager")
async def async_get_manager(hass: HomeAssistant) -> EnergyManager:
    """Return the process-wide EnergyManager, creating it on first call.

    The singleton decorator caches the instance, so construction and the
    initial load of stored preferences happen only once.
    """
    energy_manager = EnergyManager(hass)
    await energy_manager.async_initialize()
    return energy_manager
class FlowFromGridSourceType(TypedDict):
"""Dictionary describing the 'from' stat for the grid source."""
# statistic_id of a an energy meter (kWh)
stat_energy_from: str
# statistic_id of costs ($) incurred from the energy meter
# If set to None and entity_energy_from and entity_energy_price are configured,
# an EnergyCostSensor will be automatically created
stat_cost: str | None
# Used to generate costs if stat_cost is set to None
entity_energy_from: str | None # entity_id of an energy meter (kWh), entity_id of the energy meter for stat_energy_from
entity_energy_price: str | None # entity_id of an entity providing price ($/kWh)
number_energy_price: float | None # Price for energy ($/kWh)
class FlowToGridSourceType(TypedDict):
"""Dictionary describing the 'to' stat for the grid source."""
# kWh meter
stat_energy_to: str
# statistic_id of compensation ($) received for contributing back
# If set to None and entity_energy_from and entity_energy_price are configured,
# an EnergyCostSensor will be automatically created
stat_compensation: str | None
# Used to generate costs if stat_compensation is set to None
entity_energy_from: str | None # entity_id of an energy meter (kWh), entity_id of the energy meter for stat_energy_from
entity_energy_price: str | None # entity_id of an entity providing price ($/kWh)
number_energy_price: float | None # Price for energy ($/kWh)
class GridSourceType(TypedDict):
    """Dictionary holding the source of grid energy consumption."""

    type: Literal["grid"]
    # Metered energy drawn from the grid (one entry per meter/tariff).
    flow_from: list[FlowFromGridSourceType]
    # Metered energy returned to the grid.
    flow_to: list[FlowToGridSourceType]
    # Flat daily cost adjustment ($) -- presumably a standing charge; confirm.
    cost_adjustment_day: float
class SolarSourceType(TypedDict):
    """Dictionary holding the source of energy production."""

    type: Literal["solar"]
    # statistic_id of the solar production meter.
    stat_energy_from: str
    # Config-entry ids of integrations providing a solar forecast, if any.
    config_entry_solar_forecast: list[str] | None
class BatterySourceType(TypedDict):
    """Dictionary holding the source of battery storage."""

    type: Literal["battery"]
    # statistic_id of energy discharged from the battery.
    stat_energy_from: str
    # statistic_id of energy charged into the battery.
    stat_energy_to: str
class GasSourceType(TypedDict):
"""Dictionary holding the source of gas storage."""
type: Literal["gas"]
stat_energy_from: str
# statistic_id of costs ($) incurred from the energy meter
# If set to None and entity_energy_from and entity_energy_price are configured,
# an EnergyCostSensor will be automatically created
stat_cost: str | None
# Used to generate costs if stat_cost is set to None
entity_energy_from: str | None # entity_id of an gas meter (m³), entity_id of the gas meter for stat_energy_from
entity_energy_price: str | None # entity_id of an entity providing price ($/m³)
number_energy_price: float | None # Price for energy ($/m³)
SourceType = Union[GridSourceType, SolarSourceType, BatterySourceType, GasSourceType]
class DeviceConsumption(TypedDict):
    """Dictionary holding the source of individual device consumption."""

    # statistic_id of the device's consumption meter.
    # This is an ever increasing value
    stat_consumption: str
class EnergyPreferences(TypedDict):
    """Dictionary holding the energy data."""

    # All configured energy sources (grid/solar/battery/gas).
    energy_sources: list[SourceType]
    # Per-device consumption trackers.
    device_consumption: list[DeviceConsumption]
class EnergyPreferencesUpdate(EnergyPreferences, total=False):
    """Partial form of EnergyPreferences: every key is optional (total=False)."""
def _flow_from_ensure_single_price(
    val: FlowFromGridSourceType,
) -> FlowFromGridSourceType:
    """Ensure at most one of entity price / fixed number price is set."""
    has_entity_price = val["entity_energy_price"] is not None
    has_fixed_price = val["number_energy_price"] is not None
    if has_entity_price and has_fixed_price:
        raise vol.Invalid("Define either an entity or a fixed number for the price")
    return val
FLOW_FROM_GRID_SOURCE_SCHEMA = vol.All(
vol.Schema(
{
vol.Required("stat_energy_from"): str,
vol.Optional("stat_cost"): vol.Any(str, None),
vol.Optional("entity_energy_from"): vol.Any(str, None),
vol.Optional("entity_energy_price"): vol.Any(str, None),
vol.Optional("number_energy_price"): vol.Any(vol.Coerce(float), None),
}
),
_flow_from_ensure_single_price,
)
FLOW_TO_GRID_SOURCE_SCHEMA = vol.Schema(
{
vol.Required("stat_energy_to"): str,
vol.Optional("stat_compensation"): vol.Any(str, None),
vol.Optional("entity_energy_to"): vol.Any(str, None),
vol.Optional("entity_energy_price"): vol.Any(str, None),
vol.Optional("number_energy_price"): vol.Any(vol.Coerce(float), None),
}
)
def _generate_unique_value_validator(key: str) -> Callable[[list[dict]], list[dict]]:
    """Build a validator rejecting lists where `key`'s value repeats."""

    def validate_uniqueness(
        val: list[dict],
    ) -> list[dict]:
        """Raise vol.Invalid when any value of `key` occurs more than once."""
        occurrences = Counter(entry[key] for entry in val)
        duplicates = [value for value, count in occurrences.items() if count > 1]
        if duplicates:
            raise vol.Invalid(f"Cannot specify {duplicates[0]} more than once")
        return val

    return validate_uniqueness
GRID_SOURCE_SCHEMA = vol.Schema(
{
vol.Required("type"): "grid",
vol.Required("flow_from"): vol.All(
[FLOW_FROM_GRID_SOURCE_SCHEMA],
_generate_unique_value_validator("stat_energy_from"),
),
vol.Required("flow_to"): vol.All(
[FLOW_TO_GRID_SOURCE_SCHEMA],
_generate_unique_value_validator("stat_energy_to"),
),
vol.Required("cost_adjustment_day"): vol.Coerce(float),
}
)
SOLAR_SOURCE_SCHEMA = vol.Schema(
{
vol.Required("type"): "solar",
vol.Required("stat_energy_from"): str,
vol.Optional("config_entry_solar_forecast"): vol.Any([str], None),
}
)
BATTERY_SOURCE_SCHEMA = vol.Schema(
{
vol.Required("type"): "battery",
vol.Required("stat_energy_from"): str,
vol.Required("stat_energy_to"): str,
}
)
GAS_SOURCE_SCHEMA = vol.Schema(
{
vol.Required("type"): "gas",
vol.Required("stat_energy_from"): str,
vol.Optional("stat_cost"): vol.Any(str, None),
vol.Optional("entity_energy_from"): vol.Any(str, None),
vol.Optional("entity_energy_price"): vol.Any(str, None),
vol.Optional("number_energy_price"): vol.Any(vol.Coerce(float), None),
}
)
def check_type_limits(value: list[SourceType]) -> list[SourceType]:
    """Reject configurations that declare more than one grid source."""
    type_counts = Counter(source["type"] for source in value)
    if type_counts.get("grid", 0) > 1:
        raise vol.Invalid("You cannot have more than 1 grid source")
    return value
ENERGY_SOURCE_SCHEMA = vol.All(
vol.Schema(
[
cv.key_value_schemas(
"type",
{
"grid": GRID_SOURCE_SCHEMA,
"solar": SOLAR_SOURCE_SCHEMA,
"battery": BATTERY_SOURCE_SCHEMA,
"gas": GAS_SOURCE_SCHEMA,
},
)
]
),
check_type_limits,
)
DEVICE_CONSUMPTION_SCHEMA = vol.Schema(
{
vol.Required("stat_consumption"): str,
}
)
class EnergyManager:
    """Manage the instance energy prefs."""

    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize energy manager."""
        self._hass = hass
        # Versioned persistent store backing the preferences.
        self._store = storage.Store[EnergyPreferences](
            hass, STORAGE_VERSION, STORAGE_KEY
        )
        # None until async_initialize() has loaded the stored preferences.
        self.data: EnergyPreferences | None = None
        # Coroutine factories invoked after every successful async_update().
        self._update_listeners: list[Callable[[], Awaitable]] = []

    async def async_initialize(self) -> None:
        """Initialize the energy integration."""
        self.data = await self._store.async_load()

    @staticmethod
    def default_preferences() -> EnergyPreferences:
        """Return default preferences."""
        return {
            "energy_sources": [],
            "device_consumption": [],
        }

    async def async_update(self, update: EnergyPreferencesUpdate) -> None:
        """Update the preferences."""
        # Work on a copy so listeners never observe a half-applied update.
        if self.data is None:
            data = EnergyManager.default_preferences()
        else:
            data = self.data.copy()

        # Only the recognized top-level keys are taken from the update.
        for key in (
            "energy_sources",
            "device_consumption",
        ):
            if key in update:
                data[key] = update[key]  # type: ignore[literal-required]

        self.data = data
        # Debounced write: the lambda captures the new dict; persistence
        # happens at most once per 60 s burst of updates.
        self._store.async_delay_save(lambda: data, 60)

        if not self._update_listeners:
            return

        # Notify all registered listeners concurrently.
        await asyncio.gather(*(listener() for listener in self._update_listeners))

    @callback
    def async_listen_updates(self, update_listener: Callable[[], Awaitable]) -> None:
        """Listen for data updates."""
        self._update_listeners.append(update_listener)
| [
"noreply@github.com"
] | EverythingSmartHome.noreply@github.com |
d74048100f3b874b0c1191d48651eb82d6953fb7 | 0b01cb61a4ae4ae236a354cbfa23064e9057e434 | /alipay/aop/api/request/AlipayOfflineMarketItemCreateRequest.py | d128c457124aef847e52dc2a5b9295ed9ab29ad2 | [
"Apache-2.0"
] | permissive | hipacloud/alipay-sdk-python-all | e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13 | bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d | refs/heads/master | 2022-11-14T11:12:24.441822 | 2020-07-14T03:12:15 | 2020-07-14T03:12:15 | 277,970,730 | 0 | 0 | Apache-2.0 | 2020-07-08T02:33:15 | 2020-07-08T02:33:14 | null | UTF-8 | Python | false | false | 3,973 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayOfflineMarketItemCreateModel import AlipayOfflineMarketItemCreateModel
class AlipayOfflineMarketItemCreateRequest(object):
    """Request wrapper for the `alipay.offline.market.item.create` gateway API.

    Follows the standard alipay-sdk request shape: a business payload
    (`biz_model` / `biz_content`) plus common gateway parameters, flattened
    by get_params() into the dict that is sent to the gateway.
    """

    def __init__(self, biz_model=None):
        # Business payload object (preferred over raw biz_content).
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        # Extra user-defined text parameters merged into the request.
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to convert.
        if isinstance(value, AlipayOfflineMarketItemCreateModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayOfflineMarketItemCreateModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored (legacy SDK behavior).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        # Lazily create the udf dict on first use.
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Assemble the flat parameter dict for the gateway call.

        biz_model is serialized first; a raw biz_content (model or string)
        is serialized afterwards and may overwrite the same key.
        """
        params = dict()
        params[P_METHOD] = 'alipay.offline.market.item.create'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        # No file-upload parameters for this API.
        multipart_params = dict()
        return multipart_params
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
f687f0ca5526f5ba9070037e195218aa01be0a95 | 339cc015ad260661e02ad32fe229807988a92487 | /accounting_addons/accounting_addons/accounting_addons/doctype/asset_depreciation_record/asset_depreciation_record.py | 172e3396e7b5bb0fe5bda91c2abe72601df8ee71 | [
"MIT"
] | permissive | bobzz-zone/gsi_accounting_addons | 009448596ca6ca114af27f0bdf35744f2835616b | 373ef3c7e8179fb7e4d003e8f4b809b36c4cfe1d | refs/heads/master | 2016-09-10T12:04:49.737175 | 2015-05-07T10:18:48 | 2015-05-07T10:18:48 | 35,213,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Myme and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class AssetDepreciationRecord(Document):
    """Controller for the Asset Depreciation Record doctype.

    No custom behaviour is defined; everything is inherited from
    frappe's Document base class.
    """
    pass
| [
"bobzz.zone@gmail.com"
] | bobzz.zone@gmail.com |
5a5ebf5680766142d1db98840c1843d5352700ce | ca23b411c8a046e98f64b81f6cba9e47783d2584 | /tf3d/layers/sparse_voxel_net_utils.py | 5677106a3dbd588759d3ffad675295a2bbcc8a37 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | pdybczak/google-research | 1fb370a6aa4820a42a5d417a1915687a00613f9c | 0714e9a5a3934d922c0b9dd017943a8e511eb5bc | refs/heads/master | 2023-03-05T23:16:11.246574 | 2021-01-04T11:30:28 | 2021-01-04T11:30:28 | 326,629,357 | 1 | 0 | Apache-2.0 | 2021-02-01T12:39:09 | 2021-01-04T09:17:36 | Jupyter Notebook | UTF-8 | Python | false | false | 18,211 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sparse conv utility functions."""
import tensorflow as tf
# Import the custom sparse-convolution ops, preferring the standalone pip
# package and falling back to the in-tree copy under tf3d.ops.
# NOTE(review): `ops_imported` is always False at this point, so the guard
# below is a no-op; it looks like leftover scaffolding from a conditional
# build configuration.
ops_imported = False
if not ops_imported:
  try:
    import tensorflow_sparse_conv_ops as sparse_conv_ops  # pylint: disable=g-import-not-at-top
  except ImportError:
    import tf3d.ops.tensorflow_sparse_conv_ops as sparse_conv_ops  # pylint: disable=g-import-not-at-top
def compute_pooled_voxel_indices(voxel_xyz_indices, pooling_size):
  """Computes and returns the pooled voxel indices.

  Applies the pooling based on the given `pooling_size` and computes
  x, y, z indices for the pooled voxels. Also converts the x, y, z index
  to a single number index where there is a one-on-one mapping between
  each x, y, z index value and its corresponding single number index value.

  Args:
    voxel_xyz_indices: A tf.int32 tensor of size [N, 3] containing voxel
      x, y, z indices.
    pooling_size: The size of the pooling window in x, y, z dimensions in the
      voxel grid. It should be either a tf.int32 tensor, a numpy array or a
      list of size [3].

  Returns:
    pooled_xyz_indices: A tf.int32 tensor of size [N, 3] containing the x, y, z
      index of the pooled voxel corresponding to each given voxel.
    pooled_single_number_indices: A tf.int32 tensor of size [N] containing the
      single number index of the pooled voxel corresponding to each given voxel.
  """
  pooling_size = tf.convert_to_tensor(pooling_size, dtype=tf.int32)
  # Integer division maps each voxel to the coordinates of its pooling cell.
  pooled_xyz_indices = tf.cast(
      tf.math.floordiv(voxel_xyz_indices, pooling_size), dtype=tf.int32)
  # Shift coordinates to be non-negative so the mixed-radix encoding below
  # yields a unique non-negative single number per (x, y, z) triple.
  pooled_xyz_indices_min = tf.reduce_min(pooled_xyz_indices, axis=0)
  pooled_xyz_indices -= pooled_xyz_indices_min
  pooled_xyz_indices_max = tf.reduce_max(pooled_xyz_indices, axis=0)
  # Mixed-radix coefficients:
  #   index = x * (ymax + 1) * (zmax + 1) + y * (zmax + 1) + z
  xyz_to_single_number_mapping_coefs = [
      (pooled_xyz_indices_max[1] + 1) * (pooled_xyz_indices_max[2] + 1),
      (pooled_xyz_indices_max[2] + 1), 1
  ]
  pooled_single_number_indices = tf.reduce_sum(
      pooled_xyz_indices * tf.expand_dims(
          tf.stack(xyz_to_single_number_mapping_coefs), axis=0),
      axis=1)
  # Undo the shift so the returned xyz coordinates are in the original frame.
  pooled_xyz_indices += pooled_xyz_indices_min
  return pooled_xyz_indices, pooled_single_number_indices
def pool_features_given_indices(features, indices, segment_func):
  """Pools the features based on their indices.

  If more than one feature have the same index, it will use the pooling method
  to aggregate the features.

  Args:
    features: A tensor of size [N, F].
    indices: A tf.int32 tensor of size [N].
    segment_func: A tensorflow function that operates on segments. Examples
      are one of tf.math.unsorted_segment_{min/max/mean/prod/sum}.

  Returns:
    pooled_features: A tensor of size [N', F] where N' is the number of unique
      values in `indices`.
    segment_ids: A tf.int32 tensor of size [N] mapping each input row to its
      pooled row.
    num_segments: A tf.int32 scalar corresponding to number of segments.
  """
  # Cleanup: the previous `segment_ids = None` / `if segment_ids is None:`
  # scaffolding was dead code (the guard was always true), so it is removed.
  # Map every (possibly repeated) index to a dense segment id in [0, N').
  _, segment_ids = tf.unique(indices)
  num_segments = tf.reduce_max(segment_ids) + 1
  # Each voxel might contain more than one point. Aggregate the features of
  # all rows that share a segment with the provided segment_func.
  pooled_features = segment_func(
      data=features, segment_ids=segment_ids, num_segments=num_segments)
  return pooled_features, segment_ids, num_segments
def voxel_pooling(voxel_features, voxel_xyz_indices, num_valid_voxels,
                  pooling_size, segment_func=tf.math.unsorted_segment_max):
  """Pools voxel features.

  Args:
    voxel_features: A tf.float32 tensor of size [batch_size, N, fd] where
      fd is the feature size.
    voxel_xyz_indices: A tf.int32 tensor of size [batch_size, N, 3] containing
      the voxel index in each of the x, y, z dimensions.
    num_valid_voxels: A tf.int32 tensor of size [batch_size].
    pooling_size: A tf.int32 tensor of size [3] containing the pooling size.
    segment_func: A function defining the pooling method.

  Returns:
    pooled_voxel_features: A tf.float32 tensor of size [batch_size, N', fd].
    pooled_voxel_indices: A tf.int32 tensor of size [batch_size, N', 3].
    num_valid_pooled_voxels: A tf.int32 tensor of size [batch_size].
    index_mapping: A tf.int32 tensor of size [batch_size, N] containing the
      mapping from voxel indices to pooled voxel indices.

  Raises:
    ValueError: If pooling method is unknown.
    ValueError: If batch size or feature dimensions are unknown at graph
      construction time.
  """
  # Static batch size and feature depth are required because each example is
  # processed independently below and the results are re-padded per example.
  batch_size = voxel_xyz_indices.get_shape().as_list()[0]
  if batch_size is None:
    raise ValueError("batch_size is unknown at graph construction time.")
  feature_dims = voxel_features.get_shape().as_list()[2]
  if feature_dims is None:
    raise ValueError("Feature dimension is unknown at graph construction time.")
  num_voxels = tf.shape(voxel_features)[1]

  def _slice_valid_voxels(i, num_valid_voxels_i):
    """Slices valid voxels and indices (drops the padded tail of example i)."""
    voxel_features_i = tf.slice(
        voxel_features,
        begin=[i, 0, 0],
        size=[1, num_valid_voxels_i, feature_dims])
    voxel_features_i = tf.squeeze(voxel_features_i, axis=0)
    voxel_xyz_indices_i = tf.slice(
        voxel_xyz_indices, begin=[i, 0, 0], size=[1, num_valid_voxels_i, 3])
    voxel_xyz_indices_i = tf.squeeze(voxel_xyz_indices_i, axis=0)
    return voxel_features_i, voxel_xyz_indices_i

  def _voxel_pooling_unbatched(voxel_features, voxel_xyz_indices, pooling_size,
                               segment_func):
    """Pools voxel features.

    Args:
      voxel_features: A tf.float32 tensor of size [N, fd] where fd is the
        feature size.
      voxel_xyz_indices: A tf.int32 tensor of size [N, 3] containing the voxel
        index in each of the x, y, z dimensions.
      pooling_size: A tf.int32 tensor of size [3] containing the pooling size.
      segment_func: A function defining the pooling method.

    Returns:
      pooled_voxel_features: A tf.float32 tensor of size [N', fd].
      pooled_voxel_indices: A tf.int32 tensor of size [N', 3].
      index_mapping: A tf.int32 tensor of size [N] containing the mapping from
        voxel indices to pooled voxel indices.

    Raises:
      ValueError: If pooling method is unknown.
    """
    pooled_xyz_indices, pooled_single_number_indices = (
        compute_pooled_voxel_indices(
            voxel_xyz_indices=voxel_xyz_indices, pooling_size=pooling_size))
    (pooled_voxel_features, segment_ids,
     num_segments) = pool_features_given_indices(
         features=voxel_features,
         indices=pooled_single_number_indices,
         segment_func=segment_func)
    # Pick one xyz coordinate per pooled voxel. All voxels within a segment
    # share the same pooled coordinate, so segment-max is just a cheap way to
    # select that common value.
    pooled_voxel_indices = tf.math.unsorted_segment_max(
        data=pooled_xyz_indices,
        segment_ids=segment_ids,
        num_segments=num_segments)
    return pooled_voxel_features, pooled_voxel_indices, segment_ids

  def _pad_pooled_voxels(pooled_voxel_features_i, pooled_voxel_indices_i,
                         index_mapping_i, num_valid_voxels_i, num_voxels):
    """Pad pooled voxels helper function.

    Pads every per-example output back to `num_voxels` rows so that map_fn
    can stack results from examples with different pooled counts.
    """
    num_valid_pooled_voxels_i = tf.shape(pooled_voxel_features_i)[0]
    pooled_voxel_features_i = tf.pad(
        pooled_voxel_features_i,
        paddings=[[0, num_voxels - num_valid_pooled_voxels_i], [0, 0]])
    pooled_voxel_indices_i = tf.pad(
        pooled_voxel_indices_i,
        paddings=[[0, num_voxels - num_valid_pooled_voxels_i], [0, 0]])
    # index_mapping is over the ORIGINAL (unpooled) voxel axis, hence it is
    # padded to num_voxels based on num_valid_voxels_i.
    index_mapping_i = tf.pad(
        index_mapping_i, paddings=[[0, num_voxels - num_valid_voxels_i]])
    return (pooled_voxel_features_i, pooled_voxel_indices_i,
            num_valid_pooled_voxels_i, index_mapping_i)

  def fn(i):
    """Map function: pool one batch example and re-pad its outputs."""
    num_valid_voxels_i = num_valid_voxels[i]
    voxel_features_i, voxel_xyz_indices_i = _slice_valid_voxels(
        i=i, num_valid_voxels_i=num_valid_voxels_i)
    (pooled_voxel_features_i,
     pooled_voxel_indices_i,
     index_mapping_i) = _voxel_pooling_unbatched(
         voxel_features=voxel_features_i,
         voxel_xyz_indices=voxel_xyz_indices_i,
         pooling_size=pooling_size,
         segment_func=segment_func)
    return _pad_pooled_voxels(
        pooled_voxel_features_i=pooled_voxel_features_i,
        pooled_voxel_indices_i=pooled_voxel_indices_i,
        index_mapping_i=index_mapping_i,
        num_valid_voxels_i=num_valid_voxels_i,
        num_voxels=num_voxels)

  (pooled_voxel_features, pooled_voxel_indices, num_valid_pooled_voxels,
   index_mapping) = tf.map_fn(
       fn=fn,
       elems=tf.range(batch_size),
       dtype=(tf.float32, tf.int32, tf.int32, tf.int32))
  # Maximum number of valid_pooled_voxels across the batch; trim the common
  # padding down to the largest per-example pooled count.
  max_num_valid_pooled_voxels = tf.reduce_max(num_valid_pooled_voxels)
  pooled_voxel_features = tf.slice(
      pooled_voxel_features,
      begin=[0, 0, 0],
      size=[batch_size, max_num_valid_pooled_voxels, feature_dims])
  pooled_voxel_indices = tf.slice(
      pooled_voxel_indices,
      begin=[0, 0, 0],
      size=[batch_size, max_num_valid_pooled_voxels, 3])
  return (pooled_voxel_features, pooled_voxel_indices, num_valid_pooled_voxels,
          index_mapping)
def voxel_upsampling(pooled_voxel_features, index_mapping):
  """Upsamples voxel features.

  Inverse of `voxel_pooling`: each original voxel receives the feature vector
  of the pooled voxel it was mapped to.

  Args:
    pooled_voxel_features: A tf.float32 tensor of size [batch_size, N', fd].
    index_mapping: A tf.int32 tensor of size [batch_size, N] containing the
      mapping from the original voxel indices to pooled voxel indices.

  Returns:
    voxel_features: A tf.float32 tensor of size [batch_size, N, fd] where
      fd is the feature size.
  """
  # batch_dims=1 gathers per example: example b looks up rows of
  # pooled_voxel_features[b] with indices index_mapping[b].
  return tf.gather(pooled_voxel_features, index_mapping, batch_dims=1)
class MaskedBatchNorm(tf.keras.layers.Layer):
  """Applies batch norm to only valid input features.

  Padded (invalid) voxel rows are excluded from the batch-norm statistics by
  concatenating only the first `num_valid_voxels[i]` rows of each example,
  normalizing, and re-padding afterwards.
  """

  def __init__(self):
    super().__init__()
    self.bn = tf.keras.layers.BatchNormalization()
    self.input_spec = [
        tf.keras.layers.InputSpec(shape=(None, None, None), dtype=tf.float32),
        tf.keras.layers.InputSpec(shape=(None,), dtype=tf.int32)
    ]

  def build(self, input_shapes):
    """Masked batch norm build function.

    Records the static batch size; it must be known because `call` iterates
    over the batch with a Python loop.
    """
    voxel_features_shape = input_shapes[0]
    self.batch_size = voxel_features_shape[0]

  def call(self, inputs, training=None):
    """Masked batch norm call function.

    Args:
      inputs: A list of tensors containing
        [voxel_features] A tf.float32 tensor of size [batch_size, N, fd] where
        fd is the feature size.
        [num_valid_voxels] A tf.int32 tensor of size [batch_size] containing the
        number of valid voxels for each example in the batch.
      training: If the layer is being executed in training mode (useful for
        the traditional batch normalization layer inside).

    Returns:
      voxel_features after applying batch norm.
    """
    if len(inputs) != 2:
      raise ValueError("inputs should have a length of 2.")
    voxel_features, num_valid_voxels = inputs
    if num_valid_voxels is None:
      # No mask available: fall back to plain batch norm over everything.
      return self.bn(voxel_features, training=training)
    num_voxels = tf.shape(voxel_features)[1]
    # Gather only the valid rows of every example into one flat [sum_i n_i, fd]
    # tensor so padded rows do not contaminate the normalization statistics.
    unpadded_features = []
    for i in range(self.batch_size):
      unpadded_features.append(voxel_features[i, 0:num_valid_voxels[i], :])
    unpadded_features = tf.concat(unpadded_features, axis=0)
    unpadded_features = self.bn(unpadded_features, training=training)
    # Prefix-sum of valid counts gives each example's slice boundaries in the
    # flat tensor (with a leading 0 so example i spans [cumsum[i], cumsum[i+1])).
    num_valid_voxels_cumsum = tf.math.cumsum(num_valid_voxels)
    num_valid_voxels_cumsum = tf.concat([
        tf.constant([0], dtype=num_valid_voxels_cumsum.dtype),
        num_valid_voxels_cumsum
    ],
                                        axis=0)
    # Re-pad each normalized slice back to num_voxels rows and restack.
    padded_features = []
    for i in range(self.batch_size):
      unpadded_features_i = unpadded_features[
          num_valid_voxels_cumsum[i]:num_valid_voxels_cumsum[i + 1], :]
      padded_features_i = tf.pad(
          unpadded_features_i,
          paddings=[[0, num_voxels - num_valid_voxels[i]], [0, 0]])
      padded_features.append(padded_features_i)
    return tf.stack(padded_features, axis=0)
class SparseConvBlock3D(tf.keras.layers.Layer):
  """Applies a series of 3d sparse convolutions to the voxel features."""

  def __init__(self,
               num_convolution_channels_list,
               conv_filter_size=3,
               use_batch_norm=True,
               dropout_prob=0.0,
               apply_relu_to_last_conv=True,
               normalize_sparse_conv=True):
    """3D sparse conv block constructor.

    The block contains a sequence of 3d sparse convolutions.

    Args:
      num_convolution_channels_list: A list that contains the number of output
        channels of the convolutions in the block. The length of
        the list identifies the number of convolutions in the block.
      conv_filter_size: The 3d convultion filter size. The 3d sparse convolution
        op is highly optimized for a filter of size 3.
      use_batch_norm: If True, it will train with batch norm.
      dropout_prob: Dropout probability.
      apply_relu_to_last_conv: If True, will apply relu to the last convolution
        of the block.
      normalize_sparse_conv: If True, performs a convolution on the 0-1 voxel
        occupancy grid and normalizes the sparse conv output with that.
    """
    super().__init__()
    self.num_convolution_channels_list = num_convolution_channels_list
    self.num_convolutions = len(num_convolution_channels_list)
    self.conv_filter_size = conv_filter_size
    self.use_batch_norm = use_batch_norm
    self.dropout_prob = dropout_prob
    self.apply_relu_to_last_conv = apply_relu_to_last_conv
    self.normalize_sparse_conv = normalize_sparse_conv
    # NOTE(review): use_tpu is hard-wired to False; the call() path keys off
    # `rules is None` scaffolding that corresponds to the non-TPU branch.
    self.use_tpu = False
    # One masked batch-norm instance per convolution in the sequence.
    self.batch_norm_fns = []
    for _ in range(self.num_convolutions):
      self.batch_norm_fns.append(MaskedBatchNorm())
    self.dropout_fn = tf.keras.layers.Dropout(dropout_prob)
    self.input_spec = [
        tf.keras.layers.InputSpec(shape=(None, None, None), dtype=tf.float32),
        tf.keras.layers.InputSpec(shape=(None, None, 3), dtype=tf.int32),
        tf.keras.layers.InputSpec(shape=(None,), dtype=tf.int32)
    ]

  def build(self, input_shapes):
    """Building layer weights.

    Creates one conv kernel per stage (and, optionally, one 1-channel
    normalizer kernel per stage), chaining the channel counts so each stage's
    input depth is the previous stage's output depth.
    """
    if len(input_shapes) != 3:
      raise ValueError("input_shapes should have a length of 3.")
    voxel_features_shape = input_shapes[0]
    self.batch_size = voxel_features_shape[0]
    num_channels = voxel_features_shape[2]
    self.ws = []
    self.normalizer_ws = []
    for i in range(self.num_convolutions):
      self.ws.append(
          self.add_weight(
              shape=(self.conv_filter_size, self.conv_filter_size,
                     self.conv_filter_size, num_channels,
                     self.num_convolution_channels_list[i]),
              name=("conv_kernel_{}".format(i)),
              initializer="random_uniform",
              trainable=True))
      num_channels = self.num_convolution_channels_list[i]
      if self.normalize_sparse_conv:
        # Single-channel kernel used to convolve the occupancy grid.
        self.normalizer_ws.append(
            self.add_weight(
                shape=(self.conv_filter_size, self.conv_filter_size,
                       self.conv_filter_size, 1, 1),
                name=("normalizer_conv_kernel_{}".format(i)),
                initializer="random_uniform",
                trainable=True))

  def call(self, inputs, training=None):
    """3D sparse conv block call function.

    Args:
      inputs: A list of tensors containing
        [voxel_features] A tf.float32 tensor of size [batch_size, N, fd] where
        fd is the feature size.
        [voxel_xyz_indices] A tf.int32 tensor of size [batch_size, N, 3]
        containing the voxel index in each of the x, y, z dimensions.
        [num_valid_voxels] A tf.int32 tensor of size [batch_size] containing the
        number of valid voxels for each example in the batch.
      training: If the layer is being executed in training mode (useful for
        the traditional batch normalization layer inside).

    Returns:
      convolved voxel features of size [batch_size, N, fd'] where fd' is the
      number of output channels of last convolution in the block.
    """
    if len(inputs) != 3:
      raise ValueError("inputs should have a length of 3.")
    voxel_features, voxel_xyz_indices, num_valid_voxels = inputs
    num_voxels = tf.shape(voxel_features)[1]
    # `rules` stays None on the non-TPU (external op) code path; the guards
    # below select the external submanifold_sparse_conv3d implementation.
    rules = None
    net = voxel_features
    for i in range(self.num_convolutions):
      if rules is None:  # using external implementation
        net = sparse_conv_ops.submanifold_sparse_conv3d(voxel_xyz_indices,
                                                        num_valid_voxels, net,
                                                        self.ws[i])
      if self.normalize_sparse_conv:
        # Convolve an all-ones occupancy grid with the normalizer kernel and
        # divide it out, so output magnitude is less sensitive to how many
        # occupied neighbors each voxel has.
        net_normalizer = tf.ones(
            tf.stack([self.batch_size, num_voxels, 1]), dtype=tf.float32)
        if rules is None:  # using external implementation
          net_normalizer = sparse_conv_ops.submanifold_sparse_conv3d(
              voxel_xyz_indices, num_valid_voxels, net_normalizer,
              self.normalizer_ws[i])
        net = tf.math.truediv(net, 1.0 + tf.math.abs(net_normalizer))
      if self.use_batch_norm:
        net = self.batch_norm_fns[i]([net, num_valid_voxels], training)
      # Relu on every stage except (optionally) the last one.
      if self.apply_relu_to_last_conv or i < (self.num_convolutions - 1):
        net = tf.nn.relu(net)
      if self.dropout_prob > 0:
        net = self.dropout_fn(net)
    return net
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
class foo:
    # Class attribute: shared default for every instance until an instance
    # assigns its own `x`.
    x = 1


f = foo()
f.x = 10  # creates an instance attribute that shadows foo.x on f only
# BUGFIX: `print foo.x` is Python-2-only syntax; the parenthesized form
# behaves identically under both Python 2 and Python 3. The class attribute
# is untouched by the instance assignment above, so this prints 1.
print(foo.x)
| [
"sreejithevwyd@gmail.com"
] | sreejithevwyd@gmail.com |
652711257a3cd0e0ebee83693a0c9cef26803857 | a6390e0bbd5a7070c0abd2504afecc8ef6028997 | /indigo/nn/input.py | 4563f18522af8de264df296082c45aff9c167847 | [] | no_license | mlberkeley/indigo | 2f287a82c939a9d0adc41db23e59ae777fc88466 | c155b16265f13d87be0108fcf815517491b93a74 | refs/heads/master | 2021-06-14T01:21:06.702854 | 2020-05-22T08:10:47 | 2020-05-22T08:10:47 | 254,468,158 | 4 | 0 | null | 2020-04-09T20:08:16 | 2020-04-09T20:08:16 | null | UTF-8 | Python | false | false | 3,142 | py | from dataclasses import dataclass
from typing import Any
import tensorflow as tf
@dataclass
class AttentionInput(object):
    """Fields of a data class for computing multihead attention
    in indigo.variables.attention.Attention

    Arguments:

    queries: tf.Tensor
        the Queries tensor in a multihead attention mechanism
        see 'Attention Is All You Need'
    keys: tf.Tensor
        the Keys tensor in a multihead attention mechanism
        see 'Attention Is All You Need'
    values: tf.Tensor
        the Values tensor in a multihead attention mechanism
        see 'Attention Is All You Need'
    queries_mask: tf.Tensor
        a boolean mask for the Queries tensor
        in a multihead attention mechanism
    values_mask: tf.Tensor
        a boolean mask for the Keys and Values tensor
        in a multihead attention mechanism
    _keras_mask: tf.Tensor
        a required placeholder for tf.layers.Sequential"""

    # these are required for the network
    queries: Any = None
    keys: Any = None
    values: Any = None

    # if left unassigned these will not mask anything
    # NOTE(review): these default tensors are built once at class-definition
    # time and shared by every instance that does not override them — confirm
    # downstream code never mutates them in place.
    queries_mask: Any = tf.constant([[True]])
    values_mask: Any = tf.constant([[True]])

    # this does not need to be set during construction
    _keras_mask: Any = None
@dataclass
class TransformerInput(object):
    """Fields of a data class for computing multihead attention
    in indigo.transformer.Transformer

    Arguments:

    queries: tf.Tensor
        the Queries tensor in a multihead attention mechanism
        see 'Attention Is All You Need'
    values: tf.Tensor
        the Keys and Values tensor in a multihead attention mechanism
        see 'Attention Is All You Need'
    queries_mask: tf.Tensor
        a boolean mask for the Queries tensor
        in a multihead attention mechanism
    values_mask: tf.Tensor
        a boolean mask for the Keys and Values tensor
        in a multihead attention mechanism
    _keras_mask: tf.Tensor
        a required placeholder for tf.layers.Sequential"""

    # these are required for the network
    queries: Any = None
    values: Any = None

    # if left unassigned these will not mask anything
    # NOTE(review): default tensors are created once at class definition and
    # shared across instances that do not override them.
    queries_mask: Any = tf.constant([[True]])
    values_mask: Any = tf.constant([[True]])

    # this does not need to be set during construction
    _keras_mask: Any = None
@dataclass
class RegionFeatureInput(object):
    """Fields of a data class for computing multihead attention
    in indigo.transformer.Transformer

    Arguments:

    features: tf.Tensor
        the Keys and Values tensor in a multihead attention mechanism
        see 'Attention Is All You Need'
    boxes: tf.Tensor
        the Keys and Values tensor in a multihead attention mechanism
        see 'Attention Is All You Need'
    detections: tf.Tensor
        the Keys and Values tensor in a multihead attention mechanism
        see 'Attention Is All You Need'"""

    # these are required for the network
    features: Any = None
    boxes: Any = None
    detections: Any = None
| [
"brandon@btrabucco.com"
] | brandon@btrabucco.com |
89a494a1ece606add63023592b124bfaf796cc21 | ebec8b55938903f97f66bc3629ce73db177b8bcc | /ultimatewebsite/members/forms.py | 2a879513f03f4cc84b77452c291fb16db54bd212 | [
"MIT"
] | permissive | NischalLal/class-ultimate-classof2020 | 284788f87c95e4889b10c2f9072c8e16daf15c4d | c069dc7211a640267e35c2e956ad9440a03e1ab8 | refs/heads/master | 2021-04-25T23:30:12.434127 | 2017-10-17T08:27:53 | 2017-10-17T08:27:53 | 107,240,286 | 1 | 0 | MIT | 2020-10-04T05:44:04 | 2017-10-17T08:32:21 | CSS | UTF-8 | Python | false | false | 2,464 | py | from django import forms
from members.models import Member
class MemberForm(forms.ModelForm):
    """Model form for Member profiles with custom per-field validation."""

    favourite_quote = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = Member
        fields = ('full_name', 'image', 'phone_number', 'email', 'hometown',
                  'favourite_quote', 'bio', 'your_website', 'facebook_url',
                  'twitter_url', 'github_url', 'instagram_url')

    def clean_full_name(self):
        """Require a name between 4 and 29 characters (inclusive)."""
        full_name = self.cleaned_data.get('full_name')
        length = len(full_name)
        if length <= 3 or length >= 30:
            raise forms.ValidationError("WoW, Your Name is So Boring!!message")
        return full_name

    def clean_phone_number(self):
        """Require exactly 10 digits starting with '9'."""
        phone_number = self.cleaned_data.get('phone_number')
        if len(phone_number) != 10 or not phone_number.startswith('9'):
            raise forms.ValidationError("Sorry! We Cannot Accept This SHIT!!")
        return phone_number

    def clean_email(self):
        """Reject addresses lacking '@', containing '@.', or lacking a dot."""
        # Idiom fix: `'@' not in email` instead of `not '@' in email`.
        email = self.cleaned_data.get('email')
        if '@' not in email or '@.' in email:
            raise forms.ValidationError("ERROR AT ITS BEST")
        elif '.' not in email:
            raise forms.ValidationError("Something Missing E.G '.com', '.edu', '.me', '.org'")
        return email

    def _clean_social_url(self, field_name, domain, label):
        # Shared check for the four social-link fields: the URL is optional,
        # but when present it must contain the expected domain. Error messages
        # are kept byte-identical to the originals.
        url = self.cleaned_data.get(field_name)
        if url is not None and domain not in url:
            raise forms.ValidationError(
                "We don't think this is a %s URL" % label)
        return url

    def clean_facebook_url(self):
        return self._clean_social_url('facebook_url', 'facebook.com', 'facebook')

    def clean_twitter_url(self):
        return self._clean_social_url('twitter_url', 'twitter.com', 'twitter')

    def clean_instagram_url(self):
        return self._clean_social_url('instagram_url', 'instagram.com', 'instagram')

    def clean_github_url(self):
        return self._clean_social_url('github_url', 'github.com', 'Github')
"aakrist666@gmail.com"
] | aakrist666@gmail.com |
e60f622fd08209dd0fccd070ecab295d750160cd | 03ec2daac0989f9b6936b1e87d8ca1b0d99f1bce | /optfn/plastic_linear.py | 58257991529ef71676ed0cdc4dd2f255adf46f1a | [] | no_license | SSS135/optfn | f7364dce8c1857baa90d2d6564316762c574a9ba | 48ae4f5439daa89ac54921a7642e612838c724eb | refs/heads/master | 2020-05-29T15:21:38.827291 | 2020-04-29T17:51:09 | 2020-04-29T17:51:09 | 189,217,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,014 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class PlasticLinear(nn.Module):
    """Linear layer with a differentiable-plasticity (Hebbian) weight term.

    The effective weight is ``weight + plastic_scale * hebb`` where ``hebb``
    is a per-sample Hebbian trace of shape (batch, out_features, in_features)
    carried between forward calls and updated with either Oja's rule or a
    decaying Hebb rule, depending on ``oja_rule``.
    """

    def __init__(self, in_features, out_features, bias=True, single_plastic_lr=False, initial_plastic_lr=0.1, oja_rule=True):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.single_plastic_lr = single_plastic_lr
        self.initial_plastic_lr = initial_plastic_lr
        self.oja_rule = oja_rule
        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
        # Per-connection gain applied to the Hebbian trace.
        self.plastic_scale = nn.Parameter(torch.Tensor(out_features, in_features))
        # Learnable plasticity rate: one scalar, or one value per connection.
        self.plastic_lr = nn.Parameter(torch.Tensor(1) if single_plastic_lr else torch.Tensor(out_features, in_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Same fan-in-based uniform range as torch's classic Linear init.
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        self.plastic_scale.data.uniform_(-stdv, stdv)
        if self.single_plastic_lr:
            self.plastic_lr.data.fill_(self.initial_plastic_lr)
        else:
            self.plastic_lr.data.uniform_(min(self.initial_plastic_lr, 1e-6), self.initial_plastic_lr)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input: torch.Tensor, hebb: torch.Tensor):
        """Apply the plastic linear map.

        Args:
            input: tensor of shape (batch, in_features).
            hebb: Hebbian trace from the previous call, or None to start
                from a zero trace.

        Returns:
            (out, hebb): output of shape (batch, out_features) and the
            updated trace.
        """
        if hebb is None:
            hebb = input.new_zeros((input.shape[0], self.out_features, self.in_features))
        # (1, in) @ (in, out) per sample, with the plastic term folded in.
        out = input.unsqueeze(-2) @ (self.weight.unsqueeze(0) + self.plastic_scale.unsqueeze(0) * hebb).transpose(-1, -2)
        out = out.squeeze(-2)
        uin, uout = input.unsqueeze(-2), out.unsqueeze(-1)
        if self.oja_rule:
            # Oja's rule keeps the trace bounded via the -uout*hebb decay term.
            hebb = hebb + self.plastic_lr * uout * (uin - uout * hebb)
        else:
            # Plain decaying Hebb: convex mix of outer product and old trace.
            hebb = self.plastic_lr * uin * uout + (1 - self.plastic_lr) * hebb
        # NOTE: bias is added AFTER the trace update, so the trace sees the
        # pre-bias activation.
        if self.bias is not None:
            out = out + self.bias
        return out, hebb
class PlasticLinearRec(nn.Module):
    """Recurrent layer with a differentiable-plasticity (Hebbian) weight term.

    The effective recurrent weight is ``weight + plastic_scale * hebb`` where
    ``hebb`` is a per-sample Hebbian trace carried between calls and updated
    with either Oja's rule or a decaying Hebb rule (``oja_rule``).
    """

    def __init__(self, num_features, single_plastic_lr=True, initial_plastic_lr=0.01, oja_rule=True):
        super().__init__()
        self.num_features = num_features
        self.single_plastic_lr = single_plastic_lr
        self.initial_plastic_lr = initial_plastic_lr
        self.oja_rule = oja_rule
        self.weight = nn.Parameter(torch.Tensor(num_features, num_features))
        self.plastic_scale = nn.Parameter(torch.Tensor(num_features, num_features))
        self.plastic_lr = nn.Parameter(torch.Tensor(1) if single_plastic_lr else torch.Tensor(num_features, num_features))
        self.reset_parameters()

    def reset_parameters(self):
        # Fan-in-based uniform init; RNG call order is unchanged from the
        # original implementation.
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        self.plastic_scale.data.uniform_(-stdv, stdv)
        # Zero the diagonal so a unit's trace does not feed back onto itself.
        self.plastic_scale.data -= torch.diag(torch.diag(self.plastic_scale.data))
        if self.single_plastic_lr:
            self.plastic_lr.data.fill_(self.initial_plastic_lr)
        else:
            self.plastic_lr.data.uniform_(min(self.initial_plastic_lr, 1e-6), self.initial_plastic_lr)

    def forward(self, input: torch.Tensor, memory):
        """Run one recurrent step.

        Args:
            input: tensor of shape (batch, num_features).
            memory: None on the first step, otherwise the (last_out, hebb)
                pair returned by the previous call.

        Returns:
            (out, (out, hebb)): new activation and updated memory.
        """
        if memory is None:
            last_out, hebb = input.new_zeros((input.shape[0], self.num_features)), \
                             input.new_zeros((input.shape[0], self.num_features, self.num_features))
        else:
            last_out, hebb = memory
        out = last_out.unsqueeze(-2) @ (self.weight.unsqueeze(0) + self.plastic_scale.unsqueeze(0) * hebb).transpose(-1, -2)
        # FIX: torch.nn.functional.tanh is deprecated (emits a warning);
        # torch.tanh is the documented, numerically identical replacement.
        out = torch.tanh(out.squeeze(-2) + input)
        uin, uout = last_out.unsqueeze(-2), out.unsqueeze(-1)
        if self.oja_rule:
            hebb = hebb + self.plastic_lr * uout * (uin - uout * hebb)
        else:
            hebb = self.plastic_lr * uin * uout + (1 - self.plastic_lr) * hebb
        return out, (out, hebb)
"sss13594@gmail.com"
] | sss13594@gmail.com |
01c8f82a0e2570725b639af3a837aed5c0198892 | 3b504a983f1807ae7c5af51078bfab8c187fc82d | /client/gui/HudElements/ForestallingPoint.py | 79091f51fde6a17f9469bf38b316f591b2728d8a | [] | no_license | SEA-group/wowp_scripts | 7d35fd213db95ea6b3dbd1ec6d3e0f13de86ba58 | 2fe54a44df34f2dcaa6860a23b835dcd8dd21402 | refs/heads/master | 2021-09-07T23:10:13.706605 | 2018-03-02T17:23:48 | 2018-03-02T17:23:48 | 117,280,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,554 | py | # Embedded file name: scripts/client/gui/HudElements/ForestallingPoint.py
import BigWorld
import GUI
from consts import *
from gui.HUDconsts import *
from EntityHelpers import isAvatar, isTeamObject
class ForestallingPoint:
    """HUD aim-lead ("forestalling") point bound to a target entity.

    Wraps a GUI.ForestallingMp matrix provider whose source is the player's
    matrix and whose target is the tracked entity's matrix, and feeds it into
    the two offset matrices used by the HUD. Presumably the provider computes
    the lead point from the relative motion and `bulletSpeed` — the GUI op is
    opaque from here; confirm against the engine docs.
    """

    def __init__(self, offsetMtx):
        self.__offsetMtx = offsetMtx
        self.__centerPointOffsetMtx = GUI.OffsetMp()
        # The matrix provider is created lazily by the first call to
        # setTarget or setBulletSpeed.
        self.__inited = False
        self.__matrixProvider = None
        return

    def setTarget(self, entity):
        # Point the lead indicator at `entity`; avatars get full lead
        # prediction, team objects (optionally) only drive the offsets, and
        # anything else clears the indicator.
        if not self.__inited:
            self.__createTarget()
        if entity is not None and isAvatar(entity):
            self.__matrixProvider.target = entity.matrix
            self.__deflectionTarget(entity)
            self.__offsetMtx.target = self.__matrixProvider
            self.__centerPointOffsetMtx.target = self.__matrixProvider
            if COLLISION_RECORDER:
                self.__matrixProvider.targetEntity = entity
        else:
            self.__matrixProvider.target = None
            self.__deflectionTarget(None)
            if entity is not None and TEAM_OBJECT_PARALLAX_ENABLED and isTeamObject(entity):
                self.__offsetMtx.target = entity.matrix
                self.__centerPointOffsetMtx.target = entity.matrix
            else:
                self.__offsetMtx.target = None
                self.__centerPointOffsetMtx.target = None
            if COLLISION_RECORDER:
                self.__matrixProvider.targetEntity = None
        return

    def setBulletSpeed(self, bulletSpeed):
        # Update the projectile speed used by the lead computation.
        if not self.__inited:
            self.__createTarget()
        self.__matrixProvider.bulletSpeed = bulletSpeed

    def destroy(self):
        # Drop all references into the GUI matrix chain so the provider and
        # offset matrices can be released.
        self.__inited = False
        self.__matrixProvider = None
        self.__offsetMtx.target = None
        self.__offsetMtx = None
        self.__centerPointOffsetMtx.target = None
        self.__centerPointOffsetMtx = None
        return

    def __deflectionTarget(self, entity):
        # Notify the server cell which entity is being led (0 clears it).
        # NOTE(review): the in-progress counter is only ever incremented
        # here, never decremented — confirm that is intentional.
        BigWorld.player().deflectionTargetsInProgress += 1
        BigWorld.player().cell.setDeflectionTarget(entity.id if entity is not None else 0)
        return

    def __createTarget(self):
        # Build the forestalling matrix provider once, sourced from the
        # player's matrix; the target is attached later by setTarget.
        self.__matrixProvider = GUI.ForestallingMp()
        self.__matrixProvider.source = BigWorld.player().fakeRealMatrix
        self.__matrixProvider.target = None
        self.__matrixProvider.offset = self.__offsetMtx
        if COLLISION_RECORDER:
            self.__matrixProvider.sourceEntity = BigWorld.player()
            self.__matrixProvider.targetEntity = None
        self.__inited = True
        return
"55k@outlook.com"
] | 55k@outlook.com |
aeb2278cba6e6f5ab83eeea7a5279fdb438b5902 | b66e70a8bb3c53595acd01dceb23298694884b67 | /cloudy/settings/base.py | e14d1c5960cb86cbbfc54a2d682480988098d21e | [] | no_license | flupke/cloudy-release | d7735a38d79f816c52da3d983c714512a32919b1 | 6b160188a7067f125b107eb68dc8db4bbb4bfdf4 | refs/heads/master | 2016-09-06T05:23:40.856287 | 2013-02-23T18:17:16 | 2013-02-23T18:17:16 | 8,377,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,892 | py | import os.path as op
# Project root: the directory containing this settings package's parent.
ROOT_DIR = op.abspath(op.join(op.dirname(__file__), '..'))
# NOTE(review): DEBUG defaults to True here; production deployments must
# override this in local.py (imported at the bottom of this file).
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = op.join(ROOT_DIR, '..', 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    op.join(ROOT_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): a hard-coded SECRET_KEY committed to version control is a
# security risk; override it in local.py for any real deployment.
SECRET_KEY = 'h)q^_kvx$sg+%e%=lg^a+q1!z9a5-1x%vky5*76_j-_wx7am-m'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cloudy.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'cloudy.wsgi.application'
TEMPLATE_DIRS = (
    op.join(ROOT_DIR, 'templates'),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Machine-local overrides (secrets, DB credentials, DEBUG flag) live in
# cloudy/settings/local.py; its absence is deliberately tolerated so the
# project still imports with only the defaults above.
try:
    from .local import *
except ImportError:
    pass
| [
"luper.rouch@gmail.com"
] | luper.rouch@gmail.com |
808a9f59c4c857fb35b4ea766f82d0994b5016fd | bf1e6aa6ee7687363427c87b7e5bef1d157410fc | /backend/chat/api/v1/serializers.py | 4a6ada475bba6fc32ca69b58440701201883d804 | [] | no_license | crowdbotics-apps/chatme-27551 | 8cee7a31badc71575ba7c1a31c9d8638dadacd08 | 9c10dd5c0413ab2e10ffde4c8b90f650464cfa45 | refs/heads/master | 2023-05-04T12:18:31.110095 | 2021-05-29T10:40:55 | 2021-05-29T10:40:55 | 371,938,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | from rest_framework import serializers
from chat.models import (
MessageAction,
ThreadMember,
Thread,
Message,
ThreadAction,
ForwardedMessage,
)
class ForwardedMessageSerializer(serializers.ModelSerializer):
    """Serializes every field of the ForwardedMessage model."""

    class Meta:
        model = ForwardedMessage
        fields = "__all__"
class ThreadMemberSerializer(serializers.ModelSerializer):
    """Serializes every field of the ThreadMember model."""

    class Meta:
        model = ThreadMember
        fields = "__all__"
class ThreadActionSerializer(serializers.ModelSerializer):
    """Serializes every field of the ThreadAction model."""

    class Meta:
        model = ThreadAction
        fields = "__all__"
class MessageActionSerializer(serializers.ModelSerializer):
    """Serializes every field of the MessageAction model."""

    class Meta:
        model = MessageAction
        fields = "__all__"
class MessageSerializer(serializers.ModelSerializer):
    """Serializes every field of the Message model."""

    class Meta:
        model = Message
        fields = "__all__"
class ThreadSerializer(serializers.ModelSerializer):
    """Serializes every field of the Thread model."""

    class Meta:
        model = Thread
        fields = "__all__"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
352b6484d41dbe8b4caa795e4a050a3a6b7cc7aa | 553b34a101c54090e68f540d96369ac7d5774d95 | /python/python_koans/python2/koans/about_new_style_classes.py | 99f39683a5ef5d804c5ed0b6d1160ab2d6b98939 | [
"MIT"
] | permissive | topliceanu/learn | fd124e1885b5c0bfea8587510b5eab79da629099 | 1c5b1433c3d6bfd834df35dee08607fcbdd9f4e3 | refs/heads/master | 2022-07-16T19:50:40.939933 | 2022-06-12T15:40:20 | 2022-06-12T15:40:20 | 21,684,180 | 26 | 12 | MIT | 2020-03-26T20:51:35 | 2014-07-10T07:22:17 | JavaScript | UTF-8 | Python | false | false | 2,466 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutNewStyleClasses(Koan):
    """Koan contrasting Python 2 old-style and new-style classes.

    The assertions are the lesson content; this module only runs on
    Python 2, where the two class models coexist.
    """

    class OldStyleClass:
        "An old style class"
        # Original class style have been phased out in Python 3.

    class NewStyleClass(object):
        "A new style class"
        # Introduced in Python 2.2
        #
        # Aside from this set of tests, Python Koans sticks exclusively to this
        # kind of class
        pass

    def test_new_style_classes_inherit_from_object_base_class(self):
        self.assertEqual(True, issubclass(self.NewStyleClass, object))
        self.assertEqual(False, issubclass(self.OldStyleClass, object))

    def test_new_style_classes_have_more_attributes(self):
        # Old-style classes only expose __doc__ and __module__.
        self.assertEqual(2, len(dir(self.OldStyleClass)))
        self.assertEqual("An old style class", self.OldStyleClass.__doc__)
        self.assertEqual('koans.about_new_style_classes', self.OldStyleClass.__module__)
        self.assertEqual(18, len(dir(self.NewStyleClass)))
        # To examine the available attributes, run
        # 'dir(<Class name goes here>)'
        # from a python console

    # ------------------------------------------------------------------

    def test_old_style_classes_have_type_but_no_class_attribute(self):
        self.assertEqual('classobj', type(self.OldStyleClass).__name__)
        try:
            cls = self.OldStyleClass.__class__.__name__
        except Exception as ex:
            pass
        # What was that error message from the exception?
        self.assertMatch("class OldStyleClass has no attribute '__class__'", ex[0])

    def test_new_style_classes_have_same_class_as_type(self):
        new_style = self.NewStyleClass()
        self.assertEqual(type(self.NewStyleClass), self.NewStyleClass.__class__)
        self.assertEqual(True,
            type(self.NewStyleClass) == self.NewStyleClass.__class__)

    # ------------------------------------------------------------------

    def test_in_old_style_instances_class_is_different_to_type(self):
        old_style = self.OldStyleClass()
        self.assertEqual('OldStyleClass', old_style.__class__.__name__)
        self.assertEqual('instance', type(old_style).__name__)

    def test_new_style_instances_have_same_class_as_type(self):
        new_style = self.NewStyleClass()
        self.assertEqual('NewStyleClass', new_style.__class__.__name__)
        self.assertEqual(True, type(new_style) == new_style.__class__)
| [
"alexandru.topliceanu@gmail.com"
] | alexandru.topliceanu@gmail.com |
6f5712a2576ccb525f3600b7f0802178c4a366d1 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/googlecloudsdk/command_lib/compute/machine_types/flags.py | 7446d105ac181ce93490efa9f4adda4468ca2080 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 1,181 | py | # -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags for the compute machine-types commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.command_lib.compute import completers
from googlecloudsdk.command_lib.compute import flags as compute_flags
def MakeMachineTypeArg():
  """Build the positional resource argument for a compute machine type.

  Returns:
    A compute_flags.ResourceArgument configured against the zonal
    compute.machineTypes collection, with tab completion enabled.
  """
  resource_argument_kwargs = {
      'resource_name': 'machine type',
      'completer': completers.MachineTypesCompleter,
      'zonal_collection': 'compute.machineTypes',
      'zone_explanation': compute_flags.ZONE_PROPERTY_EXPLANATION,
  }
  return compute_flags.ResourceArgument(**resource_argument_kwargs)
| [
"jonathang132298@gmail.com"
] | jonathang132298@gmail.com |
c149c43ee2040d9c4aa7f7048a612c1e6297266f | 83280aa17b415138a6f55edf9cedfdd9a45916a2 | /src/stochastic_review/cli.py | 3f05a48e626b05227ae1806c286b518dadf3e258 | [] | no_license | RubenBranco/Stochastic-Continuous-Review | dfea2b55b7c8f0d41eac289b76b1d28ced31d0ce | 9015ebe6adfc6b812cb83c2b9df06fd3d20b6fcc | refs/heads/master | 2020-05-01T12:31:50.531958 | 2019-04-28T16:50:38 | 2019-04-28T16:50:38 | 177,467,859 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,161 | py | from bullet import SlidePrompt, Bullet, Numbers
# Demand distributions the solver understands; shown as a pick-list.
DISTRIBUTIONS = [
    'Uniform',
    'Normal',
]

# Interactive questionnaire: one slide per model parameter. Answers come
# back from cli.launch() as (prompt, value) pairs in this exact order,
# which get_args_from_cli() below relies on.
cli = SlidePrompt(
    [
        Bullet(prompt="Choose the distribution(Y)", choices=DISTRIBUTIONS),
        Numbers(prompt="Distribution mean(μ) / Starting point(a): ", type=float),
        Numbers(prompt="Distribution standard deviation(σ) / End point(b): ", type=float),
        Numbers(prompt="Delivery time(l): ", type=float),
        Numbers(prompt="Fixed cost of the order(A): ", type=float),
        Numbers(prompt="Unitary item cost(c): ", type=float),
        Numbers(prompt="Storage cost per item per timestep(h): ", type=float),
        Numbers(prompt="Out of stock cost per item(p'): ", type=float),
        Numbers(prompt="Stopping rate of change(ε): ", type=float),
    ]
)
def get_args_from_cli(cli_obj):
    """Run the interactive prompt and map the answers to solver kwargs.

    Args:
        cli_obj: a bullet SlidePrompt-like object whose launch() returns a
            list of (prompt, value) pairs, one per question, in the order
            the questions were declared.

    Returns:
        dict mapping solver keyword names to the answered values.
    """
    # Keys are listed in the same order as the prompts in `cli`; pairing
    # with zip replaces the nine fragile positional args[i][1] lookups.
    keys = (
        'distribution',
        'mean',
        'std_deviation',
        'delivery_time',
        'order_cost',
        'unit_cost',
        'storage_cost',
        'out_of_stock',
        'stop_crit',
    )
    answers = cli_obj.launch()
    return {key: value for key, (_prompt, value) in zip(keys, answers)}
| [
"ruben.branco@outlook.pt"
] | ruben.branco@outlook.pt |
993d08a77bcc43f54ee71a7c8ec0e59ae70641c2 | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/api/nn/test_hardsigmoid.py | f591329d91524bb078a0d9157a65d000a22cedf6 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 1,588 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test_hardsigmoid
"""
from apibase import APIBase
from apibase import randtool
import paddle
import pytest
import numpy as np
class TestHardsigmoid(APIBase):
    """
    APIBase harness configuration for paddle.nn.Hardsigmoid tests.
    """

    def hook(self):
        """
        Configure dtypes under test and the comparison tolerance.
        """
        # Hardsigmoid is exercised for both single and double precision.
        self.types = [np.float32, np.float64]
        # self.debug = True
        # self.static = True
        # enable check grad
        # Loose tolerance for the numeric comparison against the reference.
        self.delta = 1e-1
        # self.enable_backward = True
# Shared test-harness instance used by the test functions below.
obj = TestHardsigmoid(paddle.nn.Hardsigmoid)
@pytest.mark.api_nn_Hardsigmoid_vartype
def test_hardsigmoid_base():
    """
    base: random input, compare against the piecewise hardsigmoid reference
    (0 below -3, 1 above 3, x/6 + 0.5 in between).
    """
    x = randtool("float", -10, 10, [2, 2])
    expected = np.array(
        [0 if v <= -3 else 1 if v >= 3 else v / 6 + 0.5 for v in x.flatten()]
    ).reshape(x.shape)
    obj.base(res=expected, data=x)
@pytest.mark.api_nn_Hardsigmoid_parameters
def test_hardsigmoid():
    """
    x = [[3, 3, 3], [-5, 0, 5], [-3, -3, -3]]
    Boundary values: exactly +/-3 plus points outside and inside the ramp.
    """
    x = np.array([[3, 3, 3], [-5, 0, 5], [-3, -3, -3]]).astype(np.float32)
    expected = np.array(
        [0 if v <= -3 else 1 if v >= 3 else v / 6 + 0.5 for v in x.flatten()]
    ).reshape(x.shape)
    obj.run(res=expected, data=x)
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
6973cb6fad77d9866816625849943c3739ebab02 | 45870a80cbe343efe95eb9e8d0bd47c8c88353d1 | /特殊的函数/venv/Lib/site-packages/tensorflow/tools/api/generator/api/gfile/__init__.py | d15998ef1791b0b3d2441ba82e50f147a68d311d | [] | no_license | pippichi/IntelliJ_PYTHON | 3af7fbb2c8a3c2ff4c44e66736bbfb7aed51fe88 | 0bc6ded6fb5b5d9450920e4ed5e90a2b82eae7ca | refs/heads/master | 2021-07-10T09:53:01.264372 | 2020-07-09T13:19:41 | 2020-07-09T13:19:41 | 159,319,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | """Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python.lib.io.file_io import copy as Copy
from tensorflow.python.lib.io.file_io import create_dir as MkDir
from tensorflow.python.lib.io.file_io import delete_file as Remove
from tensorflow.python.lib.io.file_io import delete_recursively as DeleteRecursively
from tensorflow.python.lib.io.file_io import file_exists as Exists
from tensorflow.python.lib.io.file_io import get_matching_files as Glob
from tensorflow.python.lib.io.file_io import is_directory as IsDirectory
from tensorflow.python.lib.io.file_io import list_directory as ListDirectory
from tensorflow.python.lib.io.file_io import recursive_create_dir as MakeDirs
from tensorflow.python.lib.io.file_io import rename as Rename
from tensorflow.python.lib.io.file_io import stat as Stat
from tensorflow.python.lib.io.file_io import walk as Walk
from tensorflow.python.platform.gfile import FastGFile
from tensorflow.python.platform.gfile import GFile
from tensorflow.python.platform.gfile import GFile as Open | [
"874496049@qq.com"
] | 874496049@qq.com |
dab2470950559095359d56daae0b4daa36f036d0 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /ec2_write_1/vpc-endpoint_modify.py | 30909794ef3620c2cf2d203628e316109fd9fca1 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/modify-vpc-endpoint.html
if __name__ == '__main__':
"""
create-vpc-endpoint : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/create-vpc-endpoint.html
delete-vpc-endpoints : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/delete-vpc-endpoints.html
describe-vpc-endpoints : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-vpc-endpoints.html
"""
parameter_display_string = """
# vpc-endpoint-id : The ID of the endpoint.
"""
add_option_dict = {}
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_one_parameter("ec2", "modify-vpc-endpoint", "vpc-endpoint-id", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
80135a8618b6f09f9c83cb98b5780e5a7adbdbb3 | 479924396152c6e6fe3de74fd46b16fbd8edeeec | /gui/qt/masternode_widgets.py | 84918a40f4bb5aec15b7c8c649122478a9255084 | [
"MIT"
] | permissive | pookieq/electrum-axe | 09d4601e97cbbd07ff69fb7369f8451f75f09b7f | a59028af29998bd91380d5fb46a062f6cac0a4af | refs/heads/master | 2021-09-09T09:46:53.164068 | 2018-03-14T21:20:54 | 2018-03-14T21:20:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,951 | py | """Masternode-related widgets."""
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_axe import bitcoin
from electrum_axe.bitcoin import COIN
from electrum_axe.i18n import _
from electrum_axe.masternode import NetworkAddress, MasternodeAnnounce
import util
def masternode_status(status):
    """Get a human-friendly representation of status.

    Args:
        status: the status string reported by the network, or False when the
            masternode has never been seen.

    Returns a 3-tuple of (enabled, one_word_description, description).
    """
    statuses = {
        'PRE_ENABLED': (True, _('Enabling'), _('Waiting for masternode to enable itself.')),
        'ENABLED': (True, _('Enabled'), _('Masternode is enabled.')),
        'EXPIRED': (False, _('Disabled'), _('Masternode failed to ping the network and was disabled.')),
        'VIN_SPENT': (False, _('Disabled'), _('Collateral payment has been spent.')),
        'REMOVE': (False, _('Disabled'), _('Masternode failed to ping the network and was disabled.')),
    }
    # Single dict lookup (the original looked the key up twice).
    known = statuses.get(status)
    if known is not None:
        return known
    if status is False:
        return (False, _('N/A'), _('Masternode has not been seen on the network.'))
    return (False, _('Unknown'), _('Unknown masternode status.'))
class NetworkAddressWidget(QWidget):
    """Widget that represents a network address (IP text field + port spinbox)."""

    def __init__(self, parent=None):
        super(NetworkAddressWidget, self).__init__(parent)
        self.ip_edit = QLineEdit()
        self.port_edit = QSpinBox()
        # NOTE(review): valid TCP ports top out at 65535; the range here
        # allows up to 99999 -- confirm whether that is intentional.
        self.port_edit.setRange(0, 99999)
        hbox = QHBoxLayout()
        hbox.setContentsMargins(0, 0, 0, 0)
        hbox.addWidget(QLabel(_('IP:')))
        hbox.addWidget(self.ip_edit, stretch=1)
        hbox.addWidget(QLabel(_('Port:')))
        hbox.addWidget(self.port_edit, stretch=1)
        self.setLayout(hbox)

    @pyqtProperty(str)
    def string(self):
        # "ip:port" form consumed by the data-widget mappers elsewhere.
        return '%s:%d' % (str(self.ip_edit.text()), self.port_edit.value())

    @string.setter
    def string(self, value):
        # Parse "ip:port"; missing pieces fall back to '' / 0.
        s = str(value).split(':')
        ip = ''
        port = 0
        if len(s) > 0:
            ip = s[0]
        if len(s) > 1:
            port = int(s[1])
        self.ip_edit.setText(ip)
        self.port_edit.setValue(port)

    def get_addr(self):
        """Get a NetworkAddress instance from this widget's data.

        Returns an empty NetworkAddress when the IP fails validation.
        """
        ip = str(self.ip_edit.text())
        port = self.port_edit.value()
        if self.validate_ip(ip):
            return NetworkAddress(ip=ip, port=port)
        return NetworkAddress()

    # TODO IPv6 support.
    def validate_ip(self, s):
        """Return True if s looks like a dotted-quad IPv4 address."""
        try:
            ip = s.split('.')
            if len(ip) != 4:
                raise Exception('Invalid length')
            for i in ip:
                if int(i) < 0 or int(i) > 255:
                    raise ValueError('Invalid IP byte')
        except Exception:
            # Any parse failure (wrong length, non-numeric byte, out of
            # range) is reported simply as "invalid".
            return False
        return True
class PrevOutWidget(QWidget):
    """Widget that represents a previous outpoint (txid, output index, address).

    The widget also keeps a `vin` dict mirroring the full outpoint,
    including `value` and `scriptSig`, which have no edit fields.
    """

    def __init__(self, parent=None):
        super(PrevOutWidget, self).__init__(parent)
        self.vin = {}
        self.hash_edit = QLineEdit()
        self.hash_edit.setPlaceholderText(_('The TxID of your 1000 AXE output'))
        self.index_edit = QLineEdit()
        self.index_edit.setPlaceholderText(_('The output number of your 1000 AXE output'))
        self.address_edit = QLineEdit()
        self.address_edit.setPlaceholderText(_('The address that 1000 AXE was sent to'))
        # Collection of fields so that it's easier to act on them all at once.
        self.fields = (self.hash_edit, self.index_edit, self.address_edit)
        for i in self.fields:
            i.setFont(QFont(util.MONOSPACE_FONT))
        form = QFormLayout()
        form.setContentsMargins(0, 0, 0, 0)
        form.addRow(_('TxID:'), self.hash_edit)
        form.addRow(_('Output Index:'), self.index_edit)
        form.addRow(_('Address:'), self.address_edit)
        self.setLayout(form)

    @pyqtProperty(str)
    def string(self):
        return self.get_str()

    @string.setter
    def string(self, value):
        return self.set_str(str(value))

    def get_str(self):
        """Serialize as 'txid:index:address:value:scriptSig'."""
        values = [str(self.hash_edit.text()), str(self.index_edit.text()), str(self.address_edit.text())]
        values.append(str(self.vin.get('value', '')))
        values.append(self.vin.get('scriptSig', ''))
        return ':'.join(values)

    def set_str(self, value):
        """Parse a 'txid:index:address:value:scriptSig' string; missing
        trailing fields are simply omitted from the resulting vin dict."""
        s = str(value).split(':')
        values = []
        try:
            values.append(('prevout_hash', s[0]))
            values.append(('prevout_n', int(s[1])))
            values.append(('address', s[2]))
            values.append(('value', int(s[3])))
            values.append(('scriptSig', s[4]))
            # Don't fail if not all values are present.
        except (IndexError, ValueError):
            pass
        vin = {k: v for k, v in values}
        self.set_dict(vin)

    def get_dict(self):
        """Build a vin dict from the edit fields (plus cached value/scriptSig).

        Returns an empty dict when no TxID has been entered; a missing
        index defaults to output 0.
        """
        d = {}
        txid = str(self.hash_edit.text())
        if not txid:
            return d
        index = str(self.index_edit.text())
        if not index:
            index = '0'
        address = str(self.address_edit.text())
        d['prevout_hash'] = txid
        d['prevout_n'] = int(index)
        d['address'] = address
        if self.vin:
            d['value'] = int(self.vin.get('value', '0'))
            d['scriptSig'] = self.vin.get('scriptSig', '')
        return d

    def set_dict(self, d):
        """Populate the edit fields and cache a copy of the full vin."""
        self.hash_edit.setText(d.get('prevout_hash', ''))
        self.index_edit.setText(str(d.get('prevout_n', '')))
        self.address_edit.setText(d.get('address', ''))
        self.vin = dict(d)

    def clear(self):
        for widget in self.fields:
            widget.clear()
        self.vin = {}

    def setReadOnly(self, isreadonly):
        for widget in self.fields:
            widget.setReadOnly(isreadonly)
class MasternodeEditor(QWidget):
    """Editor for masternodes: alias, collateral outpoint, delegate key,
    network address and protocol version."""

    def __init__(self, parent=None):
        super(MasternodeEditor, self).__init__(parent)
        self.alias_edit = QLineEdit()
        self.alias_edit.setPlaceholderText(_('Enter a name for this masternode'))
        self.vin_edit = PrevOutWidget()
        self.addr_edit = NetworkAddressWidget()
        self.delegate_key_edit = QLineEdit()
        self.delegate_key_edit.setFont(QFont(util.MONOSPACE_FONT))
        self.delegate_key_edit.setPlaceholderText(_('Your masternode\'s private key'))
        self.protocol_version_edit = QLineEdit()
        # Default protocol version; editable by the user.
        self.protocol_version_edit.setText('70201')
        self.status_edit = QLineEdit()
        self.status_edit.setPlaceholderText(_('Masternode status'))
        self.status_edit.setReadOnly(True)
        form = QFormLayout()
        form.addRow(_('Alias:'), self.alias_edit)
        form.addRow(_('Status:'), self.status_edit)
        form.addRow(_('Collateral AXE Output:'), self.vin_edit)
        form.addRow(_('Masternode Private Key:'), self.delegate_key_edit)
        form.addRow(_('Address:'), self.addr_edit)
        form.addRow(_('Protocol Version:'), self.protocol_version_edit)
        self.setLayout(form)

    def get_masternode_args(self):
        """Get MasternodeAnnounce keyword args from this widget's data.

        `protocol_version` is only included when the field is non-empty
        (int() would raise on an empty string).
        """
        kwargs = {}
        kwargs['alias'] = str(self.alias_edit.text())
        kwargs['vin'] = self.vin_edit.get_dict()
        kwargs['addr'] = self.addr_edit.get_addr()
        protocol_version = str(self.protocol_version_edit.text())
        if protocol_version:
            kwargs['protocol_version'] = int(protocol_version)
        return kwargs
class MasternodeOutputsWidget(QListWidget):
    """Widget that displays available masternode outputs.

    Items are labelled 'txid:n' and map back to their vin dicts via
    `self.outputs`; selecting one emits `outputSelected`.
    """
    # Signal emitted with the vin dict of the newly selected output.
    outputSelected = pyqtSignal(dict, name='outputSelected')

    def __init__(self, parent=None):
        super(MasternodeOutputsWidget, self).__init__(parent)
        self.outputs = {}
        self.setSelectionMode(QAbstractItemView.SingleSelection)
        self.selectionModel().selectionChanged.connect(self.on_selection_changed)

    def sizeHint(self):
        return QSize(256, 60)

    def add_output(self, d):
        """Add a valid output."""
        label = '%s:%s' % (d['prevout_hash'], d['prevout_n'])
        self.outputs[label] = d
        item = QListWidgetItem(label)
        item.setFont(QFont(util.MONOSPACE_FONT))
        self.addItem(item)

    def add_outputs(self, outputs):
        # NOTE(review): map() used for its side effect -- only evaluates
        # eagerly under Python 2; would be a no-op under Python 3.
        map(self.add_output, outputs)
        self.setCurrentRow(0)

    def clear(self):
        super(MasternodeOutputsWidget, self).clear()
        self.outputs.clear()

    def on_selection_changed(self, selected, deselected):
        """Emit the selected output."""
        items = self.selectedItems()
        if not items:
            return
        self.outputSelected.emit(self.outputs[str(items[0].text())])
class MasternodeOutputsTab(QWidget):
    """Widget that is used to select a masternode output.

    Scans the wallet for exactly-1000-AXE outputs and lets the user save
    one as the selected masternode's collateral.
    """

    def __init__(self, parent):
        super(MasternodeOutputsTab, self).__init__(parent)
        self.dialog = parent
        self.manager = parent.manager
        include_frozen_checkbox = QCheckBox(_('Include frozen addresses'))
        include_frozen_checkbox.setChecked(False)
        self.scan_outputs_button = QPushButton(_('Scan For Masternode Outputs'))

        def on_scan_outputs():
            """Call scan_for_outputs() with whether to include frozen addresses."""
            self.scan_for_outputs(include_frozen_checkbox.isChecked())

        self.scan_outputs_button.clicked.connect(on_scan_outputs)
        self.status_edit = QLineEdit()
        self.status_edit.setReadOnly(True)
        self.valid_outputs_list = MasternodeOutputsWidget()
        self.valid_outputs_list.outputSelected.connect(self.set_output)
        self.collateral_edit = PrevOutWidget()
        self.collateral_edit.setReadOnly(True)
        # Mapper ties the collateral editor to the VIN column of the shared
        # masternodes model; submission is manual (on Save).
        self.mapper = QDataWidgetMapper()
        self.mapper.setSubmitPolicy(QDataWidgetMapper.ManualSubmit)
        self.mapper.setModel(self.dialog.masternodes_widget.proxy_model)
        model = self.dialog.masternodes_widget.model
        self.mapper.addMapping(self.collateral_edit, model.VIN, 'string')
        self.save_output_button = QPushButton(_('Save'))
        self.save_output_button.setEnabled(False)
        self.save_output_button.clicked.connect(self.save_output)
        vbox = QVBoxLayout()
        desc = ' '.join(['Use this tab to scan for and choose a collateral payment for your masternode.',
            'A valid collateral payment is exactly 1000 AXE.'])
        desc = QLabel(_(desc))
        desc.setWordWrap(True)
        vbox.addWidget(desc)
        status_box = QHBoxLayout()
        status_box.setContentsMargins(0, 0, 0, 0)
        status_box.addWidget(QLabel(_('Status:')))
        status_box.addWidget(self.status_edit, stretch=1)
        vbox.addLayout(status_box)
        valid_outputs_box = QVBoxLayout()
        valid_outputs_box.setContentsMargins(0, 0, 0, 0)
        valid_outputs_box.addWidget(QLabel(_('Masternode Outputs:')))
        valid_outputs_box.addWidget(self.valid_outputs_list)
        vbox.addLayout(util.Buttons(include_frozen_checkbox, self.scan_outputs_button))
        vbox.addLayout(valid_outputs_box)
        vbox.addWidget(self.collateral_edit)
        vbox.addLayout(util.Buttons(self.save_output_button))
        self.setLayout(vbox)

    def scan_for_outputs(self, include_frozen):
        """Scan for 1000 AXE outputs.

        If one or more is found, populate the list and enable the sign button.
        """
        self.valid_outputs_list.clear()
        exclude_frozen = not include_frozen
        coins = self.manager.get_masternode_outputs(exclude_frozen=exclude_frozen)
        if len(coins) > 0:
            self.valid_outputs_list.add_outputs(coins)
        else:
            self.status_edit.setText(_('No 1000 AXE outputs were found.'))
            self.status_edit.setStyleSheet(util.RED_FG)

    def set_output(self, vin):
        """Set the selected output."""
        self.collateral_edit.set_dict(vin)
        self.save_output_button.setEnabled(True)

    def save_output(self):
        """Save the selected output as the current masternode's collateral."""
        self.mapper.submit()
        # Determine the masternode's collateral key using this output.
        self.dialog.populate_collateral_key()

    def set_mapper_index(self, row):
        """Set the row that the data widget mapper should use."""
        self.valid_outputs_list.clear()
        self.status_edit.clear()
        self.status_edit.setStyleSheet(util.BLACK_FG)
        self.mapper.setCurrentIndex(row)
        mn = self.dialog.masternodes_widget.masternode_for_row(row)
        # NOTE(review): status_text is already wrapped in _() here and is
        # wrapped again in setText below -- double translation, verify.
        status_text = _('Masternode has no collateral payment assigned.')
        can_scan = not mn.announced
        # Disable the scan_outputs button if the masternode already has an assigned output.
        if mn.vin.get('value', 0) == COIN * 1000:
            can_scan = False
            self.valid_outputs_list.add_output(mn.vin)
            status_text = _('Masternode already has a collateral payment.')
        self.status_edit.setText(_(status_text))
        self.scan_outputs_button.setEnabled(can_scan)
class SignAnnounceWidget(QWidget):
    """Widget that displays information about signing a Masternode Announce.

    Read-only view of the masternode's alias, collateral and delegate key,
    plus an "Activate" button that triggers signing via the parent dialog.
    """

    def __init__(self, parent):
        super(SignAnnounceWidget, self).__init__(parent)
        self.dialog = parent
        self.manager = parent.manager
        # Displays the status of the masternode.
        self.status_edit = QLineEdit()
        self.status_edit.setReadOnly(True)
        self.alias_edit = QLineEdit()
        self.collateral_edit = PrevOutWidget()
        self.delegate_edit = QLineEdit()
        self.delegate_edit.setFont(QFont(util.MONOSPACE_FONT))
        for i in [self.alias_edit, self.collateral_edit, self.delegate_edit]:
            i.setReadOnly(True)
        # Map the read-only fields to columns of the shared masternodes model.
        self.mapper = QDataWidgetMapper()
        self.mapper.setSubmitPolicy(QDataWidgetMapper.ManualSubmit)
        self.mapper.setModel(self.dialog.masternodes_widget.proxy_model)
        model = self.dialog.masternodes_widget.model
        self.mapper.addMapping(self.alias_edit, model.ALIAS)
        self.mapper.addMapping(self.collateral_edit, model.VIN, 'string')
        self.mapper.addMapping(self.delegate_edit, model.DELEGATE)
        self.sign_button = QPushButton(_('Activate Masternode'))
        self.sign_button.setEnabled(False)
        self.sign_button.clicked.connect(self.sign_announce)
        status_box = QHBoxLayout()
        status_box.setContentsMargins(0, 0, 0, 0)
        status_box.addWidget(QLabel(_('Status:')))
        status_box.addWidget(self.status_edit, stretch=1)
        vbox = QVBoxLayout()
        vbox.addLayout(status_box)
        form = QFormLayout()
        form.addRow(_('Alias:'), self.alias_edit)
        form.addRow(_('Collateral AXE Output:'), self.collateral_edit)
        form.addRow(_('Masternode Private Key:'), self.delegate_edit)
        vbox.addLayout(form)
        vbox.addLayout(util.Buttons(self.sign_button))
        self.setLayout(vbox)

    def set_mapper_index(self, row):
        """Set the row that the data widget mapper should use."""
        self.status_edit.clear()
        self.status_edit.setStyleSheet(util.BLACK_FG)
        self.mapper.setCurrentIndex(row)
        mn = self.dialog.masternodes_widget.masternode_for_row(row)
        # Disable the sign button if the masternode can't be signed (for whatever reason).
        status_text = '%s can be activated' % mn.alias
        can_sign = True
        try:
            self.manager.check_can_sign_masternode(mn.alias)
        except Exception as e:
            # The manager's error message becomes the displayed status.
            status_text = str(e)
            can_sign = False
        self.status_edit.setText(_(status_text))
        self.sign_button.setEnabled(can_sign)

    def sign_announce(self):
        """Set the masternode's vin and sign an announcement."""
        self.mapper.submit()
        self.dialog.sign_announce(str(self.alias_edit.text()))
| [
"slowdive@me.com"
] | slowdive@me.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.