Dataset columns (name, dtype, value range):

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | lengths 4 to 209 |
| max_stars_repo_name | string | lengths 5 to 121 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 4 to 209 |
| max_issues_repo_name | string | lengths 5 to 121 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 4 to 209 |
| max_forks_repo_name | string | lengths 5 to 121 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
hexsha: 575ac01315005f3a8f5361df51402685f4d9c2d0 | size: 7,608 | ext: py | lang: Python
max_stars: scanpipe/pipes/__init__.py @ sa-y-an/scancode.io (head 0dfb930fd977f0ac65a6b3947044c77c01cbc936), licenses ["Apache-2.0"], count 55, events 2020-09-11T11:18:18.000Z to 2022-03-26T20:47:59.000Z
max_issues: scanpipe/pipes/__init__.py @ sa-y-an/scancode.io (head 0dfb930fd977f0ac65a6b3947044c77c01cbc936), licenses ["Apache-2.0"], count 327, events 2020-09-11T12:50:48.000Z to 2022-03-30T18:28:40.000Z
max_forks: scanpipe/pipes/__init__.py @ sa-y-an/scancode.io (head 0dfb930fd977f0ac65a6b3947044c77c01cbc936), licenses ["Apache-2.0"], count 50, events 2020-09-15T20:34:58.000Z to 2022-03-20T07:21:57.000Z
content:
# SPDX-License-Identifier: Apache-2.0
#
# http://nexb.com and https://github.com/nexB/scancode.io
# The ScanCode.io software is licensed under the Apache License version 2.0.
# Data generated with ScanCode.io is provided as-is without warranties.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# Data Generated with ScanCode.io is provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode.io should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
#
# ScanCode.io is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode.io for support and download.
import logging
import subprocess
import sys
from datetime import datetime
from pathlib import Path
from time import sleep
from django.db.models import Count
from packageurl import normalize_qualifiers
from scanpipe.models import CodebaseResource
from scanpipe.models import DiscoveredPackage
from scanpipe.pipes import scancode
logger = logging.getLogger("scanpipe.pipes")
def make_codebase_resource(project, location, rootfs_path=None):
"""
Creates a CodebaseResource instance in the database for the given `project`.
The provided `location` is the absolute path of this resource.
It must be rooted in `project.codebase_path` as only the relative path within the
project codebase/ directory is stored in the database.
`rootfs_path` is an optional path relative to a rootfs root within an
Image/VM filesystem context. e.g.: "/var/log/file.log"
All paths use the POSIX separators.
If a CodebaseResource already exists in the `project` with the same path,
the error raised on save() is not stored in the database and the creation is
skipped.
"""
relative_path = Path(location).relative_to(project.codebase_path)
resource_data = scancode.get_resource_info(location=location)
if rootfs_path:
resource_data["rootfs_path"] = rootfs_path
codebase_resource = CodebaseResource(
project=project,
path=relative_path,
**resource_data,
)
codebase_resource.save(save_error=False)
def update_or_create_package(project, package_data):
"""
Gets, updates or creates a DiscoveredPackage then returns it.
Uses the `project` and `package_data` mapping to lookup and creates the
DiscoveredPackage using its Package URL as a unique key.
"""
# make a copy
package_data = dict(package_data or {})
if not package_data:
return
# keep only known fields with values
package_data = {
field_name: value
for field_name, value in package_data.items()
if field_name in DiscoveredPackage.model_fields() and value
}
purl_fields = ("type", "namespace", "name", "version", "qualifiers", "subpath")
purl_data = {}
for k in purl_fields:
# get and remove
v = package_data.pop(k, "")
if k == "qualifiers":
v = normalize_qualifiers(v, encode=True)
purl_data[k] = v or ""
if not purl_data:
raise Exception(f"Package without any Package URL fields: {package_data}")
# if 'type' not in purl_data and 'name' not in purl_data:
# raise Exception(
# f'Package missing type and name Package URL fields: {package_data}')
# FIXME: we should also consider the download URL as part of the key
# Ensure a purl is treated like if this is the UNIQUE key to a package.
dp, created = DiscoveredPackage.objects.get_or_create(
project=project, **purl_data, defaults=package_data
)
if not created:
# update/merge records since we have an existing record
dp_fields = DiscoveredPackage.model_fields()
has_updates = False
for field_name, value in package_data.items():
if field_name not in dp_fields or not value:
continue
existing_value = getattr(dp, field_name, "")
if not existing_value:
setattr(dp, field_name, value)
has_updates = True
elif existing_value != value:
# TODO: handle this case
pass
if has_updates:
dp.save()
return dp
def analyze_scanned_files(project):
"""
Sets the status for CodebaseResource to unknown or no license.
"""
scanned_files = project.codebaseresources.files().status("scanned")
scanned_files.has_no_licenses().update(status="no-licenses")
scanned_files.unknown_license().update(status="unknown-license")
def tag_not_analyzed_codebase_resources(project):
"""
    Flags any of the `project`'s `CodebaseResource` without a status as "not-analyzed".
"""
project.codebaseresources.no_status().update(status="not-analyzed")
def normalize_path(path):
"""
Returns a normalized path from a `path` string.
"""
return "/" + path.strip("/")
def strip_root(location):
"""
Returns the provided `location` without the root directory.
"""
return "/".join(str(location).strip("/").split("/")[1:])
def filename_now(sep="-"):
"""
    Returns the current date and time in ISO format, suitable for use in a filename.
"""
now = datetime.now().isoformat(sep=sep, timespec="seconds")
return now.replace(":", sep)
def count_group_by(queryset, field_name):
"""
Returns a summary of all existing values for the provided `field_name` on the
`queryset`, including the count of each entry, as a dictionary.
"""
counts = (
queryset.values(field_name)
.annotate(count=Count(field_name))
.order_by(field_name)
)
return {entry.get(field_name): entry.get("count") for entry in counts}
def get_bin_executable(filename):
"""
Returns the location of the `filename` executable binary.
"""
return str(Path(sys.executable).parent / filename)
def _stream_process(process, stream_to=logger.info):
exitcode = process.poll()
for line in process.stdout:
stream_to(line.rstrip("\n"))
has_terminated = exitcode is not None
return has_terminated
def run_command(cmd, log_output=False):
"""
Returns (exitcode, output) of executing the provided `cmd` in a shell.
`cmd` can be provided as a string or as a list of arguments.
If `log_output` is True, the stdout and stderr of the process will be captured
and streamed to the `logger`.
"""
if isinstance(cmd, list):
cmd = " ".join(cmd)
if not log_output:
exitcode, output = subprocess.getstatusoutput(cmd)
return exitcode, output
process = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
while _stream_process(process):
sleep(1)
exitcode = process.poll()
return exitcode, ""
def remove_prefix(text, prefix):
"""
Removes the `prefix` from `text`.
"""
if text.startswith(prefix):
prefix_len = len(prefix)
return text[prefix_len:]
return text
avg_line_length: 31.7 | max_line_length: 88 | alphanum_fraction: 0.685463
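The path and string helpers in the scanpipe/pipes module above (normalize_path, strip_root, remove_prefix, filename_now) can be exercised directly. A minimal sketch, assuming a configured ScanCode.io/Django environment so that scanpipe.pipes is importable; the sample values are illustrative only:

```python
# Illustrative check of the pure helpers defined in scanpipe/pipes above.
# Importing scanpipe.pipes requires a configured ScanCode.io (Django) project.
from scanpipe.pipes import filename_now, normalize_path, remove_prefix, strip_root

assert normalize_path("var/log/") == "/var/log"              # adds leading "/", strips trailing "/"
assert strip_root("/codebase/src/main.py") == "src/main.py"  # drops the first path segment
assert remove_prefix("scan-report.json", "scan-") == "report.json"
print(filename_now())  # e.g. "2024-01-01-12-30-45": ISO timestamp with "T" and ":" replaced by "-"
```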
hexsha: a68d1c3376d777fe1b9c8024cf70e6e3da140716 | size: 75 | ext: py | lang: Python
max_stars: Untitled Folder/Lincoln.py @ UncleLincoln/trainee (head eb9f4be00e80fddd0ab3d3e6ea9a20c55f5bcab8), licenses ["MIT"], count 36, events 2018-11-03T01:37:30.000Z to 2019-04-07T19:52:34.000Z
max_issues: Untitled Folder/Lincoln.py @ UncleLincoln/trainee (head eb9f4be00e80fddd0ab3d3e6ea9a20c55f5bcab8), licenses ["MIT"], count 8, events 2020-11-13T19:06:32.000Z to 2022-01-13T03:24:20.000Z
max_forks: Untitled Folder/Lincoln.py @ BuErTech/trainee (head eb9f4be00e80fddd0ab3d3e6ea9a20c55f5bcab8), licenses ["MIT"], count 86, events 2018-11-03T01:38:25.000Z to 2019-04-07T05:55:02.000Z
content:
def deductFunction(a,b):
return a-b
def sigma(a,b,c,d):
return a+b+c+d
avg_line_length: 10.714286 | max_line_length: 24 | alphanum_fraction: 0.653333
hexsha: 2b454e631dfec98dbd62fd96fbf1e86d7296fac1 | size: 729 | ext: py | lang: Python
max_stars: src/account/api/serializers.py @ arturchesnokov/currency_exchange (head 62fc52b755174ea888aca94f421b57ab085b57c9), licenses ["MIT"], count null, events null
max_issues: src/account/api/serializers.py @ arturchesnokov/currency_exchange (head 62fc52b755174ea888aca94f421b57ab085b57c9), licenses ["MIT"], count 6, events 2021-03-19T08:38:28.000Z to 2022-03-12T00:19:16.000Z
max_forks: src/account/api/serializers.py @ arturchesnokov/currency_exchange (head 62fc52b755174ea888aca94f421b57ab085b57c9), licenses ["MIT"], count null, events null
content:
from rest_framework import serializers
from account.tasks import send_email_async
from currency_exchange import settings
from account.models import Contact
class ContactSerializer(serializers.ModelSerializer):
class Meta:
model = Contact
fields = (
'id',
'created',
'email',
'title',
'text',
)
def create(self, validated_data):
subject = validated_data['title']
message = validated_data['text']
email_from = settings.EMAIL_HOST_USER
recipient_list = [validated_data['email'], ]
send_email_async.delay(subject, message, email_from, recipient_list)
return super().create(validated_data)
avg_line_length: 28.038462 | max_line_length: 76 | alphanum_fraction: 0.644719
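As a usage sketch for the serializer above: in a typical Django REST Framework setup, ContactSerializer would be exposed through a create view so that a POST saves the Contact and ContactSerializer.create() queues the asynchronous e-mail task. The view and URL below are hypothetical, not part of the repository row above.

```python
# Hypothetical wiring, assuming standard Django REST Framework conventions.
from rest_framework.generics import CreateAPIView

from account.api.serializers import ContactSerializer
from account.models import Contact


class ContactCreateAPIView(CreateAPIView):
    """POST {email, title, text}: creates a Contact and queues send_email_async."""
    queryset = Contact.objects.all()
    serializer_class = ContactSerializer

# urls.py (hypothetical):
#   path("api/contacts/", ContactCreateAPIView.as_view(), name="contact-create")
```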
hexsha: b5db8ba59b91e8349884b213a7a3df2095240c10 | size: 30,474 | ext: py | lang: Python
max_stars: mmdet/models/dense_heads/fcos_head.py @ zimoqingfeng/UMOP (head 16af670ee10b95015296d4a8da56a10fb7b89f72), licenses ["Apache-2.0"], count 27, events 2021-09-16T11:24:43.000Z to 2022-03-29T06:52:20.000Z
max_issues: mmdet/models/dense_heads/fcos_head.py @ zimoqingfeng/UMOP (head 16af670ee10b95015296d4a8da56a10fb7b89f72), licenses ["Apache-2.0"], count 9, events 2021-09-16T08:51:01.000Z to 2022-01-05T10:37:47.000Z
max_forks: mmdet/models/dense_heads/fcos_head.py @ zimoqingfeng/UMOP (head 16af670ee10b95015296d4a8da56a10fb7b89f72), licenses ["Apache-2.0"], count 4, events 2021-09-16T11:24:58.000Z to 2021-12-18T01:13:30.000Z
content:
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import Scale
from mmcv.runner import force_fp32
from mmdet.core import distance2bbox, multi_apply, multiclass_nms, reduce_mean
from ..builder import HEADS, build_loss
from .anchor_free_head import AnchorFreeHead
INF = 1e8
@HEADS.register_module()
class FCOSHead(AnchorFreeHead):
"""Anchor-free head used in `FCOS <https://arxiv.org/abs/1904.01355>`_.
The FCOS head does not use anchor boxes. Instead bounding boxes are
predicted at each pixel and a centerness measure is used to suppress
low-quality predictions.
Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training
tricks used in official repo, which will bring remarkable mAP gains
of up to 4.9. Please see https://github.com/tianzhi0549/FCOS for
more detail.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
strides (list[int] | list[tuple[int, int]]): Strides of points
in multiple feature levels. Default: (4, 8, 16, 32, 64).
regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
level points.
center_sampling (bool): If true, use center sampling. Default: False.
center_sample_radius (float): Radius of center sampling. Default: 1.5.
norm_on_bbox (bool): If true, normalize the regression targets
with FPN strides. Default: False.
centerness_on_reg (bool): If true, position centerness on the
regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.
Default: False.
conv_bias (bool | str): If specified as `auto`, it will be decided by the
norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise
False. Default: "auto".
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
loss_centerness (dict): Config of centerness loss.
norm_cfg (dict): dictionary to construct and config norm layer.
Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True).
init_cfg (dict or list[dict], optional): Initialization config dict.
Example:
>>> self = FCOSHead(11, 7)
>>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
>>> cls_score, bbox_pred, centerness = self.forward(feats)
>>> assert len(cls_score) == len(self.scales)
""" # noqa: E501
def __init__(self,
num_classes,
in_channels,
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
(512, INF)),
center_sampling=False,
center_sample_radius=1.5,
norm_on_bbox=False,
centerness_on_reg=False,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='conv_cls',
std=0.01,
bias_prob=0.01)),
**kwargs):
self.regress_ranges = regress_ranges
self.center_sampling = center_sampling
self.center_sample_radius = center_sample_radius
self.norm_on_bbox = norm_on_bbox
self.centerness_on_reg = centerness_on_reg
super().__init__(
num_classes,
in_channels,
loss_cls=loss_cls,
loss_bbox=loss_bbox,
norm_cfg=norm_cfg,
init_cfg=init_cfg,
**kwargs)
self.loss_centerness = build_loss(loss_centerness)
def _init_layers(self):
"""Initialize layers of the head."""
super()._init_layers()
self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple:
cls_scores (list[Tensor]): Box scores for each scale level, \
each is a 4D-tensor, the channel number is \
num_points * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for each \
scale level, each is a 4D-tensor, the channel number is \
num_points * 4.
centernesses (list[Tensor]): centerness for each scale level, \
each is a 4D-tensor, the channel number is num_points * 1.
"""
return multi_apply(self.forward_single, feats, self.scales,
self.strides)
def forward_single(self, x, scale, stride):
"""Forward features of a single scale level.
Args:
x (Tensor): FPN feature maps of the specified stride.
scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
the bbox prediction.
stride (int): The corresponding stride for feature maps, only
used to normalize the bbox prediction when self.norm_on_bbox
is True.
Returns:
tuple: scores for each class, bbox predictions and centerness \
predictions of input feature maps.
"""
cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x)
if self.centerness_on_reg:
centerness = self.conv_centerness(reg_feat)
else:
centerness = self.conv_centerness(cls_feat)
# scale the bbox_pred of different level
# float to avoid overflow when enabling FP16
bbox_pred = scale(bbox_pred).float()
if self.norm_on_bbox:
bbox_pred = F.relu(bbox_pred)
if not self.training:
bbox_pred *= stride
else:
bbox_pred = bbox_pred.exp()
return cls_score, bbox_pred, centerness
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def loss(self,
cls_scores,
bbox_preds,
centernesses,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute loss of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level,
each is a 4D-tensor, the channel number is
num_points * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level, each is a 4D-tensor, the channel number is
num_points * 4.
centernesses (list[Tensor]): centerness for each scale level, each
is a 4D-tensor, the channel number is num_points * 1.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert len(cls_scores) == len(bbox_preds) == len(centernesses)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
labels, bbox_targets = self.get_targets(all_level_points, gt_bboxes,
gt_labels)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores, bbox_preds and centerness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_centerness = [
centerness.permute(0, 2, 3, 1).reshape(-1)
for centerness in centernesses
]
###### Insert hook for distribution
for idx in range(len(labels)):
lvl_valid_labels = labels[idx]
num_tot_valid_labels = torch.tensor((len(lvl_valid_labels) * self.num_classes),
dtype=torch.float,
device=lvl_valid_labels.device)
num_tot_valid_labels = reduce_mean(num_tot_valid_labels).item()
num_pos_samples = torch.tensor(len(lvl_valid_labels[lvl_valid_labels != 80]),
dtype=torch.float,
device=lvl_valid_labels.device)
num_pos_samples = reduce_mean(num_pos_samples).item()
if lvl_valid_labels.device.index == 0:
with open('./fcos_sum.log', 'a') as f:
f.write('\t'.join([str(idx),
str(num_tot_valid_labels),
str(num_pos_samples)]) + '\n')
###### Insert hook for distribution
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_centerness = torch.cat(flatten_centerness)
flatten_labels = torch.cat(labels)
flatten_bbox_targets = torch.cat(bbox_targets)
# repeat points to align with bbox_preds
flatten_points = torch.cat(
[points.repeat(num_imgs, 1) for points in all_level_points])
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = self.num_classes
pos_inds = ((flatten_labels >= 0)
& (flatten_labels < bg_class_ind)).nonzero().reshape(-1)
num_pos = torch.tensor(
len(pos_inds), dtype=torch.float, device=bbox_preds[0].device)
num_pos = max(reduce_mean(num_pos), 1.0)
loss_cls = self.loss_cls(
flatten_cls_scores, flatten_labels, avg_factor=num_pos)
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_centerness = flatten_centerness[pos_inds]
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_centerness_targets = self.centerness_target(pos_bbox_targets)
# centerness weighted iou loss
centerness_denorm = max(
reduce_mean(pos_centerness_targets.sum().detach()), 1e-6)
if len(pos_inds) > 0:
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points,
pos_bbox_targets)
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds,
weight=pos_centerness_targets,
avg_factor=centerness_denorm)
loss_centerness = self.loss_centerness(
pos_centerness, pos_centerness_targets, avg_factor=num_pos)
else:
loss_bbox = pos_bbox_preds.sum()
loss_centerness = pos_centerness.sum()
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_centerness=loss_centerness)
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def get_bboxes(self,
cls_scores,
bbox_preds,
centernesses,
img_metas,
cfg=None,
rescale=False,
with_nms=True):
"""Transform network output for a batch into bbox predictions.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
with shape (N, num_points * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_points * 4, H, W).
centernesses (list[Tensor]): Centerness for each scale level with
shape (N, num_points * 1, H, W).
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
cfg (mmcv.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used. Default: None.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
The first item is an (n, 5) tensor, where 5 represent
(tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
The shape of the second tensor in the tuple is (n,), and
each element represents the class label of the corresponding
box.
"""
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
cls_score_list = [cls_scores[i].detach() for i in range(num_levels)]
bbox_pred_list = [bbox_preds[i].detach() for i in range(num_levels)]
centerness_pred_list = [
centernesses[i].detach() for i in range(num_levels)
]
if torch.onnx.is_in_onnx_export():
assert len(
img_metas
) == 1, 'Only support one input image while in exporting to ONNX'
img_shapes = img_metas[0]['img_shape_for_onnx']
else:
img_shapes = [
img_metas[i]['img_shape']
for i in range(cls_scores[0].shape[0])
]
scale_factors = [
img_metas[i]['scale_factor'] for i in range(cls_scores[0].shape[0])
]
result_list = self._get_bboxes(cls_score_list, bbox_pred_list,
centerness_pred_list, mlvl_points,
img_shapes, scale_factors, cfg, rescale,
with_nms)
return result_list
def _get_bboxes(self,
cls_scores,
bbox_preds,
centernesses,
mlvl_points,
img_shapes,
scale_factors,
cfg,
rescale=False,
with_nms=True):
"""Transform outputs for a single batch item into bbox predictions.
Args:
cls_scores (list[Tensor]): Box scores for a single scale level
with shape (N, num_points * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for a single scale
level with shape (N, num_points * 4, H, W).
centernesses (list[Tensor]): Centerness for a single scale level
with shape (N, num_points, H, W).
mlvl_points (list[Tensor]): Box reference for a single scale level
with shape (num_total_points, 4).
img_shapes (list[tuple[int]]): Shape of the input image,
list[(height, width, 3)].
scale_factors (list[ndarray]): Scale factor of the image arrange as
(w_scale, h_scale, w_scale, h_scale).
cfg (mmcv.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
tuple(Tensor):
det_bboxes (Tensor): BBox predictions in shape (n, 5), where
the first 4 columns are bounding box positions
(tl_x, tl_y, br_x, br_y) and the 5-th column is a score
between 0 and 1.
det_labels (Tensor): A (n,) tensor where each item is the
predicted class label of the corresponding box.
"""
cfg = self.test_cfg if cfg is None else cfg
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
device = cls_scores[0].device
batch_size = cls_scores[0].shape[0]
# convert to tensor to keep tracing
nms_pre_tensor = torch.tensor(
cfg.get('nms_pre', -1), device=device, dtype=torch.long)
mlvl_bboxes = []
mlvl_scores = []
mlvl_centerness = []
for cls_score, bbox_pred, centerness, points in zip(
cls_scores, bbox_preds, centernesses, mlvl_points):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(0, 2, 3, 1).reshape(
batch_size, -1, self.cls_out_channels).sigmoid()
centerness = centerness.permute(0, 2, 3,
1).reshape(batch_size,
-1).sigmoid()
bbox_pred = bbox_pred.permute(0, 2, 3,
1).reshape(batch_size, -1, 4)
points = points.expand(batch_size, -1, 2)
# Get top-k prediction
from mmdet.core.export import get_k_for_topk
nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1])
if nms_pre > 0:
max_scores, _ = (scores * centerness[..., None]).max(-1)
_, topk_inds = max_scores.topk(nms_pre)
batch_inds = torch.arange(batch_size).view(
-1, 1).expand_as(topk_inds).long()
# Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501
if torch.onnx.is_in_onnx_export():
transformed_inds = bbox_pred.shape[
1] * batch_inds + topk_inds
points = points.reshape(-1,
2)[transformed_inds, :].reshape(
batch_size, -1, 2)
bbox_pred = bbox_pred.reshape(
-1, 4)[transformed_inds, :].reshape(batch_size, -1, 4)
scores = scores.reshape(
-1, self.num_classes)[transformed_inds, :].reshape(
batch_size, -1, self.num_classes)
centerness = centerness.reshape(
-1, 1)[transformed_inds].reshape(batch_size, -1)
else:
points = points[batch_inds, topk_inds, :]
bbox_pred = bbox_pred[batch_inds, topk_inds, :]
scores = scores[batch_inds, topk_inds, :]
centerness = centerness[batch_inds, topk_inds]
bboxes = distance2bbox(points, bbox_pred, max_shape=img_shapes)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_centerness.append(centerness)
batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
if rescale:
batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
scale_factors).unsqueeze(1)
batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
batch_mlvl_centerness = torch.cat(mlvl_centerness, dim=1)
# Replace multiclass_nms with ONNX::NonMaxSuppression in deployment
if torch.onnx.is_in_onnx_export() and with_nms:
from mmdet.core.export import add_dummy_nms_for_onnx
batch_mlvl_scores = batch_mlvl_scores * (
batch_mlvl_centerness.unsqueeze(2))
max_output_boxes_per_class = cfg.nms.get(
'max_output_boxes_per_class', 200)
iou_threshold = cfg.nms.get('iou_threshold', 0.5)
score_threshold = cfg.score_thr
nms_pre = cfg.get('deploy_nms_pre', -1)
return add_dummy_nms_for_onnx(batch_mlvl_bboxes, batch_mlvl_scores,
max_output_boxes_per_class,
iou_threshold, score_threshold,
nms_pre, cfg.max_per_img)
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
# BG cat_id: num_class
padding = batch_mlvl_scores.new_zeros(batch_size,
batch_mlvl_scores.shape[1], 1)
batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
if with_nms:
det_results = []
for (mlvl_bboxes, mlvl_scores,
mlvl_centerness) in zip(batch_mlvl_bboxes, batch_mlvl_scores,
batch_mlvl_centerness):
det_bbox, det_label = multiclass_nms(
mlvl_bboxes,
mlvl_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=mlvl_centerness)
det_results.append(tuple([det_bbox, det_label]))
else:
det_results = [
tuple(mlvl_bs)
for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores,
batch_mlvl_centerness)
]
return det_results
def _get_points_single(self,
featmap_size,
stride,
dtype,
device,
flatten=False):
"""Get points according to feature map sizes."""
y, x = super()._get_points_single(featmap_size, stride, dtype, device)
points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride),
dim=-1) + stride // 2
return points
def get_targets(self, points, gt_bboxes_list, gt_labels_list):
"""Compute regression, classification and centerness targets for points
in multiple images.
Args:
points (list[Tensor]): Points of each fpn level, each has shape
(num_points, 2).
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
each has shape (num_gt, 4).
gt_labels_list (list[Tensor]): Ground truth labels of each box,
each has shape (num_gt,).
Returns:
tuple:
concat_lvl_labels (list[Tensor]): Labels of each level. \
concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \
level.
"""
assert len(points) == len(self.regress_ranges)
num_levels = len(points)
# expand regress ranges to align with points
expanded_regress_ranges = [
points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
points[i]) for i in range(num_levels)
]
# concat all levels points and regress ranges
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(points, dim=0)
# the number of points per img, per lvl
num_points = [center.size(0) for center in points]
# get labels and bbox_targets of each image
labels_list, bbox_targets_list = multi_apply(
self._get_target_single,
gt_bboxes_list,
gt_labels_list,
points=concat_points,
regress_ranges=concat_regress_ranges,
num_points_per_lvl=num_points)
# split to per img, per level
labels_list = [labels.split(num_points, 0) for labels in labels_list]
bbox_targets_list = [
bbox_targets.split(num_points, 0)
for bbox_targets in bbox_targets_list
]
# concat per level image
concat_lvl_labels = []
concat_lvl_bbox_targets = []
for i in range(num_levels):
concat_lvl_labels.append(
torch.cat([labels[i] for labels in labels_list]))
bbox_targets = torch.cat(
[bbox_targets[i] for bbox_targets in bbox_targets_list])
if self.norm_on_bbox:
bbox_targets = bbox_targets / self.strides[i]
concat_lvl_bbox_targets.append(bbox_targets)
return concat_lvl_labels, concat_lvl_bbox_targets
def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ranges,
num_points_per_lvl):
"""Compute regression and classification targets for a single image."""
num_points = points.size(0)
num_gts = gt_labels.size(0)
if num_gts == 0:
return gt_labels.new_full((num_points,), self.num_classes), \
gt_bboxes.new_zeros((num_points, 4))
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1])
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = torch.stack((left, top, right, bottom), -1)
if self.center_sampling:
# condition1: inside a `center bbox`
radius = self.center_sample_radius
center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2
center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2
center_gts = torch.zeros_like(gt_bboxes)
stride = center_xs.new_zeros(center_xs.shape)
# project the points on current lvl back to the `original` sizes
lvl_begin = 0
for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):
lvl_end = lvl_begin + num_points_lvl
stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius
lvl_begin = lvl_end
x_mins = center_xs - stride
y_mins = center_ys - stride
x_maxs = center_xs + stride
y_maxs = center_ys + stride
center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0],
x_mins, gt_bboxes[..., 0])
center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1],
y_mins, gt_bboxes[..., 1])
center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2],
gt_bboxes[..., 2], x_maxs)
center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3],
gt_bboxes[..., 3], y_maxs)
cb_dist_left = xs - center_gts[..., 0]
cb_dist_right = center_gts[..., 2] - xs
cb_dist_top = ys - center_gts[..., 1]
cb_dist_bottom = center_gts[..., 3] - ys
center_bbox = torch.stack(
(cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1)
inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0
else:
# condition1: inside a gt bbox
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
(max_regress_distance >= regress_ranges[..., 0])
& (max_regress_distance <= regress_ranges[..., 1]))
# if there are still more than one objects for a location,
# we choose the one with minimal area
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
labels[min_area == INF] = self.num_classes # set as BG
bbox_targets = bbox_targets[range(num_points), min_area_inds]
return labels, bbox_targets
def centerness_target(self, pos_bbox_targets):
"""Compute centerness targets.
Args:
pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape
(num_pos, 4)
Returns:
Tensor: Centerness target.
"""
# only calculate pos centerness targets, otherwise there may be nan
left_right = pos_bbox_targets[:, [0, 2]]
top_bottom = pos_bbox_targets[:, [1, 3]]
if len(left_right) == 0:
centerness_targets = left_right[..., 0]
else:
centerness_targets = (
left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
return torch.sqrt(centerness_targets)
avg_line_length: 45.415797 | max_line_length: 113 | alphanum_fraction: 0.561101
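A small worked example, not taken from the repository above, of the centerness target computed by FCOSHead.centerness_target(): for a positive location with regression targets (left, top, right, bottom) = (2, 6, 8, 3), centerness = sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b))) = sqrt(0.25 * 0.5) ≈ 0.354.

```python
# Standalone check of the centerness formula used by centerness_target() above.
import torch

pos_bbox_targets = torch.tensor([[2.0, 6.0, 8.0, 3.0]])  # (num_pos, 4) as (l, t, r, b)
left_right = pos_bbox_targets[:, [0, 2]]   # (2, 8)
top_bottom = pos_bbox_targets[:, [1, 3]]   # (6, 3)
centerness = torch.sqrt(
    (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0])    # 2 / 8 = 0.25
    * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])  # 3 / 6 = 0.50
)
print(centerness)  # tensor([0.3536])
```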
hexsha: 6738da8ec9d83dea1b5e836a32b63f6e3aaf1159 | size: 726 | ext: py | lang: Python
max_stars: transcribe.py @ rampa3/WS-voice-translator (head 523b3c3d26b73b9fb0554a28b4ede0848577a282), licenses ["MIT"], count 1, events 2021-03-05T21:37:26.000Z to 2021-03-05T21:37:26.000Z
max_issues: transcribe.py @ rampa3/WS-voice-translator (head 523b3c3d26b73b9fb0554a28b4ede0848577a282), licenses ["MIT"], count 3, events 2021-06-26T19:10:50.000Z to 2021-06-26T19:15:29.000Z
max_forks: transcribe.py @ rampa3/WS-voice-translator (head 523b3c3d26b73b9fb0554a28b4ede0848577a282), licenses ["MIT"], count null, events null
content:
import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
# load pretrained model
print("Loading STT model...")
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
print("STT model loaded.")
def transcribe(sound_file):
# load audio
audio_input, _ = sf.read(sound_file)
# transcribe
input_values = tokenizer(audio_input, return_tensors="pt").input_values
logits = model(input_values).logits
predicted_ids = torch.argmax(logits, dim=-1)
transcription = tokenizer.batch_decode(predicted_ids)[0]
return transcription
avg_line_length: 31.565217 | max_line_length: 88 | alphanum_fraction: 0.746556
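A minimal usage sketch for the transcribe.py module above. The file name is hypothetical, and the wav2vec2-large-960h-lv60-self checkpoint expects 16 kHz mono audio, so the WAV file is assumed to already be in that format.

```python
# Hypothetical call site; "speech.wav" is assumed to be a 16 kHz mono WAV file.
from transcribe import transcribe

text = transcribe("speech.wav")
print(text)  # e.g. "HELLO WORLD" (these checkpoints emit upper-case transcriptions)
```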
hexsha: f55a2d613d0561efb4a2fad6d3fe45c811bab911 | size: 8,654 | ext: py | lang: Python
max_stars: nnvm/tvm/python/tvm/target.py @ CynthiaProtector/helo (head ad9e22363a92389b3fa519ecae9061c6ead28b05), licenses ["Apache-2.0"], count 9, events 2019-04-19T04:45:18.000Z to 2021-01-07T06:31:15.000Z
max_issues: nnvm/tvm/python/tvm/target.py @ CynthiaProtector/helo (head ad9e22363a92389b3fa519ecae9061c6ead28b05), licenses ["Apache-2.0"], count null, events null
max_forks: nnvm/tvm/python/tvm/target.py @ CynthiaProtector/helo (head ad9e22363a92389b3fa519ecae9061c6ead28b05), licenses ["Apache-2.0"], count 5, events 2019-09-18T20:21:23.000Z to 2020-11-22T11:18:15.000Z
content:
"""Target management API of TVM.
TVM's target string is in format ``<target_name> [-option=value]...``.
Note
----
The list of options include:
- **-device=<device name>**
The device name.
- **-mtriple=<target triple>** or **-target**
Specify the target triple, which is useful for cross
compilation.
- **-mcpu=<cpuname>**
Specify a specific chip in the current architecture to
   generate code for. By default this is inferred from the
target triple and autodetected to the current architecture.
- **-mattr=a1,+a2,-a3,...**
Override or control specific attributes of the target,
such as whether SIMD operations are enabled or not. The
default set of attributes is set by the current CPU.
- **-system-lib**
Build TVM system library module. System lib is a global module that contains
self registered functions in program startup. User can get the module using
:any:`tvm.module.system_lib`.
It is useful in environments where dynamic loading api like dlopen is banned.
The system lib will be available as long as the result code is linked by the program.
We can use :any:`tvm.target.create` to create a tvm.target.Target from the target string.
We can also use other specific function in this module to create specific targets.
"""
from __future__ import absolute_import
import warnings
from ._ffi.base import _LIB_NAME
try:
from decorator import decorate
except ImportError as err_msg:
# Allow decorator to be missing in runtime
if _LIB_NAME != "libtvm_runtime.so":
raise err_msg
def _merge_opts(opts, new_opts):
"""Helper function to merge options"""
if isinstance(new_opts, str):
new_opts = new_opts.split()
if new_opts:
return opts + new_opts
return opts
class Target(object):
"""Target device information, use through TVM API.
Parameters
----------
target_name : {"llvm", "cuda", "opencl", "metal", "rocm", "stackvm", "ext_dev"}
The major target name.
options : list of str, optional
Additional arguments appended to the target.
Note
----
Do not use class constructor, you can create target using the following functions
- :any:`tvm.target.create` create target from string
- :any:`tvm.target.rasp` create raspberry pi target
- :any:`tvm.target.cuda` create CUDA target
- :any:`tvm.target.rocm` create ROCM target
"""
current = None
def __init__(self,
target_name,
options=None):
self.target_name = target_name
self.options = _merge_opts([], options)
self.device_name = ""
# Parse device option
for item in self.options:
if item.startswith("-device="):
self.device_name = item.split("=")[1]
        # Target query searches device name first
if self.device_name:
self.keys = (self.device_name,)
else:
self.keys = ()
# Target configuration handling
self.thread_warp_size = 1
if target_name in ("llvm", ):
self.keys += ("cpu",)
elif target_name in ("cuda", "nvptx"):
self.keys += ("cuda", "gpu")
self.max_num_threads = 512
self.thread_warp_size = 32
elif target_name in ("rocm", "opencl"):
# For now assume rocm schedule for opencl
self.keys += ("rocm", "gpu")
self.max_num_threads = 256
elif target_name in ("metal",):
self.keys += ("gpu",)
self.max_num_threads = 256
elif target_name in ("stackvm", "ext_dev"):
            # No specific device class for stackvm or ext_dev
pass
else:
raise ValueError("Unknown target name %s" % target_name)
def __str__(self):
return " ".join([self.target_name] + self.options)
def __repr__(self):
return self.__str__()
def __enter__(self):
self._old_target = Target.current
if self._old_target is not None and str(self) != str(self._old_target):
warnings.warn(
"Override target '%s' with new target scope '%s'" % (
self._old_target, self))
Target.current = self
return self
def __exit__(self, ptype, value, trace):
Target.current = self._old_target
def generic_func(fdefault):
"""Wrap a target generic function.
    Generic function allows registration of further functions
that can be dispatched on current target context.
If no registered dispatch is matched, the fdefault will be called.
Parameters
----------
fdefault : function
The default function.
Returns
-------
fgeneric : function
A wrapped generic function.
Example
-------
.. code-block:: python
import tvm
# wrap function as target generic
@tvm.target.generic_func
def my_func(a):
return a + 1
# register specialization of my_func under target cuda
@my_func.register("cuda")
def my_func_cuda(a):
return a + 2
# displays 3, because my_func is called
print(my_func(2))
# displays 4, because my_func_cuda is called
with tvm.target.cuda():
print(my_func(2))
"""
dispatch_dict = {}
func_name = fdefault.__name__
def register(key, func=None, override=False):
"""Register function to be the dispatch function.
Parameters
----------
key : str or list of str
The key to be registered.
func : function
The function to be registered.
override : bool
            Whether to override an existing registration.
Returns
-------
        The registered function if `func` is given, otherwise a decorator
        that registers the decorated function.
"""
def _do_reg(myf):
key_list = [key] if isinstance(key, str) else key
for k in key_list:
if k in dispatch_dict and not override:
raise ValueError(
"Key is already registered for %s" % func_name)
dispatch_dict[k] = myf
return myf
if func:
            return _do_reg(func)
return _do_reg
def dispatch_func(func, *args, **kwargs):
"""The wrapped dispath function"""
target = current_target()
if target is None:
return func(*args, **kwargs)
for k in target.keys:
if k in dispatch_dict:
return dispatch_dict[k](*args, **kwargs)
return func(*args, **kwargs)
fdecorate = decorate(fdefault, dispatch_func)
fdecorate.register = register
return fdecorate
def cuda(options=None):
"""Returns a cuda target.
Parameters
----------
options : list of str
Additional options
"""
return Target("cuda", options)
def rocm(options=None):
"""Returns a ROCM target.
Parameters
----------
options : list of str
Additional options
"""
return Target("rocm", options)
def rasp(options=None):
"""Returns a rasp target.
Parameters
----------
options : list of str
Additional options
"""
opts = ["-device=rasp",
"-mtriple=armv7l-none-linux-gnueabihf",
"-mcpu=cortex-a53",
"-mattr=+neon"]
opts = _merge_opts(opts, options)
return Target("llvm", opts)
def create(target_str):
"""Get a target given target string.
Parameters
----------
target_str : str
The target string.
Returns
-------
target : Target
The target object
Note
----
See the note on :any:`tvm.target` on target string format.
"""
if isinstance(target_str, Target):
return target_str
if not isinstance(target_str, str):
raise ValueError("target_str has to be string type")
arr = target_str.split()
# Parse device option
device_name = ""
for item in arr[1:]:
if item.startswith("-device="):
device_name = item.split("=")[1]
if device_name == "rasp":
return rasp(arr[1:])
return Target(arr[0], arr[1:])
def current_target(allow_none=True):
"""Returns the current target.
Parameters
----------
allow_none : bool
Whether allow the current target to be none
Raises
------
    RuntimeError if the current target is not set and `allow_none` is False.
"""
if Target.current:
return Target.current
if not allow_none:
raise RuntimeError(
"Requires a current target in generic function, but it is not set. "
"Please set it using `with TargetObject:`")
return Target.current
avg_line_length: 27.737179 | max_line_length: 89 | alphanum_fraction: 0.602958
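A short sketch of the API above, assuming this vendored copy of TVM is importable as tvm: building a Target from a target string and querying the current target inside a with scope.

```python
import tvm

# "-device=rasp" makes create() dispatch to rasp(), which merges the Raspberry Pi
# defaults (-mtriple/-mcpu/-mattr) into the options and returns an "llvm" Target.
target = tvm.target.create("llvm -device=rasp")
print(target.keys)  # ('rasp', 'cpu')

with tvm.target.cuda():                  # Target.__enter__ sets Target.current
    print(tvm.target.current_target())   # cuda
print(tvm.target.current_target())       # None again outside the scope
```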
hexsha: 093c7253c450d7b73552f914a009d99bfd101656 | size: 20,939 | ext: py | lang: Python
max_stars: mods/Verification.py @ drosoCode/NotSoBot (head 3c4b809fce75151cae0059ba8cfca68996147155), licenses ["MIT"], count null, events null
max_issues: mods/Verification.py @ drosoCode/NotSoBot (head 3c4b809fce75151cae0059ba8cfca68996147155), licenses ["MIT"], count null, events null
max_forks: mods/Verification.py @ drosoCode/NotSoBot (head 3c4b809fce75151cae0059ba8cfca68996147155), licenses ["MIT"], count 1, events 2020-11-05T07:34:16.000Z to 2020-11-05T07:34:16.000Z
content:
import discord
import asyncio
import random
import steam
from steam.steamid import SteamId
from steam.steamprofile import SteamProfile
from steam.steamaccountuniverse import SteamAccountUniverse
from steam.steamaccounttype import SteamAccountType
from discord.ext import commands
from utils import checks
from mods.cog import Cog
code = "```py\n{0}\n```"
class Verification(Cog):
def __init__(self, bot):
super().__init__(bot)
self.cursor = bot.mysql.cursor
self.escape = bot.escape
self.bot.loop.create_task(self.verification_task())
async def remove_verification(self, server, idk=None):
role = discord.utils.get(server.roles, name='Awaiting Approval')
if role:
try:
await self.bot.delete_role(server, role)
except:
pass
sql = 'DELETE FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
self.cursor.execute(sql)
self.cursor.commit()
sql = 'DELETE FROM `verification_queue` WHERE server={0}'
sql = sql.format(server.id)
self.cursor.execute(sql)
self.cursor.commit()
if idk is None:
try:
await self.bot.send_message(server.owner, ":warning: One of your server administrators (or you) have enabled approval/verification on user join.\n\nAdministrator permission was taken away from me making the feature unusable, I need Administrator permission to make/add a role to mute on join.\n\n`The system has been automatically disabled, re-enable anytime if you please.`")
except:
pass
@commands.group(pass_context=True, aliases=['onjoinverify', 'approval'], invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def verification(self, ctx, channel:discord.Channel=None, *, mentions:str=None):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
if channel is None:
channel = ctx.message.channel
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
if mentions is None:
sql = "INSERT INTO `verification` (`server`, `channel`) VALUES (%s, %s)"
self.cursor.execute(sql, (ctx.message.server.id, channel.id))
self.cursor.commit()
await self.bot.say(":white_check_mark: Enabled user approval/verification on join, all requests will go to {0} (`verification #<discord_channel>` to change)!".format(channel.mention))
else:
if len(ctx.message.mentions) == 0:
await self.bot.say("invalid mention")
return
sql = "INSERT INTO `verification` (`server`, `channel`, `mentions`) VALUES (%s, %s, %s)"
mention_ids = []
mention_names = []
for mention in ctx.message.mentions:
mention_ids.append(mention.id)
mention_names.append(mention.name)
self.cursor.execute(sql, (ctx.message.server.id, channel.id, ' '.join(mention_ids)))
self.cursor.commit()
await self.bot.say(":white_check_mark: Enabled user approval/verification on join, all requests will go to {0} (`verification <#discord_channel>` to change) and mention `{0}`!".format(channel.mention, ', '.join(mention_names)))
permissions = discord.Permissions()
permissions.read_messages = True
try:
await self.bot.create_role(ctx.message.server, name='Awaiting Approval', color=discord.Colour(int("FF0000", 16)), permissions=permissions)
except Exception as e:
print(e)
await self.bot.say(":warning: For some reason I couldn't create the \"Awaiting Approval\" role and users won't be muted, please create it (same name) and disable all the permissions you don't want unapproved-users to have.\nMake sure I have the administrator permission!")
elif channel is None:
sql = 'UPDATE `verification` SET channel={0} WHERE server={1}'
sql = sql.format(channel.id, ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":white_check_mark: Set approval/verification channel to {0}".format(channel.mention))
else:
await self.bot.say(':warning: You are about to disable member verification/approval on join, type `yes` to proceed.')
while True:
response = await self.bot.wait_for_message(timeout=15, author=ctx.message.author, channel=ctx.message.channel)
if response is None or response.content != 'yes':
await self.bot.say('**Aborting**')
return
else:
break
await self.remove_verification(ctx.message.server, True)
try:
role = discord.utils.get(ctx.message.server.roles, name='Awaiting Approval')
if role != None:
await self.bot.delete_role(ctx.message.server, role)
except discord.errors.Forbidden:
await self.bot.say("could not remove role, you took my perms away :(")
role2 = discord.utils.get(ctx.message.server.roles, name='Approved')
if role2 != None:
try:
await self.bot.delete_role(ctx.message.server, role2)
except:
pass
await self.bot.say(":negative_squared_cross_mark: **Disabled** user approval on join")
@verification.command(name='mention', aliases=['mentions'], pass_context=True, invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def verification_mention(self, ctx, *mentions:str):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
if len(ctx.message.mentions) == 0 and '@everyone' not in mentions and '@here' not in mentions:
await self.bot.say(':no_entry: `Invalid mention(s).`')
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned on (`verification <#discord_channel>` to do so)!!!")
return
if len(mentions) == 0:
sql = 'UPDATE `verification` SET mentions=NULL WHERE server={0}'
sql = sql.format(ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":negative_squared_cross_mark: Disabled/Removed mentions on user join for approval")
else:
mention_ids = []
mention_names = []
everyone = False
for mention in mentions:
if mention == '@everyone':
mention_ids.append('@everyone')
elif mention == '@here':
mention_ids.append('@here')
for mention in ctx.message.mentions:
mention_ids.append(mention.id)
mention_names.append(mention.name)
sql = 'SELECT mentions FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
mention_results = self.cursor.execute(sql).fetchall()
update = False
if mention_results[0]['mentions'] != None:
update = True
things = mention_results[0]['mentions'].split()
for x in things:
mention_ids.append(x)
sql = "UPDATE `verification` SET mentions={0} WHERE server={1}"
sql = sql.format(self.escape(' '.join(mention_ids)), ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
if update:
await self.bot.say(":white_check_mark: Updated mentions to include `{0}` on user join for approval".format(', '.join(mention_names)))
else:
await self.bot.say(":white_check_mark: Set `{0}` to be mentioned on user join for approval".format(', '.join(mention_names)))
@commands.group(pass_context=True, invoke_without_command=True, no_pm=True)
@checks.mod_or_perm(manage_server=True)
async def verify(self, ctx, *users:str):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
if len(users) == 0:
await self.bot.say("pls input users to verify thx")
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned **on** (`verification <#discord_channel>` to do so)!!!")
return
role = discord.utils.get(ctx.message.server.roles, name="Awaiting Approval")
count = 0
count2 = 0
discord_user = None
for user in users:
if user.isdigit():
user = int(user)
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND id={1}'
sql = sql.format(ctx.message.server.id, user)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(user))
if len(users) > 1:
continue
else:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND id={1}'
sql = sql.format(ctx.message.server.id, user)
self.cursor.execute(sql)
self.cursor.commit()
discord_user = discord.Server.get_member(ctx.message.server, user_id=str(result[count]['user']))
count += 1
else:
if len(ctx.message.mentions) == 0:
await self.bot.say("If you're not gonna use approval id, atleast mention correctly!")
return
for x in ctx.message.mentions:
if count == len(ctx.message.mentions):
break
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(ctx.message.server.id, x.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(user))
if len(users) > 1:
continue
else:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(ctx.message.server.id, x.id)
self.cursor.execute(sql)
self.cursor.commit()
discord_user = discord.Server.get_member(ctx.message.server, user_id=str(result[count2]['user']))
count2 += 1
if discord_user is None:
continue
try:
await self.bot.remove_roles(discord_user, role)
except Exception as e:
await self.bot.say(code.format(e))
await self.bot.say(":warning: {0} was removed from the queue however his role could not be removed because I do not have Administrator permissions.\nPlease remove the role manually and give me **Administrator**.".format(user))
return
role = discord.utils.get(ctx.message.server.roles, name='Approved')
if role != None:
try:
await self.bot.add_roles(discord_user, role)
except:
pass
await self.bot.say(":white_check_mark: Removed `{0}` from queue!".format(user))
queue_removed_msg = 'You have been approved/verified for `{0}` and can now message!'.format(ctx.message.server.name)
await self.bot.send_message(discord_user, queue_removed_msg)
@verify.command(name='list', pass_context=True, invoke_without_command=True, no_pm=True)
async def verify_list(self, ctx):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned on (`verification <#discord_channel>` to do so)!!!")
return
sql = 'SELECT * FROM `verification_queue` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: `There are no users in the verification/approval queue`")
return
users = []
for s in result:
user = discord.Server.get_member(ctx.message.server, user_id=str(s['user']))
if user is None:
continue
users.append('{0}#{1} ({2})'.format(user.name, user.discriminator, str(s['id'])))
await self.bot.say("**{0} Users in Queue**\n`{1}`".format(len(users), ', '.join(users)))
# steam_regex = r"^(http|https|)(\:\/\/|)steamcommunity\.com\/id\/(.*)$"
@verify.command(name='check', pass_context=True, aliases=['steam', 'link'])
async def verify_check(self, ctx, stem:str):
try:
if ctx.message.channel.is_private is False:
await self.bot.say(':no_entry: `Private Message only.`')
return
sql = 'SELECT * FROM `verification_queue` WHERE user={0}'
sql = sql.format(ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(':no_entry: You are not in the verification queue for any server.')
return
server_id = result[0]['server']
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server_id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: Server you are in queue for disabled verification.")
return
sql = 'SELECT * FROM `verification_steam` WHERE server={0} AND user={1}'
sql = sql.format(server_id, ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) != 0:
await self.bot.say(":no_entry: You've already verified your steam account!")
return
sql = 'SELECT id,server FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server_id, ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(ctx.message.author))
return
verification_id = str(result[0]['id'])
steamId = None
steamProfile = None
if steamId is None:
steamId = SteamId.fromSteamId("{0}".format(stem))
if steamId is None:
steamId = SteamId.fromSteamId3(stem)
if steamId is None:
steamId = SteamId.fromSteamId64(stem)
if steamId is None:
steamId = SteamId.fromProfileUrl(stem)
if steamId is None:
steamProfile = SteamProfile.fromCustomProfileUrl(stem)
if steamProfile is None:
await self.bot.say("`:no_entry: `Bad Steam ID/64/URL`")
return
steamId = steamProfile.steamId
else:
steamProfile = SteamProfile.fromSteamId(steamId)
if verification_id in steamProfile.displayName:
sql = 'INSERT INTO `verification_steam` (`user`, `server`, `steam`, `id`) VALUES (%s, %s, %s, %s)'
self.cursor.execute(sql, (ctx.message.author.id, server_id, steamId.profileUrl, verification_id))
self.cursor.commit()
await self.bot.say(':white_check_mark: `{0}` steam profile submitted and passed steam name check, awaiting moderator approval.'.format(ctx.message.author))
else:
await self.bot.say(':warning: **{0}** is not in the steam accounts name.'.format(verification_id))
except Exception as e:
await self.bot.say(code.format(e))
async def verification_task(self):
if self.bot.shard_id != 0:
return
while True:
sql = 'SELECT * FROM `verification_steam`'
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await asyncio.sleep(60)
continue
for s in result:
server = self.bot.manager.get_server(str(s['server']))
if server:
user = server.get_member(str(s['user']))
if user is None:
continue
sql = 'SELECT channel FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
channel = server.get_channel(str(self.cursor.execute(sql).fetchall()[0]['channel']))
msg = '**Steam Account Check**\n`{0} (Verification ID: {1})` has submitted their steam profile and passed the name check.\n`Steam Profile:` {2}'.format(user, s['id'], s['steam'])
await self.bot.send_message(channel, msg)
sql = 'DELETE FROM `verification_steam` WHERE server={0} AND user={1}'
sql = sql.format(server.id, user.id)
self.cursor.execute(sql)
self.cursor.commit()
await asyncio.sleep(60)
async def on_member_join(self, member):
try:
if member.bot:
return
server = member.server
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
return
channel = server.get_channel(str(result[0]['channel']))
if channel is None:
raise discord.errors.NotFound
perms = server.me.permissions_in(channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.remove_verification(server)
return
sql = "INSERT INTO `verification_queue` (`user`, `server`, `id`) VALUES (%s, %s, %s)"
rand = random.randint(0, 99999)
self.cursor.execute(sql, (member.id, server.id, rand))
self.cursor.commit()
role = discord.utils.get(server.roles, name='Awaiting Approval')
await self.bot.add_roles(member, role)
for s in server.channels:
perms = member.permissions_in(s)
if perms.read_messages is False:
continue
overwrite = discord.PermissionOverwrite()
overwrite.send_messages = False
overwrite.read_messages = False
await self.bot.edit_channel_permissions(s, role, overwrite)
msg = ''
if result[0]['mentions']:
for x in result[0]['mentions'].split(' '):
if 'everyone' in x or 'here' in x:
msg += '{0} '.format(x)
else:
msg += '<@{0}> '.format(x)
msg += '\n'
msg += ':warning: `{0}` has joined the server and is awaiting approval\n\nRun `verify {1} or mention` to approve, kick user to remove from the queue.'.format(member, rand)
await self.bot.send_message(channel, msg, replace_everyone=False, replace_mentions=False)
join_msg = "You've been placed in the approval queue for `{0}`, please be patient and wait until a staff member approves your join!\n\nIf you'd like to expedite approval (and have a steam account), place **{1}** in your steam name and then run `.verify check <stean_url/id/vanity>`.".format(server.name, rand)
await self.bot.send_message(member, join_msg)
except (discord.errors.Forbidden, discord.errors.InvalidArgument, discord.errors.NotFound):
await self.remove_verification(server)
async def on_member_remove(self, member):
try:
if member.bot:
return
server = member.server
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
return
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
result2 = self.cursor.execute(sql).fetchall()
if len(result2) == 0:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
self.cursor.execute(sql)
self.cursor.commit()
channel = self.bot.get_channel(id=str(result[0]['channel']))
await self.bot.send_message(channel, ':exclamation: `{0}` has been removed from the approval/verification queue for leaving the server or being kicked.'.format(member))
except (discord.errors.Forbidden, discord.errors.InvalidArgument, discord.errors.NotFound):
await self.remove_verification(server)
async def on_member_ban(self, member):
try:
if member.bot:
return
server = member.server
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
return
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
result2 = self.cursor.execute(sql).fetchall()
if len(result2) == 0:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
self.cursor.execute(sql)
self.cursor.commit()
channel = self.bot.get_channel(id=str(result[0]['channel']))
await self.bot.send_message(channel, ':exclamation: `{0}` has been removed from the approval/verification queue for being banned from the server.'.format(member))
except (discord.errors.Forbidden, discord.errors.InvalidArgument, discord.errors.NotFound):
await self.remove_verification(server)
def setup(bot):
bot.add_cog(Verification(bot))
| 46.121145
| 381
| 0.68542
|
d99957d23d47de266bc21f668e48f0721e7b040d
| 607
|
py
|
Python
|
rssant/wsgi.py
|
zuzhi/rssant
|
06d985845f6af3be7097e6d718afba7eeb195ec8
|
[
"BSD-3-Clause"
] | 2
|
2021-02-06T15:07:48.000Z
|
2021-05-18T01:30:08.000Z
|
rssant/wsgi.py
|
zuzhi/rssant
|
06d985845f6af3be7097e6d718afba7eeb195ec8
|
[
"BSD-3-Clause"
] | 5
|
2021-03-19T11:23:24.000Z
|
2022-02-10T11:36:33.000Z
|
rssant/wsgi.py
|
zuzhi/rssant
|
06d985845f6af3be7097e6d718afba7eeb195ec8
|
[
"BSD-3-Clause"
] | null | null | null |
"""
WSGI config for rssant project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import backdoor
from django.core.wsgi import get_wsgi_application
import rssant_common.django_setup # noqa:F401
from rssant_config import CONFIG
from rssant_common.logger import configure_logging
from rssant_common.helper import is_main_or_wsgi
if is_main_or_wsgi(__name__):
configure_logging(level=CONFIG.log_level)
backdoor.setup()
application = get_wsgi_application()
| 28.904762
| 78
| 0.805601
|
1b186381f52e27aaa3adae826a96c42621a9fd15
| 1,000
|
py
|
Python
|
pyrlang/dist_proto/__init__.py
|
wayfair-contribs/Pyrlang
|
7599a9906840d6e8442b3382f7d3cdcb2208cd12
|
[
"Apache-2.0"
] | 312
|
2018-09-25T08:14:04.000Z
|
2022-03-30T09:01:52.000Z
|
pyrlang/dist_proto/__init__.py
|
wayfair-contribs/Pyrlang
|
7599a9906840d6e8442b3382f7d3cdcb2208cd12
|
[
"Apache-2.0"
] | 36
|
2018-09-24T11:04:33.000Z
|
2021-09-20T14:37:12.000Z
|
pyrlang/dist_proto/__init__.py
|
wayfair-contribs/Pyrlang
|
7599a9906840d6e8442b3382f7d3cdcb2208cd12
|
[
"Apache-2.0"
] | 41
|
2018-11-06T20:29:32.000Z
|
2021-11-29T15:09:53.000Z
|
# Copyright 2018, Erlang Solutions Ltd, and S2HC Sweden AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyrlang.dist_proto.client import DistClientProtocol
from pyrlang.dist_proto.distribution import ErlangDistribution
# from pyrlang.dist_proto.epmd_client import EPMDClient
from pyrlang.dist_proto.flags import DistributionFlags
from pyrlang.dist_proto.server import DistServerProtocol
__all__ = ['ErlangDistribution', 'DistServerProtocol',
'DistClientProtocol', 'DistributionFlags']
| 43.478261
| 74
| 0.796
|
310d5833cbc15f04ed5a82d1e5c7d38a027d3900
| 321
|
py
|
Python
|
setup.py
|
akloster/table-cleaner
|
1d84e9828b24b9af165b6a597fd5cdf8a9da1a3c
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
akloster/table-cleaner
|
1d84e9828b24b9af165b6a597fd5cdf8a9da1a3c
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
akloster/table-cleaner
|
1d84e9828b24b9af165b6a597fd5cdf8a9da1a3c
|
[
"BSD-2-Clause"
] | 1
|
2022-01-28T19:35:01.000Z
|
2022-01-28T19:35:01.000Z
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='table-cleaner',
version='0.1',
description='Validation framework for tabular data using Pandas Dataframes.',
author='Andreas Klostermann',
author_email='andreas.klostermann@gmail.com',
packages=['table_cleaner'],
)
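# Assumed local usage (not part of the original file): install with
# `python setup.py install` or build a source distribution with `python setup.py sdist`.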
| 22.928571
| 83
| 0.688474
|
402e1f8b1530fcfff9cb9a82199cb93ad4150dd1
| 105
|
py
|
Python
|
snake/utils.py
|
coopersamuel/snake
|
4df316e3cf66b1536e44a4a2ab7ed87ae294011e
|
[
"MIT"
] | null | null | null |
snake/utils.py
|
coopersamuel/snake
|
4df316e3cf66b1536e44a4a2ab7ed87ae294011e
|
[
"MIT"
] | 1
|
2018-07-25T18:48:24.000Z
|
2018-07-25T18:48:24.000Z
|
snake/utils.py
|
coopersamuel/snake
|
4df316e3cf66b1536e44a4a2ab7ed87ae294011e
|
[
"MIT"
] | null | null | null |
import curses
def draw_tile(screen, x, y, tile='', color=None):
screen.addstr(y, x, tile, color)
| 26.25
| 49
| 0.647619
|
f6bc42bc7292d467124febc483211466cf5fa560
| 41,422
|
py
|
Python
|
bigtable/tests/unit/test_instance.py
|
udengcnf/gcloud
|
dd1714bd754e18739339e611c42a391ced27c614
|
[
"Apache-2.0"
] | 1
|
2021-06-30T11:43:47.000Z
|
2021-06-30T11:43:47.000Z
|
bigtable/tests/unit/test_instance.py
|
udengcnf/gcloud
|
dd1714bd754e18739339e611c42a391ced27c614
|
[
"Apache-2.0"
] | null | null | null |
bigtable/tests/unit/test_instance.py
|
udengcnf/gcloud
|
dd1714bd754e18739339e611c42a391ced27c614
|
[
"Apache-2.0"
] | 1
|
2021-06-30T11:44:03.000Z
|
2021-06-30T11:44:03.000Z
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from ._testing import _make_credentials
from google.cloud.bigtable.cluster import Cluster
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
return self.channel_stub.responses.pop()
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return MultiCallableStub(method, self)
class TestInstance(unittest.TestCase):
PROJECT = 'project'
INSTANCE_ID = 'instance-id'
INSTANCE_NAME = 'projects/' + PROJECT + '/instances/' + INSTANCE_ID
LOCATION_ID = 'locid'
LOCATION = 'projects/' + PROJECT + '/locations/' + LOCATION_ID
APP_PROFILE_PATH = (
'projects/' + PROJECT + '/instances/' + INSTANCE_ID
+ '/appProfiles/')
DISPLAY_NAME = 'display_name'
LABELS = {'foo': 'bar'}
OP_ID = 8915
OP_NAME = ('operations/projects/{}/instances/{}operations/{}'
.format(PROJECT, INSTANCE_ID, OP_ID))
TABLE_ID = 'table_id'
TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID
@staticmethod
def _get_target_class():
from google.cloud.bigtable.instance import Instance
return Instance
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
@staticmethod
def _get_target_client_class():
from google.cloud.bigtable.client import Client
return Client
def _make_client(self, *args, **kwargs):
return self._get_target_client_class()(*args, **kwargs)
def test_constructor_defaults(self):
client = object()
instance = self._make_one(self.INSTANCE_ID, client)
self.assertEqual(instance.instance_id, self.INSTANCE_ID)
self.assertEqual(instance.display_name, self.INSTANCE_ID)
self.assertIsNone(instance.type_)
self.assertIsNone(instance.labels)
self.assertIs(instance._client, client)
self.assertIsNone(instance.state)
def test_constructor_non_default(self):
from google.cloud.bigtable import enums
instance_type = enums.Instance.Type.DEVELOPMENT
state = enums.Instance.State.READY
labels = {'test': 'test'}
client = object()
instance = self._make_one(self.INSTANCE_ID, client,
display_name=self.DISPLAY_NAME,
instance_type=instance_type,
labels=labels, _state=state)
self.assertEqual(instance.instance_id, self.INSTANCE_ID)
self.assertEqual(instance.display_name, self.DISPLAY_NAME)
self.assertEqual(instance.type_, instance_type)
self.assertEqual(instance.labels, labels)
self.assertIs(instance._client, client)
self.assertEqual(instance.state, state)
def test_table_factory(self):
from google.cloud.bigtable.table import Table
app_profile_id = 'appProfileId1262094415'
instance = self._make_one(self.INSTANCE_ID, None)
table = instance.table(self.TABLE_ID, app_profile_id=app_profile_id)
self.assertIsInstance(table, Table)
self.assertEqual(table.table_id, self.TABLE_ID)
self.assertEqual(table._instance, instance)
self.assertEqual(table._app_profile_id, app_profile_id)
def test_cluster_factory(self):
from google.cloud.bigtable import enums
CLUSTER_ID = '{}-cluster'.format(self.INSTANCE_ID)
LOCATION_ID = 'us-central1-c'
SERVE_NODES = 3
STORAGE_TYPE = enums.StorageType.HDD
instance = self._make_one(self.INSTANCE_ID, None)
cluster = instance.cluster(CLUSTER_ID, location_id=LOCATION_ID,
serve_nodes=SERVE_NODES,
default_storage_type=STORAGE_TYPE)
self.assertIsInstance(cluster, Cluster)
self.assertEqual(cluster.cluster_id, CLUSTER_ID)
self.assertEqual(cluster.location_id, LOCATION_ID)
self.assertIsNone(cluster._state)
self.assertEqual(cluster.serve_nodes, SERVE_NODES)
self.assertEqual(cluster.default_storage_type, STORAGE_TYPE)
def test_list_clusters(self):
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client)
from google.cloud.bigtable_admin_v2.proto import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
from google.cloud.bigtable_admin_v2.proto import (
instance_pb2 as data_v2_pb2)
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable.instance import Cluster
instance_api = (
bigtable_instance_admin_client.BigtableInstanceAdminClient(
mock.Mock()))
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
instance = Instance(self.INSTANCE_ID, client)
failed_location = 'FAILED'
cluster_id1 = 'cluster-id1'
cluster_id2 = 'cluster-id2'
cluster_name1 = (client.instance_admin_client.cluster_path(
self.PROJECT, self.INSTANCE_ID, cluster_id1))
cluster_name2 = (client.instance_admin_client.cluster_path(
self.PROJECT, self.INSTANCE_ID, cluster_id2))
# Create response_pb
response_pb = messages_v2_pb2.ListClustersResponse(
failed_locations=[
failed_location
],
clusters=[
data_v2_pb2.Cluster(
name=cluster_name1,
),
data_v2_pb2.Cluster(
name=cluster_name2,
),
],
)
# Patch the stub used by the API method.
client._instance_admin_client = instance_api
instance_admin_client = client._instance_admin_client
instance_stub = instance_admin_client.transport
instance_stub.list_clusters.side_effect = [response_pb]
# Perform the method and check the result.
clusters, failed_locations = instance.list_clusters()
cluster_1, cluster_2 = clusters
self.assertIsInstance(cluster_1, Cluster)
self.assertEqual(cluster_1.name, cluster_name1)
self.assertIsInstance(cluster_2, Cluster)
self.assertEqual(cluster_2.name, cluster_name2)
self.assertEqual(failed_locations, [failed_location])
def test__update_from_pb_success(self):
from google.cloud.bigtable_admin_v2.proto import (
instance_pb2 as data_v2_pb2)
from google.cloud.bigtable import enums
instance_type = enums.Instance.Type.PRODUCTION
state = enums.Instance.State.READY
instance_pb = data_v2_pb2.Instance(
display_name=self.DISPLAY_NAME,
type=instance_type,
labels=self.LABELS,
state=state
)
instance = self._make_one(None, None)
self.assertIsNone(instance.display_name)
self.assertIsNone(instance.type_)
self.assertIsNone(instance.labels)
instance._update_from_pb(instance_pb)
self.assertEqual(instance.display_name, self.DISPLAY_NAME)
self.assertEqual(instance.type_, instance_type)
self.assertEqual(instance.labels, self.LABELS)
self.assertEqual(instance._state, state)
def test__update_from_pb_success_defaults(self):
from google.cloud.bigtable_admin_v2.proto import (
instance_pb2 as data_v2_pb2)
from google.cloud.bigtable import enums
instance_pb = data_v2_pb2.Instance(
display_name=self.DISPLAY_NAME,
)
instance = self._make_one(None, None)
self.assertIsNone(instance.display_name)
self.assertIsNone(instance.type_)
self.assertIsNone(instance.labels)
instance._update_from_pb(instance_pb)
self.assertEqual(instance.display_name, self.DISPLAY_NAME)
self.assertEqual(instance.type_,
enums.Instance.Type.UNSPECIFIED)
self.assertFalse(instance.labels)
def test__update_from_pb_no_display_name(self):
from google.cloud.bigtable_admin_v2.proto import (
instance_pb2 as data_v2_pb2)
instance_pb = data_v2_pb2.Instance()
instance = self._make_one(None, None)
self.assertIsNone(instance.display_name)
with self.assertRaises(ValueError):
instance._update_from_pb(instance_pb)
def test_from_pb_success(self):
from google.cloud.bigtable_admin_v2.proto import (
instance_pb2 as data_v2_pb2)
from google.cloud.bigtable import enums
client = _Client(project=self.PROJECT)
instance_type = enums.Instance.Type.PRODUCTION
state = enums.Instance.State.READY
instance_pb = data_v2_pb2.Instance(
name=self.INSTANCE_NAME,
display_name=self.INSTANCE_ID,
type=instance_type,
labels=self.LABELS,
state=state
)
klass = self._get_target_class()
instance = klass.from_pb(instance_pb, client)
self.assertIsInstance(instance, klass)
self.assertEqual(instance._client, client)
self.assertEqual(instance.instance_id, self.INSTANCE_ID)
self.assertEqual(instance.display_name, self.INSTANCE_ID)
self.assertEqual(instance.type_, instance_type)
self.assertEqual(instance.labels, self.LABELS)
self.assertEqual(instance._state, state)
def test_from_pb_bad_instance_name(self):
from google.cloud.bigtable_admin_v2.proto import (
instance_pb2 as data_v2_pb2)
instance_name = 'INCORRECT_FORMAT'
instance_pb = data_v2_pb2.Instance(name=instance_name)
klass = self._get_target_class()
with self.assertRaises(ValueError):
klass.from_pb(instance_pb, None)
def test_from_pb_project_mistmatch(self):
from google.cloud.bigtable_admin_v2.proto import (
instance_pb2 as data_v2_pb2)
ALT_PROJECT = 'ALT_PROJECT'
client = _Client(project=ALT_PROJECT)
self.assertNotEqual(self.PROJECT, ALT_PROJECT)
instance_pb = data_v2_pb2.Instance(name=self.INSTANCE_NAME)
klass = self._get_target_class()
with self.assertRaises(ValueError):
klass.from_pb(instance_pb, client)
def test_name_property(self):
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client)
api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
mock.Mock())
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
# Patch the API method.
client._instance_admin_client = api
instance = self._make_one(self.INSTANCE_ID, client)
self.assertEqual(instance.name, self.INSTANCE_NAME)
def test___eq__(self):
client = object()
instance1 = self._make_one(self.INSTANCE_ID, client)
instance2 = self._make_one(self.INSTANCE_ID, client)
self.assertEqual(instance1, instance2)
def test___eq__type_differ(self):
client = object()
instance1 = self._make_one(self.INSTANCE_ID, client)
instance2 = object()
self.assertNotEqual(instance1, instance2)
def test___ne__same_value(self):
client = object()
instance1 = self._make_one(self.INSTANCE_ID, client)
instance2 = self._make_one(self.INSTANCE_ID, client)
comparison_val = (instance1 != instance2)
self.assertFalse(comparison_val)
def test___ne__(self):
instance1 = self._make_one('instance_id1', 'client1')
instance2 = self._make_one('instance_id2', 'client2')
self.assertNotEqual(instance1, instance2)
def test_reload(self):
from google.cloud.bigtable_admin_v2.proto import (
instance_pb2 as data_v2_pb2)
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client)
from google.cloud.bigtable import enums
api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
mock.Mock())
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
instance = self._make_one(self.INSTANCE_ID, client)
# Create response_pb
DISPLAY_NAME = u'hey-hi-hello'
instance_type = enums.Instance.Type.PRODUCTION
response_pb = data_v2_pb2.Instance(
display_name=DISPLAY_NAME,
type=instance_type,
labels=self.LABELS
)
# Patch the stub used by the API method.
client._instance_admin_client = api
bigtable_instance_stub = (
client._instance_admin_client.transport)
bigtable_instance_stub.get_instance.side_effect = [response_pb]
# Create expected_result.
expected_result = None # reload() has no return value.
# Check Instance optional config values before.
self.assertEqual(instance.display_name, self.INSTANCE_ID)
# Perform the method and check the result.
result = instance.reload()
self.assertEqual(result, expected_result)
# Check Instance optional config values after.
self.assertEqual(instance.display_name, DISPLAY_NAME)
def test_exists(self):
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client)
from google.cloud.bigtable_admin_v2.proto import (
instance_pb2 as data_v2_pb2)
from google.api_core import exceptions
api = (
bigtable_instance_admin_client.BigtableInstanceAdminClient(
mock.Mock()))
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
# Create response_pb
instance_name = client.instance_admin_client.instance_path(
self.PROJECT, self.INSTANCE_ID)
response_pb = data_v2_pb2.Instance(name=instance_name)
# Patch the stub used by the API method.
client._instance_admin_client = api
instance_admin_client = client._instance_admin_client
instance_stub = instance_admin_client.transport
instance_stub.get_instance.side_effect = [
response_pb,
exceptions.NotFound('testing'),
exceptions.BadRequest('testing')
]
# Perform the method and check the result.
non_existing_instance_id = 'instance-id-2'
alt_instance_1 = self._make_one(self.INSTANCE_ID, client)
alt_instance_2 = self._make_one(non_existing_instance_id, client)
self.assertTrue(alt_instance_1.exists())
self.assertFalse(alt_instance_2.exists())
with self.assertRaises(exceptions.BadRequest):
alt_instance_2.exists()
def test_create_check_conflicts(self):
instance = self._make_one(self.INSTANCE_ID, None)
with self.assertRaises(ValueError):
instance.create(location_id=self.LOCATION_ID,
clusters=[object(), object()])
with self.assertRaises(ValueError):
instance.create(serve_nodes=3,
clusters=[object(), object()])
with self.assertRaises(ValueError):
instance.create(default_storage_type=1,
clusters=[object(), object()])
def test_create(self):
import datetime
from google.api_core import operation
from google.longrunning import operations_pb2
from google.protobuf.any_pb2 import Any
from google.cloud.bigtable_admin_v2.proto import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud.bigtable import enums
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client)
NOW = datetime.datetime.utcnow()
NOW_PB = _datetime_to_pb_timestamp(NOW)
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
instance = self._make_one(self.INSTANCE_ID, client,
self.DISPLAY_NAME,
enums.Instance.Type.PRODUCTION,
self.LABELS)
# Create response_pb
metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB)
type_url = 'type.googleapis.com/{}'.format(
messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name)
response_pb = operations_pb2.Operation(
name=self.OP_NAME,
metadata=Any(
type_url=type_url,
value=metadata.SerializeToString(),
)
)
# Patch the stub used by the API method.
channel = ChannelStub(responses=[response_pb])
instance_api = (
bigtable_instance_admin_client.BigtableInstanceAdminClient(
channel=channel))
client._instance_admin_client = instance_api
# Perform the method and check the result.
serve_nodes = 3
cluster_id = '{}-cluster'.format(self.INSTANCE_ID)
# cluster = instance.cluster(cluster_id, location_id=self.LOCATION_ID,
# serve_nodes=serve_nodes)
# result = instance.create(clusters=[cluster])
# TODO: replace this example with the one above once the option is removed
# from the instance.create() method
result = instance.create(location_id=self.LOCATION_ID,
serve_nodes=serve_nodes)
actual_request = channel.requests[0][1]
cluster = self._create_cluster_pb(
instance_api, cluster_id, self.LOCATION_ID, serve_nodes,
enums.StorageType.UNSPECIFIED)
expected_request = self._create_instance_request({cluster_id: cluster})
self.assertEqual(expected_request, actual_request)
self.assertIsInstance(result, operation.Operation)
self.assertEqual(result.operation.name, self.OP_NAME)
self.assertIsInstance(result.metadata,
messages_v2_pb2.CreateInstanceMetadata)
def test_create_w_clusters(self):
import datetime
from google.api_core import operation
from google.longrunning import operations_pb2
from google.protobuf.any_pb2 import Any
from google.cloud.bigtable_admin_v2.proto import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud.bigtable import enums
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client)
NOW = datetime.datetime.utcnow()
NOW_PB = _datetime_to_pb_timestamp(NOW)
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
instance = self._make_one(self.INSTANCE_ID, client,
self.DISPLAY_NAME,
enums.Instance.Type.PRODUCTION,
self.LABELS)
# Create response_pb
metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB)
type_url = 'type.googleapis.com/{}'.format(
messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name)
response_pb = operations_pb2.Operation(
name=self.OP_NAME,
metadata=Any(
type_url=type_url,
value=metadata.SerializeToString(),
)
)
# Patch the stub used by the API method.
channel = ChannelStub(responses=[response_pb])
instance_api = (
bigtable_instance_admin_client.BigtableInstanceAdminClient(
channel=channel))
client._instance_admin_client = instance_api
# Perform the method and check the result.
cluster_id_1 = 'cluster-1'
cluster_id_2 = 'cluster-2'
location_id_1 = 'location-id-1'
location_id_2 = 'location-id-2'
serve_nodes_1 = 3
serve_nodes_2 = 5
clusters = [
Cluster(cluster_id_1, instance,
location_id=location_id_1,
serve_nodes=serve_nodes_1),
Cluster(cluster_id_2, instance,
location_id=location_id_2,
serve_nodes=serve_nodes_2)]
result = instance.create(clusters=clusters)
actual_request = channel.requests[0][1]
cluster_1_pb = self._create_cluster_pb(
instance_api, cluster_id_1, location_id_1, serve_nodes_1,
enums.StorageType.UNSPECIFIED)
cluster_2_pb = self._create_cluster_pb(
instance_api, cluster_id_2, location_id_2, serve_nodes_2,
enums.StorageType.UNSPECIFIED)
expected_request = self._create_instance_request(
{cluster_id_1: cluster_1_pb,
cluster_id_2: cluster_2_pb}
)
self.assertEqual(expected_request, actual_request)
self.assertIsInstance(result, operation.Operation)
self.assertEqual(result.operation.name, self.OP_NAME)
self.assertIsInstance(result.metadata,
messages_v2_pb2.CreateInstanceMetadata)
def _create_cluster_pb(self, instance_api, cluster_id, location_id,
serve_nodes, storage_type):
from google.cloud.bigtable_admin_v2.types import instance_pb2
location = instance_api.location_path(
self.PROJECT, location_id)
return instance_pb2.Cluster(
location=location,
serve_nodes=serve_nodes,
default_storage_type=storage_type)
def _create_instance_request(self, clusters):
from google.cloud.bigtable_admin_v2.proto import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
from google.cloud.bigtable_admin_v2.types import instance_pb2
from google.cloud.bigtable import enums
instance = instance_pb2.Instance(display_name=self.DISPLAY_NAME,
type=enums.Instance.Type.PRODUCTION,
labels=self.LABELS)
return messages_v2_pb2.CreateInstanceRequest(
parent='projects/{}'.format(self.PROJECT),
instance_id=self.INSTANCE_ID,
instance=instance,
clusters=clusters
)
def test_update(self):
import datetime
from google.api_core import operation
from google.longrunning import operations_pb2
from google.protobuf.any_pb2 import Any
from google.cloud.bigtable_admin_v2.proto import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud.bigtable import enums
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client)
from google.protobuf import field_mask_pb2
from google.cloud.bigtable_admin_v2.types import instance_pb2
from google.cloud.bigtable_admin_v2.proto import (
bigtable_instance_admin_pb2 as instance_v2_pb2)
NOW = datetime.datetime.utcnow()
NOW_PB = _datetime_to_pb_timestamp(NOW)
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
instance = self._make_one(
self.INSTANCE_ID, client, display_name=self.DISPLAY_NAME,
instance_type=enums.Instance.Type.DEVELOPMENT, labels=self.LABELS)
expected_request_instance = instance_pb2.Instance(
name=instance.name, display_name=instance.display_name,
type=instance.type_, labels=instance.labels)
expected_request_update_mask = field_mask_pb2.FieldMask(
paths=['display_name', 'type', 'labels'])
expected_request = instance_v2_pb2.PartialUpdateInstanceRequest(
instance=expected_request_instance,
update_mask=expected_request_update_mask)
metadata = messages_v2_pb2.UpdateInstanceMetadata(
request_time=NOW_PB)
type_url = 'type.googleapis.com/{}'.format(
messages_v2_pb2.UpdateInstanceMetadata.DESCRIPTOR.full_name)
response_pb = operations_pb2.Operation(
name=self.OP_NAME,
metadata=Any(
type_url=type_url,
value=metadata.SerializeToString(),
)
)
channel = ChannelStub(responses=[response_pb])
instance_api = (
bigtable_instance_admin_client.BigtableInstanceAdminClient(
channel=channel))
# Mock api calls
client._instance_admin_client = instance_api
# Perform the method and check the result.
result = instance.update()
actual_request = channel.requests[0][1]
self.assertEqual(actual_request, expected_request)
self.assertIsInstance(result, operation.Operation)
self.assertEqual(result.operation.name, self.OP_NAME)
self.assertIsInstance(result.metadata,
messages_v2_pb2.UpdateInstanceMetadata)
def test_update_empty(self):
from google.api_core import operation
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client)
from google.longrunning import operations_pb2
from google.protobuf import field_mask_pb2
from google.cloud.bigtable_admin_v2.types import instance_pb2
from google.cloud.bigtable_admin_v2.proto import (
bigtable_instance_admin_pb2 as instance_v2_pb2)
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
instance = self._make_one(None, client)
expected_request_instance = instance_pb2.Instance(
name=instance.name, display_name=instance.display_name,
type=instance.type_, labels=instance.labels)
expected_request_update_mask = field_mask_pb2.FieldMask()
expected_request = instance_v2_pb2.PartialUpdateInstanceRequest(
instance=expected_request_instance,
update_mask=expected_request_update_mask)
response_pb = operations_pb2.Operation(name=self.OP_NAME)
channel = ChannelStub(responses=[response_pb])
instance_api = (
bigtable_instance_admin_client.BigtableInstanceAdminClient(
channel=channel))
# Mock api calls
client._instance_admin_client = instance_api
# Perform the method and check the result.
result = instance.update()
actual_request = channel.requests[0][1]
self.assertIsInstance(result, operation.Operation)
self.assertEqual(actual_request, expected_request)
def test_delete(self):
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client)
api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
mock.Mock())
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
instance = self._make_one(self.INSTANCE_ID, client)
# Mock api calls
client._instance_admin_client = api
# Create expected_result.
expected_result = None # delete() has no return value.
# Perform the method and check the result.
result = instance.delete()
self.assertEqual(result, expected_result)
def _list_tables_helper(self, table_name=None):
from google.cloud.bigtable_admin_v2.proto import (
table_pb2 as table_data_v2_pb2)
from google.cloud.bigtable_admin_v2.proto import (
bigtable_table_admin_pb2 as table_messages_v1_pb2)
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_table_admin_client, bigtable_instance_admin_client)
table_api = bigtable_table_admin_client.BigtableTableAdminClient(
mock.Mock())
instance_api = (
bigtable_instance_admin_client.BigtableInstanceAdminClient(
mock.Mock()))
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
instance = self._make_one(self.INSTANCE_ID, client)
# Create response_pb
if table_name is None:
table_name = self.TABLE_NAME
response_pb = table_messages_v1_pb2.ListTablesResponse(
tables=[
table_data_v2_pb2.Table(name=table_name),
],
)
# Patch the stub used by the API method.
client._table_admin_client = table_api
client._instance_admin_client = instance_api
bigtable_table_stub = (
client._table_admin_client.transport)
bigtable_table_stub.list_tables.side_effect = [response_pb]
# Create expected_result.
expected_table = instance.table(self.TABLE_ID)
expected_result = [expected_table]
# Perform the method and check the result.
result = instance.list_tables()
self.assertEqual(result, expected_result)
def test_list_tables(self):
self._list_tables_helper()
def test_list_tables_failure_bad_split(self):
with self.assertRaises(ValueError):
self._list_tables_helper(table_name='wrong-format')
def test_list_tables_failure_name_bad_before(self):
BAD_TABLE_NAME = ('nonempty-section-before' +
'projects/' + self.PROJECT +
'/instances/' + self.INSTANCE_ID +
'/tables/' + self.TABLE_ID)
with self.assertRaises(ValueError):
self._list_tables_helper(table_name=BAD_TABLE_NAME)
def test_app_profile_factory(self):
from google.cloud.bigtable.enums import RoutingPolicyType
APP_PROFILE_ID_1 = 'app-profile-id-1'
ANY = RoutingPolicyType.ANY
DESCRIPTION_1 = 'routing policy any'
APP_PROFILE_ID_2 = 'app-profile-id-2'
SINGLE = RoutingPolicyType.SINGLE
DESCRIPTION_2 = 'routing policy single'
ALLOW_WRITES = True
CLUSTER_ID = 'cluster-id'
instance = self._make_one(self.INSTANCE_ID, None)
app_profile1 = instance.app_profile(
APP_PROFILE_ID_1,
routing_policy_type=ANY,
description=DESCRIPTION_1,
)
app_profile2 = instance.app_profile(
APP_PROFILE_ID_2,
routing_policy_type=SINGLE,
description=DESCRIPTION_2,
cluster_id=CLUSTER_ID,
allow_transactional_writes=ALLOW_WRITES,
)
self.assertEqual(app_profile1.app_profile_id, APP_PROFILE_ID_1)
self.assertIs(app_profile1._instance, instance)
self.assertEqual(app_profile1.routing_policy_type, ANY)
self.assertEqual(app_profile1.description, DESCRIPTION_1)
self.assertEqual(app_profile2.app_profile_id, APP_PROFILE_ID_2)
self.assertIs(app_profile2._instance, instance)
self.assertEqual(app_profile2.routing_policy_type, SINGLE)
self.assertEqual(app_profile2.description, DESCRIPTION_2)
self.assertEqual(app_profile2.cluster_id, CLUSTER_ID)
self.assertEqual(app_profile2.allow_transactional_writes, ALLOW_WRITES)
def test_list_app_profiles(self):
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client)
from google.cloud.bigtable_admin_v2.proto import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
from google.cloud.bigtable_admin_v2.proto import (
instance_pb2 as data_v2_pb2)
from google.cloud.bigtable.app_profile import AppProfile
instance_api = (
bigtable_instance_admin_client.BigtableInstanceAdminClient(
mock.Mock()))
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
instance = self._make_one(self.INSTANCE_ID, client)
# Setup Expected Response
next_page_token = ''
app_profile_id1 = 'app-profile-id1'
app_profile_id2 = 'app-profile-id2'
app_profile_name1 = (client.instance_admin_client.app_profile_path(
self.PROJECT, self.INSTANCE_ID, app_profile_id1))
app_profile_name2 = (client.instance_admin_client.app_profile_path(
self.PROJECT, self.INSTANCE_ID, app_profile_id2))
routing_policy = data_v2_pb2.AppProfile.MultiClusterRoutingUseAny()
expected_response = messages_v2_pb2.ListAppProfilesResponse(
next_page_token=next_page_token,
app_profiles=[
data_v2_pb2.AppProfile(
name=app_profile_name1,
multi_cluster_routing_use_any=routing_policy,
),
data_v2_pb2.AppProfile(
name=app_profile_name2,
multi_cluster_routing_use_any=routing_policy,
)
],
)
# Patch the stub used by the API method.
client._instance_admin_client = instance_api
bigtable_instance_stub = (
client._instance_admin_client.transport)
bigtable_instance_stub.list_app_profiles.side_effect = [
expected_response]
# Perform the method and check the result.
app_profiles = instance.list_app_profiles()
app_profile_1, app_profile_2 = app_profiles
self.assertIsInstance(app_profile_1, AppProfile)
self.assertEqual(app_profile_1.name, app_profile_name1)
self.assertIsInstance(app_profile_2, AppProfile)
self.assertEqual(app_profile_2.name, app_profile_name2)
def test_get_iam_policy(self):
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client)
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.cloud.bigtable.policy import Policy
from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
instance = self._make_one(self.INSTANCE_ID, client)
version = 1
etag = b'etag_v1'
bindings = [{'role': BIGTABLE_ADMIN_ROLE,
'members': ['serviceAccount:service_acc1@test.com',
'user:user1@test.com']}]
expected_request_policy = policy_pb2.Policy(version=version,
etag=etag,
bindings=bindings)
expected_request = iam_policy_pb2.GetIamPolicyRequest(
resource=instance.name
)
# Patch the stub used by the API method.
channel = ChannelStub(responses=[expected_request_policy])
instance_api = (
bigtable_instance_admin_client.BigtableInstanceAdminClient(
channel=channel))
client._instance_admin_client = instance_api
# Perform the method and check the result.
policy_request = Policy(etag=etag, version=version)
policy_request[BIGTABLE_ADMIN_ROLE] = [Policy.user("user1@test.com"),
Policy.service_account(
"service_acc1@test.com")]
result = instance.get_iam_policy()
actual_request = channel.requests[0][1]
self.assertEqual(actual_request, expected_request)
self.assertEqual(result.bigtable_admins,
policy_request.bigtable_admins)
def test_set_iam_policy(self):
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client)
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.cloud.bigtable.policy import Policy
from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
instance = self._make_one(self.INSTANCE_ID, client)
version = 1
etag = b'etag_v1'
bindings = [{'role': BIGTABLE_ADMIN_ROLE,
'members': ['serviceAccount:service_acc1@test.com',
'user:user1@test.com']}]
expected_request_policy = policy_pb2.Policy(version=version,
etag=etag,
bindings=bindings)
expected_request = iam_policy_pb2.SetIamPolicyRequest(
resource=instance.name,
policy=expected_request_policy
)
# Patch the stub used by the API method.
channel = ChannelStub(responses=[expected_request_policy])
instance_api = (
bigtable_instance_admin_client.BigtableInstanceAdminClient(
channel=channel))
client._instance_admin_client = instance_api
# Perform the method and check the result.
policy_request = Policy(etag=etag, version=version)
policy_request[BIGTABLE_ADMIN_ROLE] = [Policy.user("user1@test.com"),
Policy.service_account(
"service_acc1@test.com")]
result = instance.set_iam_policy(policy_request)
actual_request = channel.requests[0][1]
self.assertEqual(actual_request, expected_request)
self.assertEqual(result.bigtable_admins,
policy_request.bigtable_admins)
def test_test_iam_permissions(self):
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_instance_admin_client)
from google.iam.v1 import iam_policy_pb2
credentials = _make_credentials()
client = self._make_client(project=self.PROJECT,
credentials=credentials, admin=True)
instance = self._make_one(self.INSTANCE_ID, client)
permissions = ["bigtable.tables.create", "bigtable.clusters.create"]
expected_request = iam_policy_pb2.TestIamPermissionsRequest(
resource=instance.name,
permissions=permissions)
# Patch the stub used by the API method.
channel = ChannelStub(responses=[expected_request])
instance_api = (
bigtable_instance_admin_client.BigtableInstanceAdminClient(
channel=channel))
client._instance_admin_client = instance_api
result = instance.test_iam_permissions(permissions)
actual_request = channel.requests[0][1]
self.assertEqual(actual_request, expected_request)
self.assertEqual(result, permissions)
class _Client(object):
def __init__(self, project):
self.project = project
self.project_name = 'projects/' + self.project
self._operations_stub = mock.sentinel.operations_stub
def __eq__(self, other):
return (other.project == self.project and
other.project_name == self.project_name)
| 40.137597
| 79
| 0.651924
|
ad33b279e489c57b6ce76b129f57dfe5853a07a6
| 2,015
|
py
|
Python
|
tests/algo/test_base.py
|
awesome-archive/buffalo
|
1bcb76b61161e74324ca71ed05ce0576598798b5
|
[
"Apache-2.0"
] | 1
|
2019-09-06T06:59:28.000Z
|
2019-09-06T06:59:28.000Z
|
tests/algo/test_base.py
|
awesome-archive/buffalo
|
1bcb76b61161e74324ca71ed05ce0576598798b5
|
[
"Apache-2.0"
] | null | null | null |
tests/algo/test_base.py
|
awesome-archive/buffalo
|
1bcb76b61161e74324ca71ed05ce0576598798b5
|
[
"Apache-2.0"
] | 1
|
2022-02-26T12:57:54.000Z
|
2022-02-26T12:57:54.000Z
|
# -*- coding: utf-8 -*-
import unittest
from buffalo.misc import aux
from buffalo.algo.als import ALS
from buffalo.algo.options import ALSOption
from buffalo.misc.log import set_log_level
from buffalo.data.mm import MatrixMarketOptions
from .base import TestBase, MockAlgo
class TestAlgoBase(TestBase):
def test0_tensorboard(self):
set_log_level(2)
opt = ALSOption().get_default_option()
opt.d = 5
opt.validation = aux.Option({'topk': 10})
opt.tensorboard = aux.Option({'root': './tb',
'name': 'als'})
data_opt = MatrixMarketOptions().get_default_option()
data_opt.input.main = self.ml_100k + 'main'
data_opt.input.uid = self.ml_100k + 'uid'
data_opt.input.iid = self.ml_100k + 'iid'
data_opt.data.value_prepro = aux.Option({'name': 'OneBased'})
als = ALS(opt, data_opt=data_opt)
als.initialize()
als.train()
results = als.get_validation_results()
self.assertTrue(results['ndcg'] > 0.025)
self.assertTrue(results['map'] > 0.015)
def test1_early_stopping(self):
set_log_level(2)
algo = MockAlgo()
algo.initialize()
algo.set_losses([1.0 + i / 1.0 for i in range(100)])
algo.opt.early_stopping_rounds = 5
algo.train()
self.assertEqual(algo.last_iteration, 5)
def test2_most_similar(self):
set_log_level(2)
opt = ALSOption().get_default_option()
data_opt = MatrixMarketOptions().get_default_option()
data_opt.input.main = self.ml_100k + 'main'
data_opt.input.uid = self.ml_100k + 'uid'
data_opt.input.iid = self.ml_100k + 'iid'
als = ALS(opt, data_opt=data_opt)
als.initialize()
als.train()
q1, q2, q3 = '49.Star_Wars_(1977)', '180.Return_of_the_Jedi_(1983)', '171.Empire_Strikes_Back,_The_(1980)'
self._test_most_similar(als, q1, q2, q3)
if __name__ == '__main__':
unittest.main()
| 33.032787
| 114
| 0.627295
|
8a63014ecae7bd1b7ae550b0e1c74e216c30002f
| 629
|
py
|
Python
|
erpnext_chinese/monkey_patches/company.py
|
eanfs/erpnext_chinese
|
68c22267b37553092955f2c3c14d35cfdbb79873
|
[
"MIT"
] | null | null | null |
erpnext_chinese/monkey_patches/company.py
|
eanfs/erpnext_chinese
|
68c22267b37553092955f2c3c14d35cfdbb79873
|
[
"MIT"
] | null | null | null |
erpnext_chinese/monkey_patches/company.py
|
eanfs/erpnext_chinese
|
68c22267b37553092955f2c3c14d35cfdbb79873
|
[
"MIT"
] | 1
|
2022-01-27T01:20:08.000Z
|
2022-01-27T01:20:08.000Z
|
import frappe
from erpnext.setup.doctype.company.company import Company
from erpnext_chinese.localize.localize import import_coa
old_create_default_accounts = Company.create_default_accounts
def create_default_accounts(self):
if self.chart_of_accounts == '中国会计科目表':
self.create_default_warehouses()
frappe.local.flags.ignore_root_company_validation = True
frappe.local.flags.ignore_chart_of_accounts = True  # bypass the framework's default-accounts setup
import_coa(self.name)
else:
old_create_default_accounts(self)
Company.create_default_accounts = create_default_accounts
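# Monkey patch: ERPNext now calls the wrapper above, so companies created with the
# Chinese chart of accounts get their accounts from import_coa() instead of the stock
# chart-of-accounts importer; every other chart falls through to the original method.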
| 39.3125
| 102
| 0.780604
|
0afec9bf2640143c87c5ce99c62b75c76c85a336
| 17,814
|
py
|
Python
|
nagios/datadog_checks/nagios/nagios.py
|
remicalixte/integrations-core
|
b115e18c52820fe1a92495f538fdc14ddf83cfe1
|
[
"BSD-3-Clause"
] | 1
|
2021-03-24T13:00:14.000Z
|
2021-03-24T13:00:14.000Z
|
nagios/datadog_checks/nagios/nagios.py
|
remicalixte/integrations-core
|
b115e18c52820fe1a92495f538fdc14ddf83cfe1
|
[
"BSD-3-Clause"
] | null | null | null |
nagios/datadog_checks/nagios/nagios.py
|
remicalixte/integrations-core
|
b115e18c52820fe1a92495f538fdc14ddf83cfe1
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import re
from collections import namedtuple
from datadog_checks.base import AgentCheck
from datadog_checks.base.utils.tailfile import TailFile
# fields order for each event type, as named tuples
EVENT_FIELDS = {
'CURRENT HOST STATE': namedtuple('E_CurrentHostState', 'host, event_state, event_soft_hard, return_code, payload'),
'CURRENT SERVICE STATE': namedtuple(
'E_CurrentServiceState', 'host, check_name, event_state, event_soft_hard, return_code, payload'
),
'SERVICE ALERT': namedtuple(
'E_ServiceAlert', 'host, check_name, event_state, event_soft_hard, return_code, payload'
),
'PASSIVE SERVICE CHECK': namedtuple('E_PassiveServiceCheck', 'host, check_name, return_code, payload'),
'HOST ALERT': namedtuple('E_HostAlert', 'host, event_state, event_soft_hard, return_code, payload'),
# [1305744274] SERVICE NOTIFICATION: ops;ip-10-114-237-165;Metric ETL;ACKNOWLEDGEMENT (CRITICAL);
# notify-service-by-email;HTTP CRITICAL: HTTP/1.1 503 Service Unavailable - 394 bytes
# in 0.010 second response time;datadog;alq
'SERVICE NOTIFICATION': namedtuple(
'E_ServiceNotification', 'contact, host, check_name, event_state, notification_type, payload'
),
# [1296509331] SERVICE FLAPPING ALERT: ip-10-114-97-27;cassandra JVM Heap;
# STARTED; Service appears to have started flapping (23.4% change >= 20.0% threshold)
# [1296662511] SERVICE FLAPPING ALERT: ip-10-114-97-27;cassandra JVM Heap;
# STOPPED; Service appears to have stopped flapping (3.8% change < 5.0% threshold)
'SERVICE FLAPPING ALERT': namedtuple('E_FlappingAlert', 'host, check_name, flap_start_stop, payload'),
# Reference for external commands: http://old.nagios.org/developerinfo/externalcommands/commandlist.php
# Command Format:
# ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;<sticky>;<notify>;<persistent>;<author>;<comment>
# [1305832665] EXTERNAL COMMAND: ACKNOWLEDGE_SVC_PROBLEM;ip-10-202-161-236;Resources ETL;2;1;0;datadog;alq checking
'ACKNOWLEDGE_SVC_PROBLEM': namedtuple(
'E_ServiceAck', 'host, check_name, sticky_ack, notify_ack, persistent_ack, ack_author, payload'
),
# Command Format:
# ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;<persistent>;<author>;<comment>
'ACKNOWLEDGE_HOST_PROBLEM': namedtuple(
'E_HostAck', 'host, sticky_ack, notify_ack, persistent_ack, ack_author, payload'
),
# Comment Format:
# PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<result_code>;<comment>
# We ignore it because Nagios will log a "PASSIVE SERVICE CHECK" after
# receiving this, and we don't want duplicate events to be counted.
'PROCESS_SERVICE_CHECK_RESULT': False,
# Host Downtime
# [1297894825] HOST DOWNTIME ALERT: ip-10-114-89-59;STARTED; Host has entered a period of scheduled downtime
# [1297894825] SERVICE DOWNTIME ALERT: ip-10-114-237-165;intake;
# STARTED; Service has entered a period of scheduled downtime
'HOST DOWNTIME ALERT': namedtuple('E_HostDowntime', 'host, downtime_start_stop, payload'),
'SERVICE DOWNTIME ALERT': namedtuple('E_ServiceDowntime', 'host, check_name, downtime_start_stop, payload'),
}
# Regex for the Nagios event log
RE_LINE_REG = re.compile(r'^\[(\d+)\] EXTERNAL COMMAND: (\w+);(.*)$')
RE_LINE_EXT = re.compile(r'^\[(\d+)\](?: \[\d+\])? ([^:]+): (.*)$')
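# Illustrative samples (adapted from the format comments above) of the two line shapes these regexes match:
# RE_LINE_REG: "[1305832665] EXTERNAL COMMAND: ACKNOWLEDGE_SVC_PROBLEM;host;check;2;1;0;author;comment"
# RE_LINE_EXT: "[1296509331] SERVICE FLAPPING ALERT: host;check;STARTED; Service appears to have started flapping"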
SOURCE_TYPE_NAME = 'Nagios'
class NagiosCheck(AgentCheck):
NAGIOS_CONF_KEYS = [
re.compile(r'^(?P<key>log_file)\s*=\s*(?P<value>.+)$'),
re.compile(r'^(?P<key>host_perfdata_file_template)\s*=\s*(?P<value>.+)$'),
re.compile(r'^(?P<key>service_perfdata_file_template)\s*=\s*(?P<value>.+)$'),
re.compile(r'^(?P<key>host_perfdata_file)\s*=\s*(?P<value>.+)$'),
re.compile(r'^(?P<key>service_perfdata_file)\s*=\s*(?P<value>.+)$'),
]
def __init__(self, name, init_config, instances):
AgentCheck.__init__(self, name, init_config, instances)
self.nagios_tails = {}
instance = self.instances[0]
tailers = []
nagios_conf = {}
instance_key = None
custom_tag = instance.get('tags', [])
if 'nagios_conf' in instance: # conf.d check
conf_path = instance['nagios_conf']
nagios_conf = self.parse_nagios_config(conf_path)
instance_key = conf_path
# Retrocompatibility Code
elif 'nagios_perf_cfg' in instance:
conf_path = instance['nagios_perf_cfg']
nagios_conf = self.parse_nagios_config(conf_path)
instance["collect_host_performance_data"] = True
instance["collect_service_performance_data"] = True
instance_key = conf_path
if 'nagios_log' in instance:
nagios_conf["log_file"] = instance['nagios_log']
if instance_key is None:
instance_key = instance['nagios_log']
# End of retrocompatibility code
if not nagios_conf:
self.log.warning("Missing path to nagios_conf")
return
if 'log_file' in nagios_conf and instance.get('collect_events', True):
self.log.debug("Starting to tail the event log")
tailers.append(
NagiosEventLogTailer(
log_path=nagios_conf['log_file'],
logger=self.log,
hostname=self.hostname,
event_func=self.event,
tags=custom_tag,
passive_checks=instance.get('passive_checks_events', False),
)
)
if (
'host_perfdata_file' in nagios_conf
and 'host_perfdata_file_template' in nagios_conf
and instance.get('collect_host_performance_data', False)
):
self.log.debug("Starting to tail the host_perfdata file")
tailers.append(
NagiosPerfDataTailer(
log_path=nagios_conf['host_perfdata_file'],
file_template=nagios_conf['host_perfdata_file_template'],
logger=self.log,
hostname=self.hostname,
gauge_func=self.gauge,
tags=custom_tag,
perfdata_field='HOSTPERFDATA',
metric_prefix=_get_host_metric_prefix,
)
)
if (
'service_perfdata_file' in nagios_conf
and 'service_perfdata_file_template' in nagios_conf
and instance.get('collect_service_performance_data', False)
):
self.log.debug("Starting to tail the service_perfdata file")
tailers.append(
NagiosPerfDataTailer(
log_path=nagios_conf['service_perfdata_file'],
file_template=nagios_conf['service_perfdata_file_template'],
logger=self.log,
hostname=self.hostname,
gauge_func=self.gauge,
tags=custom_tag,
perfdata_field='SERVICEPERFDATA',
metric_prefix=_get_service_metric_prefix,
)
)
self.nagios_tails[instance_key] = tailers
def parse_nagios_config(self, filename):
output = {}
try:
with open(filename) as f:
for line in f:
line = line.strip()
if not line:
continue
for key in self.NAGIOS_CONF_KEYS:
m = key.match(line)
if m:
output[m.group('key')] = m.group('value')
break
except Exception as e:
# Can't parse, assume it's just not working
# Don't return an incomplete config
self.log.exception(e)
raise Exception("Could not parse Nagios config file")
return output
def check(self, instance):
"""
Parse until the end of each tailer associated with this instance.
We match instances and tailers based on the path to the Nagios configuration file.
Special case: for compatibility with the old configuration, only the path to the
event log may be given when no configuration file is specified.
"""
instance_key = instance.get('nagios_conf', instance.get('nagios_perf_cfg', instance.get('nagios_log')))
# Bad configuration: This instance does not contain any necessary configuration
if not instance_key or instance_key not in self.nagios_tails:
raise Exception('No Nagios configuration file specified')
for tailer in self.nagios_tails[instance_key]:
tailer.check()
class NagiosTailer(object):
def __init__(self, log_path, logger, parse_line):
"""
:param log_path: string, path to the file to parse
:param logger: Logger object
"""
self.log_path = log_path
self.log = logger
self._nested_parse_line = parse_line
self._line_parsed = 0
tail = TailFile(self.log, self.log_path, self.parse_line)
self.gen = tail.tail(line_by_line=False, move_end=True)
next(self.gen)
def parse_line(self, line):
self._line_parsed += 1
return self._nested_parse_line(line)
def check(self):
self._line_parsed = 0
# read until the end of file
try:
self.log.debug("Start nagios check for file %s", self.log_path)
next(self.gen)
self.log.debug("Done nagios check for file %s (parsed %s line(s))", self.log_path, self._line_parsed)
except StopIteration as e:
self.log.exception(e)
self.log.warning("Can't tail %s file", self.log_path)
class NagiosEventLogTailer(object):
def __init__(self, log_path, logger, hostname, event_func, tags, passive_checks):
"""
:param log_path: string, path to the file to parse
:param logger: Logger object
:param hostname: string, name of the host this agent is running on
:param event_func: function to create event, should accept dict
:param passive_checks: bool, enable or not passive checks events
"""
self.log = logger
self.hostname = hostname
self._event = event_func
self._tags = tags
self._passive_checks = passive_checks
self.check = NagiosTailer(log_path, logger, self._parse_line).check
def _parse_line(self, line):
"""
Actual nagios parsing
Return True if we found an event, False otherwise
"""
# first isolate the timestamp and the event type
try:
m = RE_LINE_REG.match(line)
if m is None:
m = RE_LINE_EXT.match(line)
if m is None:
return False
self.log.debug("Matching line found %s", line)
tstamp, event_type, remainder = m.groups()
tstamp = int(tstamp)
# skip passive checks reports by default for spamminess
if event_type == 'PASSIVE SERVICE CHECK' and not self._passive_checks:
return False
# then retrieve the event format for each specific event type
fields = EVENT_FIELDS.get(event_type)
if fields is None:
self.log.warning("Ignoring unknown nagios event for line: %s", (line[:-1]))
return False
if not fields:
# Ignore and skip
self.log.debug("Ignoring Nagios event for line: %s", (line[:-1]))
return False
# and parse the rest of the line
parts = [p.strip() for p in remainder.split(';')]
# Chop parts we don't recognize
parts = parts[: len(fields._fields)]
event = self.create_event(tstamp, event_type, self.hostname, fields._make(parts), tags=self._tags)
self._event(event)
self.log.debug("Nagios event: %s", event)
return True
except Exception:
self.log.exception("Unable to create a nagios event from line: [%s]", line)
return False
def create_event(self, timestamp, event_type, hostname, fields, tags=None):
"""Factory method called by the parsers
"""
# Agent6 expects a specific set of fields, so we need to place all
# extra fields in the msg_title and let the Datadog backend separate them
# Any remaining fields that aren't a part of the datadog-agent payload
# specification will be dropped.
event_payload = fields._asdict()
msg_text = {
'event_type': event_type,
'event_soft_hard': event_payload.pop('event_soft_hard', None),
'check_name': event_payload.pop('check_name', None),
'event_state': event_payload.pop('event_state', None),
'payload': event_payload.pop('payload', None),
'ack_author': event_payload.pop('ack_author', None),
}
msg_text = json.dumps(msg_text)
self.log.info("Nagios Event pack: %s", msg_text)
event_payload.update(
{
'timestamp': timestamp,
'event_type': event_type,
'msg_text': msg_text,
'source_type_name': SOURCE_TYPE_NAME,
'tags': tags,
}
)
# if host is localhost, turn that into the internal host name
host = event_payload.get('host')
if host == "localhost":
event_payload["host"] = hostname
return event_payload
class NagiosPerfDataTailer(object):
metric_prefix = 'nagios'
pair_pattern = re.compile(
r"".join(
[
r"'?(?P<label>[^=']+)'?=",
r"(?P<value>[-0-9.]+)",
r"(?P<unit>s|us|ms|%|B|KB|MB|GB|TB|c)?",
r"(;(?P<warn>@?[-0-9.~]*:?[-0-9.~]*))?",
r"(;(?P<crit>@?[-0-9.~]*:?[-0-9.~]*))?",
r"(;(?P<min>[-0-9.]*))?",
r"(;(?P<max>[-0-9.]*))?",
]
)
)
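# Illustrative example (hypothetical perfdata token, not from the original
# source): a pair such as 'time'=0.06s;0.1;0.5;0.00;10.00 would match the
# pattern above with label='time', value='0.06', unit='s', warn='0.1',
# crit='0.5', min='0.00', max='10.00'.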
def __init__(self, log_path, file_template, logger, hostname, gauge_func, tags, perfdata_field, metric_prefix):
self.log = logger
self.compile_file_template(file_template)
self.hostname = hostname
self._gauge = gauge_func
self._tags = tags
self._get_metric_prefix = metric_prefix
self._perfdata_field = perfdata_field
self.check = NagiosTailer(log_path, logger, self._parse_line).check
def compile_file_template(self, file_template):
try:
# Escape characters that will be interpreted as regex bits
# e.g. [ and ] in "[SERVICEPERFDATA]"
regex = re.sub(r'[\[\]*]', r'.', file_template)
regex = re.sub(r'\|', r'\|', regex)
regex = re.sub(r'\$([^\$]*)\$', r'(?P<\1>[^\$]*)', regex)
self.line_pattern = re.compile(regex)
except Exception as e:
raise InvalidDataTemplate("%s (%s)" % (file_template, e))
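# Illustrative example (hypothetical template, not from the original source):
# a Nagios file_template such as
#   TIMET::$TIMET$\tHOSTNAME::$HOSTNAME$\tSERVICEDESC::$SERVICEDESC$\tSERVICEPERFDATA::$SERVICEPERFDATA$
# is turned by compile_file_template() into a regex whose named groups
# (TIMET, HOSTNAME, SERVICEDESC, SERVICEPERFDATA) are later read via
# matched.groupdict() in _parse_line().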
def _parse_line(self, line):
matched = self.line_pattern.match(line)
if not matched:
self.log.debug("Non matching line found %s", line)
else:
self.log.debug("Matching line found %s", line)
data = matched.groupdict()
metric_prefix = self._get_metric_prefix(self.metric_prefix, data)
# Parse the perfdata values, which are a space-delimited list of:
# 'label'=value[UOM];[warn];[crit];[min];[max]
perf_data = data.get(self._perfdata_field)
if not perf_data:
self.log.warning(
'Could not find field %s in %s, check your perfdata_format', self._perfdata_field, line
)
return
for pair in perf_data.split():
pair_match = self.pair_pattern.match(pair)
if not pair_match:
continue
else:
pair_data = pair_match.groupdict()
label = pair_data['label']
value = float(pair_data['value'])
device_name = None
if '/' in label:
# Special case: if the label begins
# with a /, treat the label as the device
# and use the metric prefix as the metric name
metric = '.'.join(metric_prefix)
device_name = label
else:
# Otherwise, append the label to the metric prefix
# and use that as the metric name
metric = '.'.join(metric_prefix + [label])
host_name = data.get('HOSTNAME', self.hostname)
optional_keys = ['unit', 'warn', 'crit', 'min', 'max']
tags = []
for key in optional_keys:
attr_val = pair_data.get(key)
if attr_val is not None and attr_val != '':
tags.append("{}:{}".format(key, attr_val))
self._gauge(metric, value, tags=tags + self._tags, hostname=host_name, device_name=device_name)
def _get_host_metric_prefix(prefix, line_data):
return [prefix, 'host']
def _get_service_metric_prefix(prefix, line_data):
prefix = [prefix]
middle_name = line_data.get('SERVICEDESC')
if middle_name:
prefix.append(middle_name.replace(' ', '_').lower())
return prefix
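# Illustrative example (hypothetical values, not from the original source):
# with SERVICEDESC 'Current Load' and a perfdata label 'load1',
# _get_service_metric_prefix() yields ['nagios', 'current_load'] and
# _parse_line() emits the gauge 'nagios.current_load.load1'.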
class InvalidDataTemplate(Exception):
pass
| 41.71897
| 119
| 0.593185
|
6dcda9d4f5a7a6f98c7822367a35f08fdf1e602c
| 393
|
py
|
Python
|
CursoemVideo/challenge002.py
|
ElptsJunior/Python
|
7347b38947b439afa392764aafe0a55f808530dd
|
[
"MIT"
] | null | null | null |
CursoemVideo/challenge002.py
|
ElptsJunior/Python
|
7347b38947b439afa392764aafe0a55f808530dd
|
[
"MIT"
] | null | null | null |
CursoemVideo/challenge002.py
|
ElptsJunior/Python
|
7347b38947b439afa392764aafe0a55f808530dd
|
[
"MIT"
] | null | null | null |
print('\033[32m = \033[m'*27)
print(" BUILD AN PYTHON SCRIPT THAT READ'S DAY,MONTH AND YEAR AND RETURN THE VALUES ".title())
print('\033[32m = \033[m'*27)
year = int(input('Please insert the year - yyyy :'))
month = int(input('Now insert the month - mm :'))
day = int(input('insert your day - dd : '))
print(" the date inserted is \033[7m{} / {} / {}\033[m, isn't it? ".format(day, month, year))
| 39.3
| 95
| 0.633588
|
2cd3d2adc4fc679c5aa595752acbc48ae0baa862
| 5,895
|
py
|
Python
|
tests/test_swf.py
|
dennisvang/imageio
|
54e957a8d4c5faa90c6dd16ae3f64346a5ceafa4
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_swf.py
|
dennisvang/imageio
|
54e957a8d4c5faa90c6dd16ae3f64346a5ceafa4
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_swf.py
|
dennisvang/imageio
|
54e957a8d4c5faa90c6dd16ae3f64346a5ceafa4
|
[
"BSD-2-Clause"
] | null | null | null |
""" Tests for the shockwave flash plugin
"""
import numpy as np
import pytest
import imageio.v2 as iio
import imageio.plugins
from imageio import core
from imageio.core import IS_PYPY
from conftest import deprecated_test
def mean(x):
return x.sum() / x.size # pypy-compat mean
@deprecated_test
def test_format_selection(test_images):
fname1 = test_images / "stent.swf"
fname2 = fname1.with_suffix(".out.swf")
F = iio.formats["swf"]
assert F.name == "SWF"
assert type(iio.formats[".swf"]) is type(F)
assert type(iio.read(fname1).format) is type(F)
assert type(iio.save(fname2).format) is type(F)
def test_reading_saving(test_images, tmp_path):
fname1 = test_images / "stent.swf"
fname2 = fname1.with_suffix(".out.swf")
fname3 = fname1.with_suffix(".compressed.swf")
fname4 = fname1.with_suffix(".out2.swf")
# Read
R = iio.read(fname1)
assert len(R) == 10
assert R.get_meta_data() == {} # always empty dict
ims1 = []
for im in R:
assert im.shape == (657, 451, 4)
assert mean(im) > 0
ims1.append(im)
# Seek
assert (R.get_data(3) == ims1[3]).all()
# Fails
with pytest.raises(IndexError):
R.get_data(-1) # No negative index
with pytest.raises(IndexError):
R.get_data(10) # Out of bounds
R.close()
# Test loop
R = iio.read(fname1, loop=True)
assert (R.get_data(10) == ims1[0]).all()
# setting meta data is ignored
W = iio.save(fname2)
W.set_meta_data({"foo": 3})
W.close()
# Just make sure mimread works
assert len(iio.mimread(fname1)) == 10
# I'm not sure why, but the below does not work on pypy, which is weird,
# because the file *is* closed, but somehow it's not flushed? Ah well ...
if IS_PYPY:
return
# Write and re-read, now without loop, and with html page
iio.mimsave(fname2, ims1, loop=False, html=True)
ims2 = iio.mimread(fname2)
# Check images. We can expect exact match, since
# SWF is lossless.
assert len(ims1) == len(ims2)
for im1, im2 in zip(ims1, ims2):
assert (im1 == im2).all()
# Test compressed
iio.mimsave(fname3, ims2, compress=True)
ims3 = iio.mimread(fname3)
assert len(ims1) == len(ims3)
for im1, im3 in zip(ims1, ims3):
assert (im1 == im3).all()
# Test the conventional low-level API. Bonus: we don't officially support this.
_swf = imageio.plugins.swf.load_lib()
_swf.write_swf(fname4, ims1)
ims4 = _swf.read_swf(fname4)
assert len(ims1) == len(ims4)
for im1, im4 in zip(ims1, ims4):
assert (im1 == im4).all()
# We want to manually validate that this file plays in 3d party tools
# So we write a small HTML5 doc that we can load
html = """<!DOCTYPE html>
<html>
<body>
Original:
<embed src="%s">
<br ><br >
Written:
<embed src="%s">
<br ><br >
Compressed:
<embed src="%s">
<br ><br >
Written 2:
<embed src="%s">
</body>
</html>
""" % (
fname1,
fname2,
fname3,
fname4,
)
with open(tmp_path / "test_swf.html", "wb") as f:
for line in html.splitlines():
f.write(line.strip().encode("utf-8") + b"\n")
@pytest.mark.needs_internet
def test_read_from_url():
burl = "https://raw.githubusercontent.com/imageio/imageio-binaries/master/"
url = burl + "images/stent.swf"
ims = iio.mimread(url)
assert len(ims) == 10
@deprecated_test
def test_invalid(test_images):
fname1 = test_images / "stent.swf"
fname2 = fname1.with_suffix(".invalid.swf")
# Empty file
with open(fname2, "wb"):
pass
assert not iio.formats.search_read_format(core.Request(fname2, "rI"))
with pytest.raises(RuntimeError):
iio.mimread(fname2, "swf")
# File with BS data
with open(fname2, "wb") as f:
f.write(b"x" * 100)
assert not iio.formats.search_read_format(core.Request(fname2, "rI"))
with pytest.raises(RuntimeError):
iio.mimread(fname2, "swf")
@pytest.mark.needs_internet
def test_lowlevel():
# Some tests from low level implementation that is not covered
# by using the plugin itself.
_swf = imageio.plugins.swf.load_lib()
tag = _swf.Tag()
with pytest.raises(NotImplementedError):
tag.process_tag()
assert tag.make_matrix_record() == "00000000"
assert tag.make_matrix_record(scale_xy=(1, 1))
assert tag.make_matrix_record(rot_xy=(1, 1))
assert tag.make_matrix_record(trans_xy=(1, 1))
SetBackgroundTag = _swf.SetBackgroundTag
assert SetBackgroundTag(1, 2, 3).rgb == SetBackgroundTag((1, 2, 3)).rgb
tag = _swf.ShapeTag(0, (0, 0), (1, 1))
assert tag.make_style_change_record(1, 1, (1, 1))
assert tag.make_style_change_record()
assert (
tag.make_straight_edge_record(2, 3).tobytes()
== tag.make_straight_edge_record((2, 3)).tobytes()
)
def test_types(test_images):
fname1 = test_images / "stent.swf"
fname2 = fname1.with_suffix(".out3.swf")
for dtype in [
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.int8,
np.int16,
np.int32,
np.int64,
np.float16,
np.float32,
np.float64,
]:
for shape in [(100, 1), (100, 3)]:
# Repeats an identity matrix, just for testing
im1 = np.dstack((np.identity(shape[0], dtype=dtype),) * shape[1])
iio.mimsave(fname2, [im1], "swf")
im2 = iio.mimread(fname2, "swf")[0]
assert im2.shape == (100, 100, 4)
assert im2.dtype == np.uint8
if len(shape) == 3 and dtype == np.uint8:
assert (im1[:, :, 0] == im2[:, :, 0]).all()
| 27.418605
| 79
| 0.596098
|
aef217318b493538513ff68ae863ff37d7ad050a
| 3,080
|
py
|
Python
|
app/app/settings.py
|
majalinet/recipe-app-api
|
f0c109a4617c515eb0511ab7235d50beb4a88994
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
majalinet/recipe-app-api
|
f0c109a4617c515eb0511ab7235d50beb4a88994
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
majalinet/recipe-app-api
|
f0c109a4617c515eb0511ab7235d50beb4a88994
|
[
"MIT"
] | null | null | null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm0v+di4cv-7z!(ed6e^atldnf4n%5rrve_64mqfkvhq81^63-a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| 25.454545
| 91
| 0.696429
|
833c3d3cf7e89b02986b58ab756f86437425479a
| 19,922
|
py
|
Python
|
kivy/core/image/img_gif.py
|
CharaD7/kivy
|
85065fe6633f5ac831c193dc84e3f636b789cc3a
|
[
"MIT"
] | 2
|
2021-05-16T09:46:14.000Z
|
2021-11-17T11:23:15.000Z
|
kivy/core/image/img_gif.py
|
CharaD7/kivy
|
85065fe6633f5ac831c193dc84e3f636b789cc3a
|
[
"MIT"
] | 1
|
2016-11-11T13:45:42.000Z
|
2016-11-11T13:45:42.000Z
|
kivy/core/image/img_gif.py
|
CharaD7/kivy
|
85065fe6633f5ac831c193dc84e3f636b789cc3a
|
[
"MIT"
] | 2
|
2020-03-28T10:18:00.000Z
|
2021-02-13T06:34:14.000Z
|
#-*- coding: utf-8 -*-
#
# this program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# this program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# The Graphics Interchange Format(c) is the Copyright property of
# CompuServe Incorporated. GIF(sm) is a Service Mark property of
# CompuServe Incorporated.
#
# The unisys/lzw patent has expired, yes. If anyone puts another patent
# over this code, you must *burn* this file.
'''pygif: gif implementation in python
http://www.java2s.com/Open-Source/Python/Network/\
emesene/emesene-1.6.2/pygif/pygif.py.htm'''
# TODO: issues to fix
# - optimize for speed (partially done; a lot of room for improvement)
import struct
from array import array
KNOWN_FORMATS = ('GIF87a', 'GIF89a')
from kivy.compat import PY2
from kivy.logger import Logger
from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
Debug = False
class ImageLoaderGIF(ImageLoaderBase):
'''Image loader for gif'''
@staticmethod
def extensions():
'''Return accepted extension for this loader'''
return ('gif', )
def load(self, filename):
try:
try:
im = GifDecoder(open(filename, 'rb').read())
except UnicodeEncodeError:
if PY2:
im = GifDecoder(open(filename.encode('utf8'), 'rb').read())
except:
Logger.warning('Image: Unable to load Image <%s>' % filename)
raise
if Debug:
print(im.print_info())
img_data = []
ls_width = im.ls_width
ls_height = im.ls_height
im_images = im.images
im_palette = im.palette
pixel_map = array('B', [0] * (ls_width * ls_height * 4))
for img in im_images:
palette = img.palette if img.local_color_table_flag\
else im_palette
have_transparent_color = img.has_transparent_color
transparent_color = img.transparent_color
#draw_method_restore_previous = 1 \
# if img.draw_method == 'restore previous' else 0
draw_method_replace = 1 \
if ((img.draw_method == 'replace') or
(img.draw_method == 'restore background')) else 0
pixels = img.pixels
img_height = img.height
img_width = img.width
left = img.left
top = img.top
if img_height > ls_height or img_width > ls_width or\
top > ls_height or left > ls_width:
Logger.warning('Image_GIF: decoding error on frame <%s>' %
len(img_data))
img_height = ls_height
img_width = ls_width
left = top = 0
#reverse top to bottom and left to right
tmp_top = (ls_height - (img_height + top))
img_width_plus_left = (img_width + left)
ls_width_multiply_4 = ls_width * 4
left_multiply_4 = left * 4
img_data_append = img_data.append
while img_height > 0:
i = left
img_height -= 1
x = (img_height * img_width) - left
rgba_pos = (tmp_top * ls_width_multiply_4) + (left_multiply_4)
tmp_top += 1
while i < img_width_plus_left:
#this should now display corrupted gifs
#instead of crashing on gifs not decoded properly
try:
(r, g, b) = palette[pixels[x + i]]
except:
rgba_pos += 4
i += 1
continue
# when not magic pink
if (r, g, b) != (255, 0, 255):
if have_transparent_color:
if transparent_color == pixels[x + i]:
if draw_method_replace:
#transparent pixel draw method replace
pixel_map[rgba_pos + 3] = 0
rgba_pos += 4
i += 1
continue
#transparent pixel draw method combine
rgba_pos += 4
i += 1
continue
# this pixel isn't transparent
#doesn't have transparent color
(pixel_map[rgba_pos], pixel_map[rgba_pos + 1],
pixel_map[rgba_pos + 2]) = (r, g, b)
pixel_map[rgba_pos + 3] = 255
# if magic pink move to next pixel
rgba_pos += 4
i += 1
if PY2:
img_data_append(ImageData(ls_width, ls_height,
'rgba', pixel_map.tostring(), flip_vertical=False))
else:
img_data_append(ImageData(ls_width, ls_height,
'rgba', pixel_map.tobytes(), flip_vertical=False))
if draw_method_replace:
pixel_map = array('B', [0] * (ls_width * ls_height * 4))
self.filename = filename
return img_data
class Gif(object):
'''Base class to decoder'''
# struct format strings
#17,18:
FMT_HEADER = '<6sHHBBB'
#20:
FMT_IMGDESC = '<HHHHB'
IMAGE_SEPARATOR = 0x2C
EXTENSION_INTRODUCER = 0x21
GIF_TRAILER = 0x3b
LABEL_GRAPHIC_CONTROL = 0xF9
LABEL_COMMENT = 0xFE
LABEL_PLAINTEXT = 0x01
FMT_EXT_GRAPHIC_CONTROL = '<BBHB' # 89a
def __init__(self, data, debug):
self.data = data
self.pointer = 0
# default data for an empty file
self.header = 'GIF87a'
self.ls_width = 0
self.ls_height = 0
self.flags = 0
self.color_resolution = 0
self.sort_flag = 0
self.color_table_flag = 0
self.global_color_table_size = 0
self.background_color = 0
self.aspect_ratio = 0
# greyscale palette by default
self.palette = [(x, x, x) for x in range(0, 256)]
self.images = []
self.debug_enabled = False
return
def pop(self, data, length=1):
'''gets the next $len chars from the data stack import
and increment the pointer'''
start = self.pointer
end = self.pointer + length
self.pointer += length
return data[start:end]
def pops(self, format, data):
'''pop struct: get size, pop(), unpack()'''
size = struct.calcsize(format)
return struct.unpack(format, self.pop(data, size))
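# Illustrative note (hypothetical call, shown only to clarify the helper):
# self.pops('<H', data) consumes struct.calcsize('<H') == 2 bytes from the
# stream and returns a one-element tuple holding a little-endian unsigned short.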
def print_info(self):
'''prints out some useful info (..debug?)'''
print("Version: %s" % self.header)
print("Logical screen width: %d" % self.ls_width)
print("Logical screen height: %d" % self.ls_height)
print("Flags: %s" % repr(self.flags))
print(" " * 6, "Color resolution: %d" % self.color_resolution)
print(" " * 6, "Sort flag: %r" % self.sort_flag)
print(" " * 6, "Global color table flag: %r" % self.color_table_flag)
print(" " * 22, "...size: %d (%d bytes)" %
(self.global_color_table_size, self.global_color_table_size * 3))
print("Background color: %d" % self.background_color)
print("Aspect ratio info: %d" % self.aspect_ratio)
def new_image(self, header=None):
'''adds a new image descriptor'''
image = ImageDescriptor(self, header)
self.images.append(image)
return image
class ImageDescriptor(object):
'''A class that represents a single image'''
def __init__(self, parent, header=None):
self.parent = parent
# this will be set when needed
self.codesize = 0
# compressed output codes
self.lzwcode = ''
# uncompressed pixels (decoded)
self.pixels = []
# we assume a "fullscreen" image
self.left = self.top = 0
self.width = parent.ls_width
self.height = parent.ls_height
# yes, these default flags work...
self.flags = [False for x in range(8)]
self.local_color_table_flag = False
self.interlace_flag = False
self.sort_flag = False
self.local_color_table_size = 0
self.draw_method = 'replace'
self.transparent_color = -1
self.has_transparent_color = 0
self.palette = []
if header:
self.setup_header(header)
def setup_header(self, header):
'''takes a header tuple and fills the attributes'''
self.left = header[0]
self.top = header[1]
self.width = header[2]
self.height = header[3]
self.flags = get_bits(header[4])
self.local_color_table_flag = self.flags[7]
self.interlace_flag = self.flags[6]
self.sort_flag = self.flags[5]
#-- flags 4 and 3 are reserved
self.local_color_table_size = 2 ** (pack_bits(self.flags[:3]) + 1)
if self.local_color_table_flag:
if Debug:
print('local color table true')
self.palette = self.parent.get_color_table(
self.local_color_table_size * 3)
def get_header(self):
'''builds a header dynamically'''
flags = [False for x in range(8)]
flags[7] = self.local_color_table_flag
flags[6] = self.interlace_flag
flags[5] = self.sort_flag
# useless!
flags[2], flags[1], flags[0] = get_bits(len(self.palette), bits=3)
return (self.left, self.top, self.width, self.height, pack_bits(flags))
header = property(fget=get_header)
class GifDecoder(Gif):
'''decodes a gif file into.. something.. else..'''
def __init__(self, data, debug=False):
Gif.__init__(self, data, debug)
self.fill()
def fill(self):
'''reads the data and fills each field of the file'''
# start reading from the beginning of the file
self.pointer = 0
#17. Header.
#18. Logical Screen Descriptor.
data = self.pops(Gif.FMT_HEADER, self.data)
self.header = data[0]
self.ls_width = data[1]
self.ls_height = data[2]
self.background_color = data[4]
self.aspect_ratio = data[5]
# flags field
self.flags = get_bits(data[3])
#1 bit
self.color_table_flag = self.flags[7]
self.sort_flag = self.flags[3]
#3 bit
self.color_resolution = pack_bits(self.flags[4:7]) # 7 not included
#3 bit
self.global_color_table_size = 2 ** (pack_bits(self.flags[:3]) + 1)
#19. Global Color Table.
if self.color_table_flag:
size = (self.global_color_table_size) * 3
self.palette = self.get_color_table(size)
else:
# generate a greyscale palette
self.palette = [(x, x, x) for x in range(256)]
# blocks
image = None
self_data = self.data
self_pops = self.pops
Gif_IMAGE_SEPARATOR = Gif.IMAGE_SEPARATOR
Gif_FMT_IMGDESC = Gif.FMT_IMGDESC
self_new_image = self.new_image
self_pop = self.pop
self_debug_enabled = self.debug_enabled
self_lzw_decode = self.lzw_decode
Gif_EXTENSION_INTRODUCER = Gif.EXTENSION_INTRODUCER
Gif_GIF_TRAILER = Gif.GIF_TRAILER
Gif_LABEL_GRAPHIC_CONTROL = Gif.LABEL_GRAPHIC_CONTROL
trans_color = 0
has_transparent_color = 0
drw_method = 'replace'
while True:
try:
nextbyte = self_pops('<B', self_data)[0]
except:
nextbyte = 0x3b # force end
#20. Image Descriptor
if nextbyte == Gif_IMAGE_SEPARATOR:
descriptor = self_pops(Gif_FMT_IMGDESC, self_data)
image = self_new_image(descriptor)
image.transparent_color = trans_color
image.has_transparent_color = has_transparent_color
image.draw_method = drw_method
image.codesize = self_pops('<B', self_data)[0]
image.lzwcode = b''
image_lzwcode = image.lzwcode
###TODO too many corner cases for gifs :(
table_size = image.local_color_table_size\
if image.local_color_table_flag and \
self.global_color_table_size < image.local_color_table_size\
else self.global_color_table_size
while True:
try:
blocksize = self_pops('<B', self_data)[0]
except:
break
if blocksize == 0:
break # no more image data
lzwdata = self_pop(self_data, blocksize)
image_lzwcode = b''.join((image_lzwcode, lzwdata))
if self_debug_enabled:
print('LZW length:', len(image_lzwcode))
image.lzwcode = image_lzwcode
image.pixels = self_lzw_decode(image.lzwcode, image.codesize,
table_size)
# Extensions
elif nextbyte == Gif_EXTENSION_INTRODUCER:
pass
# Gif trailer
elif nextbyte == Gif_GIF_TRAILER:
return
elif nextbyte == Gif_LABEL_GRAPHIC_CONTROL:
nextbyte = self_pops('<B', self_data)[0]
drw_bits = (get_bits(self_pops('<B', self_data)[0]))
has_transparent_color = drw_bits[0]
if drw_bits[2:5] == array('B', [0, 0, 1]):
drw_method = 'replace'
elif (drw_bits[2:5]) == array('B', [0, 1, 0]):
drw_method = 'restore background'
else:
drw_method = 'restore previous'
nextbyte = self_pops('<B', self_data)[0]
nextbyte = self_pops('<B', self_data)[0]
nextbyte = self_pops('<B', self_data)[0]
trans_color = nextbyte
pass
# "No Idea What Is This"
else:
pass
def string_to_bits(self, string):
'''high level string unpacker'''
ordarray = array('B', string)
bits = array('B')
bits_append = bits.append
_get_bits = get_bits
for byte in ordarray:
list(map(bits_append, _get_bits(byte)))
return bits
def readable(bool_list):
'''Converts a list of booleans to a readable list of ints
Useful for debug only'''
return [int(x) for x in bool_list]
def bits_to_int(self, bits):
'''high level bit list packer'''
c = 1
i = 0
for bit in bits:
if bit:
i += 2 ** (c - 1)
c += 1
return i
def get_color_table(self, size):
'''Returns a color table in the format [(r,g,b),(r,g,b), ...]'''
raw_color_table = self.pops("<%dB" % size, self.data)
pos = 0
palette = []
palette_append = palette.append
while pos + 3 < (size + 1):
red = raw_color_table[pos]
green = raw_color_table[pos + 1]
blue = raw_color_table[pos + 2]
palette_append((red, green, blue))
pos += 3
return palette
def lzw_decode(self, input, initial_codesize, color_table_size):
'''Decodes a lzw stream from input import
Returns list of ints (pixel values)'''
string_table = {}
output = array('B')
output_append = output.append
output_extend = output.extend
old = ''
index = 0
bits = self.string_to_bits(input)
self.bitpointer = 0
codesize = initial_codesize + 1
clearcode, end_of_info = color_table_size, color_table_size + 1
if Debug:
print('codesize: %d' % codesize)
print('clearcode %d, end_of_info: %d' % (clearcode, end_of_info))
def pop(size, _bits):
''' return bits '''
start = self.bitpointer
end = self.bitpointer = start + size
return _bits[start: end]
def clear():
'''Called on clear code'''
string_table.clear()
for index in range(color_table_size):
string_table[index] = chr(index)
index = end_of_info + 1
return index
index = clear()
# skip first (clear)code
bits = bits[codesize:]
# read first code, append to output
self_bits_to_int = self.bits_to_int
code = self_bits_to_int(pop(codesize, bits))
if code in string_table:
output_append(ord(string_table[code]))
else:
Logger.warning('Image_GIF: decoding error on code '
'<%d> code size <%d>' % (code, codesize))
string_table[code] = string_table[0]
output_append(ord(string_table[code]))
old = string_table[code]
bitlen = len(bits)
while self.bitpointer < bitlen:
# read next code
code = self_bits_to_int(pop(codesize, bits))
# special code?
if code == clearcode:
index = clear()
codesize = initial_codesize + 1
code = self_bits_to_int(pop(codesize, bits))
if code in string_table:
output_append(ord(string_table[code]))
else:
Logger.warning('Image_GIF: decoding error on code '
'<%d> code size <%d>' % (code, codesize))
string_table[code] = string_table[0]
output_append(ord(string_table[code]))
old = string_table[code]
continue
elif code == end_of_info:
break
# code in stringtable?
if code in string_table:
c = string_table[code]
string_table[index] = ''.join((old, c[0]))
else:
c = ''.join((old, old[0]))
string_table[code] = c
index += 1
old = c
output_extend(list(map(ord, c)))
if index == 2 ** codesize:
codesize += 1
if codesize == 13:
codesize = 12
if self.debug_enabled:
print('Output stream len: %d' % len(output))
return output
def get_bits(flags, reverse=False, bits=8):
'''return a list with $bits items, one for each enabled bit'''
mybits = (1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048)[:bits]
rev_num = 1
if reverse:
rev_num = -1
ret = array('B')
ret_append = ret.append
for bit in mybits[::rev_num]:
ret_append(flags & bit != 0)
return ret
def pack_bits(bits):
'''convert a bit (bool or int) tuple into a int'''
packed = 0
level = 0
for bit in bits:
if bit:
packed += 2 ** level
level += 1
return packed
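# Illustrative example: get_bits(5) returns array('B', [1, 0, 1, 0, 0, 0, 0, 0])
# (least-significant bit first), and pack_bits([1, 0, 1]) returns 5 again, so
# the two helpers are inverses over the bits they cover.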
# register
ImageLoader.register(ImageLoaderGIF)
| 34.113014
| 80
| 0.542867
|
77af215611c36464b68c0f19a5ecae975003bbe3
| 39,269
|
py
|
Python
|
pypy/interpreter/pyframe.py
|
alexmechanic/pypy
|
6b1511399cb6f174e408ca74e8046c49e98fcc8c
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/interpreter/pyframe.py
|
alexmechanic/pypy
|
6b1511399cb6f174e408ca74e8046c49e98fcc8c
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/interpreter/pyframe.py
|
alexmechanic/pypy
|
6b1511399cb6f174e408ca74e8046c49e98fcc8c
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2022-03-30T11:42:37.000Z
|
2022-03-30T11:42:37.000Z
|
""" PyFrame class implementation with the interpreter main loop.
"""
import sys
from rpython.rlib import jit, rweakref
from rpython.rlib.debug import make_sure_not_resized, check_nonneg
from rpython.rlib.debug import ll_assert_not_none
from rpython.rlib.jit import hint
from rpython.rlib.objectmodel import instantiate, specialize, we_are_translated
from rpython.rlib.objectmodel import not_rpython
from rpython.rlib.rarithmetic import intmask, r_uint
from rpython.tool.pairtype import extendabletype
from pypy.interpreter import pycode, pytraceback
from pypy.interpreter.argument import Arguments
from pypy.interpreter.astcompiler import consts
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import (
OperationError, get_cleared_operation_error, oefmt)
from pypy.interpreter.executioncontext import ExecutionContext
from pypy.interpreter.nestedscope import Cell
from pypy.tool import stdlib_opcode
# Define some opcodes used
for op in '''DUP_TOP POP_TOP SETUP_EXCEPT SETUP_FINALLY SETUP_WITH
SETUP_ASYNC_WITH POP_BLOCK YIELD_VALUE
NOP FOR_ITER EXTENDED_ARG END_ASYNC_FOR LOAD_CONST
JUMP_IF_FALSE_OR_POP JUMP_IF_TRUE_OR_POP POP_JUMP_IF_FALSE POP_JUMP_IF_TRUE
JUMP_IF_NOT_EXC_MATCH JUMP_ABSOLUTE JUMP_FORWARD GET_ITER GET_AITER
RETURN_VALUE RERAISE RAISE_VARARGS POP_EXCEPT
'''.split():
globals()[op] = stdlib_opcode.opmap[op]
class FrameDebugData(object):
""" A small object that holds debug data for tracing
"""
w_f_trace = None
instr_lb = 0
instr_ub = 0
instr_prev_plus_one = 0
f_lineno = 0 # current lineno for tracing
is_being_profiled = False
is_in_line_tracing = False
f_trace_lines = True
f_trace_opcodes = False
w_locals = None
hidden_operationerr = None
def __init__(self, pycode):
self.f_lineno = pycode.co_firstlineno
self.w_globals = pycode.w_globals
class PyFrame(W_Root):
"""Represents a frame for a regular Python function
that needs to be interpreted.
Public fields:
* 'space' is the object space this frame is running in
* 'code' is the PyCode object this frame runs
* 'w_locals' is the locals dictionary to use, if needed, stored on a
debug object
* 'w_globals' is the attached globals dictionary
* 'builtin' is the attached built-in module
* 'valuestack_w', 'blockstack', control the interpretation
Cell Vars:
my local variables that are exposed to my inner functions
Free Vars:
variables coming from a parent function in which i'm nested
'closure' is a list of Cell instances: the received free vars.
"""
__metaclass__ = extendabletype
frame_finished_execution = False
f_generator_wref = rweakref.dead_ref # for generators/coroutines
f_generator_nowref = None # (only one of the two attrs)
w_yielding_from = None
last_instr = -1
f_backref = jit.vref_None
escaped = False # see mark_as_escaped()
debugdata = None
pycode = None # code object executed by that frame
locals_cells_stack_w = None # the list of all locals, cells and the valuestack
valuestackdepth = 0 # number of items on valuestack
lastblock = None
# other fields:
# builtin - builtin cache, only if honor__builtins__ is True
# defaults to False
# there is also self.space which is removed by the annotator
# additionally JIT uses vable_token field that is representing
# frame current virtualizable state as seen by the JIT
def __init__(self, space, code, w_globals, outer_func):
self = hint(self, access_directly=True, fresh_virtualizable=True)
assert isinstance(code, pycode.PyCode)
self.space = space
self.pycode = code
if code.frame_stores_global(w_globals):
self.getorcreatedebug().w_globals = w_globals
ncellvars = len(code.co_cellvars)
nfreevars = len(code.co_freevars)
size = code.co_nlocals + ncellvars + nfreevars + code.co_stacksize
# the layout of this list is as follows:
# | local vars | cells | stack |
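# Illustrative sizing (hypothetical numbers): with co_nlocals=3, one
# cellvar, no freevars and co_stacksize=4, the list has 3+1+0+4 = 8 slots
# and valuestackdepth starts at index 4, where the value stack begins.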
self.locals_cells_stack_w = [None] * size
self.valuestackdepth = code.co_nlocals + ncellvars + nfreevars
make_sure_not_resized(self.locals_cells_stack_w)
check_nonneg(self.valuestackdepth)
#
if space.config.objspace.honor__builtins__:
self.builtin = space.builtin.pick_builtin(w_globals)
# regular functions always have CO_OPTIMIZED and CO_NEWLOCALS.
# class bodies only have CO_NEWLOCALS.
self.initialize_frame_scopes(outer_func, code)
def getdebug(self):
return self.debugdata
def getorcreatedebug(self):
if self.debugdata is None:
self.debugdata = FrameDebugData(self.pycode)
return self.debugdata
def get_w_globals(self):
debugdata = self.getdebug()
if debugdata is not None:
return debugdata.w_globals
return jit.promote(self.pycode).w_globals
def get_w_f_trace(self):
d = self.getdebug()
if d is None:
return None
return d.w_f_trace
def get_is_being_profiled(self):
d = self.getdebug()
if d is None:
return False
return d.is_being_profiled
def get_w_locals(self):
d = self.getdebug()
if d is None:
return None
return d.w_locals
def get_f_trace_lines(self):
d = self.getdebug()
if d is None:
return True
return d.f_trace_lines
def get_f_trace_opcodes(self):
d = self.getdebug()
if d is None:
return False
return d.f_trace_opcodes
@not_rpython
def __repr__(self):
# useful in tracebacks
return "<%s.%s executing %s at line %s" % (
self.__class__.__module__, self.__class__.__name__,
self.pycode, self.get_last_lineno())
def _getcell(self, varindex):
cell = self.locals_cells_stack_w[varindex + self.pycode.co_nlocals]
assert isinstance(cell, Cell)
return cell
def mark_as_escaped(self):
"""
Must be called on frames that are exposed to applevel, e.g. by
sys._getframe(). This ensures that the virtualref holding the frame
is properly forced by ec.leave(), and thus the frame will be still
accessible even after the corresponding C stack died.
"""
self.escaped = True
def append_block(self, block):
assert block.previous is self.lastblock
self.lastblock = block
def pop_block(self):
block = self.lastblock
self.lastblock = block.previous
return block
def blockstack_non_empty(self):
return self.lastblock is not None
def get_blocklist(self):
"""Returns a list containing all the blocks in the frame"""
lst = []
block = self.lastblock
while block is not None:
lst.append(block)
block = block.previous
return lst
def set_blocklist(self, lst):
self.lastblock = None
i = len(lst) - 1
while i >= 0:
block = lst[i]
i -= 1
block.previous = self.lastblock
self.lastblock = block
def get_builtin(self):
if self.space.config.objspace.honor__builtins__:
return self.builtin
else:
return self.space.builtin
@jit.unroll_safe
def initialize_frame_scopes(self, outer_func, code):
# regular functions always have CO_OPTIMIZED and CO_NEWLOCALS.
# class bodies only have CO_NEWLOCALS.
# CO_NEWLOCALS: make a locals dict unless optimized is also set
# CO_OPTIMIZED: no locals dict needed at all
flags = code.co_flags
if not (flags & pycode.CO_OPTIMIZED):
if flags & pycode.CO_NEWLOCALS:
self.getorcreatedebug().w_locals = self.space.newdict(module=True)
else:
w_globals = self.get_w_globals()
assert w_globals is not None
self.getorcreatedebug().w_locals = w_globals
ncellvars = len(code.co_cellvars)
nfreevars = len(code.co_freevars)
if not nfreevars:
if not ncellvars:
return # no cells needed - fast path
elif outer_func is None:
space = self.space
raise oefmt(space.w_TypeError,
"directly executed code object may not contain free "
"variables")
if outer_func and outer_func.closure:
closure_size = len(outer_func.closure)
else:
closure_size = 0
if closure_size != nfreevars:
raise ValueError("code object received a closure with "
"an unexpected number of free variables")
index = code.co_nlocals
for i in range(ncellvars):
self.locals_cells_stack_w[index] = Cell(
None, self.pycode.cell_families[i])
index += 1
for i in range(nfreevars):
self.locals_cells_stack_w[index] = outer_func.closure[i]
index += 1
def _is_generator_or_coroutine(self):
return (self.getcode().co_flags & (pycode.CO_COROUTINE |
pycode.CO_GENERATOR |
pycode.CO_ASYNC_GENERATOR)) != 0
def run(self, name=None, qualname=None):
"""Start this frame's execution."""
if self._is_generator_or_coroutine():
return self.initialize_as_generator(name, qualname)
else:
return self.execute_frame()
run._always_inline_ = True
def initialize_as_generator(self, name, qualname):
space = self.space
flags = self.getcode().co_flags
if flags & pycode.CO_COROUTINE:
from pypy.interpreter.generator import Coroutine
gen = Coroutine(self, name, qualname)
ec = space.getexecutioncontext()
gen.capture_origin(ec)
elif flags & pycode.CO_ASYNC_GENERATOR:
from pypy.interpreter.generator import AsyncGenerator
gen = AsyncGenerator(self, name, qualname)
elif flags & pycode.CO_GENERATOR:
from pypy.interpreter.generator import GeneratorIterator
gen = GeneratorIterator(self, name, qualname)
else:
raise AssertionError("bad co_flags")
if space.config.translation.rweakref:
self.f_generator_wref = rweakref.ref(gen)
else:
self.f_generator_nowref = gen
w_gen = gen
return w_gen
def resume_execute_frame(self, w_arg_or_err):
# Called from execute_frame() just before resuming the bytecode
# interpretation.
from pypy.interpreter.pyopcode import SApplicationException
space = self.space
w_yf = self.w_yielding_from
if w_yf is not None:
self.w_yielding_from = None
try:
self.next_yield_from(w_yf, w_arg_or_err)
except OperationError as operr:
operr.record_context(space, space.getexecutioncontext())
return self.handle_generator_error(operr)
# Normal case: the call above raises Yield.
# We reach this point if the iterable is exhausted.
last_instr = jit.promote(self.last_instr)
assert last_instr & 1 == 0
assert last_instr >= 0
return r_uint(last_instr + 2)
if isinstance(w_arg_or_err, SApplicationException):
return self.handle_generator_error(w_arg_or_err.operr)
last_instr = jit.promote(self.last_instr)
if last_instr != -1:
assert last_instr & 1 == 0
self.pushvalue(w_arg_or_err)
return r_uint(last_instr + 2)
else:
return r_uint(0)
def execute_frame(self, w_arg_or_err=None):
"""Execute this frame. Main entry point to the interpreter.
'w_arg_or_err' is non-None iff we are starting or resuming
a generator or coroutine frame; in that case, w_arg_or_err
is the input argument -or- an SApplicationException instance.
"""
from pypy.interpreter import pyopcode as pyopcode
# the following 'assert' is an annotation hint: it hides from
# the annotator all methods that are defined in PyFrame but
# overridden in the {,Host}FrameClass subclasses of PyFrame.
assert (isinstance(self, self.space.FrameClass) or
not self.space.config.translating)
executioncontext = self.space.getexecutioncontext()
executioncontext.enter(self)
got_exception = True
w_exitvalue = self.space.w_None
try:
executioncontext.call_trace(self)
#
# Execution starts just after the last_instr. Initially,
# last_instr is -1. After a generator suspends it points to
# the YIELD_VALUE/YIELD_FROM instruction.
try:
try:
if w_arg_or_err is None:
assert self.last_instr == -1
next_instr = r_uint(0)
else:
next_instr = self.resume_execute_frame(w_arg_or_err)
except pyopcode.Yield:
w_exitvalue = self.popvalue()
else:
w_exitvalue = self.dispatch(self.pycode, next_instr,
executioncontext)
except OperationError:
raise
except Exception as e: # general fall-back
raise self._convert_unexpected_exception(e)
finally:
executioncontext.return_trace(self, w_exitvalue)
got_exception = False
finally:
executioncontext.leave(self, w_exitvalue, got_exception)
return w_exitvalue
execute_frame.insert_stack_check_here = True
# stack manipulation helpers
def pushvalue(self, w_object):
depth = self.valuestackdepth
self.locals_cells_stack_w[depth] = ll_assert_not_none(w_object)
self.valuestackdepth = depth + 1
def pushvalue_none(self):
depth = self.valuestackdepth
# the entry is already None, and remains None
assert self.locals_cells_stack_w[depth] is None
self.valuestackdepth = depth + 1
def pushvalue_maybe_none(self, w_object):
depth = self.valuestackdepth
self.locals_cells_stack_w[depth] = w_object
self.valuestackdepth = depth + 1
def assert_stack_index(self, index):
if we_are_translated():
return
if not self._check_stack_index(index):
#import pdb; pdb.set_trace()
assert 0
def _check_stack_index(self, index):
code = self.pycode
ncellvars = len(code.co_cellvars)
nfreevars = len(code.co_freevars)
stackstart = code.co_nlocals + ncellvars + nfreevars
return index >= stackstart
def popvalue(self):
return ll_assert_not_none(self.popvalue_maybe_none())
def popvalue_maybe_none(self):
depth = self.valuestackdepth - 1
self.assert_stack_index(depth)
assert depth >= 0
w_object = self.locals_cells_stack_w[depth]
self.locals_cells_stack_w[depth] = None
self.valuestackdepth = depth
return w_object
# we need two popvalues that return different data types:
# one in case we want list another in case of tuple
def _new_popvalues():
@jit.unroll_safe
def popvalues(self, n):
values_w = [None] * n
while True:
n -= 1
if n < 0:
break
values_w[n] = self.popvalue()
return values_w
return popvalues
popvalues = _new_popvalues()
popvalues_mutable = _new_popvalues()
del _new_popvalues
@jit.unroll_safe
def peekvalues(self, n):
values_w = [None] * n
base = self.valuestackdepth - n
self.assert_stack_index(base)
assert base >= 0
while True:
n -= 1
if n < 0:
break
values_w[n] = self.locals_cells_stack_w[base+n]
return values_w
@jit.unroll_safe
def dropvalues(self, n):
n = hint(n, promote=True)
finaldepth = self.valuestackdepth - n
self.assert_stack_index(finaldepth)
assert finaldepth >= 0
while True:
n -= 1
if n < 0:
break
self.locals_cells_stack_w[finaldepth+n] = None
self.valuestackdepth = finaldepth
@jit.unroll_safe
def pushrevvalues(self, n, values_w): # n should be len(values_w)
make_sure_not_resized(values_w)
while True:
n -= 1
if n < 0:
break
self.pushvalue(values_w[n])
@jit.unroll_safe
def dupvalues(self, n):
delta = n-1
while True:
n -= 1
if n < 0:
break
w_value = self.peekvalue(delta)
self.pushvalue(w_value)
def peekvalue(self, index_from_top=0):
# NOTE: top of the stack is peekvalue(0).
# Contrast this with CPython where it's PEEK(-1).
return ll_assert_not_none(self.peekvalue_maybe_none(index_from_top))
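# Illustrative note: after pushvalue(w_a); pushvalue(w_b), peekvalue(0)
# returns w_b (the most recently pushed value) and peekvalue(1) returns w_a.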
def peekvalue_maybe_none(self, index_from_top=0):
index_from_top = hint(index_from_top, promote=True)
index = self.valuestackdepth + ~index_from_top
self.assert_stack_index(index)
assert index >= 0
return self.locals_cells_stack_w[index]
def settopvalue(self, w_object, index_from_top=0):
index_from_top = hint(index_from_top, promote=True)
index = self.valuestackdepth + ~index_from_top
self.assert_stack_index(index)
assert index >= 0
self.locals_cells_stack_w[index] = ll_assert_not_none(w_object)
@jit.unroll_safe
def dropvaluesuntil(self, finaldepth):
depth = self.valuestackdepth - 1
finaldepth = hint(finaldepth, promote=True)
assert finaldepth >= 0
while depth >= finaldepth:
self.locals_cells_stack_w[depth] = None
depth -= 1
self.valuestackdepth = finaldepth
def _guess_function_name_parens(self, fnname=None, w_function=None):
""" Returns 'funcname()' from either a function name fnname or a
wrapped callable w_function. If it's not a function or a method, returns
'Classname object'"""
# XXX this is super annoying to compute every time we do a function call!
# CPython has a similar function, PyEval_GetFuncName
from pypy.interpreter.function import Function, _Method
if fnname is not None:
return fnname + '()'
if w_function is None:
return None
if isinstance(w_function, Function):
return w_function.name + '()'
if isinstance(w_function, _Method):
return self._guess_function_name_parens(None, w_function.w_function)
return self.space.type(w_function).getname(self.space) + ' object'
def make_arguments(self, nargs, methodcall=False, w_function=None, fnname=None):
fnname_parens = self._guess_function_name_parens(fnname, w_function)
return Arguments(
self.space, self.peekvalues(nargs), methodcall=methodcall, fnname_parens=fnname_parens)
def argument_factory(self, arguments, keywords, keywords_w, w_star, w_starstar, methodcall=False, w_function=None, fnname=None):
fnname_parens = self._guess_function_name_parens(fnname, w_function)
return Arguments(
self.space, arguments, keywords, keywords_w, w_star,
w_starstar, methodcall=methodcall, fnname_parens=fnname_parens)
def hide(self):
return self.pycode.hidden_applevel
def getcode(self):
return hint(self.pycode, promote=True)
@jit.look_inside_iff(lambda self, scope_w: jit.isvirtual(scope_w))
def setfastscope(self, scope_w):
"""Initialize the fast locals from a list of values,
where the order is according to self.pycode.signature()."""
scope_len = len(scope_w)
if scope_len > self.pycode.co_nlocals:
raise ValueError("new fastscope is longer than the allocated area")
# don't assign directly to 'locals_cells_stack_w[:scope_len]' to be
# virtualizable-friendly
for i in range(scope_len):
self.locals_cells_stack_w[i] = scope_w[i]
self.init_cells()
def getdictscope(self):
"""
Get the locals as a dictionary
"""
self.fast2locals()
return self.debugdata.w_locals
def setdictscope(self, w_locals):
"""
Initialize the locals from a dictionary.
"""
self.getorcreatedebug().w_locals = w_locals
self.locals2fast()
@jit.unroll_safe
def fast2locals(self):
# Copy values from the fastlocals to self.w_locals
d = self.getorcreatedebug()
if d.w_locals is None:
d.w_locals = self.space.newdict(module=True)
varnames = self.getcode().getvarnames()
for i in range(min(len(varnames), self.getcode().co_nlocals)):
name = varnames[i]
w_value = self.locals_cells_stack_w[i]
if w_value is not None:
self.space.setitem_str(d.w_locals, name, w_value)
else:
w_name = self.space.newtext(name)
try:
self.space.delitem(d.w_locals, w_name)
except OperationError as e:
if not e.match(self.space, self.space.w_KeyError):
raise
# cellvars are values exported to inner scopes
# freevars are values coming from outer scopes
# (see locals2fast for why CO_OPTIMIZED)
freevarnames = self.pycode.co_cellvars
if self.pycode.co_flags & consts.CO_OPTIMIZED:
freevarnames = freevarnames + self.pycode.co_freevars
for i in range(len(freevarnames)):
name = freevarnames[i]
cell = self._getcell(i)
try:
w_value = cell.get()
except ValueError:
w_name = self.space.newtext(name)
try:
self.space.delitem(d.w_locals, w_name)
except OperationError as e:
if not e.match(self.space, self.space.w_KeyError):
raise
else:
self.space.setitem_str(d.w_locals, name, w_value)
@jit.unroll_safe
def locals2fast(self):
# Copy values from self.w_locals to the fastlocals
w_locals = self.getorcreatedebug().w_locals
assert w_locals is not None
varnames = self.getcode().getvarnames()
numlocals = self.getcode().co_nlocals
new_fastlocals_w = [None] * numlocals
for i in range(min(len(varnames), numlocals)):
name = varnames[i]
w_value = self.space.finditem_str(w_locals, name)
if w_value is not None:
new_fastlocals_w[i] = w_value
self.setfastscope(new_fastlocals_w)
freevarnames = self.pycode.co_cellvars
if self.pycode.co_flags & consts.CO_OPTIMIZED:
freevarnames = freevarnames + self.pycode.co_freevars
# If the namespace is unoptimized, then one of the
# following cases applies:
# 1. It does not contain free variables, because it
# uses import * or is a top-level namespace.
# 2. It is a class namespace.
# We don't want to accidentally copy free variables
# into the locals dict used by the class.
for i in range(len(freevarnames)):
name = freevarnames[i]
cell = self._getcell(i)
w_value = self.space.finditem_str(w_locals, name)
if w_value is not None:
cell.set(w_value)
else:
cell.set(None)
@jit.unroll_safe
def init_cells(self):
"""
Initialize cellvars from self.locals_cells_stack_w.
"""
args_to_copy = self.pycode._args_as_cellvars
index = self.pycode.co_nlocals
for i in range(len(args_to_copy)):
argnum = args_to_copy[i]
if argnum >= 0:
cell = self.locals_cells_stack_w[index]
assert isinstance(cell, Cell)
cell.set(self.locals_cells_stack_w[argnum])
index += 1
def getclosure(self):
return None
def fget_code(self, space):
return self.getcode()
def fget_getdictscope(self, space):
return self.getdictscope()
def fget_w_globals(self, space):
# bit silly, but GetSetProperty passes a space
return self.get_w_globals()
### line numbers ###
def fget_f_lineno(self, space):
"Returns the line number of the instruction currently being executed."
if self.get_w_f_trace() is None:
return space.newint(self.get_last_lineno())
else:
return space.newint(self.getorcreatedebug().f_lineno)
def fset_f_lineno(self, space, w_new_lineno):
"Change the line number of the instruction currently being executed."
try:
new_lineno = space.int_w(w_new_lineno)
except OperationError:
raise oefmt(space.w_ValueError, "lineno must be an integer")
# You can only do this from within a trace function, not via
# _getframe or similar hackery.
if space.int_w(self.fget_f_lasti(space)) == -1:
raise oefmt(space.w_ValueError,
"can't jump from the 'call' trace event of a new frame")
if self.get_w_f_trace() is None:
raise oefmt(space.w_ValueError,
"f_lineno can only be set by a trace function")
code = self.pycode.co_code
if ord(code[self.last_instr]) == YIELD_VALUE:
raise oefmt(space.w_ValueError,
"can't jump from a yield statement")
# Only allow jumps when we're tracing a line event.
d = self.getorcreatedebug()
if not d.is_in_line_tracing:
raise oefmt(space.w_ValueError,
"can only jump from a 'line' trace event")
line = self.pycode.co_firstlineno
if new_lineno < line:
raise oefmt(space.w_ValueError,
"line %d comes before the current code block", new_lineno)
lines = marklines(self.pycode)
x = first_line_not_before(lines, new_lineno)
# If we didn't reach the requested line, return an error.
if x == -1:
raise oefmt(space.w_ValueError,
"line %d comes after the current code block", new_lineno)
new_lineno = x
blocks = markblocks(self.pycode)
start_block_stack = blocks[self.last_instr // 2]
best_block_stack = None
error = "cannot find bytecode for specified line"
best_addr = -1
for i in range(len(lines)):
if lines[i] == new_lineno:
target_block_stack = blocks[i]
if compatible_block_stack(start_block_stack, target_block_stack):
error = None
if best_block_stack is None or len(target_block_stack) > len(best_block_stack):
best_block_stack = target_block_stack
best_addr = i * 2
elif error is not None:
if target_block_stack:
error = explain_incompatible_block_stack(target_block_stack)
else:
error = "code may be unreachable"
if error is not None:
raise OperationError(space.w_ValueError, space.newtext(error))
while len(start_block_stack) > len(best_block_stack):
kind = start_block_stack[-1]
if kind == JUMP_BLOCKSTACK_LOOP:
self.popvalue()
elif kind == JUMP_BLOCKSTACK_TRY:
self.pop_block().cleanupstack(self)
elif kind == JUMP_BLOCKSTACK_WITH:
self.pop_block().cleanupstack(self)
self.popvalue()
else:
assert kind == JUMP_BLOCKSTACK_EXCEPT
raise OperationError(space.w_ValueError, space.newtext(
"can't jump out of an 'except' block"))
start_block_stack = pop_simulated_stack(start_block_stack)
d.f_lineno = new_lineno
assert best_addr & 1 == 0
self.last_instr = best_addr
def get_last_lineno(self):
"Returns the line number of the instruction currently being executed."
return pytraceback.offset2lineno(self.pycode, self.last_instr)
def fget_f_builtins(self, space):
return self.get_builtin().getdict(space)
def get_f_back(self):
return ExecutionContext.getnextframe_nohidden(self)
def fget_f_back(self, space):
return self.get_f_back()
def fget_f_lasti(self, space):
return self.space.newint(self.last_instr)
def fget_f_trace(self, space):
return self.get_w_f_trace()
def fset_f_trace(self, space, w_trace):
if space.is_w(w_trace, space.w_None):
self.getorcreatedebug().w_f_trace = None
else:
d = self.getorcreatedebug()
d.w_f_trace = w_trace
d.f_lineno = self.get_last_lineno()
def fdel_f_trace(self, space):
self.getorcreatedebug().w_f_trace = None
def fget_f_trace_lines(self, space):
return space.newbool(self.get_f_trace_lines())
def fset_f_trace_lines(self, space, w_trace):
self.getorcreatedebug().f_trace_lines = space.is_true(w_trace)
def fget_f_trace_opcodes(self, space):
return space.newbool(self.get_f_trace_opcodes())
def fset_f_trace_opcodes(self, space, w_trace):
self.getorcreatedebug().f_trace_opcodes = space.is_true(w_trace)
def get_generator(self):
if self.space.config.translation.rweakref:
return self.f_generator_wref()
else:
return self.f_generator_nowref
def descr_clear(self, space):
"""F.clear(): clear most references held by the frame"""
# Clears a random subset of the attributes: the local variables
# and the w_locals. Note that CPython doesn't clear f_locals
# (which can create leaks) but it's hard to notice because
# the next Python-level read of 'frame.f_locals' will clear it.
if not self.frame_finished_execution:
if not self._is_generator_or_coroutine():
raise oefmt(space.w_RuntimeError,
"cannot clear an executing frame")
gen = self.get_generator()
if gen is not None:
if gen.running:
raise oefmt(space.w_RuntimeError,
"cannot clear an executing frame")
# xxx CPython raises the RuntimeWarning "coroutine was never
# awaited" in this case too. Does it make any sense?
gen.descr_close()
debug = self.getdebug()
if debug is not None:
debug.w_f_trace = None
if debug.w_locals is not None:
debug.w_locals = space.newdict()
# clear the locals, including the cell/free vars, and the stack
for i in range(len(self.locals_cells_stack_w)):
w_oldvalue = self.locals_cells_stack_w[i]
if isinstance(w_oldvalue, Cell):
# we can't mutate w_oldvalue here, because that could still be
# shared by an inner/outer function
w_newvalue = Cell(
None, w_oldvalue.family)
else:
w_newvalue = None
self.locals_cells_stack_w[i] = w_newvalue
self.valuestackdepth = 0
self.lastblock = None # the FrameBlock chained list
def _convert_unexpected_exception(self, e):
from pypy.interpreter import error
operr = error.get_converted_unexpected_exception(self.space, e)
pytraceback.record_application_traceback(
self.space, operr, self, self.last_instr)
raise operr
def descr_repr(self, space):
code = self.pycode
moreinfo = ", file '%s', line %s, code %s" % (
code.co_filename, self.get_last_lineno(), code.co_name)
return self.getrepr(space, "frame", moreinfo)
# ____________________________________________________________
JUMP_BLOCKSTACK_WITH = 'w'
JUMP_BLOCKSTACK_LOOP = 'l'
JUMP_BLOCKSTACK_TRY = 't'
JUMP_BLOCKSTACK_EXCEPT = 'e'
def marklines(code):
res = [-1] * (len(code.co_code) // 2)
lnotab = code.co_lnotab
addr = 0
line = code.co_firstlineno
res[0] = line
for offset in xrange(0, len(lnotab), 2):
addr += ord(lnotab[offset])
line_offset = ord(lnotab[offset + 1])
if line_offset >= 0x80:
line_offset -= 0x100
line += line_offset
res[addr // 2] = line
return res
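# Illustrative note: co_lnotab stores (bytecode-offset delta, signed line delta)
# byte pairs; marklines() above walks those deltas so that res[i] holds the
# source line starting at instruction index i (or -1 if no line starts there).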
def first_line_not_before(lines, line):
result = sys.maxint
for index, l in enumerate(lines):
if l < result and l >= line:
result = l
if result == sys.maxint:
return -1
return result
def markblocks(code):
blocks = [None] * ((len(code.co_code) // 2) + 1)
blocks[0] = ''
todo = True
while todo:
todo = False
for i in range(0, len(code.co_code), 2):
block_stack = blocks[i // 2]
if block_stack is None:
continue
opcode = ord(code.co_code[i])
if (
opcode == JUMP_IF_FALSE_OR_POP or
opcode == JUMP_IF_TRUE_OR_POP or
opcode == POP_JUMP_IF_FALSE or
opcode == POP_JUMP_IF_TRUE or
opcode == JUMP_IF_NOT_EXC_MATCH
):
j = _get_arg(code.co_code, i)
if blocks[j // 2] is None and j < i:
todo = True
assert blocks[j // 2] is None or blocks[j // 2] == block_stack
blocks[j // 2] = block_stack
blocks[i // 2 + 1] = block_stack
elif opcode == JUMP_ABSOLUTE:
j = _get_arg(code.co_code, i)
if blocks[j // 2] is None and j < i:
todo = True
assert blocks[j // 2] is None or blocks[j // 2] == block_stack
blocks[j // 2] = block_stack
elif (
opcode == SETUP_FINALLY or
opcode == SETUP_EXCEPT
):
j = _get_arg(code.co_code, i) + i + 2
stack = block_stack + JUMP_BLOCKSTACK_EXCEPT
assert blocks[j // 2] is None or blocks[j // 2] == stack
blocks[j // 2] = stack
block_stack = block_stack + JUMP_BLOCKSTACK_TRY
blocks[i // 2 + 1] = block_stack
elif (
opcode == SETUP_WITH or
opcode == SETUP_ASYNC_WITH
):
j = _get_arg(code.co_code, i) + i + 2
stack = block_stack + JUMP_BLOCKSTACK_EXCEPT
assert blocks[j // 2] is None or blocks[j // 2] == stack
blocks[j // 2] = stack
block_stack = block_stack + JUMP_BLOCKSTACK_WITH
blocks[i // 2 + 1] = block_stack
elif opcode == JUMP_FORWARD:
j = _get_arg(code.co_code, i) + i + 2
assert blocks[j // 2] is None or blocks[j // 2] == block_stack
blocks[j // 2] = block_stack
elif (
opcode == GET_ITER or
opcode == GET_AITER
):
block_stack = block_stack + JUMP_BLOCKSTACK_LOOP
blocks[i // 2 + 1] = block_stack
elif opcode == FOR_ITER:
blocks[i // 2 + 1] = block_stack
block_stack = pop_simulated_stack(block_stack)
j = _get_arg(code.co_code, i) + i + 2
assert blocks[j // 2] is None or blocks[j // 2] == block_stack
blocks[j // 2] = block_stack
elif (
opcode == POP_BLOCK or
opcode == POP_EXCEPT
):
block_stack = pop_simulated_stack(block_stack)
blocks[i // 2 + 1] = block_stack
elif opcode == END_ASYNC_FOR:
block_stack = pop_simulated_stack(block_stack, 2)
blocks[i // 2 + 1] = block_stack
elif (
opcode == RETURN_VALUE or
opcode == RAISE_VARARGS or
opcode == RERAISE
):
pass
else:
blocks[i // 2 + 1] = block_stack
return blocks
def pop_simulated_stack(stack, offset=1):
end = len(stack) - offset
assert end >= 0
return stack[:end]
def _get_arg(code, addr):
# read backwards for EXTENDED_ARG
oparg = ord(code[addr + 1])
if addr >= 2 and ord(code[addr - 2]) == EXTENDED_ARG:
oparg |= ord(code[addr - 1]) << 8
if addr >= 4 and ord(code[addr - 4]) == EXTENDED_ARG:
raise ValueError("fix me please!")
return oparg
def compatible_block_stack(from_stack, to_stack):
if to_stack is None:
return False
return from_stack[:len(to_stack)] == to_stack
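# Illustrative note (added for clarity): the simulated block stacks are plain
# strings built from the JUMP_BLOCKSTACK_* markers above, so jumping from a
# position whose stack is 'lt' (loop + try) to one whose stack is 'l' is
# allowed, while the reverse jump would enter the try body and is rejected:
#
#     compatible_block_stack('lt', 'l')    # True:  'lt'[:1] == 'l'
#     compatible_block_stack('l', 'lt')    # False: 'l'[:2] != 'lt'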
def explain_incompatible_block_stack(to_stack):
kind = to_stack[-1]
if kind == JUMP_BLOCKSTACK_LOOP:
return "can't jump into the body of a for loop"
elif kind == JUMP_BLOCKSTACK_TRY:
return "can't jump into the body of a try statement"
elif kind == JUMP_BLOCKSTACK_WITH:
return "can't jump into the body of a with statement"
else:
assert kind == JUMP_BLOCKSTACK_EXCEPT
return "can't jump into an 'except' block as there's no exception"
# ____________________________________________________________
def get_block_class(opname):
# select the appropriate kind of block
from pypy.interpreter.pyopcode import block_classes
return block_classes[opname]
def unpickle_block(space, w_tup):
w_opname, w_handlerposition, w_valuestackdepth = space.unpackiterable(w_tup)
opname = space.text_w(w_opname)
handlerposition = space.int_w(w_handlerposition)
valuestackdepth = space.int_w(w_valuestackdepth)
assert valuestackdepth >= 0
assert handlerposition >= 0
blk = instantiate(get_block_class(opname))
blk.handlerposition = handlerposition
blk.valuestackdepth = valuestackdepth
return blk
| 37.722382
| 132
| 0.607146
|
d9602008e99223a57dc875cc5a9c165e2deec6df
| 2,291
|
py
|
Python
|
code/data_analysis.py
|
pillowsofwind/bucket_tree_blockchain
|
d1472558e09cc8728c3f47a05e44b241b3361ce3
|
[
"MIT"
] | null | null | null |
code/data_analysis.py
|
pillowsofwind/bucket_tree_blockchain
|
d1472558e09cc8728c3f47a05e44b241b3361ce3
|
[
"MIT"
] | null | null | null |
code/data_analysis.py
|
pillowsofwind/bucket_tree_blockchain
|
d1472558e09cc8728c3f47a05e44b241b3361ce3
|
[
"MIT"
] | null | null | null |
import csv
# we investigate 20 continuous blocks from ETH
BLOCK_NUM = 20
def read_file(filenames):
dict = {}
tx_num = 0
for filename in filenames:
with open(filename, 'r') as file:
reader = csv.reader(file)
for line in reader:
if line == []:
continue
if line[0] != 'tx':
continue
tx_num += 1
                # both accounts need to be updated after the transaction
from_whom = line[10]
to_whom = line[11]
if from_whom in dict:
dict[from_whom] += 1
else:
dict[from_whom] = 1
if to_whom in dict:
dict[to_whom] += 1
else:
dict[to_whom] = 1
return dict, tx_num
def gen_stats(data, num):
    data_sorted = sorted(data.items(), key=lambda x: x[1], reverse=True)
    # traverse sorted transaction data
    # note: read_file() counts every transaction twice (once for the sender and
    # once for the receiver), so 50*cur_num/num already yields a percentage of
    # the total transactions
    cur_num = 0
    cur_accounts = 0
    print_1 = False
    print_5 = False
    print_20 = False
    for account in data_sorted:
        cur_num += account[1]
        cur_accounts += 1
        if cur_accounts >= len(data)*0.01 and print_1 == False:
            print('1% most used accounts account for ' +
                  str(50*cur_num/num)+'% of total transactions.')
            print_1 = True
        elif cur_accounts >= len(data)*0.05 and print_5 == False:
            print('5% most used accounts account for ' +
                  str(50*cur_num/num)+'% of total transactions.')
            print_5 = True
        elif cur_accounts >= len(data)*0.2 and print_20 == False:
            print('20% most used accounts account for ' +
                  str(50*cur_num/num)+'% of total transactions.')
            print_20 = True
            break
if __name__ == "__main__":
print('%d blocks in total from Ethereum:' % BLOCK_NUM)
# read blocks from dataset
filename = []
for i in range(BLOCK_NUM):
filename.append('../data/data/txs%d.csv' % (i+1))
tx_data, total_txs = read_file(filename)
    print(str(len(tx_data)) + ' accounts in total, ' +
          str(total_txs) + ' transactions.\n')
    # get account usage stats
gen_stats(tx_data, total_txs)
| 32.267606
| 75
| 0.532519
|
3c13bf7ee2f149fd8de535f767a0a492d3f2fdb8
| 885
|
py
|
Python
|
setup.py
|
LucaButera/pytorch-grad-cam
|
582913a34264a45b581d23d13d0b42351ffef3a4
|
[
"MIT"
] | 1
|
2021-04-16T03:05:24.000Z
|
2021-04-16T03:05:24.000Z
|
setup.py
|
Spicybird/pytorch-grad-cam
|
977556ee2ceda7487b3fe8c27e62ec26040b960b
|
[
"MIT"
] | null | null | null |
setup.py
|
Spicybird/pytorch-grad-cam
|
977556ee2ceda7487b3fe8c27e62ec26040b960b
|
[
"MIT"
] | null | null | null |
import setuptools
with open('README.md', mode='r', encoding='utf-8') as fh:
long_description = fh.read()
setuptools.setup(
name='grad-cam',
version='1.1.0',
author='Jacob Gildenblat',
author_email='jacob.gildenblat@gmail.com',
description='Many Class Activation Map methods implemented in Pytorch. Including Grad-CAM, Grad-CAM++, Score-CAM, Ablation-CAM and XGrad-CAM',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/jacobgil/pytorch-grad-cam',
project_urls={
'Bug Tracker': 'https://github.com/jacobgil/pytorch-grad-cam/issues',
},
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
packages=setuptools.find_packages(),
python_requires='>=3.6',
)
| 35.4
| 146
| 0.674576
|
2a1fa6a4b1b630d13b7df45a006b56e6dacca449
| 15,768
|
py
|
Python
|
kedro/framework/cli/starters.py
|
daniel-falk/kedro
|
19187199339ddc4a757aaaa328f319ec4c1e452a
|
[
"Apache-2.0"
] | 2,047
|
2022-01-10T15:22:12.000Z
|
2022-03-31T13:38:56.000Z
|
kedro/framework/cli/starters.py
|
daniel-falk/kedro
|
19187199339ddc4a757aaaa328f319ec4c1e452a
|
[
"Apache-2.0"
] | 170
|
2022-01-10T12:44:31.000Z
|
2022-03-31T17:01:24.000Z
|
kedro/framework/cli/starters.py
|
daniel-falk/kedro
|
19187199339ddc4a757aaaa328f319ec4c1e452a
|
[
"Apache-2.0"
] | 112
|
2022-01-10T19:15:24.000Z
|
2022-03-30T11:20:52.000Z
|
"""kedro is a CLI for managing Kedro projects.
This module implements commands available from the kedro CLI for creating
projects.
"""
import os
import re
import shutil
import stat
import tempfile
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import click
import yaml
import kedro
from kedro import __version__ as version
from kedro.framework.cli.utils import (
CONTEXT_SETTINGS,
KedroCliError,
_clean_pycache,
_filter_deprecation_warnings,
command_with_verbosity,
)
KEDRO_PATH = Path(kedro.__file__).parent
TEMPLATE_PATH = KEDRO_PATH / "templates" / "project"
_STARTER_ALIASES = {
"astro-airflow-iris",
"standalone-datacatalog",
"pandas-iris",
"pyspark",
"pyspark-iris",
"spaceflights",
}
_STARTERS_REPO = "git+https://github.com/kedro-org/kedro-starters.git"
CONFIG_ARG_HELP = """Non-interactive mode, using a configuration yaml file. This file
must supply the keys required by the template's prompts.yml. When not using a starter,
these are `project_name`, `repo_name` and `python_package`."""
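# A minimal example of such a configuration file (illustrative only; the keys
# match the help text above, and ``output_dir`` is the optional key handled by
# ``_make_cookiecutter_args``/``_validate_config_file`` below):
#
#     output_dir: ~/projects
#     project_name: Get Started
#     repo_name: get-started
#     python_package: get_started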
STARTER_ARG_HELP = """Specify the starter template to use when creating the project.
This can be the path to a local directory, a URL to a remote VCS repository supported
by `cookiecutter` or one of the aliases listed in ``kedro starter list``.
"""
CHECKOUT_ARG_HELP = (
"An optional tag, branch or commit to checkout in the starter repository."
)
DIRECTORY_ARG_HELP = (
"An optional directory inside the repository where the starter resides."
)
# pylint: disable=unused-argument
def _remove_readonly(func: Callable, path: Path, excinfo: Tuple): # pragma: no cover
"""Remove readonly files on Windows
See: https://docs.python.org/3/library/shutil.html?highlight=shutil#rmtree-example
"""
os.chmod(path, stat.S_IWRITE)
func(path)
# pylint: disable=missing-function-docstring
@click.group(context_settings=CONTEXT_SETTINGS, name="Kedro")
def create_cli(): # pragma: no cover
pass
@command_with_verbosity(create_cli, short_help="Create a new kedro project.")
@click.option(
"--config",
"-c",
"config_path",
type=click.Path(exists=True),
help=CONFIG_ARG_HELP,
)
@click.option("--starter", "-s", "starter_name", help=STARTER_ARG_HELP)
@click.option("--checkout", help=CHECKOUT_ARG_HELP)
@click.option("--directory", help=DIRECTORY_ARG_HELP)
def new(
config_path, starter_name, checkout, directory, **kwargs
): # pylint: disable=unused-argument
"""Create a new kedro project."""
if checkout and not starter_name:
raise KedroCliError("Cannot use the --checkout flag without a --starter value.")
if directory and not starter_name:
raise KedroCliError(
"Cannot use the --directory flag without a --starter value."
)
# The `astro-iris` was renamed to `astro-airflow-iris`, but old (external) documentation
# and tutorials still refer to `astro-iris`. The below line checks if a user has entered old
# `astro-iris` as the starter name and changes it to `astro-airflow-iris`.
starter_name = (
"astro-airflow-iris" if starter_name == "astro-iris" else starter_name
)
if starter_name in _STARTER_ALIASES:
if directory:
raise KedroCliError(
"Cannot use the --directory flag with a --starter alias."
)
template_path = _STARTERS_REPO
directory = starter_name
checkout = checkout or version
elif starter_name is not None:
template_path = starter_name
checkout = checkout or version
else:
template_path = str(TEMPLATE_PATH)
# Get prompts.yml to find what information the user needs to supply as config.
tmpdir = tempfile.mkdtemp()
cookiecutter_dir = _get_cookiecutter_dir(template_path, checkout, directory, tmpdir)
prompts_required = _get_prompts_required(cookiecutter_dir)
# We only need to make cookiecutter_context if interactive prompts are needed.
if not config_path:
cookiecutter_context = _make_cookiecutter_context_for_prompts(cookiecutter_dir)
# Cleanup the tmpdir after it's no longer required.
# Ideally we would want to be able to use tempfile.TemporaryDirectory() context manager
# but it causes an issue with readonly files on windows
# see: https://bugs.python.org/issue26660.
# So onerror, we will attempt to clear the readonly bits and re-attempt the cleanup
shutil.rmtree(tmpdir, onerror=_remove_readonly)
# Obtain config, either from a file or from interactive user prompts.
if not prompts_required:
config = {}
if config_path:
config = _fetch_config_from_file(config_path)
elif config_path:
config = _fetch_config_from_file(config_path)
_validate_config_file(config, prompts_required)
else:
config = _fetch_config_from_user_prompts(prompts_required, cookiecutter_context)
cookiecutter_args = _make_cookiecutter_args(config, checkout, directory)
_create_project(template_path, cookiecutter_args)
@create_cli.group()
def starter():
"""Commands for working with project starters."""
@starter.command("list")
def list_starters():
"""List all official project starters available."""
repo_url = _STARTERS_REPO.replace("git+", "").replace(".git", "/tree/main/{alias}")
output = [
{alias: repo_url.format(alias=alias)} for alias in sorted(_STARTER_ALIASES)
]
click.echo(yaml.safe_dump(output))
def _fetch_config_from_file(config_path: str) -> Dict[str, str]:
"""Obtains configuration for a new kedro project non-interactively from a file.
Args:
config_path: The path of the config.yml which should contain the data required
by ``prompts.yml``.
Returns:
Configuration for starting a new project. This is passed as ``extra_context``
to cookiecutter and will overwrite the cookiecutter.json defaults.
Raises:
KedroCliError: If the file cannot be parsed.
"""
try:
with open(config_path, encoding="utf-8") as config_file:
config = yaml.safe_load(config_file)
if KedroCliError.VERBOSE_ERROR:
click.echo(config_path + ":")
click.echo(yaml.dump(config, default_flow_style=False))
except Exception as exc:
raise KedroCliError(
f"Failed to generate project: could not load config at {config_path}."
) from exc
return config
def _make_cookiecutter_args(
config: Dict[str, str],
checkout: str,
directory: str,
) -> Dict[str, Any]:
"""Creates a dictionary of arguments to pass to cookiecutter.
Args:
config: Configuration for starting a new project. This is passed as
``extra_context`` to cookiecutter and will overwrite the cookiecutter.json
defaults.
checkout: The tag, branch or commit in the starter repository to checkout.
Maps directly to cookiecutter's ``checkout`` argument. Relevant only when
using a starter.
directory: The directory of a specific starter inside a repository containing
multiple starters. Maps directly to cookiecutter's ``directory`` argument.
Relevant only when using a starter.
https://cookiecutter.readthedocs.io/en/1.7.2/advanced/directories.html
Returns:
Arguments to pass to cookiecutter.
"""
config.setdefault("kedro_version", version)
cookiecutter_args = {
"output_dir": config.get("output_dir", str(Path.cwd().resolve())),
"no_input": True,
"extra_context": config,
}
if checkout:
cookiecutter_args["checkout"] = checkout
if directory:
cookiecutter_args["directory"] = directory
return cookiecutter_args
def _create_project(template_path: str, cookiecutter_args: Dict[str, str]):
"""Creates a new kedro project using cookiecutter.
Args:
template_path: The path to the cookiecutter template to create the project.
It could either be a local directory or a remote VCS repository
supported by cookiecutter. For more details, please see:
https://cookiecutter.readthedocs.io/en/latest/usage.html#generate-your-project
cookiecutter_args: Arguments to pass to cookiecutter.
Raises:
KedroCliError: If it fails to generate a project.
"""
with _filter_deprecation_warnings():
# pylint: disable=import-outside-toplevel
from cookiecutter.main import cookiecutter # for performance reasons
try:
result_path = cookiecutter(template=template_path, **cookiecutter_args)
except Exception as exc:
raise KedroCliError(
"Failed to generate project when running cookiecutter."
) from exc
_clean_pycache(Path(result_path))
click.secho(
f"\nChange directory to the project generated in {result_path}",
fg="green",
)
click.secho(
"\nA best-practice setup includes initialising git and creating "
"a virtual environment before running ``pip install -r src/requirements.txt`` to install "
"project-specific dependencies. Refer to the Kedro documentation: "
"https://kedro.readthedocs.io/"
)
def _get_cookiecutter_dir(
template_path: str, checkout: str, directory: str, tmpdir: str
) -> Path:
"""Gives a path to the cookiecutter directory. If template_path is a repo then
clones it to ``tmpdir``; if template_path is a file path then directly uses that
path without copying anything.
"""
# pylint: disable=import-outside-toplevel
from cookiecutter.exceptions import RepositoryCloneFailed, RepositoryNotFound
from cookiecutter.repository import determine_repo_dir # for performance reasons
try:
cookiecutter_dir, _ = determine_repo_dir(
template=template_path,
abbreviations={},
clone_to_dir=Path(tmpdir).resolve(),
checkout=checkout,
no_input=True,
directory=directory,
)
except (RepositoryNotFound, RepositoryCloneFailed) as exc:
error_message = f"Kedro project template not found at {template_path}."
if checkout:
error_message += (
f" Specified tag {checkout}. The following tags are available: "
+ ", ".join(_get_available_tags(template_path))
)
official_starters = sorted(_STARTER_ALIASES)
raise KedroCliError(
f"{error_message}. The aliases for the official Kedro starters are: \n"
f"{yaml.safe_dump(official_starters)}"
) from exc
return Path(cookiecutter_dir)
def _get_prompts_required(cookiecutter_dir: Path) -> Optional[Dict[str, Any]]:
"""Finds the information a user must supply according to prompts.yml."""
prompts_yml = cookiecutter_dir / "prompts.yml"
if not prompts_yml.is_file():
return None
try:
with prompts_yml.open("r") as prompts_file:
return yaml.safe_load(prompts_file)
except Exception as exc:
raise KedroCliError(
"Failed to generate project: could not load prompts.yml."
) from exc
def _fetch_config_from_user_prompts(
prompts: Dict[str, Any], cookiecutter_context: OrderedDict
) -> Dict[str, str]:
"""Interactively obtains information from user prompts.
Args:
prompts: Prompts from prompts.yml.
cookiecutter_context: Cookiecutter context generated from cookiecutter.json.
Returns:
Configuration for starting a new project. This is passed as ``extra_context``
to cookiecutter and will overwrite the cookiecutter.json defaults.
"""
# pylint: disable=import-outside-toplevel
from cookiecutter.environment import StrictEnvironment
from cookiecutter.prompt import read_user_variable, render_variable
config: Dict[str, str] = {}
for variable_name, prompt_dict in prompts.items():
prompt = _Prompt(**prompt_dict)
# render the variable on the command line
cookiecutter_variable = render_variable(
env=StrictEnvironment(context=cookiecutter_context),
raw=cookiecutter_context[variable_name],
cookiecutter_dict=config,
)
# read the user's input for the variable
user_input = read_user_variable(str(prompt), cookiecutter_variable)
if user_input:
prompt.validate(user_input)
config[variable_name] = user_input
return config
def _make_cookiecutter_context_for_prompts(cookiecutter_dir: Path):
# pylint: disable=import-outside-toplevel
from cookiecutter.generate import generate_context
cookiecutter_context = generate_context(cookiecutter_dir / "cookiecutter.json")
return cookiecutter_context.get("cookiecutter", {})
class _Prompt:
"""Represent a single CLI prompt for `kedro new`"""
def __init__(self, *args, **kwargs) -> None: # pylint: disable=unused-argument
try:
self.title = kwargs["title"]
except KeyError as exc:
raise KedroCliError(
"Each prompt must have a title field to be valid."
) from exc
self.text = kwargs.get("text", "")
self.regexp = kwargs.get("regex_validator", None)
self.error_message = kwargs.get("error_message", "")
def __str__(self) -> str:
title = self.title.strip().title()
title = click.style(title + "\n" + "=" * len(title), bold=True)
prompt_lines = [title] + [self.text]
prompt_text = "\n".join(str(line).strip() for line in prompt_lines)
return f"\n{prompt_text}\n"
def validate(self, user_input: str) -> None:
"""Validate a given prompt value against the regex validator"""
if self.regexp and not re.match(self.regexp, user_input):
click.secho(f"`{user_input}` is an invalid value.", fg="red", err=True)
click.secho(self.error_message, fg="red", err=True)
raise ValueError(user_input)
def _get_available_tags(template_path: str) -> List:
# Not at top level so that kedro CLI works without a working git executable.
# pylint: disable=import-outside-toplevel
import git
try:
tags = git.cmd.Git().ls_remote("--tags", template_path.replace("git+", ""))
unique_tags = {
tag.split("/")[-1].replace("^{}", "") for tag in tags.split("\n")
}
# Remove git ref "^{}" and duplicates. For example,
# tags: ['/tags/version', '/tags/version^{}']
# unique_tags: {'version'}
except git.GitCommandError:
return []
return sorted(unique_tags)
def _validate_config_file(config: Dict[str, str], prompts: Dict[str, Any]):
"""Checks that the configuration file contains all needed variables.
Args:
config: The config as a dictionary.
prompts: Prompts from prompts.yml.
Raises:
KedroCliError: If the config file is empty or does not contain all the keys
required in prompts, or if the output_dir specified does not exist.
"""
if config is None:
raise KedroCliError("Config file is empty.")
missing_keys = set(prompts) - set(config)
if missing_keys:
click.echo(yaml.dump(config, default_flow_style=False))
raise KedroCliError(f"{', '.join(missing_keys)} not found in config file.")
if "output_dir" in config and not Path(config["output_dir"]).exists():
raise KedroCliError(
f"`{config['output_dir']}` is not a valid output directory. "
"It must be a relative or absolute path to an existing directory."
)
| 36.331797
| 98
| 0.680873
|
5c6a3e4f0cb119ba897ddce1873ffcdf6aec4a5d
| 4,815
|
py
|
Python
|
src/utils.py
|
advancedbioimagingcenter/opticalaberrations
|
80e642925bdc907d135717499e15d3217b5c6a0a
|
[
"BSD-2-Clause"
] | null | null | null |
src/utils.py
|
advancedbioimagingcenter/opticalaberrations
|
80e642925bdc907d135717499e15d3217b5c6a0a
|
[
"BSD-2-Clause"
] | 3
|
2021-11-12T17:13:45.000Z
|
2021-11-23T14:07:50.000Z
|
src/utils.py
|
advancedbioimagingcenter/opticalaberrations
|
80e642925bdc907d135717499e15d3217b5c6a0a
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
import multiprocessing as mp
import sys
from typing import Any, List
import io
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
import vis
from preprocessing import center_crop
from synthetic import SyntheticPSF
from wavefront import Wavefront
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
def multiprocess(func: Any, jobs: List, desc: str = 'Processing', cores: int = -1):
""" Multiprocess a generic function
Args:
func: a python function
jobs: a list of jobs for function `func`
desc: description for the progress bar
cores: number of cores to use
Returns:
an array of outputs for every function call
"""
jobs = list(jobs)
if cores == 1:
logs = []
for j in tqdm(jobs, total=len(jobs), desc=desc):
logs.append(func(j))
elif cores == -1:
with mp.Pool(mp.cpu_count()) as p:
logs = list(tqdm(p.imap(func, jobs), total=len(jobs), desc=desc))
elif cores > 1:
with mp.Pool(cores) as p:
logs = list(tqdm(p.imap(func, jobs), total=len(jobs), desc=desc))
else:
        logging.error('Cores must be a positive integer or -1')
return False
return logs
def mae(y: np.array, p: np.array, axis=0) -> np.array:
error = np.abs(y - p)
return np.mean(error[np.isfinite(error)], axis=axis)
def mse(y: np.array, p: np.array, axis=0) -> np.array:
error = (y - p) ** 2
return np.mean(error[np.isfinite(error)], axis=axis)
def rmse(y: np.array, p: np.array, axis=0) -> np.array:
    # root-mean-square error: take the square root after averaging the squared error
    error = (y - p) ** 2
    return np.sqrt(np.mean(error[np.isfinite(error)], axis=axis))
def mape(y: np.array, p: np.array, axis=0) -> np.array:
error = np.abs(y - p) / np.abs(y)
return 100 * np.mean(error[np.isfinite(error)], axis=axis)
def peak_aberration(w) -> float:
w = Wavefront(w).wave(100)
mn = np.nanquantile(w, .05)
mx = np.nanquantile(w, .95)
return abs(mx-mn)
def peak2peak(y: np.array) -> np.array:
return np.array(multiprocess(peak_aberration, list(y), desc='Compute peak2peak aberrations'))
def peak2peak_residuals(y: np.array, p: np.array) -> np.array:
error = np.abs(peak2peak(y) - peak2peak(p))
return error
def microns2waves(phi, wavelength):
return phi * (2 * np.pi / wavelength)
def waves2microns(phi, wavelength):
return phi / (2 * np.pi / wavelength)
def compute_signal_lost(phi, gen, res):
    hashtbl = {}
    w = Wavefront(phi, order='ansi')
    psf = gen.single_psf(w, zplanes=0, normed=True, noise=False)
    abr = 0 if np.count_nonzero(phi) == 0 else round(peak_aberration(phi))
    hashtbl[abr] = {}  # initialize the inner dict before indexing into it
    for k, r in enumerate(res):
        window = center_crop(psf, crop_shape=tuple(3*[r]))
        hashtbl[abr][r] = np.sum(window)
    return hashtbl
def compute_error(y_true: pd.DataFrame, y_pred: pd.DataFrame, axis=None) -> pd.DataFrame:
res = np.abs(y_true - y_pred).mean(axis=axis).to_frame('mae')
res['mae'] = mae(y_true, y_pred, axis)
res['mse'] = mse(y_true, y_pred, axis)
res['mape'] = mape(y_true, y_pred, axis)
res['rmse'] = rmse(y_true, y_pred, axis)
return res
def eval(k: tuple, psfargs: dict):
psf, y, pred, path, psnr, zplanes, maxcounts = k
if psf.ndim == 5:
psf = np.squeeze(psf, axis=0)
psf = np.squeeze(psf, axis=-1)
elif psf.ndim == 4:
psf = np.squeeze(psf, axis=-1)
diff = y - pred
y = Wavefront(y)
pred = Wavefront(pred)
diff = Wavefront(diff)
psf_gen = SyntheticPSF(**psfargs)
p_psf = psf_gen.single_psf(pred, zplanes=zplanes)
gt_psf = psf_gen.single_psf(y, zplanes=zplanes)
corrected_psf = psf_gen.single_psf(diff, zplanes=zplanes)
vis.diagnostic_assessment(
psf=psf,
gt_psf=gt_psf,
corrected_psf=corrected_psf,
predicted_psf=p_psf,
psnr=psnr,
maxcounts=maxcounts,
y=y,
pred=pred,
save_path=path,
display=False
)
def plot_to_image(figure):
"""
Converts the matplotlib plot specified by 'figure' to a PNG image and
returns it. The supplied figure is closed and inaccessible after this call.
https://www.tensorflow.org/tensorboard/image_summaries
"""
import tensorflow as tf
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
| 27.672414
| 97
| 0.638214
|
2b76589933cc1fee3f5146344b6c13dec23d4680
| 5,506
|
py
|
Python
|
uiflow/components/input.py
|
dyvenia/timesheets
|
5120acd66cbeb3ce43bff5acfee4cd6b14882962
|
[
"MIT"
] | 13
|
2021-09-27T14:53:15.000Z
|
2022-02-15T05:48:01.000Z
|
uiflow/components/input.py
|
dyvenia/timesheets
|
5120acd66cbeb3ce43bff5acfee4cd6b14882962
|
[
"MIT"
] | 45
|
2021-10-13T08:47:28.000Z
|
2022-02-15T09:16:51.000Z
|
uiflow/components/input.py
|
dyvenia/timesheets
|
5120acd66cbeb3ce43bff5acfee4cd6b14882962
|
[
"MIT"
] | null | null | null |
from typing import Any, Callable, List, Dict
from idom import html, component
# from data.common import Select
class_str = """text-primary-500 bg-transparent placeholder-secondary-400 w-full px-4 py-2.5 mt-2
text-base transition duration-500 ease-in-out transform
border-transparent focus:border-blueGray-500
focus:bg-white dark:focus:bg-secondary-400 focus:outline-none
focus:shadow-outline focus:ring-2 ring-offset-current ring-offset-2
ring-gray-400"""
inputWrapperClass = "w-full md:w-1/2 flex justify-between items-center border-input-border border-[1px] rounded-[3px] py-2 px-4 xl:max-w-[401px]"
selectClass = (
"w-full border-select-border py-3 pl-3 border-[1px] rounded-[3px] appearance-none"
)
selectWrapperClass = "block relative w-full sm:w-[48%] md:w-[121px] md:mr-2 my-4 before:content-[''] before:border-[6px] before:border-[transparent] before:border-t-appearance before:top-1/2 before:right-5 before:-translate-y-0.5 before:absolute xl:w-{width} 2xl:mr-0"
checkboxTd = "w-6 pr-4 pt-4 pb-3"
@component
def Input(
set_value: Callable,
label: str = "",
type: str = "text",
placeholder: str = "Write here the",
_class: str = class_str,
width: str = "[401px]",
md_width: str = "1/2",
):
return html.div(
{
"class": f"w-full my-4 md:w-{md_width} flex justify-between items-center bg-nav border-input-border border-[1px] rounded-[3px] py-2 px-4 xl:max-w-{width} xl:w-full"
},
html.input(
{
"type": type,
"placeholder": f"{placeholder} {label}",
"onChange": lambda event: set_value(event["target"]["value"]),
"class": _class,
}
),
)
@component
def SearchInput(input_value, set_input_value):
def handle_click(event):
set_input_value("")
return html.div(
{"class": inputWrapperClass},
html.img({"src": "../static/img/svg/search.svg"}),
html.input(
{
"type": "text",
"placeholder": "Search your timelog here",
"value": input_value,
"onChange": lambda event: set_input_value(event["target"]["value"]),
"class": "w-10/12 outline-none",
},
),
html.img(
{
"class": "cursor-pointer",
"src": "../static/img/svg/cross.svg",
"onClick": handle_click,
}
),
)
@component
def Selector(
set_value: Callable,
placeholder,
dropdown_list,
_class: str = class_str,
):
return html.select(
{
"class": _class,
"onChange": lambda event: set_value(event["target"]["value"]),
},
html.option({"value": ""}, placeholder),
dropdown_list,
)
@component
def Selector2(
set_value: Callable, data: List, width: str = "14%", md_width: str = "121px"
):
options = []
for row in data:
option = html.option({"value": row["value"]}, row["display_value"])
options.append(option)
return html.div(
{
"class": f"block relative w-full sm:w-[48%] md:w-[{md_width}] md:mr-2 my-4 before:content-[''] before:border-[6px] before:border-[transparent] before:border-t-appearance before:top-1/2 before:right-5 before:-translate-y-0.5 before:absolute xl:w-[{width}] 2xl:mr-0"
},
html.select(
{
"class": selectClass,
"onChange": lambda event: set_value(event["target"]["value"]),
},
options,
),
)
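# Hypothetical usage of Selector2 (illustrative only, not part of the original
# module): every row must provide a "value" and a "display_value" key, e.g.
#
#     Selector2(
#         set_value=set_sort_order,
#         data=[
#             {"value": "asc", "display_value": "Ascending"},
#             {"value": "desc", "display_value": "Descending"},
#         ],
#     )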
def SelectorDropdownKeyValue(rows: List[Any]):
crows = []
for row in rows:
for key in row:
value = row[key]
c = html.option({"value": f"{value}"}, key)
crows.append(c)
dropdown_list = tuple(crows)
return dropdown_list
def SelectorDropdownList(rows: List[Any]):
crows = []
for n in rows:
a = html.option({"value": f"{n}"}, n)
crows.append(a)
dropdown_list = tuple(crows)
return dropdown_list
@component
def AutoSelect(
set_value: Callable,
option: Any,
_class: str = class_str,
):
return html.select(
{
"class": _class,
"onChange": lambda event: set_value(event["target"]["value"]),
},
option,
)
@component
def SelectPerPage(set_select_per_page, per_page_list):
dropdown = [html.option({"value": el}, el) for el in per_page_list]
return html.div(
{
"class": "block w-[176px] shrink-0 relative md:mr-2 my-4 before:content-["
"] before:border-[6px] before:border-[transparent] before:border-t-appearance before:top-1/2 before:right-5 before:-translate-y-0.5 before:absolute 2xl:mr-0"
},
html.select(
{
"class": selectClass,
"onChange": lambda event: set_select_per_page(event["target"]["value"]),
},
dropdown,
),
)
@component
def Checkbox(value_checkbox, handle_change):
return html.td(
{
"class": checkboxTd,
},
html.input(
{
"class": "w-4 h-4",
"checked": value_checkbox,
"onChange": lambda event: handle_change(event),
"type": "checkbox",
}
),
)
| 30.087432
| 276
| 0.556484
|
9ff34955465a22d249ec05606dc95ed22e5d5610
| 270
|
py
|
Python
|
dist/snippets/woosmap_http_zones/woosmap_http_zones.py
|
woosmap/openapi-specification
|
7f934628a75695884db2fa29dd1d04efd1fb20de
|
[
"MIT"
] | null | null | null |
dist/snippets/woosmap_http_zones/woosmap_http_zones.py
|
woosmap/openapi-specification
|
7f934628a75695884db2fa29dd1d04efd1fb20de
|
[
"MIT"
] | 3
|
2021-12-20T16:15:13.000Z
|
2022-02-15T00:44:19.000Z
|
dist/snippets/woosmap_http_zones/woosmap_http_zones.py
|
woosmap/openapi-specification
|
7f934628a75695884db2fa29dd1d04efd1fb20de
|
[
"MIT"
] | null | null | null |
# [START woosmap_http_zones]
import requests
url = "https://api.woosmap.com/zones/ZoneA/?private_key=YOUR_PRIVATE_API_KEY"
payload={}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text)
# [END woosmap_http_zones]
| 20.769231
| 77
| 0.759259
|
8ede460bd8fcc02049fe028261dea40e63202e0a
| 1,411
|
py
|
Python
|
contents/2020_ITinerary/assets/session_1/car.py
|
EunSeong-Park/ITinerary
|
7e33613e3382f3e4b4404ad6795bc28823c7641d
|
[
"MIT"
] | 4
|
2020-03-31T01:18:43.000Z
|
2020-11-21T16:53:02.000Z
|
contents/2020_ITinerary/assets/session_1/car.py
|
EunSeong-Park/ITinerary
|
7e33613e3382f3e4b4404ad6795bc28823c7641d
|
[
"MIT"
] | null | null | null |
contents/2020_ITinerary/assets/session_1/car.py
|
EunSeong-Park/ITinerary
|
7e33613e3382f3e4b4404ad6795bc28823c7641d
|
[
"MIT"
] | null | null | null |
# skeleton
class Car:
def __init__(self, name, mileage, max_fuel):
self.name = name
self.mileage = mileage
self.max_fuel = max_fuel
self.fuel = self.max_fuel
self.dist = 0
def status(self):
''' Show the current status of the car
it should be called after brrr() and gas_statation()
<<< Template >>>
Car name: [car name]
Mileage: [mileage]km/L
Fuel: [Current fuel]L / [Max fuel]L
Distance: [Total Distance]km
if fuel < 20 %, print this:
"WARNING: remaining fuel is too low"
'''
print("Car name: " + self.name)
print("Mileage: " + str(self.mileage) + "km/L")
print("Fuel: " + str(self.fuel) + "L" + " / " + str(self.max_fuel) + "L")
print("Distance: " + str(self.dist) + "km")
def brrr(self, km):
'''
Drive [km]km. You should implement:
- distance increases as you drive
- fuel decreases as you use
- if the fuel is empty, then you cannot go more
(+ print, "EMPTY!")
'''
        for i in range(km):
            if self.fuel > 1 / self.mileage:  # enough fuel left to go 1 km
                self.fuel = self.fuel - 1 / self.mileage
                self.dist = self.dist + 1
            else:  # out of fuel, cannot go further
                print("EMPTY!")
                break
        self.status()
def gas_station(self):
self.fuel = self.max_fuel
self.status()
benz = Car("Benz", 25, 100)
benz.brrr(10000)
benz.gas_station()
benz.brrr(1000)
benz.gas_station()
| 26.12963
| 81
| 0.582566
|
f1c91d772c4cfd53993b6e840638374b17338016
| 298
|
py
|
Python
|
nlpaug/util/visual/wave.py
|
avostryakov/nlpaug
|
f75770c230fe586cf21d11ad3342c2f160560d6a
|
[
"MIT"
] | 1
|
2019-11-11T06:47:43.000Z
|
2019-11-11T06:47:43.000Z
|
nlpaug/util/visual/wave.py
|
avostryakov/nlpaug
|
f75770c230fe586cf21d11ad3342c2f160560d6a
|
[
"MIT"
] | null | null | null |
nlpaug/util/visual/wave.py
|
avostryakov/nlpaug
|
f75770c230fe586cf21d11ad3342c2f160560d6a
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import librosa.display
class VisualWave:
@staticmethod
def visual(title, audio, sample_rate):
plt.figure(figsize=(8, 4))
librosa.display.waveplot(audio, sr=sample_rate)
plt.title(title)
plt.tight_layout()
plt.show()
| 22.923077
| 55
| 0.661074
|
933f5985b53195c174ae29fcbec7fa876ce84210
| 7,480
|
py
|
Python
|
app/tests/refs/amr_dict.py
|
superphy/spfy
|
867e61b32ab00ec536378f96a63f0fb379f47c58
|
[
"Apache-2.0"
] | 2
|
2019-05-22T14:29:37.000Z
|
2020-02-13T11:30:46.000Z
|
app/tests/refs/amr_dict.py
|
superphy/backend
|
867e61b32ab00ec536378f96a63f0fb379f47c58
|
[
"Apache-2.0"
] | 88
|
2017-04-07T21:52:10.000Z
|
2018-03-10T23:12:47.000Z
|
app/tests/refs/amr_dict.py
|
superphy/backend
|
867e61b32ab00ec536378f96a63f0fb379f47c58
|
[
"Apache-2.0"
] | 2
|
2017-02-10T21:30:13.000Z
|
2017-06-05T22:30:17.000Z
|
# output from amr_to_dict.py, to be sent to beautify.py
# example is from ECI-2866_lcl.fasta_rgi.tsv_rgi.p
amr_dict = {'Antimicrobial Resistance': {'lcl|ECI-2866|NODE_37_length_34194_cov_30.2716_ID_73': [{'START': 13647, 'STOP': 14867, 'ORIENTATION': '-', 'GENE_NAME': 'rosA', 'CUT_OFF': 'Strict'}, {'START': 11733, 'STOP': 13409, 'ORIENTATION': '-', 'GENE_NAME': 'rosB', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_33_length_43220_cov_31.1898_ID_65': [{'START': 40296, 'STOP': 40628, 'ORIENTATION': '+', 'GENE_NAME': 'emrE', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_9_length_157371_cov_34.6522_ID_17': [{'START': 73291, 'STOP': 74523, 'ORIENTATION': '-', 'GENE_NAME': 'mdtM', 'CUT_OFF': 'Strict'}, {'START': 144362, 'STOP': 145231, 'ORIENTATION': '-', 'GENE_NAME': 'robA', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_26_length_62239_cov_34.3381_ID_51': [{'START': 56339, 'STOP': 59452, 'ORIENTATION': '+', 'GENE_NAME': 'mexD', 'CUT_OFF': 'Strict'}, {'START': 55157, 'STOP': 56314, 'ORIENTATION': '+', 'GENE_NAME': 'mdtE', 'CUT_OFF': 'Perfect'}, {'START': 60911, 'STOP': 61735, 'ORIENTATION': '-', 'GENE_NAME': 'gadX', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_11_length_143533_cov_28.5907_ID_21': [{'START': 71625, 'STOP': 72998, 'ORIENTATION': '-', 'GENE_NAME': 'mdtK', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_15_length_124782_cov_33.4952_ID_29': [{'START': 109022, 'STOP': 109654, 'ORIENTATION': '-', 'GENE_NAME': 'CRP', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_18_length_100066_cov_32.3135_ID_35': [{'START': 4535, 'STOP': 5728, 'ORIENTATION': '+', 'GENE_NAME': 'acrE', 'CUT_OFF': 'Strict'}, {'START': 91272, 'STOP': 92366, 'ORIENTATION': '+', 'GENE_NAME': 'vanG', 'CUT_OFF': 'Strict'}, {'START': 5751, 'STOP': 8900, 'ORIENTATION': '+', 'GENE_NAME': 'mexD', 'CUT_OFF': 'Strict'}, {'START': 3746, 'STOP': 4393, 'ORIENTATION': '-', 'GENE_NAME': 'Klebsiella pneumoniae acrR mutant resulting in high level antibiotic resistance', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_23_length_82758_cov_29.125_ID_45': [{'START': 42207, 'STOP': 42620, 'ORIENTATION': '+', 'GENE_NAME': 'H-NS', 'CUT_OFF': 'Perfect'}], 'lcl|ECI-2866|NODE_14_length_130829_cov_35.5941_ID_27': [{'START': 91741, 'STOP': 92925, 'ORIENTATION': '-', 'GENE_NAME': 'emrD', 'CUT_OFF': 'Strict'}, {'START': 54060, 'STOP': 55235, 'ORIENTATION': '-', 'GENE_NAME': 'mdtL', 'CUT_OFF': 'Strict'}, {'START': 66731, 'STOP': 69145, 'ORIENTATION': '+', 'GENE_NAME': 'Staphylococcus aureus gyrB conferring resistance to aminocoumarin', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_21_length_94236_cov_29.797_ID_41': [{'START': 64279, 'STOP': 65487, 'ORIENTATION': '+', 'GENE_NAME': 'mdtH', 'CUT_OFF': 'Perfect'}, {'START': 74118, 'STOP': 75344, 'ORIENTATION': '+', 'GENE_NAME': 'mdtG', 'CUT_OFF': 'Perfect'}], 'lcl|ECI-2866|NODE_7_length_168070_cov_30.8681_ID_13': [{'START': 108519, 'STOP': 111596, 'ORIENTATION': '+', 'GENE_NAME': 'mexN', 'CUT_OFF': 'Strict'}, {'START': 114409, 'STOP': 115131, 'ORIENTATION': '+', 'GENE_NAME': 'baeR', 'CUT_OFF': 'Strict'}, {'START': 104029, 'STOP': 105396, 'ORIENTATION': '+', 'GENE_NAME': 'mdtA', 'CUT_OFF': 'Strict'}, {'START': 105396, 'STOP': 108518, 'ORIENTATION': '+', 'GENE_NAME': 'mexN', 'CUT_OFF': 'Strict'}, {'START': 111597, 'STOP': 113012, 'ORIENTATION': '+', 'GENE_NAME': 'mdtD', 'CUT_OFF': 'Strict'}, {'START': 51560, 'STOP': 52726, 'ORIENTATION': '-', 'GENE_NAME': 'PmrE', 'CUT_OFF': 'Strict'}, {'START': 113009, 'STOP': 114412, 'ORIENTATION': '+', 'GENE_NAME': 'baeS', 'CUT_OFF': 'Perfect'}], 'lcl|ECI-2866|NODE_3_length_280483_cov_33.8271_ID_5': [{'START': 8974, 'STOP': 10131, 'ORIENTATION': '-', 'GENE_NAME': 'acrE', 'CUT_OFF': 'Perfect'}, {'START': 242623, 
'STOP': 244104, 'ORIENTATION': '-', 'GENE_NAME': 'tolC', 'CUT_OFF': 'Strict'}, {'START': 5858, 'STOP': 8962, 'ORIENTATION': '-', 'GENE_NAME': 'mexD', 'CUT_OFF': 'Strict'}, {'START': 219445, 'STOP': 220266, 'ORIENTATION': '+', 'GENE_NAME': 'bacA', 'CUT_OFF': 'Strict'}, {'START': 10530, 'STOP': 11192, 'ORIENTATION': '+', 'GENE_NAME': 'acrS', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_2_length_413768_cov_33.1857_ID_3': [{'START': 58270, 'STOP': 59808, 'ORIENTATION': '+', 'GENE_NAME': 'emrB', 'CUT_OFF': 'Perfect'}, {'START': 57081, 'STOP': 58253, 'ORIENTATION': '+', 'GENE_NAME': 'emrA', 'CUT_OFF': 'Strict'}, {'START': 56424, 'STOP': 56954, 'ORIENTATION': '+', 'GENE_NAME': 'emrR', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_4_length_261081_cov_33.6293_ID_7': [{'START': 168499, 'STOP': 169530, 'ORIENTATION': '+', 'GENE_NAME': 'mdtN', 'CUT_OFF': 'Strict'}, {'START': 144200, 'STOP': 145843, 'ORIENTATION': '+', 'GENE_NAME': 'PmrC', 'CUT_OFF': 'Strict'}, {'START': 169530, 'STOP': 171581, 'ORIENTATION': '+', 'GENE_NAME': 'mdtO', 'CUT_OFF': 'Strict'}, {'START': 146509, 'STOP': 147609, 'ORIENTATION': '+', 'GENE_NAME': 'PmrB', 'CUT_OFF': 'Strict'}, {'START': 100860, 'STOP': 101993, 'ORIENTATION': '+', 'GENE_NAME': 'ACT-7', 'CUT_OFF': 'Strict'}, {'START': 171578, 'STOP': 173044, 'ORIENTATION': '+', 'GENE_NAME': 'mdtP', 'CUT_OFF': 'Strict'}, {'START': 145840, 'STOP': 146508, 'ORIENTATION': '+', 'GENE_NAME': 'PmrA', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_27_length_55805_cov_30.3115_ID_53': [{'START': 28418, 'STOP': 29650, 'ORIENTATION': '-', 'GENE_NAME': 'mdfA', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_34_length_40896_cov_32.935_ID_67': [{'START': 8444, 'STOP': 10624, 'ORIENTATION': '+', 'GENE_NAME': 'Mycobacterium tuberculosis katG mutations conferring resistance to isoniazid', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_24_length_71378_cov_29.2686_ID_47': [{'START': 19334, 'STOP': 19717, 'ORIENTATION': '-', 'GENE_NAME': 'marA', 'CUT_OFF': 'Perfect'}, {'START': 19737, 'STOP': 20171, 'ORIENTATION': '-', 'GENE_NAME': 'Escherichia coli marR mutant resulting in antibiotic resistance', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_29_length_51952_cov_29.7413_ID_57': [{'START': 11473, 'STOP': 12588, 'ORIENTATION': '-', 'GENE_NAME': 'macA', 'CUT_OFF': 'Strict'}, {'START': 9530, 'STOP': 11476, 'ORIENTATION': '-', 'GENE_NAME': 'macB', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_25_length_69574_cov_34.1736_ID_49': [{'START': 3823, 'STOP': 5196, 'ORIENTATION': '+', 'GENE_NAME': 'cpxA', 'CUT_OFF': 'Perfect'}, {'START': 3128, 'STOP': 3826, 'ORIENTATION': '+', 'GENE_NAME': 'cpxR', 'CUT_OFF': 'Perfect'}], 'lcl|ECI-2866|NODE_40_length_31124_cov_32.9565_ID_79': [{'START': 4262, 'STOP': 8290, 'ORIENTATION': '+', 'GENE_NAME': 'Mycobacterium tuberculosis rpoB mutants conferring resistance to rifampicin', 'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_1_length_488407_cov_30.2969_ID_1': [{'START': 234201, 'STOP': 235739, 'ORIENTATION': '+', 'GENE_NAME': 'emrY', 'CUT_OFF': 'Strict'}, {'START': 336987, 'STOP': 337955, 'ORIENTATION': '-', 'GENE_NAME': 'PmrF', 'CUT_OFF': 'Perfect'}, {'START': 335005, 'STOP': 336987, 'ORIENTATION': '-', 'GENE_NAME': 'arnA', 'CUT_OFF': 'Strict'}, {'START': 233038, 'STOP': 234201, 'ORIENTATION': '+', 'GENE_NAME': 'emrK', 'CUT_OFF': 'Strict'}, {'START': 135462, 'STOP': 138575, 'ORIENTATION': '-', 'GENE_NAME': 'mexD', 'CUT_OFF': 'Strict'}, {'START': 232008, 'STOP': 232622, 'ORIENTATION': '-', 'GENE_NAME': 'evgA', 'CUT_OFF': 'Perfect'}, {'START': 228410, 'STOP': 232003, 'ORIENTATION': '-', 'GENE_NAME': 'evgS', 
'CUT_OFF': 'Strict'}], 'lcl|ECI-2866|NODE_30_length_50634_cov_30.1661_ID_59': [{'START': 46306, 'STOP': 46977, 'ORIENTATION': '+', 'GENE_NAME': 'phoP', 'CUT_OFF': 'Strict'}, {'START': 46977, 'STOP': 48437, 'ORIENTATION': '+', 'GENE_NAME': 'phoQ', 'CUT_OFF': 'Strict'}]}}
| 2,493.333333
| 7,373
| 0.648262
|
b5f46fbca1153eaf8a270e74c33cf7199b38ad76
| 13,232
|
py
|
Python
|
agents/dqn.py
|
JarvisEQ/Team7
|
88056bec8748d68fd46f03ca3a164dbce301ad8e
|
[
"MIT"
] | null | null | null |
agents/dqn.py
|
JarvisEQ/Team7
|
88056bec8748d68fd46f03ca3a164dbce301ad8e
|
[
"MIT"
] | null | null | null |
agents/dqn.py
|
JarvisEQ/Team7
|
88056bec8748d68fd46f03ca3a164dbce301ad8e
|
[
"MIT"
] | null | null | null |
import os
import time
import random
from collections import namedtuple, deque
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e6) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 1e-5 # learning rate
UPDATE_EVERY = 4 # how often to update the network
# this should probably be in a constants file
NODE_CONNECTIONS = {
1: [2, 4],
2: [1, 3, 5],
3: [2, 4, 5, 6, 7],
4: [1, 3, 7],
5: [2, 3, 8, 9],
6: [3, 9],
7: [3, 4, 9, 10],
8: [5, 9, 11],
9: [5, 6, 7, 8, 10],
10: [7, 9, 11],
11: [8, 10]
}
NUM_GROUPS = 12
ENV_MAP = {
'everglades': 'Everglades-v0',
'everglades-vision': 'EvergladesVision-v0',
'everglades-stoch': 'EvergladesStochastic-v0',
'everglades-vision-stoch': 'EvergladesVisionStochastic-v0',
}
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class dqn:
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, player_num, seed):
"""
Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Q-Network
self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def get_action(self, state, eps=0.):
"""
Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
        # Convert the state into a batched tensor for the Q-network
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        # Put the local network into evaluation mode for inference
        self.qnetwork_local.eval()
        # no_grad() disables gradient tracking for this forward pass
        with torch.no_grad():
            action_values = self.qnetwork_local(state)  # Q-values for every (group, node) action
        # Switch back to training mode; the actual learning happens in learn()
        self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps: # exploit
# return np.argmax(action_values.cpu().data.numpy())
# create the empty action space return value
actions = np.zeros((7, 2))
            # argsort() orders action indices from lowest to highest Q-value;
            # flipping along axis 1 puts the best actions first (cpu() moves
            # the tensor off the GPU before converting it to numpy)
prioritized_actions = np.flip(action_values.cpu().data.numpy().argsort(), axis=1)
#print(f"prioritized actions = {prioritized_actions}\n\n")
selected_groups = []
for action in prioritized_actions[0]:
# get the group from the action
group = np.floor(action / 11.).astype(int)
# get the node from the action
node = int(action % 11) + 1
# if we haven't tried to move the group yet (we can only move a group once)
# add the group movement to the array of actions
if group not in selected_groups:
actions[len(selected_groups), 0] = group
actions[len(selected_groups), 1] = node
selected_groups.append(group)
# we can only move 7 groups
if len(selected_groups) >= 7:
break
return actions
else: # explore (choose a random option)
# return random.choice(np.arange(self.action_size))
actions = np.zeros((7, 2))
actions[:, 0] = np.random.choice(12, 7, replace=False)
actions[:, 1] = np.random.choice(11, 7, replace=False) + 1
return actions
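    # Illustrative note (added for clarity, not in the original agent): each
    # flat action index encodes a (group, node) pair as
    #     index = group * 11 + (node - 1)
    # so e.g. index 37 decodes to group 3 (37 // 11) and node 5 (37 % 11 + 1).
    # train_dqn() below re-encodes the chosen pairs the same way before
    # calling agent.step().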
def learn(self, experiences, gamma):
"""
Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
where:
s = current state
a = action
r = reward
s' = new state
                    done = whether the episode has terminated
gamma (float): discount factor
"""
print(f'learning...')
states, actions, rewards, next_states, dones = experiences
# Get max predicted Q values (for next states) from target model
# look up detach
Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
# Compute Q targets for current states
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
# Get expected Q values from local model
Q_expected = self.qnetwork_local(states).gather(1, actions)
# Compute loss
loss = F.mse_loss(Q_expected, Q_targets)
print(f'loss = {loss}')
# Minimize the loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# ------------------- update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""
Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
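    # Numerical intuition (added note; the concrete figures assume the TAU
    # defined above): with tau = 1e-3 every call nudges a target weight only
    # 0.1% of the way toward the local weight, e.g. target 0.50 and local 1.50
    # blend to 0.001 * 1.50 + 0.999 * 0.50 = 0.501.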
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""
Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
class QNetwork(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64):
"""
Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(QNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
def forward(self, state):
"""Build a network that maps state -> action values."""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
return self.fc3(x)
def watch_untrained_agent(env, agent):
    # Run one episode with the untrained agent; get_action() already returns
    # a full (7, 2) array of (group, node) moves for the turn.
    state = env.reset()
    for step in range(200):
        actions = agent.get_action(state)
        state, reward, done, info = env.step(actions)
        if done:
            break
    return
def train_dqn(env, agent, n_episodes=2000, max_t=200, eps_start=1.0, eps_end=0.01, eps_decay=0.999):
"""
Deep Q-Learning
Params
======
n_episodes (int): maximum number of training episodes
max_t (int): maximum number of timesteps per episode
eps_start (float): starting value of epsilon, for epsilon-greedy action selection
eps_end (float): minimum value of epsilon
eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
"""
scores_deque = deque(maxlen=100) # last 100 scores
eps = eps_start # initialize epsilon
for i_episode in range(1, n_episodes+1):
state = env.reset()
score = 0
for t in range(max_t):
            actions = agent.get_action(state, eps)
# print(actions)
next_state, reward, done, _ = env.step(actions)
# DQN step() can only train one action at a time, so step 7 times
for index in range(actions.shape[0]):
top_action = int(actions[index, 0] *
11 + actions[index, 1] - 1)
agent.step(state, top_action, reward, next_state, done)
state = next_state
score += reward
if done:
break
scores_deque.append(score) # save most recent score
eps = max(eps_end, eps_decay*eps) # decrease epsilon
print('Episode {}\tAverage Score: {:.4f}\tEpisode Score: {:.4f}'.format(
i_episode, np.mean(scores_deque), score))
if i_episode > 100 and np.mean(scores_deque) >= 0.8:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(
i_episode-100, np.mean(scores_deque)))
torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
break
return
def main(pub_socket=None):
server_address = os.getenv('SERVER_ADDRESS', 'server')
pub_socket = int(os.getenv('PUB_SOCKET', pub_socket))
if pub_socket is None:
raise Exception('Pub socket not set')
# print(f'Pub socket is {pub_socket}')
env_config = {
'await_connection_time': 120,
'server_address': server_address,
'pub_socket': pub_socket,
'sub_socket': '5563',
}
_env_name = os.getenv('ENV_NAME', 'everglades')
render_image = os.getenv('RENDER_IMAGE', 'false').lower() == 'true'
viewer = None
env_name = ENV_MAP[_env_name.lower()]
env = gym.make(env_name, env_config=env_config)
# DQN picks the highest value action from the available actions
# To make this feasible, each group-action combination must be an output
    agent = dqn(state_size=105, action_size=12*11, player_num=0, seed=0)  # player_num is currently unused
# watch_untrained_agent(env, agent)
train_dqn(env, agent)
if __name__ == "__main__":
main()
| 35.762162
| 127
| 0.599305
|
a145d7b252375919a282b9e138e9a18322d8a414
| 665
|
py
|
Python
|
cs433/assignments/lec4/4-7.py
|
sankalpgambhir/fall2020
|
79a055493d62c229c2a4ece4a60c7b92c6e5b3c9
|
[
"WTFPL"
] | null | null | null |
cs433/assignments/lec4/4-7.py
|
sankalpgambhir/fall2020
|
79a055493d62c229c2a4ece4a60c7b92c6e5b3c9
|
[
"WTFPL"
] | 2
|
2020-08-27T06:47:50.000Z
|
2020-08-27T07:08:22.000Z
|
cs433/assignments/lec4/4-7.py
|
sankalpgambhir/fall2020
|
79a055493d62c229c2a4ece4a60c7b92c6e5b3c9
|
[
"WTFPL"
] | 2
|
2020-08-28T06:07:20.000Z
|
2020-08-28T06:10:18.000Z
|
#Exercise4.5
from z3 import *
u = DeclareSort('U') # declaring new sort
c = Const('c', u ) # declaring a constant of the sort
f = Function('f', u , u ) # declaring a function of the sort
# declaring a predicate of the sort
P = Function('P', u , BoolSort())
phi = And(f(c) == c, P(f(c)))
solve(phi)
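# (Illustrative alternative, not part of the original exercise.) The model can
# also be inspected programmatically instead of relying on solve()'s printout:
#
#     s = Solver()
#     s.add(phi)
#     if s.check() == sat:
#         m = s.model()
#         print(m.eval(f(c)), m.eval(P(c)))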
print('c comes out to be U!Val!0, a constant of the uninterpreted sort U.')
print('The function f(x) returns c for x = c and also for x != c, implying it is a constant function.')
print('The predicate P(x) returns True for x = c and for x != c, implying it is a constant predicate that always evaluates to True.')
print('These assignments solve our phi')
| 35
| 139
| 0.658647
|
151e6a1809cc50e20d3ff13495193f6bfd02ee8d
| 5,635
|
py
|
Python
|
tensorflow_datasets/image/dsprites.py
|
manda-creator/datasets
|
040bccda79b096dc428e66e7d0a6dece7b22b8eb
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/image/dsprites.py
|
manda-creator/datasets
|
040bccda79b096dc428e66e7d0a6dece7b22b8eb
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/image/dsprites.py
|
manda-creator/datasets
|
040bccda79b096dc428e66e7d0a6dece7b22b8eb
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""dSprites dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import h5py
import numpy as np
from six import moves
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """\
@misc{dsprites17,
author = {Loic Matthey and Irina Higgins and Demis Hassabis and Alexander Lerchner},
title = {dSprites: Disentanglement testing Sprites dataset},
howpublished= {https://github.com/deepmind/dsprites-dataset/},
year = "2017",
}
"""
_URL = ("https://github.com/deepmind/dsprites-dataset/blob/master/"
"dsprites_ndarray_co1sh3sc6or40x32y32_64x64.hdf5?raw=true")
_DESCRIPTION = """\
dSprites is a dataset of 2D shapes procedurally generated from 6 ground truth
independent latent factors. These factors are *color*, *shape*, *scale*,
*rotation*, *x* and *y* positions of a sprite.
All possible combinations of these latents are present exactly once,
generating N = 737280 total images.
### Latent factor values
* Color: white
* Shape: square, ellipse, heart
* Scale: 6 values linearly spaced in [0.5, 1]
* Orientation: 40 values in [0, 2 pi]
* Position X: 32 values in [0, 1]
* Position Y: 32 values in [0, 1]
We varied one latent at a time (starting from Position Y, then Position X, etc),
and sequentially stored the images in fixed order.
Hence the order along the first dimension is fixed and allows you to map back to
the value of the latents corresponding to that image.
We chose the latents values deliberately to have the smallest step changes
while ensuring that all pixel outputs were different. No noise was added.
"""
class Dsprites(tfds.core.GeneratorBasedBuilder):
"""dSprites data set."""
VERSION = tfds.core.Version("0.1.0",
experiments={tfds.core.Experiment.S3: False})
SUPPORTED_VERSIONS = [
tfds.core.Version(
"2.0.0", "New split API (https://tensorflow.org/datasets/splits)"),
]
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"image":
tfds.features.Image(shape=(64, 64, 1)),
"label_shape":
tfds.features.ClassLabel(num_classes=3),
"label_scale":
tfds.features.ClassLabel(num_classes=6),
"label_orientation":
tfds.features.ClassLabel(num_classes=40),
"label_x_position":
tfds.features.ClassLabel(num_classes=32),
"label_y_position":
tfds.features.ClassLabel(num_classes=32),
"value_shape":
tfds.features.Tensor(shape=[], dtype=tf.float32),
"value_scale":
tfds.features.Tensor(shape=[], dtype=tf.float32),
"value_orientation":
tfds.features.Tensor(shape=[], dtype=tf.float32),
"value_x_position":
tfds.features.Tensor(shape=[], dtype=tf.float32),
"value_y_position":
tfds.features.Tensor(shape=[], dtype=tf.float32),
}),
homepage="https://github.com/deepmind/dsprites-dataset",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
filepath = dl_manager.download(_URL)
# There is no predefined train/val/test split for this dataset.
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
num_shards=1,
gen_kwargs=dict(filepath=filepath)),
]
def _generate_examples(self, filepath):
"""Generates examples for the dSprites data set.
Args:
filepath: path to the dSprites hdf5 file.
Yields:
Dictionaries with images, latent classes, and latent values.
"""
# Simultaneously iterating through the different data sets in the hdf5
# file is >100x slower and the data set is small (26.7MB). Hence, we first
# load everything into memory before yielding the samples.
image_array, class_array, values_array = _load_data(filepath)
for i, (image, classes, values) in enumerate(moves.zip(
image_array, class_array, values_array)):
record = dict(
image=np.expand_dims(image, -1),
label_shape=classes[1],
label_scale=classes[2],
label_orientation=classes[3],
label_x_position=classes[4],
label_y_position=classes[5],
value_shape=values[1],
value_scale=values[2],
value_orientation=values[3],
value_x_position=values[4],
value_y_position=values[5])
yield i, record
def _load_data(filepath):
"""Loads the images, latent classes, and latent values into Numpy arrays."""
with h5py.File(filepath, "r") as h5dataset:
image_array = np.array(h5dataset["imgs"])
class_array = np.array(h5dataset["latents"]["classes"])
values_array = np.array(h5dataset["latents"]["values"])
return image_array, class_array, values_array
| 35
| 84
| 0.673647
|
5121a4a993b5af23df9de7bf717e6bcbed9a5898
| 6,892
|
py
|
Python
|
numba/tracing.py
|
tolysz/numba
|
d7953a18dbf5ea231dc16e967ce8e9b754578ea6
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
numba/tracing.py
|
tolysz/numba
|
d7953a18dbf5ea231dc16e967ce8e9b754578ea6
|
[
"Apache-2.0",
"BSD-2-Clause"
] | 1
|
2019-02-11T13:46:30.000Z
|
2019-02-11T13:46:30.000Z
|
numba/tracing.py
|
asodeur/numba
|
d7953a18dbf5ea231dc16e967ce8e9b754578ea6
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
import logging
import sys
import types
import threading
import inspect
from functools import wraps
from itertools import chain
from . import config
class TLS(threading.local):
"""Use a subclass to properly initialize the TLS variables in all threads."""
def __init__(self):
self.tracing = False
self.indent = 0
tls = TLS()
def find_function_info(func, spec, args):
"""Return function meta-data in a tuple.
(name, type)"""
module = getattr(func, '__module__', None)
name = getattr(func, '__name__', None)
self = getattr(func, '__self__', None)
cname = None
if self:
cname = self.__name__
#cname = self.__class__.__name__
# Try to deduce the class' name even for unbound methods from their
# first argument, which we assume to be a class instance if named 'self'...
elif len(spec.args) and spec.args[0] == 'self':
cname = args[0].__class__.__name__
# ...or a class object if named 'cls'
elif len(spec.args) and spec.args[0] == 'cls':
cname = args[0].__name__
if name:
qname = []
if module and module != '__main__':
qname.append(module)
qname.append('.')
if cname:
qname.append(cname)
qname.append('.')
qname.append(name)
name = ''.join(qname)
return name, None
def chop(value):
MAX_SIZE = 320
s = repr(value)
if len(s) > MAX_SIZE:
return s[:MAX_SIZE] + '...' + s[-1]
else:
return s
def create_events(fname, spec, args, kwds):
values = dict()
if spec.defaults:
values = dict(zip(spec.args[-len(spec.defaults):],spec.defaults))
values.update(kwds)
values.update(list(zip(spec.args[:len(args)], args)))
positional = ['%s=%r'%(a, values.pop(a)) for a in spec.args]
anonymous = [str(a) for a in args[len(positional):]]
keywords = ['%s=%r'%(k, values[k]) for k in sorted(values.keys())]
params = ', '.join([f for f in chain(positional, anonymous, keywords) if f])
enter = ['>> ', tls.indent * ' ', fname, '(', params, ')']
leave = ['<< ', tls.indent * ' ', fname]
return enter, leave
def dotrace(*args, **kwds):
"""Function decorator to trace a function's entry and exit.
*args: categories in which to trace this function. Example usage:
@trace
def function(...):...
@trace('mycategory')
def function(...):...
"""
recursive = kwds.get('recursive', False)
def decorator(func):
spec = None
logger = logging.getLogger('trace')
def wrapper(*args, **kwds):
if not logger.isEnabledFor(logging.INFO) or tls.tracing:
return func(*args, **kwds)
fname, ftype = find_function_info(func, spec, args)
try:
tls.tracing = True
enter, leave = create_events(fname, spec, args, kwds)
try:
logger.info(''.join(enter))
tls.indent += 1
try:
try:
tls.tracing = False
result = func(*args, **kwds)
finally:
tls.tracing = True
except:
type, value, traceback = sys.exc_info()
leave.append(' => exception thrown\n\traise ')
mname = type.__module__
if mname != '__main__':
leave.append(mname)
leave.append('.')
leave.append(type.__name__)
if value.args:
leave.append('(')
leave.append(', '.join(chop(v) for v in value.args))
leave.append(')')
else:
leave.append('()')
raise
else:
if result is not None:
leave.append(' -> ')
leave.append(chop(result))
finally:
tls.indent -= 1
logger.info(''.join(leave))
finally:
tls.tracing = False
return result
# wrapper end
result = None
rewrap = lambda x: x
# Unwrap already wrapped functions
# (to be rewrapped again later)
if type(func) == classmethod:
rewrap = type(func)
# Note: 'func.__func__' only works in Python 3
func = func.__get__(True).__func__
elif type(func) == staticmethod:
rewrap = type(func)
# Note: 'func.__func__' only works in Python 3
func = func.__get__(True)
elif type(func) == property:
raise NotImplementedError
spec = inspect.getfullargspec(func)
return rewrap(wraps(func)(wrapper))
arg0 = len(args) and args[0] or None
# not supported yet...
if recursive:
raise NotImplementedError
if inspect.ismodule(arg0):
for n, f in inspect.getmembers(arg0, inspect.isfunction):
setattr(arg0, n, decorator(f))
for n, c in inspect.getmembers(arg0, inspect.isclass):
dotrace(c, *args, recursive=recursive)
elif inspect.isclass(arg0):
for n, f in inspect.getmembers(arg0, lambda x: (inspect.isfunction(x) or
inspect.ismethod(x))):
setattr(arg0, n, decorator(f))
if callable(arg0) or type(arg0) in (classmethod, staticmethod):
return decorator(arg0)
elif type(arg0) == property:
# properties combine up to three functions: 'get', 'set', 'del',
# so let's wrap them all.
pget, pset, pdel = None, None, None
if arg0.fget:
pget = decorator(arg0.fget)
if arg0.fset:
pset = decorator(arg0.fset)
if arg0.fdel:
pdel = decorator(arg0.fdel)
return property(pget, pset, pdel)
else:
return decorator
def notrace(*args, **kwds):
"""Just a no-op in case tracing is disabled."""
def decorator(func):
return func
arg0 = len(args) and args[0] or None
if callable(arg0) or type(arg0) in (classmethod, staticmethod):
return decorator(arg0)
else:
return decorator
def doevent(msg):
msg = ['== ', tls.indent * ' ', msg]
logger = logging.getLogger('trace')
logger.info(''.join(msg))
def noevent(msg):
pass
if config.TRACE:
logger = logging.getLogger('trace')
logger.setLevel(logging.INFO)
logger.handlers = [logging.StreamHandler()]
trace = dotrace
event = doevent
else:
trace = notrace
event = noevent
| 31.760369
| 84
| 0.528294
|
7b623eb2fa14e2829f5f55a697d890ed81a99b42
| 2,160
|
py
|
Python
|
code/tests/test_BGI_class.py
|
claudiavr/AIS
|
5a9b9db8377efbfba3e8bfc8bf126845ef6e9aea
|
[
"MIT"
] | null | null | null |
code/tests/test_BGI_class.py
|
claudiavr/AIS
|
5a9b9db8377efbfba3e8bfc8bf126845ef6e9aea
|
[
"MIT"
] | null | null | null |
code/tests/test_BGI_class.py
|
claudiavr/AIS
|
5a9b9db8377efbfba3e8bfc8bf126845ef6e9aea
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Flux Calculation class tests.
This script tests the operation of the Background Image Class.
Created on Thu Apr 22 13:44:35 2021
@author: denis
"""
from BGI import Background_Image
from CHC import Concrete_Channel_1
import pytest
import numpy as np
dic = {'em_mode': 0, 'em_gain': 1, 'binn': 1,
't_exp': 1, 'preamp': 1, 'hss': 1}
@pytest.fixture
def chc1():
return Concrete_Channel_1(ccd_temp=-70,
sparc4_acquisition_mode='phot')
@pytest.fixture
def bgi(chc1):
return Background_Image(abstract_channel_creator=chc1,
ccd_operation_mode=dic,
ccd_gain=3,
bias_level=500)
# ------------------------ Initialize the class --------------------------
def test_CHC(bgi):
var = 0
if bgi.CHC:
var = 1
assert var == 1
def test_FC(bgi):
var = 0
if bgi.FC:
var = 1
assert var == 1
def test_TSR(bgi):
var = 0
if bgi.TSR:
var = 1
assert var == 1
def test_ASR(bgi):
var = 0
if bgi.ASR:
var = 1
assert var == 1
def test_em_gain(bgi):
assert bgi.em_gain == 1
def test_bin(bgi):
assert bgi.binn == 1
def test_t_exp(bgi):
assert bgi.t_exp == 1
def test_ccd_gain(bgi):
assert bgi.ccd_gain == 3
def test_bias_level(bgi):
assert bgi.bias_level == 500
# ----------------------- Calculate sky flux -----------------------------
def test_calculate_sky_flux(bgi):
bgi._calculate_sky_flux()
assert bgi.sky_flux == 100
# ----------------------- Calculate dark current -------------------------
def test_calculate_dark_current(bgi):
bgi._calculate_dark_current()
assert round(bgi.dark_current, 7) == 5.86e-5
# -------------------------Calculate Read Noise -------------------------
def test_calculate_read_noise(bgi):
bgi._calculate_read_noise(dic)
assert bgi.read_noise == 6.67
# ----------------------- Calculate Background Image -------------------------
'''How to test this?'''
# def test_create_background_image(bgi):
# assert bgi.create_background_image() == []
| 19.285714
| 78
| 0.553704
|
5746ac81e44d496c72c48f3add10b887fab290b7
| 23
|
py
|
Python
|
examples/track/controllers/fake_cam.py
|
aquilesC/experimentor
|
1a70760912ef40f0e2aaee44ed1a1e5594fd5b45
|
[
"MIT"
] | 4
|
2020-05-15T04:07:25.000Z
|
2020-09-30T22:20:46.000Z
|
examples/track/controllers/fake_cam.py
|
aquilesC/experimentor
|
1a70760912ef40f0e2aaee44ed1a1e5594fd5b45
|
[
"MIT"
] | null | null | null |
examples/track/controllers/fake_cam.py
|
aquilesC/experimentor
|
1a70760912ef40f0e2aaee44ed1a1e5594fd5b45
|
[
"MIT"
] | null | null | null |
class FakeCam:
pass
| 11.5
| 14
| 0.695652
|
ebab431ad7189e5370c483c27ea3fe28bae956c8
| 441
|
py
|
Python
|
client/Networking/payload_configuration.py
|
punfil/Proj_PR
|
832383a4042bf234a59ef8f108e85cdd92167635
|
[
"MIT"
] | null | null | null |
client/Networking/payload_configuration.py
|
punfil/Proj_PR
|
832383a4042bf234a59ef8f108e85cdd92167635
|
[
"MIT"
] | null | null | null |
client/Networking/payload_configuration.py
|
punfil/Proj_PR
|
832383a4042bf234a59ef8f108e85cdd92167635
|
[
"MIT"
] | null | null | null |
from ctypes import *
class PayloadConfiguration(Structure):
"""
Represents the configuration that is received from the server
"""
_fields_ = [
("width", c_uint32),
("height", c_uint32),
("background_scale", c_uint32),
("player_count", c_uint32),
("player_id", c_uint32),
("tank_spawn_x", c_uint32),
("tank_spawn_y", c_uint32),
("map_number", c_uint32),
]
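# Illustrative sketch (an assumption about the wire format, not part of the original
# client code): a raw payload received from the server can be mapped onto the structure
# above with standard ctypes, provided the server sends eight unsigned 32-bit integers
# in native byte order, e.g.
#
#   config = PayloadConfiguration.from_buffer_copy(raw_bytes)  # len(raw_bytes) == 32
#   print(config.width, config.height, config.player_id)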
| 24.5
| 65
| 0.582766
|
6554f402d9f496f4aaef2ff6549dee513051644f
| 5,791
|
py
|
Python
|
lib/jrdb_devkit/detection_eval/convert_labels_to_KITTI.py
|
VisualComputingInstitute/Person_MinkUNet
|
fa39764245a022740c0a3d8c85026532fff93e74
|
[
"MIT"
] | 4
|
2021-10-15T13:40:48.000Z
|
2022-03-07T06:24:07.000Z
|
lib/jrdb_devkit/detection_eval/convert_labels_to_KITTI.py
|
VisualComputingInstitute/Person_MinkUNet
|
fa39764245a022740c0a3d8c85026532fff93e74
|
[
"MIT"
] | 2
|
2022-01-29T23:54:01.000Z
|
2022-02-14T21:00:57.000Z
|
lib/jrdb_devkit/detection_eval/convert_labels_to_KITTI.py
|
VisualComputingInstitute/Person_MinkUNet
|
fa39764245a022740c0a3d8c85026532fff93e74
|
[
"MIT"
] | 2
|
2021-10-20T13:44:24.000Z
|
2022-01-30T00:13:58.000Z
|
import argparse
import collections
import glob
import json
import os
import numpy as np
INPUT_3D_LABELS_PATH = 'labels_3d/*.json'
INPUT_2D_LABELS_PATH = 'labels_2d_stitched/*.json'
LABEL_ROOT_KEY = 'labels'
ENUM_OCCLUSION = ('Fully_visible', 'Mostly_visible', 'Severely_occluded',
'Fully_occluded')
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument('-o',
'--output_kitti_dir',
default='KITTI',
help='location of the output KITTI-like labels')
ap.add_argument('-i',
'--input_jrdb_dir',
default='test_dataset/labels',
help='location of the input jrdb labels')
return ap.parse_args()
def get_labels(input_dir):
"""Read label directory
Args:
input_dir (str): Input directory of the jrdb labels.
Returns:
dict: {(seq_name, seq_idx) -> ([labels_2d, ...], [labels_3d, ...])}
"""
def _parse_label_path(path):
"""Read label path of 2D/3D labels
Args:
path (str): Input path of the jrdb labels.
Returns:
dict: {(seq_name, seq_idx) -> [labels, ...]}
"""
seq_dicts = []
for json_f in glob.glob(os.path.join(input_dir, path)):
with open(json_f) as f:
labels = json.load(f)
seq_name = os.path.basename(os.path.splitext(json_f)[0])
seq_dicts.append({
(seq_name, os.path.splitext(file_name)[0]):
label for file_name, label in labels[LABEL_ROOT_KEY].items()})
return dict(collections.ChainMap(*seq_dicts))
# Read 2D/3D label files.
labels_2d = _parse_label_path(INPUT_2D_LABELS_PATH)
labels_3d = _parse_label_path(INPUT_3D_LABELS_PATH)
# Check if all 2D/3D sequence name/index matches.
if set(labels_2d) != set(labels_3d):
raise ValueError('Input jrdb 2D and 3D sequences mismatch')
return {f: (labels_2d[f], labels_3d[f]) for f in sorted(labels_2d)}
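# Note on the output layout (derived from the format string used in convert_jr2kitti
# below): every line written per pedestrian has 17 whitespace-separated fields,
#   type truncated occlusion num_points alpha x1 y1 x2 y2 h w l cx cy cz rotation_y score
# where type is always "Pedestrian", -1 marks attributes missing from the 2D or 3D
# label, and the trailing score is a constant 1.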
def convert_jr2kitti(labels, output_dir):
"""Write jrdb labels to output_dir in KITTI-like format of text file.
Args:
labels (dict): {(seq_name, seq_idx) ->
([labels_2d, ...], [labels_3d, ...])}
output_dir (str): Output directory of the converted label.
"""
def _label_key(label):
return label['label_id']
# Parse all sequences of the given label.
for (seq_name, seq_idx), (labels_2d, labels_3d) in labels.items():
# Join 2D/3D labels based on the given label key.
labels_2d = {_label_key(label): label for label in labels_2d}
labels_3d = {_label_key(label): label for label in labels_3d}
label_all = {
k: (labels_2d.get(k), labels_3d.get(k))
for k in set(labels_2d).union(labels_3d)
}
# Parse each pedestrian in a given sequence.
label_lines = []
for label_2d, label_3d in label_all.values():
# Sanity check.
if label_2d is not None and label_3d is not None:
assert _label_key(label_2d) == _label_key(label_3d)
assert not (label_2d is None and label_3d is None)
# Ignore all labels else than pedestrian.
if not _label_key(label_2d or label_3d).startswith('pedestrian:'):
continue
# Initialize all label attributes
rotation_y, num_points_3d, alpha, height_3d = -1, -1, -1, -1
width_3d, length_3d, centerx_3d, centery_3d = -1, -1, -1, -1
centerz_3d, x1_2d, y1_2d, x2_2d, y2_2d = -1, -1, -1, -1, -1
truncated, occlusion = -1, -1
# Fill in values extracted from 2D label.
if label_2d is not None:
x1_2d = label_2d['box'][0]
y1_2d = label_2d['box'][1]
x2_2d = label_2d['box'][0] + label_2d['box'][2]
y2_2d = label_2d['box'][1] + label_2d['box'][3]
attributes_2d = label_2d['attributes']
truncated = int(attributes_2d['truncated'].lower() == 'true')
occlusion = ENUM_OCCLUSION.index(attributes_2d['occlusion'])
# Fill in values extracted from 3D label.
if label_3d is not None:
rotation_y = (-label_3d['box']['rot_z'] if
label_3d['box']['rot_z'] < np.pi else
2 * np.pi - label_3d['box']['rot_z'])
attributes_3d = label_3d['attributes']
num_points_3d = attributes_3d['num_points']
alpha = label_3d['observation_angle']
height_3d = label_3d['box']['h']
width_3d = label_3d['box']['w']
length_3d = label_3d['box']['l']
centerx_3d = -label_3d['box']['cy']
centery_3d = -label_3d['box']['cz'] + label_3d['box']['h'] / 2
centerz_3d = label_3d['box']['cx']
# Append a line of text in a KITTI-like format.
label_lines.append(
"Pedestrian %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s 1\n" % \
(truncated, occlusion, num_points_3d, alpha, x1_2d, y1_2d, x2_2d, y2_2d, height_3d, width_3d, length_3d, centerx_3d, centery_3d, centerz_3d, rotation_y)
)
# Write label text file to the output directory.
seq_dir = os.path.join(output_dir, seq_name)
os.makedirs(seq_dir, exist_ok=True)
with open(os.path.join(seq_dir, str(seq_idx)+'.txt'), 'w') as f:
f.writelines(label_lines)
if __name__ == "__main__":
args = parse_args()
labels = get_labels(args.input_jrdb_dir)
convert_jr2kitti(labels, args.output_kitti_dir)
| 38.350993
| 173
| 0.573303
|
2801d271f6e950429c0cba3dd24afd991cedc6d8
| 1,484
|
py
|
Python
|
studies/migrations/0045_add_compensation_description.py
|
rhodricusack/lookit-api
|
6591bda6d5e408223a0428fa7da6fd86d4c01386
|
[
"MIT"
] | 9
|
2018-06-26T17:15:27.000Z
|
2021-11-21T17:19:01.000Z
|
studies/migrations/0045_add_compensation_description.py
|
rhodricusack/lookit-api
|
6591bda6d5e408223a0428fa7da6fd86d4c01386
|
[
"MIT"
] | 496
|
2018-02-19T19:18:24.000Z
|
2022-03-31T17:01:16.000Z
|
studies/migrations/0045_add_compensation_description.py
|
rhodricusack/lookit-api
|
6591bda6d5e408223a0428fa7da6fd86d4c01386
|
[
"MIT"
] | 16
|
2018-07-06T23:35:39.000Z
|
2021-11-21T17:52:58.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-04 19:45
from __future__ import unicode_literals
from django.db import migrations, models
import project.fields.datetime_aware_jsonfield
class Migration(migrations.Migration):
dependencies = [("studies", "0044_unified_ember_app")]
operations = [
migrations.AddField(
model_name="study",
name="compensation_description",
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name="consentruling",
name="action",
field=models.CharField(
choices=[
("accepted", "accepted"),
("rejected", "rejected"),
("pending", "pending"),
],
db_index=True,
max_length=100,
),
),
migrations.AlterField(
model_name="studytype",
name="configuration",
field=project.fields.datetime_aware_jsonfield.DateTimeAwareJSONField(
default={
"metadata": {
"fields": {
"last_known_player_sha": None,
"player_repo_url": "https://github.com/lookit/ember-lookit-frameplayer",
}
},
"task_module": "studies.tasks",
}
),
),
]
| 30.285714
| 100
| 0.491914
|
77e4261e95eebf25ab76f176e034b31c9ffe5a71
| 32,490
|
py
|
Python
|
src/prefect/engine/task_runner.py
|
dshahid380/prefect
|
045e8a019610411941b4f9c514dbf1a124756577
|
[
"Apache-2.0"
] | null | null | null |
src/prefect/engine/task_runner.py
|
dshahid380/prefect
|
045e8a019610411941b4f9c514dbf1a124756577
|
[
"Apache-2.0"
] | null | null | null |
src/prefect/engine/task_runner.py
|
dshahid380/prefect
|
045e8a019610411941b4f9c514dbf1a124756577
|
[
"Apache-2.0"
] | null | null | null |
import collections
import copy
import itertools
import threading
from functools import partial, wraps
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Set,
Sized,
Tuple,
Union,
)
import pendulum
import prefect
from prefect import config
from prefect.core import Edge, Task
from prefect.engine import signals
from prefect.engine.result import NoResult, Result
from prefect.engine.runner import ENDRUN, Runner, call_state_handlers
from prefect.engine.state import (
Cached,
Failed,
Mapped,
Paused,
Pending,
Resume,
Retrying,
Running,
Scheduled,
Skipped,
State,
Submitted,
Success,
TimedOut,
TriggerFailed,
)
from prefect.utilities.executors import main_thread_timeout, run_with_heartbeat
if TYPE_CHECKING:
from prefect.engine.result_handlers import ResultHandler
TaskRunnerInitializeResult = NamedTuple(
"TaskRunnerInitializeResult", [("state", State), ("context", Dict[str, Any])]
)
class TaskRunner(Runner):
"""
TaskRunners handle the execution of Tasks and determine the State of a Task
before, during and after the Task is run.
In particular, through the TaskRunner you can specify the states of any upstream dependencies
and what state the Task should be initialized with.
Args:
- task (Task): the Task to be run / executed
- state_handlers (Iterable[Callable], optional): A list of state change handlers
that will be called whenever the task changes state, providing an
opportunity to inspect or modify the new state. The handler
will be passed the task runner instance, the old (prior) state, and the new
(current) state, with the following signature: `state_handler(TaskRunner, old_state, new_state) -> Optional[State]`;
If multiple functions are passed, then the `new_state` argument will be the
result of the previous handler.
- result_handler (ResultHandler, optional): the handler to use for
retrieving and storing state results during execution (if the Task doesn't already have one);
if not provided here or by the Task, will default to the one specified in your config
"""
def __init__(
self,
task: Task,
state_handlers: Iterable[Callable] = None,
result_handler: "ResultHandler" = None,
):
self.task = task
self.result_handler = (
task.result_handler
or result_handler
or prefect.engine.get_default_result_handler_class()()
)
super().__init__(state_handlers=state_handlers)
def call_runner_target_handlers(self, old_state: State, new_state: State) -> State:
"""
A special state handler that the TaskRunner uses to call its task's state handlers.
This method is called as part of the base Runner's `handle_state_change()` method.
Args:
- old_state (State): the old (previous) state
- new_state (State): the new (current) state
Returns:
- State: the new state
"""
self.logger.debug(
"Task '{name}': Handling state change from {old} to {new}".format(
name=prefect.context.get("task_full_name", self.task.name),
old=type(old_state).__name__,
new=type(new_state).__name__,
)
)
for handler in self.task.state_handlers:
new_state = handler(self.task, old_state, new_state) or new_state
return new_state
def initialize_run( # type: ignore
self, state: Optional[State], context: Dict[str, Any]
) -> TaskRunnerInitializeResult:
"""
Initializes the Task run by initializing state and context appropriately.
If the task is being retried, then we retrieve the run count from the initial Retry
state. Otherwise, we assume the run count is 1. The run count is stored in context as
task_run_count.
Also, if the task is being resumed through a `Resume` state, updates context to have `resume=True`.
Args:
- state (Optional[State]): the initial state of the run
- context (Dict[str, Any]): the context to be updated with relevant information
Returns:
            - tuple: a tuple of the updated state and context objects
"""
state, context = super().initialize_run(state=state, context=context)
if isinstance(state, Retrying):
run_count = state.run_count + 1
else:
run_count = 1
if isinstance(state, Resume):
context.update(resume=True)
context.update(task_run_count=run_count, task_name=self.task.name)
return TaskRunnerInitializeResult(state=state, context=context)
def run(
self,
state: State = None,
upstream_states: Dict[Edge, State] = None,
context: Dict[str, Any] = None,
executor: "prefect.engine.executors.Executor" = None,
) -> State:
"""
The main endpoint for TaskRunners. Calling this method will conditionally execute
`self.task.run` with any provided inputs, assuming the upstream dependencies are in a
state which allow this Task to run.
Args:
- state (State, optional): initial `State` to begin task run from;
defaults to `Pending()`
- upstream_states (Dict[Edge, State]): a dictionary
representing the states of any tasks upstream of this one. The keys of the
dictionary should correspond to the edges leading to the task.
- context (dict, optional): prefect Context to use for execution
- executor (Executor, optional): executor to use when performing
computation; defaults to the executor specified in your prefect configuration
Returns:
- `State` object representing the final post-run state of the Task
"""
upstream_states = upstream_states or {}
context = context or {}
map_index = context.setdefault("map_index", None)
context["task_full_name"] = "{name}{index}".format(
name=self.task.name,
index=("" if map_index is None else "[{}]".format(map_index)),
)
if executor is None:
executor = prefect.engine.get_default_executor_class()()
# if mapped is true, this task run is going to generate a Mapped state. It won't
# actually run, but rather spawn children tasks to map over its inputs. We
# detect this case by checking for:
# - upstream edges that are `mapped`
# - no `map_index` (which indicates that this is the child task, not the parent)
mapped = any([e.mapped for e in upstream_states]) and map_index is None
task_inputs = {} # type: Dict[str, Any]
self.logger.info(
"Task '{name}': Starting task run...".format(name=context["task_full_name"])
)
try:
# initialize the run
state, context = self.initialize_run(state, context)
# run state transformation pipeline
with prefect.context(context):
# check to make sure the task is in a pending state
state = self.check_task_is_ready(state)
# check if the task has reached its scheduled time
state = self.check_task_reached_start_time(state)
# Tasks never run if the upstream tasks haven't finished
state = self.check_upstream_finished(
state, upstream_states=upstream_states
)
# if the task is mapped, process the mapped children and exit
if mapped:
state = self.run_mapped_task(
state=state,
upstream_states=upstream_states,
context=context,
executor=executor,
)
state = self.wait_for_mapped_task(state=state, executor=executor)
self.logger.debug(
"Task '{name}': task has been mapped; ending run.".format(
name=context["task_full_name"]
)
)
raise ENDRUN(state)
# check if any upstream tasks skipped (and if we need to skip)
state = self.check_upstream_skipped(
state, upstream_states=upstream_states
)
# retrieve task inputs from upstream and also explicitly passed inputs
task_inputs = self.get_task_inputs(
state=state, upstream_states=upstream_states
)
# check to see if the task has a cached result
state = self.check_task_is_cached(state, inputs=task_inputs)
# check if the task's trigger passes
# triggers can raise Pauses, which require task_inputs to be available for caching
# so we run this after the previous step
state = self.check_task_trigger(state, upstream_states=upstream_states)
# set the task state to running
state = self.set_task_to_running(state)
# run the task
state = self.get_task_run_state(
state, inputs=task_inputs, timeout_handler=executor.timeout_handler
)
# cache the output, if appropriate
state = self.cache_result(state, inputs=task_inputs)
# check if the task needs to be retried
state = self.check_for_retry(state, inputs=task_inputs)
# for pending signals, including retries and pauses we need to make sure the
# task_inputs are set
except (ENDRUN, signals.PrefectStateSignal) as exc:
if exc.state.is_pending():
exc.state.cached_inputs = task_inputs or {} # type: ignore
state = exc.state
if not isinstance(exc, ENDRUN) and prefect.context.get(
"raise_on_exception"
):
raise exc
except Exception as exc:
msg = "Task '{name}': unexpected error while running task: {exc}".format(
name=context["task_full_name"], exc=repr(exc)
)
self.logger.error(msg)
state = Failed(message=msg, result=exc)
if prefect.context.get("raise_on_exception"):
raise exc
self.logger.info(
"Task '{name}': finished task run for task with final state: '{state}'".format(
name=context["task_full_name"], state=type(state).__name__
)
)
return state
@call_state_handlers
def check_upstream_finished(
self, state: State, upstream_states: Dict[Edge, State]
) -> State:
"""
        Checks if the upstream tasks have all finished.
Args:
- state (State): the current state of this task
- upstream_states (Dict[Edge, Union[State, List[State]]]): the upstream states
Returns:
- State: the state of the task after running the check
Raises:
- ENDRUN: if upstream tasks are not finished.
"""
if not all(s.is_finished() for s in upstream_states.values()):
self.logger.debug(
"Task '{name}': not all upstream states are finished; ending run.".format(
name=prefect.context.get("task_full_name", self.task.name)
)
)
raise ENDRUN(state)
return state
@call_state_handlers
def check_upstream_skipped(
self, state: State, upstream_states: Dict[Edge, State]
) -> State:
"""
Checks if any of the upstream tasks have skipped.
Args:
- state (State): the current state of this task
- upstream_states (Dict[Edge, State]): the upstream states
Returns:
- State: the state of the task after running the check
"""
all_states = set() # type: Set[State]
for upstream_state in upstream_states.values():
if isinstance(upstream_state, Mapped):
all_states.update(upstream_state.map_states)
else:
all_states.add(upstream_state)
if self.task.skip_on_upstream_skip and any(s.is_skipped() for s in all_states):
self.logger.debug(
"Task '{name}': Upstream states were skipped; ending run.".format(
name=prefect.context.get("task_full_name", self.task.name)
)
)
raise ENDRUN(
state=Skipped(
message=(
"Upstream task was skipped; if this was not the intended "
"behavior, consider changing `skip_on_upstream_skip=False` "
"for this task."
)
)
)
return state
@call_state_handlers
def check_task_trigger(
self, state: State, upstream_states: Dict[Edge, State]
) -> State:
"""
Checks if the task's trigger function passes. If the upstream_states is empty,
then the trigger is not called.
Args:
- state (State): the current state of this task
- upstream_states (Dict[Edge, Union[State, List[State]]]): the upstream states
Returns:
- State: the state of the task after running the check
Raises:
- ENDRUN: if the trigger raises an error
"""
all_states = set() # type: Set[State]
for upstream_state in upstream_states.values():
if isinstance(upstream_state, Mapped):
all_states.update(upstream_state.map_states)
else:
all_states.add(upstream_state)
try:
if not upstream_states:
return state
elif not self.task.trigger(all_states):
raise signals.TRIGGERFAIL(message="Trigger failed")
except signals.PrefectStateSignal as exc:
self.logger.debug(
"Task '{name}': {signal} signal raised during execution.".format(
name=prefect.context.get("task_full_name", self.task.name),
signal=type(exc).__name__,
)
)
if prefect.context.get("raise_on_exception"):
raise exc
raise ENDRUN(exc.state)
# Exceptions are trapped and turned into TriggerFailed states
except Exception as exc:
self.logger.debug(
"Task '{name}': unexpected error while evaluating task trigger: {exc}".format(
exc=repr(exc),
name=prefect.context.get("task_full_name", self.task.name),
)
)
if prefect.context.get("raise_on_exception"):
raise exc
raise ENDRUN(
TriggerFailed(
"Unexpected error while checking task trigger: {}".format(
repr(exc)
),
result=exc,
)
)
return state
@call_state_handlers
def check_task_is_ready(self, state: State) -> State:
"""
Checks to make sure the task is ready to run (Pending or Mapped).
If the state is Paused, an ENDRUN is raised.
Args:
- state (State): the current state of this task
Returns:
- State: the state of the task after running the check
Raises:
- ENDRUN: if the task is not ready to run
"""
# the task is paused
if isinstance(state, Paused):
self.logger.debug(
"Task '{name}': task is paused; ending run.".format(
name=prefect.context.get("task_full_name", self.task.name)
)
)
raise ENDRUN(state)
# the task is ready
elif state.is_pending():
return state
# the task is mapped, in which case we still proceed so that the children tasks
        # are generated (the child task runs themselves are handled separately)
elif state.is_mapped():
self.logger.debug(
"Task '{name}': task is mapped, but run will proceed so children are generated.".format(
name=prefect.context.get("task_full_name", self.task.name)
)
)
return state
# this task is already running
elif state.is_running():
self.logger.debug(
"Task '{name}': task is already running.".format(
name=prefect.context.get("task_full_name", self.task.name)
)
)
raise ENDRUN(state)
elif state.is_cached():
return state
# this task is already finished
elif state.is_finished():
self.logger.debug(
"Task '{name}': task is already finished.".format(
name=prefect.context.get("task_full_name", self.task.name)
)
)
raise ENDRUN(state)
# this task is not pending
else:
self.logger.debug(
"Task '{name}' is not ready to run or state was unrecognized ({state}).".format(
name=prefect.context.get("task_full_name", self.task.name),
state=state,
)
)
raise ENDRUN(state)
@call_state_handlers
def check_task_reached_start_time(self, state: State) -> State:
"""
Checks if a task is in a Scheduled state and, if it is, ensures that the scheduled
time has been reached. Note: Scheduled states include Retry states.
Args:
- state (State): the current state of this task
Returns:
- State: the state of the task after performing the check
Raises:
- ENDRUN: if the task is Scheduled with a future scheduled time
"""
if isinstance(state, Scheduled):
if state.start_time and state.start_time > pendulum.now("utc"):
self.logger.debug(
"Task '{name}': start_time has not been reached; ending run.".format(
name=prefect.context.get("task_full_name", self.task.name)
)
)
raise ENDRUN(state)
return state
def get_task_inputs(
self, state: State, upstream_states: Dict[Edge, State]
) -> Dict[str, Result]:
"""
Given the task's current state and upstream states, generates the inputs for this task.
Upstream state result values are used. If the current state has `cached_inputs`, they
will override any upstream values which are `NoResult`.
Args:
- state (State): the task's current state.
- upstream_states (Dict[Edge, State]): the upstream state_handlers
Returns:
- Dict[str, Result]: the task inputs
"""
task_inputs = {} # type: Dict[str, Result]
for edge, upstream_state in upstream_states.items():
# construct task inputs
if edge.key is not None:
task_inputs[ # type: ignore
edge.key
] = upstream_state._result.to_result() # type: ignore
if state.is_pending() and state.cached_inputs is not None: # type: ignore
task_inputs.update(
{
k: r.to_result()
for k, r in state.cached_inputs.items() # type: ignore
if task_inputs.get(k, NoResult) == NoResult
}
)
return task_inputs
@call_state_handlers
def check_task_is_cached(self, state: State, inputs: Dict[str, Result]) -> State:
"""
Checks if task is cached and whether the cache is still valid.
Args:
- state (State): the current state of this task
- inputs (Dict[str, Result]): a dictionary of inputs whose keys correspond
to the task's `run()` arguments.
Returns:
- State: the state of the task after running the check
Raises:
- ENDRUN: if the task is not ready to run
"""
if state.is_cached():
assert isinstance(state, Cached) # mypy assert
if self.task.cache_validator(
state, inputs, prefect.context.get("parameters")
):
state._result = state._result.to_result()
return state
else:
self.logger.debug(
"Task '{name}': can't use cache because it "
"is now invalid".format(
name=prefect.context.get("task_full_name", self.task.name)
)
)
return Pending("Cache was invalid; ready to run.")
return state
@call_state_handlers
def run_mapped_task(
self,
state: State,
upstream_states: Dict[Edge, State],
context: Dict[str, Any],
executor: "prefect.engine.executors.Executor",
) -> State:
"""
If the task is being mapped, submits children tasks for execution. Returns a `Mapped` state.
Args:
- state (State): the current task state
- upstream_states (Dict[Edge, State]): the upstream states
- context (dict, optional): prefect Context to use for execution
- executor (Executor): executor to use when performing computation
Returns:
- State: the state of the task after running the check
Raises:
- ENDRUN: if the current state is not `Running`
"""
map_upstream_states = []
# we don't know how long the iterables are, but we want to iterate until we reach
# the end of the shortest one
counter = itertools.count()
# infinite loop, if upstream_states has any entries
while True and upstream_states:
i = next(counter)
states = {}
try:
for edge, upstream_state in upstream_states.items():
# if the edge is not mapped over, then we simply take its state
if not edge.mapped:
states[edge] = upstream_state
# if the edge is mapped and the upstream state is Mapped, then we are mapping
# over a mapped task. In this case, we take the appropriately-indexed upstream
# state from the upstream tasks's `Mapped.map_states` array.
# Note that these "states" might actually be futures at this time; we aren't
# blocking until they finish.
elif edge.mapped and upstream_state.is_mapped():
states[edge] = upstream_state.map_states[i] # type: ignore
# Otherwise, we are mapping over the result of a "vanilla" task. In this
# case, we create a copy of the upstream state but set the result to the
# appropriately-indexed item from the upstream task's `State.result`
# array.
else:
states[edge] = copy.copy(upstream_state)
# if the current state is already Mapped, then we might be executing
# a re-run of the mapping pipeline. In that case, the upstream states
# might not have `result` attributes (as any required results could be
# in the `cached_inputs` attribute of one of the child states).
# Therefore, we only try to get a result if EITHER this task's
# state is not already mapped OR the upstream result is not None.
if not state.is_mapped() or upstream_state.result != NoResult:
states[edge].result = upstream_state.result[ # type: ignore
i
]
elif state.is_mapped():
if i >= len(state.map_states): # type: ignore
raise IndexError()
# only add this iteration if we made it through all iterables
map_upstream_states.append(states)
# index error means we reached the end of the shortest iterable
except IndexError:
break
def run_fn(
state: State, map_index: int, upstream_states: Dict[Edge, State]
) -> State:
map_context = context.copy()
map_context.update(map_index=map_index)
return self.run(
upstream_states=upstream_states,
# if we set the state here, then it will not be processed by `initialize_run()`
state=state,
context=map_context,
executor=executor,
)
# generate initial states, if available
if isinstance(state, Mapped):
initial_states = list(state.map_states) # type: List[Optional[State]]
else:
initial_states = []
initial_states.extend([None] * (len(map_upstream_states) - len(initial_states)))
# map over the initial states, a counter representing the map_index, and also the mapped upstream states
map_states = executor.map(
run_fn, initial_states, range(len(map_upstream_states)), map_upstream_states
)
return Mapped(
message="Mapped tasks submitted for execution.", map_states=map_states
)
@call_state_handlers
def wait_for_mapped_task(
self, state: State, executor: "prefect.engine.executors.Executor"
) -> State:
"""
Blocks until a mapped state's children have finished running.
Args:
- state (State): the current `Mapped` state
- executor (Executor): the run's executor
Returns:
- State: the new state
"""
if state.is_mapped():
assert isinstance(state, Mapped) # mypy assert
state.map_states = executor.wait(state.map_states)
return state
@call_state_handlers
def set_task_to_running(self, state: State) -> State:
"""
Sets the task to running
Args:
- state (State): the current state of this task
Returns:
- State: the state of the task after running the check
Raises:
- ENDRUN: if the task is not ready to run
"""
if not state.is_pending():
self.logger.debug(
"Task '{name}': can't set state to Running because it "
"isn't Pending; ending run.".format(
name=prefect.context.get("task_full_name", self.task.name)
)
)
raise ENDRUN(state)
return Running(message="Starting task run.")
@run_with_heartbeat
@call_state_handlers
def get_task_run_state(
self,
state: State,
inputs: Dict[str, Result],
timeout_handler: Optional[Callable],
) -> State:
"""
Runs the task and traps any signals or errors it raises.
Also checkpoints the result of a successful task, if `task.checkpoint` is `True`.
Args:
- state (State): the current state of this task
- inputs (Dict[str, Result], optional): a dictionary of inputs whose keys correspond
to the task's `run()` arguments.
- timeout_handler (Callable, optional): function for timing out
task execution, with call signature `handler(fn, *args, **kwargs)`. Defaults to
`prefect.utilities.executors.main_thread_timeout`
Returns:
- State: the state of the task after running the check
Raises:
- signals.PAUSE: if the task raises PAUSE
- ENDRUN: if the task is not ready to run
"""
if not state.is_running():
self.logger.debug(
"Task '{name}': can't run task because it's not in a "
"Running state; ending run.".format(
name=prefect.context.get("task_full_name", self.task.name)
)
)
raise ENDRUN(state)
try:
self.logger.debug(
"Task '{name}': Calling task.run() method...".format(
name=prefect.context.get("task_full_name", self.task.name)
)
)
timeout_handler = timeout_handler or main_thread_timeout
raw_inputs = {k: r.value for k, r in inputs.items()}
result = timeout_handler(
self.task.run, timeout=self.task.timeout, **raw_inputs
)
# inform user of timeout
except TimeoutError as exc:
if prefect.context.get("raise_on_exception"):
raise exc
state = TimedOut(
"Task timed out during execution.", result=exc, cached_inputs=inputs
)
return state
result = Result(value=result, result_handler=self.result_handler)
state = Success(result=result, message="Task run succeeded.")
if state.is_successful() and self.task.checkpoint is True:
state._result.store_safe_value()
return state
@call_state_handlers
def cache_result(self, state: State, inputs: Dict[str, Result]) -> State:
"""
Caches the result of a successful task, if appropriate.
Tasks are cached if:
- task.cache_for is not None
- the task state is Successful
- the task state is not Skipped (which is a subclass of Successful)
Args:
- state (State): the current state of this task
- inputs (Dict[str, Result], optional): a dictionary of inputs whose keys correspond
to the task's `run()` arguments.
Returns:
- State: the state of the task after running the check
"""
if (
state.is_successful()
and not state.is_skipped()
and self.task.cache_for is not None
):
expiration = pendulum.now("utc") + self.task.cache_for
cached_state = Cached(
result=state._result,
cached_inputs=inputs,
cached_result_expiration=expiration,
cached_parameters=prefect.context.get("parameters"),
message=state.message,
)
return cached_state
return state
@call_state_handlers
def check_for_retry(self, state: State, inputs: Dict[str, Result]) -> State:
"""
Checks to see if a FAILED task should be retried.
Args:
- state (State): the current state of this task
- inputs (Dict[str, Result], optional): a dictionary of inputs whose keys correspond
to the task's `run()` arguments.
Returns:
- State: the state of the task after running the check
"""
if state.is_failed():
run_count = prefect.context.get("task_run_count", 1)
if run_count <= self.task.max_retries:
start_time = pendulum.now("utc") + self.task.retry_delay
msg = "Retrying Task (after attempt {n} of {m})".format(
n=run_count, m=self.task.max_retries + 1
)
retry_state = Retrying(
start_time=start_time,
cached_inputs=inputs,
message=msg,
run_count=run_count,
)
return retry_state
return state
| 37.259174
| 128
| 0.568359
|
2a95db77f618cc6806336c6c4dfc1792b9707c33
| 1,141
|
py
|
Python
|
virtualscreening/vina/python/analysis/call_analysis.py
|
rodrigofaccioli/drugdesign
|
de15880af361a010729b1f4fbc8a75a2b36688a6
|
[
"Apache-2.0"
] | 3
|
2015-01-19T20:12:59.000Z
|
2019-02-21T18:43:04.000Z
|
virtualscreening/vina/python/analysis/call_analysis.py
|
rodrigofaccioli/drugdesign
|
de15880af361a010729b1f4fbc8a75a2b36688a6
|
[
"Apache-2.0"
] | 22
|
2015-01-05T16:48:54.000Z
|
2017-01-21T16:36:10.000Z
|
virtualscreening/vina/python/analysis/call_analysis.py
|
rodrigofaccioli/drugdesign
|
de15880af361a010729b1f4fbc8a75a2b36688a6
|
[
"Apache-2.0"
] | 11
|
2015-03-03T13:32:24.000Z
|
2020-04-03T11:22:24.000Z
|
#! /usr/bin/env python
"""
Routines to perform the analysis of virtual screening
These routines were developed by:
Rodrigo Antonio Faccioli - rodrigo.faccioli@usp.br / rodrigo.faccioli@gmail.com
Leandro Oliveira Bortot - leandro.bortot@usp.br / leandro.obt@gmail.com
"""
import analysis
import analysisio as ana_io
import xvg_histogram_energy_values as xvghist
def call_vs_analysis(path_analysis, path_log):
"""
Executes the analysis of Virtual Screening.
The analysis consists of:
1) Creating a txt file that contains receptor, ligand and mode sorted by energies.
2) Creating a xvg file that is a histogram of energies
Example:
>>> call_vs_analysis(path_analysis, path_log)
@param path_analysis: place where analysis files will be saved
@type path_analysis: string
@param path_log: place where log files were saved
@type path_log: string
"""
log_dict = analysis.log_files_by_energy( path_log )
log_sorted_dict = ana_io.create_file_by_sorted_energy(path_analysis, log_dict)
xvghist.create_xvg_histogram_energy_values(path_analysis, log_sorted_dict)
| 36.806452
| 85
| 0.751972
|
9ae3916199883ba8fbd9f2533445add8f14f5a84
| 5,017
|
py
|
Python
|
outputbot.py
|
ChoudhuryVishal/LOCO-TRIVIA-MINDS
|
6147125ac22712da30fea375adf43aaf8539acfc
|
[
"Apache-2.0"
] | null | null | null |
outputbot.py
|
ChoudhuryVishal/LOCO-TRIVIA-MINDS
|
6147125ac22712da30fea375adf43aaf8539acfc
|
[
"Apache-2.0"
] | 2
|
2021-03-31T19:07:35.000Z
|
2021-12-13T20:04:29.000Z
|
outputbot.py
|
ChoudhuryVishal/LOCO-TRIVIA-MINDS
|
6147125ac22712da30fea375adf43aaf8539acfc
|
[
"Apache-2.0"
] | 1
|
2019-11-25T15:58:24.000Z
|
2019-11-25T15:58:24.000Z
|
import asyncio
import discord
import data
token = data.bot_token
client = discord.Client()
number1 = 0
number2 = 0
number3 = 0
game_running = False
local_game_running = False
def update_embed():
global number1
global number2
global number3
highlighter1 = highlighter2 = highlighter3 = ' '
best_answer = '``Undefined``'
if (number1 == number2) and (number1 == number3):
highlighter1 = highlighter2 = highlighter3 = ' '
else:
if number1 == max(number1, number2, number3):
highlighter1 = '✅1️⃣'
best_answer = ':one:'
if number2 == max(number1, number2, number3):
highlighter2 = '✅2️⃣'
best_answer = ':two:'
if number3 == max(number1, number2, number3):
highlighter3 = '✅3️⃣'
best_answer = ':three:'
if data.embed is None:
data.embed = discord.Embed(title='***LOCO SEVER***', description="***__LOCO__***", color=0xC46210)
data.embed.add_field(name="Answer 1", value= highlighter1 + str(number1) + highlighter1, inline=False)
data.embed.add_field(name="Answer 2", value= highlighter2 + str(number2) + highlighter2, inline=False)
data.embed.add_field(name="Answer 3", value= highlighter3 + str(number3) + highlighter3, inline=False)
data.embed.add_field(name="Best answer:", value= best_answer , inline=True)
data.embed.set_image(url="https://cdn.discordapp.com/attachments/459865236393164810/493986426745126932/multicolours_1.gif")
data.embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/589247319094853672/601964267650023449/unnamed.png")
data.embed.set_footer(text= 'Created by Amit',icon_url="https://media.discordapp.net/attachments/599923364999593985/600700142248525835/images.jpg")
else:
data.embed = discord.Embed(title='**__LOCO SEVER__**', description="***__LOCO__***", color=0xC46210)
data.embed.add_field(name="Answer 1", value= highlighter1 + str(number1) + highlighter1, inline=False)
data.embed.add_field(name="Answer 2", value= highlighter2 + str(number2) + highlighter2, inline=False)
data.embed.add_field(name="Answer 3", value= highlighter3 + str(number3) + highlighter3, inline=False)
data.embed.add_field(name="Best answer:", value= best_answer , inline=True)
data.embed.set_image(url="https://cdn.discordapp.com/attachments/459865236393164810/493986426745126932/multicolours_1.gif")
data.embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/589247319094853672/601964267650023449/unnamed.png")
data.embed.set_footer(text= 'Created by DEV',icon_url="https://cdn.discordapp.com/attachments/555059875885875201/603338674985500683/trademark-monogram.jpg")
def update_data():
global game_running
global number1
global number2
global number3
file1 = open('data/num1.txt', 'r')
file2 = open('data/num2.txt', 'r')
file3 = open('data/num3.txt', 'r')
file4 = open('data/gamerunning.txt', 'r')
try:
number1 = float(file1.read())
number2 = float(file2.read())
number3 = float(file3.read())
game_running = int(file4.read())
game_running = True if game_running != 0 else False
    except (ValueError, OSError):
        # keep the previous values if a data file is empty or still being written
        pass
file1.close()
file2.close()
file3.close()
file4.close()
async def check_for_updates():
global local_game_running
global game_running
global number1
global number2
global number3
await client.wait_until_ready()
while not client.is_closed:
await asyncio.sleep(1)
update_data()
if game_running and local_game_running == False:
update_embed()
data.message = await client.send_message(data.output_channel, embed=data.embed)
local_game_running = True
if game_running:
update_embed()
await client.edit_message(data.message, embed=data.embed)
if game_running == False:
local_game_running = False
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith('!say') and message.channel.id == data.admin_chat:
await client.send_message(message.channel, 'What do you want to be announced?')
response1 = await client.wait_for_message(author=message.author, timeout=60)
if response1.clean_content:
vw = response1
return await client.send_message(data.output_channel, (vw.content))
if message.content.startswith('!game') and message.channel.id in data.input_hq_private:
await client.send_message(message.channel, '**__```NEXT GAME LCOCO @ 10:00 IST__**```')
@client.event
async def on_ready():
print("Bot is ready!")
client.loop.create_task(check_for_updates())
client.run(token)
| 38.592308
| 165
| 0.653578
|
9c76e19286ac9a2617dff55dd89ef56040a49bb8
| 4,144
|
py
|
Python
|
movr/movr.py
|
KenMwaura1/movr-app-1
|
0946fe28375a91c76ba1a3d1a2d4a96b598f2824
|
[
"Apache-2.0"
] | null | null | null |
movr/movr.py
|
KenMwaura1/movr-app-1
|
0946fe28375a91c76ba1a3d1a2d4a96b598f2824
|
[
"Apache-2.0"
] | null | null | null |
movr/movr.py
|
KenMwaura1/movr-app-1
|
0946fe28375a91c76ba1a3d1a2d4a96b598f2824
|
[
"Apache-2.0"
] | null | null | null |
"""
Defines the connection to the database for the MovR app.
"""
from cockroachdb.sqlalchemy import run_transaction
from sqlalchemy import create_engine
from sqlalchemy.dialects import registry
from sqlalchemy.orm import sessionmaker
from .transactions import (add_vehicle_txn, end_ride_txn, get_vehicle_txn,
get_vehicles_txn, remove_vehicle_txn,
start_ride_txn)
registry.register("cockroachdb", "cockroachdb.sqlalchemy.dialect",
"CockroachDBDialect")
class MovR:
"""
Wraps the database connection. The class methods wrap transactions.
"""
def __init__(self, conn_string, max_records=20):
"""
Establish a connection to the database, creating an Engine instance.
Arguments:
conn_string {String} -- CockroachDB connection string.
"""
self.engine = create_engine(conn_string, convert_unicode=True, echo=True)
self.connection_string = conn_string
self.max_records = max_records
def start_ride(self, vehicle_id):
"""
Wraps a `run_transaction` call that starts a ride.
Arguments:
vehicle_id {UUID} -- The vehicle's unique ID.
"""
return run_transaction(
sessionmaker(bind=self.engine),
lambda session: start_ride_txn(session, vehicle_id))
def end_ride(self, vehicle_id, new_longitude, new_latitude, new_battery):
"""
Wraps a `run_transaction` call that ends a ride.
Updates position (lat & long), battery & timestamp.
Arguments:
vehicle_id {UUID} -- The vehicle's unique ID.
new_longitude {float} -- Vehicle's new longitude coordinate
new_latitude {float} -- Vehicle's new latitude coordinate
new_battery {int} -- Vehicle's new battery reading
Returns:
{datetime} -- Timestamp of the end of the ride from the server.
"""
return run_transaction(
sessionmaker(bind=self.engine),
lambda session: end_ride_txn(session, vehicle_id, new_longitude,
new_latitude, new_battery))
def remove_vehicle(self, vehicle_id):
"""
Wraps a `run_transaction` call that "removes" a vehicle.
Arguments:
            vehicle_id {UUID} -- The vehicle's unique ID.
"""
return run_transaction(
sessionmaker(bind=self.engine),
lambda session: remove_vehicle_txn(session, vehicle_id))
def add_vehicle(self, vehicle_type, longitude, latitude, battery):
"""
Wraps a `run_transaction` call that adds a vehicle.
Arguments:
vehicle_type {String} -- The type of vehicle.
"""
return run_transaction(sessionmaker(bind=self.engine),
lambda session: add_vehicle_txn(session,
vehicle_type,
longitude,
latitude,
battery))
def get_vehicles(self, max_vehicles=None):
"""
        Wraps a `run_transaction` call that gets all vehicles.
Returns:
A list of dictionaries containing vehicle data.
"""
if max_vehicles is None:
max_vehicles = self.max_records
return run_transaction(
sessionmaker(bind=self.engine),
lambda session: get_vehicles_txn(session, max_vehicles))
def get_vehicle(self, vehicle_id):
"""
Get a single vehicle from its id.
"""
return run_transaction(sessionmaker(bind=self.engine),
lambda session: get_vehicle_txn(session,
vehicle_id))
def show_tables(self):
"""
Returns:
List -- A list of tables in the database it's connected to.
"""
return self.engine.table_names()
| 35.418803
| 81
| 0.573118
|
41dd6882fa9a583fe570a9949b16d9ddce95a726
| 393
|
py
|
Python
|
qcloudsdkcdn/RefreshCdnUrlRequest.py
|
f3n9/qcloudcli
|
b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19
|
[
"Apache-2.0"
] | null | null | null |
qcloudsdkcdn/RefreshCdnUrlRequest.py
|
f3n9/qcloudcli
|
b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19
|
[
"Apache-2.0"
] | null | null | null |
qcloudsdkcdn/RefreshCdnUrlRequest.py
|
f3n9/qcloudcli
|
b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class RefreshCdnUrlRequest(Request):
def __init__(self):
super(RefreshCdnUrlRequest, self).__init__(
'cdn', 'qcloudcliV1', 'RefreshCdnUrl', 'cdn.api.qcloud.com')
def get_urls(self):
return self.get_params().get('urls')
def set_urls(self, urls):
self.add_param('urls', urls)
| 24.5625
| 72
| 0.648855
|
f8324896046d8b47814ebc02e95f5bd01f8df6da
| 8,045
|
py
|
Python
|
inference.py
|
ishine/tacotron2-1
|
ec155d485a62b954b91642aff10e7bcbd2025d74
|
[
"BSD-3-Clause"
] | null | null | null |
inference.py
|
ishine/tacotron2-1
|
ec155d485a62b954b91642aff10e7bcbd2025d74
|
[
"BSD-3-Clause"
] | null | null | null |
inference.py
|
ishine/tacotron2-1
|
ec155d485a62b954b91642aff10e7bcbd2025d74
|
[
"BSD-3-Clause"
] | null | null | null |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
import numpy as np
import os
import sys
import time
import torch
# from apex import amp
from scipy.io.wavfile import write
from tacotron2.loader import parse_tacotron2_args
from tacotron2.loader import get_tacotron2_model
from tacotron2.text import text_to_sequence
from dllogger.logger import LOGGER
import dllogger.logger as dllg
from dllogger.autologging import log_hardware, log_args
def parse_args(parser):
"""
Parse commandline arguments.
"""
    parser.add_argument('-i', '--input-file', type=str, default="text.txt", help='full path to the input text (phrases separated by new line)')
parser.add_argument('-o', '--output', type=str, default="outputs", help='output folder to save audio (file per phrase)')
parser.add_argument('--checkpoint', type=str, default="logs/checkpoint_latest.pt", help='full path to the Tacotron2 model checkpoint file')
parser.add_argument('-id', '--speaker-id', default=0, type=int, help='Speaker identity')
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int, help='Sampling rate')
parser.add_argument('--amp-run', action='store_true', help='inference with AMP')
parser.add_argument('--log-file', type=str, default='nvlog.json', help='Filename for logging')
parser.add_argument('--include-warmup', action='store_true', help='Include warmup')
parser.add_argument('--stft-hop-length', type=int, default=275, help='STFT hop length for estimating audio length from mel size')
return parser
def load_checkpoint(model, checkpoint_path):
    """Restore the weights saved at checkpoint_path into the given model."""
    assert os.path.isfile(checkpoint_path)
    model.load_state_dict(torch.load(checkpoint_path))
    print(f"Loaded checkpoint: {checkpoint_path}")
    return model
def load_and_setup_model(parser, args):
checkpoint_path = args.checkpoint
parser = parse_tacotron2_args(parser, add_help=False)
args, _ = parser.parse_known_args()
model = get_tacotron2_model(args, 4, is_training=False)
model.restore_checkpoint(checkpoint_path)
model.eval()
if args.amp_run:
from apex import amp  # apex is required only when --amp-run is passed
model, _ = amp.initialize(model, [], opt_level="O3")
return model
# taken from tacotron2/data_function.py:TextMelCollate.__call__
def pad_sequences(sequences):
# Right zero-pad all one-hot text sequences to max input length
text_lengths, ids_sorted_decreasing = torch.sort(
torch.IntTensor([len(x) for x in sequences]),
dim=0, descending=True)
max_text_len = text_lengths[0]
texts = []
for i in range(len(ids_sorted_decreasing)):
text = sequences[ids_sorted_decreasing[i]]
texts.append(np.pad(text, [0, max_text_len - len(text)], mode='constant'))
texts = torch.from_numpy(np.stack(texts))
return texts, text_lengths, ids_sorted_decreasing
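# Illustrative example (assumed inputs, not from the upstream file): for two encoded
# sequences of lengths 3 and 5, torch.sort yields text_lengths == [5, 3] and
# ids_sorted_decreasing == [1, 0]; the length-3 sequence is right zero-padded to 5,
# so the returned `texts` tensor has shape (2, 5) with the longest sequence first.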
def prepare_input_sequence(texts, speaker_id):
sequences = [text_to_sequence(text, speaker_id, ['basic_cleaners'])[:] for text in texts]
texts, text_lengths, ids_sorted_decreasing = pad_sequences(sequences)
if torch.cuda.is_available():
texts = texts.cuda().long()
text_lengths = text_lengths.cuda().int()
else:
texts = texts.long()
text_lengths = text_lengths.int()
return texts, text_lengths, ids_sorted_decreasing
class MeasureTime():
def __init__(self, measurements, key):
self.measurements = measurements
self.key = key
def __enter__(self):
torch.cuda.synchronize()
self.t0 = time.perf_counter()
def __exit__(self, exc_type, exc_value, exc_traceback):
torch.cuda.synchronize()
self.measurements[self.key] = time.perf_counter() - self.t0
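# Hedged usage sketch, mirroring how main() below drives this context manager:
#     measurements = {}
#     with MeasureTime(measurements, "tacotron2_time"):
#         ...  # run model.infer(...)
#     print(measurements["tacotron2_time"])  # elapsed seconds, CUDA-synchronized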
def main():
"""
Launches text to speech (inference).
Inference is executed on a single GPU.
"""
parser = argparse.ArgumentParser(description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
LOGGER.set_model_name("Tacotron2_PyT")
LOGGER.set_backends([
dllg.StdOutBackend(log_file=None, logging_scope=dllg.TRAIN_ITER_SCOPE, iteration_interval=1),
dllg.JsonBackend(log_file=args.log_file, logging_scope=dllg.TRAIN_ITER_SCOPE, iteration_interval=1)
])
LOGGER.register_metric("tacotron2_frames_per_sec", metric_scope=dllg.TRAIN_ITER_SCOPE)
LOGGER.register_metric("tacotron2_latency", metric_scope=dllg.TRAIN_ITER_SCOPE)
LOGGER.register_metric("latency", metric_scope=dllg.TRAIN_ITER_SCOPE)
model = load_and_setup_model(parser, args)
log_hardware()
log_args(args)
if args.include_warmup:
sequences = torch.randint(low=0, high=148, size=(1,50),
dtype=torch.long).cuda()
text_lengths = torch.IntTensor([sequences.size(1)]).cuda().long()
for i in range(3):
with torch.no_grad():
_, mels, _, _, mel_lengths = model.infer(sequences, text_lengths)
try:
f = open(args.input_file)
sentences = list(map(lambda s : s.strip(), f.readlines()))
except UnicodeDecodeError:
f = open(args.input_file, encoding='gbk')
sentences = list(map(lambda s : s.strip(), f.readlines()))
os.makedirs(args.output, exist_ok=True)
texts = sentences[1::2]
LOGGER.iteration_start()
measurements = {}
sequences, text_lengths, ids_sorted_decreasing = prepare_input_sequence(texts, args.speaker_id)
with torch.no_grad(), MeasureTime(measurements, "tacotron2_time"):
_, mels, _, _, mel_lengths = model.infer(sequences, text_lengths)
tacotron2_infer_perf = mels.size(0)*mels.size(2)/measurements['tacotron2_time']
LOGGER.log(key="tacotron2_frames_per_sec", value=tacotron2_infer_perf)
LOGGER.log(key="tacotron2_latency", value=measurements['tacotron2_time'])
LOGGER.log(key="latency", value=(measurements['tacotron2_time']))
LOGGER.iteration_stop()
LOGGER.finish()
# recover to the original order and concatenate
ids_sorted_decreasing = ids_sorted_decreasing.numpy().tolist()
mels = [mel[:, :length] for mel, length in zip(mels, mel_lengths)]
mels = [mels[ids_sorted_decreasing.index(i)] for i in range(len(ids_sorted_decreasing))]
np.save(os.path.join(args.output, 'eval_mel.npy'), np.concatenate(mels, axis=-1), allow_pickle=False)
if __name__ == '__main__':
main()
| 41.25641
| 144
| 0.704413
|
796a0b8231a1ba5e94e84bb083a60a754bda986f
| 177
|
py
|
Python
|
pyxrd/__version.py
|
Haiqian-MA/PyXRD
|
27966da2270310ced9cce8c25c5b7c9d8de301dc
|
[
"BSD-2-Clause"
] | null | null | null |
pyxrd/__version.py
|
Haiqian-MA/PyXRD
|
27966da2270310ced9cce8c25c5b7c9d8de301dc
|
[
"BSD-2-Clause"
] | null | null | null |
pyxrd/__version.py
|
Haiqian-MA/PyXRD
|
27966da2270310ced9cce8c25c5b7c9d8de301dc
|
[
"BSD-2-Clause"
] | null | null | null |
# coding=UTF-8
# ex:ts=4:sw=4:et=on
# Copyright (c) 2013, Mathijs Dumon
# All rights reserved.
# Complete license can be found in the LICENSE file.
__version__ = "0.8.2"
| 12.642857
| 52
| 0.677966
|
e8028334a272e47b501676afa2b552a725c2560e
| 16,096
|
py
|
Python
|
source/synthDrivers/oneCore.py
|
SWEN-712/screen-reader-brandonp728
|
e30c25ad2d10ce632fac0548696a61a872328f59
|
[
"bzip2-1.0.6"
] | null | null | null |
source/synthDrivers/oneCore.py
|
SWEN-712/screen-reader-brandonp728
|
e30c25ad2d10ce632fac0548696a61a872328f59
|
[
"bzip2-1.0.6"
] | null | null | null |
source/synthDrivers/oneCore.py
|
SWEN-712/screen-reader-brandonp728
|
e30c25ad2d10ce632fac0548696a61a872328f59
|
[
"bzip2-1.0.6"
] | null | null | null |
#synthDrivers/oneCore.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2016-2019 Tyler Spivey, NV Access Limited, James Teh, Leonard de Ruijter
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
"""Synth driver for Windows OneCore voices.
"""
import os
import sys
from collections import OrderedDict
import ctypes
import winreg
import wave
from synthDriverHandler import SynthDriver, VoiceInfo, synthIndexReached, synthDoneSpeaking
import io
from logHandler import log
import config
import nvwave
import speech
import speechXml
import languageHandler
import winVersion
import NVDAHelper
#: The number of 100-nanosecond units in 1 second.
HUNDRED_NS_PER_SEC = 10000000 # 1000000000 ns per sec / 100 ns
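# Worked example (illustrative, assumed audio format): a marker at 5,000,000 units is
# 0.5 s of audio; for 16-bit mono at 22050 Hz (bytesPerSec == 44100), _callback() below
# maps it to a byte offset of 5000000 * 44100 // HUNDRED_NS_PER_SEC == 22050.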
ocSpeech_Callback = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_int, ctypes.c_wchar_p)
class _OcSsmlConverter(speechXml.SsmlConverter):
def _convertProsody(self, command, attr, default, base=None):
if base is None:
base = default
if command.multiplier == 1 and base == default:
# Returning to synth default.
return speechXml.DelAttrCommand("prosody", attr)
else:
# Multiplication isn't supported, only addition/subtraction.
# The final value must therefore be relative to the synthesizer's default.
val = base * command.multiplier - default
return speechXml.SetAttrCommand("prosody", attr, "%d%%" % val)
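# Illustrative (assumed values): a rate command with multiplier 1.2 against the synth
# default of 50 gives val = 50 * 1.2 - 50 = 10, i.e. SetAttrCommand("prosody", "rate", "10%");
# a multiplier of 1 with the default base removes the attribute instead.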
def convertRateCommand(self, command):
return self._convertProsody(command, "rate", 50)
def convertPitchCommand(self, command):
return self._convertProsody(command, "pitch", 50)
def convertVolumeCommand(self, command):
return self._convertProsody(command, "volume", 100)
def convertCharacterModeCommand(self, command):
# OneCore's character speech sounds weird and doesn't support pitch alteration.
# Therefore, we don't use it.
return None
def convertLangChangeCommand(self, command):
lcid = languageHandler.localeNameToWindowsLCID(command.lang)
if lcid is languageHandler.LCID_NONE:
log.debugWarning("Invalid language: %s" % command.lang)
return None
return super(_OcSsmlConverter, self).convertLangChangeCommand(command)
class _OcPreAPI5SsmlConverter(_OcSsmlConverter):
def __init__(self, defaultLanguage, rate, pitch, volume):
super(_OcPreAPI5SsmlConverter, self).__init__(defaultLanguage)
self._rate = rate
self._pitch = pitch
self._volume = volume
def generateBalancerCommands(self, speechSequence):
commands = super(_OcPreAPI5SsmlConverter, self).generateBalancerCommands(speechSequence)
# The EncloseAllCommand from SSML must be first.
yield next(commands)
# OneCore didn't provide a way to set base prosody values before API version 5.
# Therefore, the base values need to be set using SSML.
yield self.convertRateCommand(speech.RateCommand(multiplier=1))
yield self.convertVolumeCommand(speech.VolumeCommand(multiplier=1))
yield self.convertPitchCommand(speech.PitchCommand(multiplier=1))
for command in commands:
yield command
def convertRateCommand(self, command):
return self._convertProsody(command, "rate", 50, self._rate)
def convertPitchCommand(self, command):
return self._convertProsody(command, "pitch", 50, self._pitch)
def convertVolumeCommand(self, command):
return self._convertProsody(command, "volume", 100, self._volume)
class SynthDriver(SynthDriver):
MIN_PITCH = 0.0
MAX_PITCH = 2.0
MIN_RATE = 0.5
DEFAULT_MAX_RATE = 1.5
BOOSTED_MAX_RATE = 6.0
name = "oneCore"
# Translators: Description for a speech synthesizer.
description = _("Windows OneCore voices")
supportedCommands = {
speech.IndexCommand,
speech.CharacterModeCommand,
speech.LangChangeCommand,
speech.BreakCommand,
speech.PitchCommand,
speech.RateCommand,
speech.VolumeCommand,
speech.PhonemeCommand,
}
supportedNotifications = {synthIndexReached, synthDoneSpeaking}
@classmethod
def check(cls):
# Only present this as an available synth if this is Windows 10.
return winVersion.isWin10()
def _get_supportsProsodyOptions(self):
self.supportsProsodyOptions = self._dll.ocSpeech_supportsProsodyOptions()
return self.supportsProsodyOptions
def _get_supportedSettings(self):
self.supportedSettings = settings = [
SynthDriver.VoiceSetting(),
SynthDriver.RateSetting(),
]
if self.supportsProsodyOptions:
settings.append(SynthDriver.RateBoostSetting())
settings.extend([
SynthDriver.PitchSetting(),
SynthDriver.VolumeSetting(),
])
return settings
def __init__(self):
super(SynthDriver, self).__init__()
self._dll = NVDAHelper.getHelperLocalWin10Dll()
self._dll.ocSpeech_getCurrentVoiceLanguage.restype = ctypes.c_wchar_p
# Set initial values for parameters that can't be queried when prosody is not supported.
# This initialises our cache for the value.
# When prosody is supported, the values are used for caching reasons.
self._rate = 50
self._pitch = 50
self._volume = 100
if self.supportsProsodyOptions:
self._dll.ocSpeech_getPitch.restype = ctypes.c_double
self._dll.ocSpeech_getVolume.restype = ctypes.c_double
self._dll.ocSpeech_getRate.restype = ctypes.c_double
else:
log.debugWarning("Prosody options not supported")
self._handle = self._dll.ocSpeech_initialize()
self._callbackInst = ocSpeech_Callback(self._callback)
self._dll.ocSpeech_setCallback(self._handle, self._callbackInst)
self._dll.ocSpeech_getVoices.restype = NVDAHelper.bstrReturn
self._dll.ocSpeech_getCurrentVoiceId.restype = ctypes.c_wchar_p
self._player = None
# Initialize state.
self._queuedSpeech = []
self._wasCancelled = False
self._isProcessing = False
# Initialize the voice to a sane default
self.voice = self._getDefaultVoice()
def _maybeInitPlayer(self, wav):
"""Initialize audio playback based on the wave header provided by the synthesizer.
If the sampling rate has not changed, the existing player is used.
Otherwise, a new one is created with the appropriate parameters.
"""
samplesPerSec = wav.getframerate()
if self._player and self._player.samplesPerSec == samplesPerSec:
return
if self._player:
# Finalise any pending audio.
self._player.idle()
bytesPerSample = wav.getsampwidth()
self._bytesPerSec = samplesPerSec * bytesPerSample
self._player = nvwave.WavePlayer(channels=wav.getnchannels(),
samplesPerSec=samplesPerSec, bitsPerSample=bytesPerSample * 8,
outputDevice=config.conf["speech"]["outputDevice"])
def terminate(self):
super(SynthDriver, self).terminate()
self._dll.ocSpeech_terminate(self._handle)
# Drop the ctypes function instance for the callback,
# as it is holding a reference to an instance method, which causes a reference cycle.
self._callbackInst = None
def cancel(self):
# Set a flag to tell the callback not to push more audio.
self._wasCancelled = True
log.debug("Cancelling")
# There might be more text pending. Throw it away.
if self.supportsProsodyOptions:
# In this case however, we must keep any parameter changes.
self._queuedSpeech = [item for item in self._queuedSpeech
if not isinstance(item, str)]
else:
self._queuedSpeech = []
if self._player:
self._player.stop()
def speak(self, speechSequence):
if self.supportsProsodyOptions:
conv = _OcSsmlConverter(self.language)
else:
conv = _OcPreAPI5SsmlConverter(self.language, self._rate, self._pitch, self._volume)
text = conv.convertToXml(speechSequence)
# #7495: Calling WaveOutOpen blocks for ~100 ms if called from the callback
# when the SSML includes marks.
# We're not quite sure why.
# To work around this, open the device before queuing.
if self._player:
self._player.open()
self._queueSpeech(text)
def _queueSpeech(self, item):
self._queuedSpeech.append(item)
# We only process the queue here if it isn't already being processed.
if not self._isProcessing:
self._processQueue()
@classmethod
def _percentToParam(self, percent, min, max):
"""Overrides SynthDriver._percentToParam to return floating point parameter values.
"""
return float(percent) / 100 * (max - min) + min
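# Illustrative: _percentToParam(50, self.MIN_RATE, self.DEFAULT_MAX_RATE) is
# 50 / 100 * (1.5 - 0.5) + 0.5 == 1.0, so a 50% rate setting maps to OneCore's
# mid-range raw value (interpretation assumed, not stated by the driver).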
def _get_pitch(self):
if not self.supportsProsodyOptions:
return self._pitch
rawPitch = self._dll.ocSpeech_getPitch(self._handle)
return self._paramToPercent(rawPitch, self.MIN_PITCH, self.MAX_PITCH)
def _set_pitch(self, pitch):
self._pitch = pitch
if not self.supportsProsodyOptions:
return
rawPitch = self._percentToParam(pitch, self.MIN_PITCH, self.MAX_PITCH)
self._queuedSpeech.append((self._dll.ocSpeech_setPitch, rawPitch))
def _get_volume(self):
if not self.supportsProsodyOptions:
return self._volume
rawVolume = self._dll.ocSpeech_getVolume(self._handle)
return int(rawVolume * 100)
def _set_volume(self, volume):
self._volume = volume
if not self.supportsProsodyOptions:
return
rawVolume = volume / 100.0
self._queuedSpeech.append((self._dll.ocSpeech_setVolume, rawVolume))
def _get_rate(self):
if not self.supportsProsodyOptions:
return self._rate
rawRate = self._dll.ocSpeech_getRate(self._handle)
maxRate = self.BOOSTED_MAX_RATE if self._rateBoost else self.DEFAULT_MAX_RATE
return self._paramToPercent(rawRate, self.MIN_RATE, maxRate)
def _set_rate(self, rate):
self._rate = rate
if not self.supportsProsodyOptions:
return
maxRate = self.BOOSTED_MAX_RATE if self._rateBoost else self.DEFAULT_MAX_RATE
rawRate = self._percentToParam(rate, self.MIN_RATE, maxRate)
self._queuedSpeech.append((self._dll.ocSpeech_setRate, rawRate))
_rateBoost = False
def _get_rateBoost(self):
return self._rateBoost
def _set_rateBoost(self, enable):
if enable == self._rateBoost:
return
# Use the cached rate to calculate the new rate with rate boost enabled.
# If we don't, getting the rate property will return the default rate when initializing the driver and applying settings.
rate = self._rate
self._rateBoost = enable
self.rate = rate
def _processQueue(self):
if not self._queuedSpeech:
# There are no more queued utterances at this point, so call idle.
# This blocks while waiting for the final chunk to play,
# so by the time this is done, there might be something queued.
log.debug("Calling idle on audio player")
self._player.idle()
synthDoneSpeaking.notify(synth=self)
while self._queuedSpeech:
item = self._queuedSpeech.pop(0)
if isinstance(item, tuple):
# Parameter change.
# Note that, if prosody options aren't supported, this code will never be executed.
func, value = item
value = ctypes.c_double(value)
func(self._handle, value)
continue
self._wasCancelled = False
log.debug("Begin processing speech")
self._isProcessing = True
# ocSpeech_speak is async.
# It will call _callback in a background thread once done,
# which will eventually process the queue again.
self._dll.ocSpeech_speak(self._handle, item)
return
log.debug("Queue empty, done processing")
self._isProcessing = False
def _callback(self, bytes, len, markers):
if len == 0:
# The C++ code will log an error with details.
log.debugWarning("ocSpeech_speak failed!")
self._processQueue()
return
# This gets called in a background thread.
stream = io.BytesIO(ctypes.string_at(bytes, len))
wav = wave.open(stream, "r")
self._maybeInitPlayer(wav)
data = wav.readframes(wav.getnframes())
if markers:
markers = markers.split('|')
else:
markers = []
prevPos = 0
# Push audio up to each marker so we can sync the audio with the markers.
for marker in markers:
if self._wasCancelled:
break
name, pos = marker.split(':')
index = int(name)
pos = int(pos)
# pos is a time offset in 100-nanosecond units.
# Convert this to a byte offset.
# Order the equation so we don't have to do floating point.
pos = pos * self._bytesPerSec // HUNDRED_NS_PER_SEC
# Push audio up to this marker.
self._player.feed(data[prevPos:pos],
onDone=lambda index=index: synthIndexReached.notify(synth=self, index=index))
prevPos = pos
if self._wasCancelled:
log.debug("Cancelled, stopped pushing audio")
else:
self._player.feed(data[prevPos:])
log.debug("Done pushing audio")
self._processQueue()
def _getVoiceInfoFromOnecoreVoiceString(self, voiceStr):
"""
Produces an NVDA VoiceInfo object representing the given voice string from Onecore speech.
"""
# The voice string is made up of the ID, the language, and the display name.
ID, language, name = voiceStr.split(':')
language = language.replace('-', '_')
return VoiceInfo(ID, name, language=language)
def _getAvailableVoices(self):
voices = OrderedDict()
# Fetch the full list of voices that Onecore speech knows about.
# Note that it may give back voices that are uninstalled or broken.
voicesStr = self._dll.ocSpeech_getVoices(self._handle).split('|')
for index,voiceStr in enumerate(voicesStr):
voiceInfo=self._getVoiceInfoFromOnecoreVoiceString(voiceStr)
# Filter out any invalid voices.
if not self._isVoiceValid(voiceInfo.id):
continue
voiceInfo.onecoreIndex=index
voices[voiceInfo.id] = voiceInfo
return voices
def _isVoiceValid(self,ID):
"""
Checks that the given voice actually exists and is valid.
It checks the Registry, and also ensures that its data files actually exist on this machine.
@param ID: the ID of the requested voice.
@type ID: string
@returns: True if the voice is valid, false otherwise.
@rtype: boolean
"""
IDParts = ID.split('\\')
rootKey = getattr(winreg, IDParts[0])
subkey = "\\".join(IDParts[1:])
try:
hkey = winreg.OpenKey(rootKey, subkey)
except WindowsError as e:
log.debugWarning("Could not open registry key %s, %r" % (ID, e))
return False
try:
langDataPath = winreg.QueryValueEx(hkey, 'langDataPath')
except WindowsError as e:
log.debugWarning("Could not open registry value 'langDataPath', %r" % e)
return False
if not langDataPath or not isinstance(langDataPath[0], str):
log.debugWarning("Invalid langDataPath value")
return False
if not os.path.isfile(os.path.expandvars(langDataPath[0])):
log.debugWarning("Missing language data file: %s" % langDataPath[0])
return False
try:
voicePath = winreg.QueryValueEx(hkey, 'voicePath')
except WindowsError as e:
log.debugWarning("Could not open registry value 'langDataPath', %r" % e)
return False
if not voicePath or not isinstance(voicePath[0],str):
log.debugWarning("Invalid voicePath value")
return False
if not os.path.isfile(os.path.expandvars(voicePath[0] + '.apm')):
log.debugWarning("Missing voice file: %s" % voicePath[0] + ".apm")
return False
return True
def _get_voice(self):
return self._dll.ocSpeech_getCurrentVoiceId(self._handle)
def _set_voice(self, id):
voices = self.availableVoices
# Try setting the requested voice
for voice in voices.values():
if voice.id == id:
self._dll.ocSpeech_setVoice(self._handle, voice.onecoreIndex)
return
raise LookupError("No such voice: %s"%id)
def _getDefaultVoice(self):
"""
Finds the best available voice that can be used as a default.
It first tries finding a voice with the same language and country as the user's configured Windows language (E.g. en_AU),
else one that matches just the language (E.g. en),
else simply the first available.
@returns: the ID of the voice, suitable for passing to self.voice for setting.
@rtype: string
"""
voices = self.availableVoices
# Try matching to NVDA language
fullLanguage=languageHandler.getWindowsLanguage()
for voice in voices.values():
if voice.language==fullLanguage:
return voice.id
baseLanguage=fullLanguage.split('_')[0]
if baseLanguage!=fullLanguage:
for voice in voices.values():
if voice.language.startswith(baseLanguage):
return voice.id
# Just use the first available
for voice in voices.values():
return voice.id
raise RuntimeError("No voices available")
def _get_language(self):
return self._dll.ocSpeech_getCurrentVoiceLanguage(self._handle)
def pause(self, switch):
if self._player:
self._player.pause(switch)
| 34.764579
| 124
| 0.750435
|
fe1ad6251b01f58dca330a07d911002c27a6196b
| 16,952
|
py
|
Python
|
sympy/discrete/tests/test_convolutions.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | 2
|
2019-12-16T16:02:58.000Z
|
2020-01-20T04:07:18.000Z
|
sympy/discrete/tests/test_convolutions.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/discrete/tests/test_convolutions.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy import sqrt, pi, E, exp, Rational
from sympy.core import S, symbols, I
from sympy.discrete.convolutions import (
convolution, convolution_fft, convolution_ntt, convolution_fwht,
convolution_subset, covering_product, intersecting_product)
from sympy.utilities.pytest import raises
from sympy.abc import x, y
def test_convolution():
# fft
a = [1, Rational(5, 3), sqrt(3), Rational(7, 5)]
b = [9, 5, 5, 4, 3, 2]
c = [3, 5, 3, 7, 8]
d = [1422, 6572, 3213, 5552]
assert convolution(a, b) == convolution_fft(a, b)
assert convolution(a, b, dps=9) == convolution_fft(a, b, dps=9)
assert convolution(a, d, dps=7) == convolution_fft(d, a, dps=7)
assert convolution(a, d[1:], dps=3) == convolution_fft(d[1:], a, dps=3)
# prime moduli of the form (m*2**k + 1), sequence length
# should be a divisor of 2**k
p = 7*17*2**23 + 1
q = 19*2**10 + 1
# ntt
assert convolution(d, b, prime=q) == convolution_ntt(b, d, prime=q)
assert convolution(c, b, prime=p) == convolution_ntt(b, c, prime=p)
assert convolution(d, c, prime=p) == convolution_ntt(c, d, prime=p)
raises(TypeError, lambda: convolution(b, d, dps=5, prime=q))
raises(TypeError, lambda: convolution(b, d, dps=6, prime=q))
# fwht
assert convolution(a, b, dyadic=True) == convolution_fwht(a, b)
assert convolution(a, b, dyadic=False) == convolution(a, b)
raises(TypeError, lambda: convolution(b, d, dps=2, dyadic=True))
raises(TypeError, lambda: convolution(b, d, prime=p, dyadic=True))
raises(TypeError, lambda: convolution(a, b, dps=2, dyadic=True))
raises(TypeError, lambda: convolution(b, c, prime=p, dyadic=True))
# subset
assert convolution(a, b, subset=True) == convolution_subset(a, b) == \
convolution(a, b, subset=True, dyadic=False) == \
convolution(a, b, subset=True)
assert convolution(a, b, subset=False) == convolution(a, b)
raises(TypeError, lambda: convolution(a, b, subset=True, dyadic=True))
raises(TypeError, lambda: convolution(c, d, subset=True, dps=6))
raises(TypeError, lambda: convolution(a, c, subset=True, prime=q))
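# Worked example (illustrative): the linear convolution of [1, 2, 3] and [4, 5, 6] is
# [1*4, 1*5 + 2*4, 1*6 + 2*5 + 3*4, 2*6 + 3*5, 3*6] == [4, 13, 28, 27, 18],
# which is the value asserted for convolution_fft in test_convolution_fft below.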
def test_cyclic_convolution():
# fft
a = [1, Rational(5, 3), sqrt(3), Rational(7, 5)]
b = [9, 5, 5, 4, 3, 2]
assert convolution([1, 2, 3], [4, 5, 6], cycle=0) == \
convolution([1, 2, 3], [4, 5, 6], cycle=5) == \
convolution([1, 2, 3], [4, 5, 6])
assert convolution([1, 2, 3], [4, 5, 6], cycle=3) == [31, 31, 28]
a = [Rational(1, 3), Rational(7, 3), Rational(5, 9), Rational(2, 7), Rational(5, 8)]
b = [Rational(3, 5), Rational(4, 7), Rational(7, 8), Rational(8, 9)]
assert convolution(a, b, cycle=0) == \
convolution(a, b, cycle=len(a) + len(b) - 1)
assert convolution(a, b, cycle=4) == [Rational(87277, 26460), Rational(30521, 11340),
Rational(11125, 4032), Rational(3653, 1080)]
assert convolution(a, b, cycle=6) == [Rational(20177, 20160), Rational(676, 315), Rational(47, 24),
Rational(3053, 1080), Rational(16397, 5292), Rational(2497, 2268)]
assert convolution(a, b, cycle=9) == \
convolution(a, b, cycle=0) + [S.Zero]
# ntt
a = [2313, 5323532, S(3232), 42142, 42242421]
b = [S(33456), 56757, 45754, 432423]
assert convolution(a, b, prime=19*2**10 + 1, cycle=0) == \
convolution(a, b, prime=19*2**10 + 1, cycle=8) == \
convolution(a, b, prime=19*2**10 + 1)
assert convolution(a, b, prime=19*2**10 + 1, cycle=5) == [96, 17146, 2664,
15534, 3517]
assert convolution(a, b, prime=19*2**10 + 1, cycle=7) == [4643, 3458, 1260,
15534, 3517, 16314, 13688]
assert convolution(a, b, prime=19*2**10 + 1, cycle=9) == \
convolution(a, b, prime=19*2**10 + 1) + [0]
# fwht
u, v, w, x, y = symbols('u v w x y')
p, q, r, s, t = symbols('p q r s t')
c = [u, v, w, x, y]
d = [p, q, r, s, t]
assert convolution(a, b, dyadic=True, cycle=3) == \
[2499522285783, 19861417974796, 4702176579021]
assert convolution(a, b, dyadic=True, cycle=5) == [2718149225143,
2114320852171, 20571217906407, 246166418903, 1413262436976]
assert convolution(c, d, dyadic=True, cycle=4) == \
[p*u + p*y + q*v + r*w + s*x + t*u + t*y,
p*v + q*u + q*y + r*x + s*w + t*v,
p*w + q*x + r*u + r*y + s*v + t*w,
p*x + q*w + r*v + s*u + s*y + t*x]
assert convolution(c, d, dyadic=True, cycle=6) == \
[p*u + q*v + r*w + r*y + s*x + t*w + t*y,
p*v + q*u + r*x + s*w + s*y + t*x,
p*w + q*x + r*u + s*v,
p*x + q*w + r*v + s*u,
p*y + t*u,
q*y + t*v]
# subset
assert convolution(a, b, subset=True, cycle=7) == [18266671799811,
178235365533, 213958794, 246166418903, 1413262436976,
2397553088697, 1932759730434]
assert convolution(a[1:], b, subset=True, cycle=4) == \
[178104086592, 302255835516, 244982785880, 3717819845434]
assert convolution(a, b[:-1], subset=True, cycle=6) == [1932837114162,
178235365533, 213958794, 245166224504, 1413262436976, 2397553088697]
assert convolution(c, d, subset=True, cycle=3) == \
[p*u + p*x + q*w + r*v + r*y + s*u + t*w,
p*v + p*y + q*u + s*y + t*u + t*x,
p*w + q*y + r*u + t*v]
assert convolution(c, d, subset=True, cycle=5) == \
[p*u + q*y + t*v,
p*v + q*u + r*y + t*w,
p*w + r*u + s*y + t*x,
p*x + q*w + r*v + s*u,
p*y + t*u]
raises(ValueError, lambda: convolution([1, 2, 3], [4, 5, 6], cycle=-1))
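# Worked example (illustrative): with cycle=3 the linear result [4, 13, 28, 27, 18] for
# [1, 2, 3] * [4, 5, 6] is wrapped modulo 3: [4 + 27, 13 + 18, 28] == [31, 31, 28],
# matching the assertion above.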
def test_convolution_fft():
assert all(convolution_fft([], x, dps=y) == [] for x in ([], [1]) for y in (None, 3))
assert convolution_fft([1, 2, 3], [4, 5, 6]) == [4, 13, 28, 27, 18]
assert convolution_fft([1], [5, 6, 7]) == [5, 6, 7]
assert convolution_fft([1, 3], [5, 6, 7]) == [5, 21, 25, 21]
assert convolution_fft([1 + 2*I], [2 + 3*I]) == [-4 + 7*I]
assert convolution_fft([1 + 2*I, 3 + 4*I, 5 + Rational(3, 5)*I], [Rational(2, 5) + Rational(4, 7)*I]) == \
[Rational(-26, 35) + I*Rational(48, 35), Rational(-38, 35) + I*Rational(116, 35), Rational(58, 35) + I*Rational(542, 175)]
assert convolution_fft([Rational(3, 4), Rational(5, 6)], [Rational(7, 8), Rational(1, 3), Rational(2, 5)]) == \
[Rational(21, 32), Rational(47, 48), Rational(26, 45), Rational(1, 3)]
assert convolution_fft([Rational(1, 9), Rational(2, 3), Rational(3, 5)], [Rational(2, 5), Rational(3, 7), Rational(4, 9)]) == \
[Rational(2, 45), Rational(11, 35), Rational(8152, 14175), Rational(523, 945), Rational(4, 15)]
assert convolution_fft([pi, E, sqrt(2)], [sqrt(3), 1/pi, 1/E]) == \
[sqrt(3)*pi, 1 + sqrt(3)*E, E/pi + pi*exp(-1) + sqrt(6),
sqrt(2)/pi + 1, sqrt(2)*exp(-1)]
assert convolution_fft([2321, 33123], [5321, 6321, 71323]) == \
[12350041, 190918524, 374911166, 2362431729]
assert convolution_fft([312313, 31278232], [32139631, 319631]) == \
[10037624576503, 1005370659728895, 9997492572392]
raises(TypeError, lambda: convolution_fft(x, y))
raises(ValueError, lambda: convolution_fft([x, y], [y, x]))
def test_convolution_ntt():
# prime moduli of the form (m*2**k + 1), sequence length
# should be a divisor of 2**k
p = 7*17*2**23 + 1
q = 19*2**10 + 1
r = 2*500000003 + 1 # only for sequences of length 1 or 2
# s = 2*3*5*7 # composite modulus
assert all(convolution_ntt([], x, prime=y) == [] for x in ([], [1]) for y in (p, q, r))
assert convolution_ntt([2], [3], r) == [6]
assert convolution_ntt([2, 3], [4], r) == [8, 12]
assert convolution_ntt([32121, 42144, 4214, 4241], [32132, 3232, 87242], p) == [33867619,
459741727, 79180879, 831885249, 381344700, 369993322]
assert convolution_ntt([121913, 3171831, 31888131, 12], [17882, 21292, 29921, 312], q) == \
[8158, 3065, 3682, 7090, 1239, 2232, 3744]
assert convolution_ntt([12, 19, 21, 98, 67], [2, 6, 7, 8, 9], p) == \
convolution_ntt([12, 19, 21, 98, 67], [2, 6, 7, 8, 9], q)
assert convolution_ntt([12, 19, 21, 98, 67], [21, 76, 17, 78, 69], p) == \
convolution_ntt([12, 19, 21, 98, 67], [21, 76, 17, 78, 69], q)
raises(ValueError, lambda: convolution_ntt([2, 3], [4, 5], r))
raises(ValueError, lambda: convolution_ntt([x, y], [y, x], q))
raises(TypeError, lambda: convolution_ntt(x, y, p))
def test_convolution_fwht():
assert convolution_fwht([], []) == []
assert convolution_fwht([], [1]) == []
assert convolution_fwht([1, 2, 3], [4, 5, 6]) == [32, 13, 18, 27]
assert convolution_fwht([Rational(5, 7), Rational(6, 8), Rational(7, 3)], [2, 4, Rational(6, 7)]) == \
[Rational(45, 7), Rational(61, 14), Rational(776, 147), Rational(419, 42)]
a = [1, Rational(5, 3), sqrt(3), Rational(7, 5), 4 + 5*I]
b = [94, 51, 53, 45, 31, 27, 13]
c = [3 + 4*I, 5 + 7*I, 3, Rational(7, 6), 8]
assert convolution_fwht(a, b) == [53*sqrt(3) + 366 + 155*I,
45*sqrt(3) + Rational(5848, 15) + 135*I,
94*sqrt(3) + Rational(1257, 5) + 65*I,
51*sqrt(3) + Rational(3974, 15),
13*sqrt(3) + 452 + 470*I,
Rational(4513, 15) + 255*I,
31*sqrt(3) + Rational(1314, 5) + 265*I,
27*sqrt(3) + Rational(3676, 15) + 225*I]
assert convolution_fwht(b, c) == [Rational(1993, 2) + 733*I, Rational(6215, 6) + 862*I,
Rational(1659, 2) + 527*I, Rational(1988, 3) + 551*I, 1019 + 313*I, Rational(3955, 6) + 325*I,
Rational(1175, 2) + 52*I, Rational(3253, 6) + 91*I]
assert convolution_fwht(a[3:], c) == [Rational(-54, 5) + I*Rational(293, 5), -1 + I*Rational(204, 5),
Rational(133, 15) + I*Rational(35, 6), Rational(409, 30) + 15*I, Rational(56, 5), 32 + 40*I, 0, 0]
u, v, w, x, y, z = symbols('u v w x y z')
assert convolution_fwht([u, v], [x, y]) == [u*x + v*y, u*y + v*x]
assert convolution_fwht([u, v, w], [x, y]) == \
[u*x + v*y, u*y + v*x, w*x, w*y]
assert convolution_fwht([u, v, w], [x, y, z]) == \
[u*x + v*y + w*z, u*y + v*x, u*z + w*x, v*z + w*y]
raises(TypeError, lambda: convolution_fwht(x, y))
raises(TypeError, lambda: convolution_fwht(x*y, u + v))
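# Worked example (illustrative): dyadic convolution is XOR-indexed, with entry k summing
# a[i]*b[j] over i ^ j == k (inputs padded to length 4). For [1, 2, 3] and [4, 5, 6]:
#     k=0: 1*4 + 2*5 + 3*6 == 32;  k=1: 1*5 + 2*4 == 13;
#     k=2: 1*6 + 3*4 == 18;        k=3: 2*6 + 3*5 == 27,
# matching convolution_fwht([1, 2, 3], [4, 5, 6]) == [32, 13, 18, 27] asserted above.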
def test_convolution_subset():
assert convolution_subset([], []) == []
assert convolution_subset([], [Rational(1, 3)]) == []
assert convolution_subset([6 + I*Rational(3, 7)], [Rational(2, 3)]) == [4 + I*Rational(2, 7)]
a = [1, Rational(5, 3), sqrt(3), 4 + 5*I]
b = [64, 71, 55, 47, 33, 29, 15]
c = [3 + I*Rational(2, 3), 5 + 7*I, 7, Rational(7, 5), 9]
assert convolution_subset(a, b) == [64, Rational(533, 3), 55 + 64*sqrt(3),
71*sqrt(3) + Rational(1184, 3) + 320*I, 33, 84,
15 + 33*sqrt(3), 29*sqrt(3) + 157 + 165*I]
assert convolution_subset(b, c) == [192 + I*Rational(128, 3), 533 + I*Rational(1486, 3),
613 + I*Rational(110, 3), Rational(5013, 5) + I*Rational(1249, 3),
675 + 22*I, 891 + I*Rational(751, 3),
771 + 10*I, Rational(3736, 5) + 105*I]
assert convolution_subset(a, c) == convolution_subset(c, a)
assert convolution_subset(a[:2], b) == \
[64, Rational(533, 3), 55, Rational(416, 3), 33, 84, 15, 25]
assert convolution_subset(a[:2], c) == \
[3 + I*Rational(2, 3), 10 + I*Rational(73, 9), 7, Rational(196, 15), 9, 15, 0, 0]
u, v, w, x, y, z = symbols('u v w x y z')
assert convolution_subset([u, v, w], [x, y]) == [u*x, u*y + v*x, w*x, w*y]
assert convolution_subset([u, v, w, x], [y, z]) == \
[u*y, u*z + v*y, w*y, w*z + x*y]
assert convolution_subset([u, v], [x, y, z]) == \
convolution_subset([x, y, z], [u, v])
raises(TypeError, lambda: convolution_subset(x, z))
raises(TypeError, lambda: convolution_subset(Rational(7, 3), u))
def test_covering_product():
assert covering_product([], []) == []
assert covering_product([], [Rational(1, 3)]) == []
assert covering_product([6 + I*Rational(3, 7)], [Rational(2, 3)]) == [4 + I*Rational(2, 7)]
a = [1, Rational(5, 8), sqrt(7), 4 + 9*I]
b = [66, 81, 95, 49, 37, 89, 17]
c = [3 + I*Rational(2, 3), 51 + 72*I, 7, Rational(7, 15), 91]
assert covering_product(a, b) == [66, Rational(1383, 8), 95 + 161*sqrt(7),
130*sqrt(7) + 1303 + 2619*I, 37,
Rational(671, 4), 17 + 54*sqrt(7),
89*sqrt(7) + Rational(4661, 8) + 1287*I]
assert covering_product(b, c) == [198 + 44*I, 7740 + 10638*I,
1412 + I*Rational(190, 3), Rational(42684, 5) + I*Rational(31202, 3),
9484 + I*Rational(74, 3), 22163 + I*Rational(27394, 3),
10621 + I*Rational(34, 3), Rational(90236, 15) + 1224*I]
assert covering_product(a, c) == covering_product(c, a)
assert covering_product(b, c[:-1]) == [198 + 44*I, 7740 + 10638*I,
1412 + I*Rational(190, 3), Rational(42684, 5) + I*Rational(31202, 3),
111 + I*Rational(74, 3), 6693 + I*Rational(27394, 3),
429 + I*Rational(34, 3), Rational(23351, 15) + 1224*I]
assert covering_product(a, c[:-1]) == [3 + I*Rational(2, 3),
Rational(339, 4) + I*Rational(1409, 12), 7 + 10*sqrt(7) + 2*sqrt(7)*I/3,
-403 + 772*sqrt(7)/15 + 72*sqrt(7)*I + I*Rational(12658, 15)]
u, v, w, x, y, z = symbols('u v w x y z')
assert covering_product([u, v, w], [x, y]) == \
[u*x, u*y + v*x + v*y, w*x, w*y]
assert covering_product([u, v, w, x], [y, z]) == \
[u*y, u*z + v*y + v*z, w*y, w*z + x*y + x*z]
assert covering_product([u, v], [x, y, z]) == \
covering_product([x, y, z], [u, v])
raises(TypeError, lambda: covering_product(x, z))
raises(TypeError, lambda: covering_product(Rational(7, 3), u))
def test_intersecting_product():
assert intersecting_product([], []) == []
assert intersecting_product([], [Rational(1, 3)]) == []
assert intersecting_product([6 + I*Rational(3, 7)], [Rational(2, 3)]) == [4 + I*Rational(2, 7)]
a = [1, sqrt(5), Rational(3, 8) + 5*I, 4 + 7*I]
b = [67, 51, 65, 48, 36, 79, 27]
c = [3 + I*Rational(2, 5), 5 + 9*I, 7, Rational(7, 19), 13]
assert intersecting_product(a, b) == [195*sqrt(5) + Rational(6979, 8) + 1886*I,
178*sqrt(5) + 520 + 910*I, Rational(841, 2) + 1344*I,
192 + 336*I, 0, 0, 0, 0]
assert intersecting_product(b, c) == [Rational(128553, 19) + I*Rational(9521, 5),
Rational(17820, 19) + 1602*I, Rational(19264, 19), Rational(336, 19), 1846, 0, 0, 0]
assert intersecting_product(a, c) == intersecting_product(c, a)
assert intersecting_product(b[1:], c[:-1]) == [Rational(64788, 19) + I*Rational(8622, 5),
Rational(12804, 19) + 1152*I, Rational(11508, 19), Rational(252, 19), 0, 0, 0, 0]
assert intersecting_product(a, c[:-2]) == \
[Rational(-99, 5) + 10*sqrt(5) + 2*sqrt(5)*I/5 + I*Rational(3021, 40),
-43 + 5*sqrt(5) + 9*sqrt(5)*I + 71*I, Rational(245, 8) + 84*I, 0]
u, v, w, x, y, z = symbols('u v w x y z')
assert intersecting_product([u, v, w], [x, y]) == \
[u*x + u*y + v*x + w*x + w*y, v*y, 0, 0]
assert intersecting_product([u, v, w, x], [y, z]) == \
[u*y + u*z + v*y + w*y + w*z + x*y, v*z + x*z, 0, 0]
assert intersecting_product([u, v], [x, y, z]) == \
intersecting_product([x, y, z], [u, v])
raises(TypeError, lambda: intersecting_product(x, z))
raises(TypeError, lambda: intersecting_product(u, Rational(8, 3)))
| 46.443836
| 134
| 0.512683
|
bb201f4ab12a75dc3488a50f8605fdc80337dcf1
| 330
|
py
|
Python
|
main.py
|
omniaura/omnisynth-dev
|
ce9e3cfe1302ff53c8adac4723a4d7df15350caa
|
[
"Apache-2.0"
] | 1
|
2022-01-02T03:16:08.000Z
|
2022-01-02T03:16:08.000Z
|
main.py
|
omniaura/omnisynth-dev
|
ce9e3cfe1302ff53c8adac4723a4d7df15350caa
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
omniaura/omnisynth-dev
|
ce9e3cfe1302ff53c8adac4723a4d7df15350caa
|
[
"Apache-2.0"
] | null | null | null |
'''
Boots the OmniSynth Server for API calls and starts an instance of SuperCollider (OmniSynth DSP)
'''
import gevent.subprocess as subprocess
# starts the server as a subprocess
def start_server():
subprocess.call(['python', 'modules/start_server.py'])
if __name__ == "__main__":
start_server()
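# Hedged note (not part of the upstream file): running `python main.py` delegates to
# `python modules/start_server.py` via gevent's cooperative subprocess wrapper; the
# call blocks until the server process exits.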
| 23.571429
| 97
| 0.690909
|
9d7ca135fe3ecd99f18dc7ec5b4dbbba46a9813a
| 1,560
|
py
|
Python
|
adafruit_display_notification/apple.py
|
tannewt/Adafruit_CircuitPython_Display_Notification
|
c881874fc891a3a119f073f6665ba71593041a5a
|
[
"MIT"
] | null | null | null |
adafruit_display_notification/apple.py
|
tannewt/Adafruit_CircuitPython_Display_Notification
|
c881874fc891a3a119f073f6665ba71593041a5a
|
[
"MIT"
] | null | null | null |
adafruit_display_notification/apple.py
|
tannewt/Adafruit_CircuitPython_Display_Notification
|
c881874fc891a3a119f073f6665ba71593041a5a
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
#
# Copyright (c) 2019 Scott Shawcroft for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from . import PlainNotification
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Display_Notification.git"
def create_notification_widget(notification, max_width, max_height, *, color_count=2**16):
"""Creates a notification widget for the given Apple notification."""
return PlainNotification(notification.title, notification.message, max_width, max_height)
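# Hedged usage sketch (illustrative): given a notification object exposing .title and
# .message attributes, a widget sized for a 240x240 screen could be built with
#     widget = create_notification_widget(notification, 240, 240)
# and then shown on the display.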
| 50.322581
| 93
| 0.783333
|
dd688bedfe601d33525df6b0bd2993076a875c6d
| 32,184
|
py
|
Python
|
selfdrive/controls/controlsd.py
|
baldwalker/openpilot-1
|
c85e482613ce7fc4ea9523c3d0cd8e01679ab5ca
|
[
"MIT"
] | 1
|
2021-07-28T01:46:34.000Z
|
2021-07-28T01:46:34.000Z
|
selfdrive/controls/controlsd.py
|
baldwalker/openpilot-1
|
c85e482613ce7fc4ea9523c3d0cd8e01679ab5ca
|
[
"MIT"
] | null | null | null |
selfdrive/controls/controlsd.py
|
baldwalker/openpilot-1
|
c85e482613ce7fc4ea9523c3d0cd8e01679ab5ca
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import math
from cereal import car, log
from common.numpy_fast import clip, interp
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.drive_helpers import get_lag_adjusted_curvature
from selfdrive.controls.lib.longcontrol import LongControl, STARTING_TARGET_SPEED
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI, EON
from selfdrive.manager.process_config import managed_processes
from selfdrive.car.hyundai.scc_smoother import SccSmoother
from selfdrive.ntune import ntune_common_get, ntune_common_enabled, ntune_scc_get
LDW_MIN_SPEED = 20 * CV.MPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.01
STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL
STEER_ANGLE_SATURATION_THRESHOLD = 2.5 # Degrees
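# Worked values (assuming the 100Hz control loop below, i.e. DT_CTRL == 0.01):
# LDW_MIN_SPEED is 20 mph ~= 8.9 m/s, and STEER_ANGLE_SATURATION_TIMEOUT is
# 1.0 / 0.01 == 100 control frames, i.e. roughly one second of sustained saturation.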
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ
IGNORE_PROCESSES = {"rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned",
"logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad"} | \
{k for k, v in managed_processes.items() if not v.enabled}
ACTUATOR_FIELDS = set(car.CarControl.Actuators.schema.fields.keys())
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None):
config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH)
# Setup sockets
self.pm = pm
if self.pm is None:
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.camera_packets = ["roadCameraState", "driverCameraState"]
if TICI:
self.camera_packets.append("wideRoadCameraState")
params = Params()
self.joystick_mode = params.get_bool("JoystickDebugMode")
joystick_packet = ['testJoystick'] if self.joystick_mode else []
self.sm = sm
if self.sm is None:
ignore = ['driverCameraState', 'managerState'] if SIMULATION else None
self.sm = messaging.SubMaster(['deviceState', 'pandaState', 'modelV2', 'liveCalibration',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'managerState', 'liveParameters', 'radarState'] + self.camera_packets + joystick_packet,
ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan'])
self.can_sock = can_sock
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
if TICI:
self.log_sock = messaging.sub_sock('androidLog')
# wait for one pandaState and one CAN packet
hw_type = messaging.recv_one(self.sm.sock['pandaState']).pandaState.pandaType
has_relay = hw_type in [PandaType.blackPanda, PandaType.uno, PandaType.dos]
print("Waiting for CAN messages...")
get_one_can(self.can_sock)
self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'], has_relay)
# read params
self.is_metric = params.get_bool("IsMetric")
self.is_ldw_enabled = params.get_bool("IsLdwEnabled")
community_feature_toggle = params.get_bool("CommunityFeaturesToggle")
openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle")
passive = params.get_bool("Passive") or not openpilot_enabled_toggle
# detect sound card presence and ensure successful init
sounds_available = HARDWARE.get_sound_card_online()
car_recognized = self.CP.carName != 'mock'
controller_available = self.CI.CC is not None and not passive and not self.CP.dashcamOnly
community_feature = self.CP.communityFeature or \
self.CP.fingerprintSource == car.CarParams.FingerprintSource.can
community_feature_disallowed = community_feature and (not community_feature_toggle)
self.read_only = not car_recognized or not controller_available or \
self.CP.dashcamOnly or community_feature_disallowed
if self.read_only:
self.CP.safetyModel = car.CarParams.SafetyModel.noOutput
# Write CarParams for radard
cp_bytes = self.CP.to_bytes()
params.put("CarParams", cp_bytes)
put_nonblocking("CarParamsCache", cp_bytes)
self.CC = car.CarControl.new_message()
self.AM = AlertManager()
self.events = Events()
self.LoC = LongControl(self.CP, self.CI.compute_gb)
self.VM = VehicleModel(self.CP)
if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
self.LaC = LatControlAngle(self.CP)
elif self.CP.lateralTuning.which() == 'pid':
self.LaC = LatControlPID(self.CP)
elif self.CP.lateralTuning.which() == 'indi':
self.LaC = LatControlINDI(self.CP)
elif self.CP.lateralTuning.which() == 'lqr':
self.LaC = LatControlLQR(self.CP)
self.initialized = False
self.state = State.disabled
self.enabled = False
self.active = False
self.can_rcv_error = False
self.soft_disable_timer = 0
self.v_cruise_kph = 255
self.v_cruise_kph_last = 0
self.mismatch_counter = 0
self.can_error_counter = 0
self.last_blinker_frame = 0
self.saturated_count = 0
self.distance_traveled = 0
self.last_functional_fan_frame = 0
self.events_prev = []
self.current_alert_types = [ET.PERMANENT]
self.logged_comm_issue = False
self.v_target = 0.0
self.a_target = 0.0
# scc smoother
self.is_cruise_enabled = False
self.applyMaxSpeed = 0
self.clu_speed_ms = 0.
self.apply_accel = 0.
self.fused_accel = 0.
self.lead_drel = 0.
self.aReqValue = 0.
self.aReqValueMin = 0.
self.aReqValueMax = 0.
self.sccStockCamStatus = 0
self.sccStockCamAct = 0
self.left_lane_visible = False
self.right_lane_visible = False
self.wide_camera = TICI and params.get_bool('EnableWideCamera')
# TODO: no longer necessary, aside from process replay
self.sm['liveParameters'].valid = True
self.startup_event = get_startup_event(car_recognized, controller_available, self.CP.fuzzyFingerprint,
len(self.CP.carFw) > 0)
if not sounds_available:
self.events.add(EventName.soundsUnavailable, static=True)
if community_feature_disallowed and car_recognized and not self.CP.dashcamOnly:
self.events.add(EventName.communityFeatureDisallowed, static=True)
if not car_recognized:
self.events.add(EventName.carUnrecognized, static=True)
elif self.read_only:
self.events.add(EventName.dashcamMode, static=True)
elif self.joystick_mode:
self.events.add(EventName.joystickDebug, static=True)
self.startup_event = None
# controlsd is driven by can recv, expected at 100Hz
self.rk = Ratekeeper(100, print_delay_threshold=None)
self.prof = Profiler(False) # off by default
def update_events(self, CS):
"""Compute carEvents from carState"""
self.events.clear()
self.events.add_from_msg(CS.events)
self.events.add_from_msg(self.sm['driverMonitoringState'].events)
self.events.add_from_msg(self.sm['longitudinalPlan'].eventsDEPRECATED)
# Handle startup event
if self.startup_event is not None:
self.events.add(self.startup_event)
self.startup_event = None
# Don't add any more events if not initialized
if not self.initialized:
self.events.add(EventName.controlsInitializing)
return
# Create events for battery, temperature, disk space, and memory
if self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
# at zero percent battery, while discharging, OP should not be allowed
self.events.add(EventName.lowBattery)
if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
self.events.add(EventName.overheat)
if self.sm['deviceState'].freeSpacePercent < 7 and not SIMULATION:
# with under 7% of free space, engagement is not allowed
self.events.add(EventName.outOfSpace)
# TODO: make tici threshold the same
if self.sm['deviceState'].memoryUsagePercent > (90 if TICI else 65) and not SIMULATION:
self.events.add(EventName.lowMemory)
cpus = list(self.sm['deviceState'].cpuUsagePercent)[:(-1 if EON else None)]
if max(cpus, default=0) > 95:
self.events.add(EventName.highCpuUsage)
# Alert if fan isn't spinning for 5 seconds
if self.sm['pandaState'].pandaType in [PandaType.uno, PandaType.dos]:
if self.sm['pandaState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
self.events.add(EventName.fanMalfunction)
else:
self.last_functional_fan_frame = self.sm.frame
# Handle calibration status
cal_status = self.sm['liveCalibration'].calStatus
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
self.events.add(EventName.calibrationIncomplete)
else:
self.events.add(EventName.calibrationInvalid)
# Handle lane change
if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
direction = self.sm['lateralPlan'].laneChangeDirection
if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
(CS.rightBlindspot and direction == LaneChangeDirection.right):
self.events.add(EventName.laneChangeBlocked)
elif self.sm['lateralPlan'].autoLaneChangeEnabled and self.sm['lateralPlan'].autoLaneChangeTimer > 0:
self.events.add(EventName.autoLaneChange)
else:
if direction == LaneChangeDirection.left:
self.events.add(EventName.preLaneChangeLeft)
else:
self.events.add(EventName.preLaneChangeRight)
elif self.sm['lateralPlan'].laneChangeState in [LaneChangeState.laneChangeStarting,
LaneChangeState.laneChangeFinishing]:
self.events.add(EventName.laneChange)
if self.can_rcv_error or not CS.canValid:
self.events.add(EventName.canError)
safety_mismatch = self.sm['pandaState'].safetyModel != self.CP.safetyModel or self.sm['pandaState'].safetyParam != self.CP.safetyParam
if safety_mismatch or self.mismatch_counter >= 200:
self.events.add(EventName.controlsMismatch)
if not self.sm['liveParameters'].valid:
self.events.add(EventName.vehicleModelInvalid)
if len(self.sm['radarState'].radarErrors):
self.events.add(EventName.radarFault)
elif not self.sm.valid["pandaState"]:
self.events.add(EventName.usbError)
elif not self.sm.all_alive_and_valid():
self.events.add(EventName.commIssue)
if not self.logged_comm_issue:
invalid = [s for s, valid in self.sm.valid.items() if not valid]
not_alive = [s for s, alive in self.sm.alive.items() if not alive]
cloudlog.event("commIssue", invalid=invalid, not_alive=not_alive)
self.logged_comm_issue = True
else:
self.logged_comm_issue = False
if not self.sm['lateralPlan'].mpcSolutionValid and not (EventName.turningIndicatorOn in self.events.names):
self.events.add(EventName.plannerError)
if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs
self.events.add(EventName.sensorDataInvalid)
if not self.sm['liveLocationKalman'].posenetOK:
self.events.add(EventName.posenetInvalid)
if not self.sm['liveLocationKalman'].deviceStable:
self.events.add(EventName.deviceFalling)
if log.PandaState.FaultType.relayMalfunction in self.sm['pandaState'].faults:
self.events.add(EventName.relayMalfunction)
if self.sm['longitudinalPlan'].fcw or (self.enabled and self.sm['modelV2'].meta.hardBrakePredicted):
self.events.add(EventName.fcw)
if TICI:
logs = messaging.drain_sock(self.log_sock, wait_for_one=False)
messages = []
for m in logs:
try:
messages.append(m.androidLog.message)
except UnicodeDecodeError:
pass
for err in ["ERROR_CRC", "ERROR_ECC", "ERROR_STREAM_UNDERFLOW", "APPLY FAILED"]:
for m in messages:
if err not in m:
continue
csid = m.split("CSID:")[-1].split(" ")[0]
evt = {"0": EventName.roadCameraError, "1": EventName.wideRoadCameraError,
"2": EventName.driverCameraError}.get(csid, None)
if evt is not None:
self.events.add(evt)
# TODO: fix simulator
if not SIMULATION:
#if not NOSENSOR:
# if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000):
# # Not show in first 1 km to allow for driving out of garage. This event shows after 5 minutes
# self.events.add(EventName.noGps)
if not self.sm.all_alive(self.camera_packets):
self.events.add(EventName.cameraMalfunction)
if self.sm['modelV2'].frameDropPerc > 20:
self.events.add(EventName.modeldLagging)
if self.sm['liveLocationKalman'].excessiveResets:
self.events.add(EventName.localizerMalfunction)
# Check if all manager processes are running
not_running = set(p.name for p in self.sm['managerState'].processes if not p.running)
if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
self.events.add(EventName.processNotRunning)
# Only allow engagement with brake pressed when stopped behind another stopped car
speeds = self.sm['longitudinalPlan'].speeds
if len(speeds) > 1:
v_future = speeds[-1]
else:
v_future = 100.0
#if CS.brakePressed and v_future >= STARTING_TARGET_SPEED \
# and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
# self.events.add(EventName.noTarget)
def data_sample(self):
"""Receive data from sockets and update carState"""
# Update carState from CAN
can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
CS = self.CI.update(self.CC, can_strs)
self.sm.update(0)
all_valid = CS.canValid and self.sm.all_alive_and_valid()
if not self.initialized and (all_valid or self.sm.frame * DT_CTRL > 3.5):
self.CI.init(self.CP, self.can_sock, self.pm.sock['sendcan'])
self.initialized = True
Params().put_bool("ControlsReady", True)
# Check for CAN timeout
if not can_strs:
self.can_error_counter += 1
self.can_rcv_error = True
else:
self.can_rcv_error = False
# When the panda and controlsd do not agree on controls_allowed
# we want to disengage openpilot. However the status from the panda goes through
# another socket other than the CAN messages and one can arrive earlier than the other.
# Therefore we tolerate a mismatch for a number of samples (200, checked in update_events) before triggering the disengagement.
if not self.enabled:
self.mismatch_counter = 0
if not self.sm['pandaState'].controlsAllowed and self.enabled:
self.mismatch_counter += 1
self.distance_traveled += CS.vEgo * DT_CTRL
return CS
def state_transition(self, CS):
"""Compute conditional state transitions and execute actions on state transitions"""
self.v_cruise_kph_last = self.v_cruise_kph
# if stock cruise is completely disabled, then we can use our own set speed logic
self.CP.pcmCruise = self.CI.CP.pcmCruise
#if not self.CP.pcmCruise:
# self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.enabled, self.is_metric)
#elif self.CP.pcmCruise and CS.cruiseState.enabled:
# self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
SccSmoother.update_cruise_buttons(self, CS, self.CP.openpilotLongitudinalControl)
# decrease the soft disable timer at every step, as it's reset on
# entrance in SOFT_DISABLING state
self.soft_disable_timer = max(0, self.soft_disable_timer - 1)
self.current_alert_types = [ET.PERMANENT]
# ENABLED, PRE ENABLING, SOFT DISABLING
if self.state != State.disabled:
# user and immediate disable always have priority in a non-disabled state
if self.events.any(ET.USER_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.USER_DISABLE)
elif self.events.any(ET.IMMEDIATE_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
else:
# ENABLED
if self.state == State.enabled:
if self.events.any(ET.SOFT_DISABLE):
self.state = State.softDisabling
self.soft_disable_timer = 50 # 0.5s
self.current_alert_types.append(ET.SOFT_DISABLE)
# SOFT DISABLING
elif self.state == State.softDisabling:
if not self.events.any(ET.SOFT_DISABLE):
# no more soft disabling condition, so go back to ENABLED
self.state = State.enabled
elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0:
self.current_alert_types.append(ET.SOFT_DISABLE)
elif self.soft_disable_timer <= 0:
self.state = State.disabled
# PRE ENABLING
elif self.state == State.preEnabled:
if not self.events.any(ET.PRE_ENABLE):
self.state = State.enabled
else:
self.current_alert_types.append(ET.PRE_ENABLE)
# DISABLED
elif self.state == State.disabled:
if self.events.any(ET.ENABLE):
if self.events.any(ET.NO_ENTRY):
self.current_alert_types.append(ET.NO_ENTRY)
else:
if self.events.any(ET.PRE_ENABLE):
self.state = State.preEnabled
else:
self.state = State.enabled
self.current_alert_types.append(ET.ENABLE)
self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
# Check if actuators are enabled
self.active = self.state == State.enabled or self.state == State.softDisabling
if self.active:
self.current_alert_types.append(ET.WARNING)
# Check if openpilot is engaged
self.enabled = self.active or self.state == State.preEnabled
def state_control(self, CS):
"""Given the state, this function returns an actuators packet"""
# Update VehicleModel
params = self.sm['liveParameters']
x = max(params.stiffnessFactor, 0.1)
if ntune_common_enabled('useLiveSteerRatio'):
sr = max(params.steerRatio, 0.1)
else:
sr = max(ntune_common_get('steerRatio'), 0.1)
self.VM.update_params(x, sr)
lat_plan = self.sm['lateralPlan']
long_plan = self.sm['longitudinalPlan']
actuators = car.CarControl.Actuators.new_message()
if CS.leftBlinker or CS.rightBlinker:
self.last_blinker_frame = self.sm.frame
# State specific actions
if not self.active:
self.LaC.reset()
self.LoC.reset(v_pid=CS.vEgo)
if not CS.cruiseState.enabledAcc:
self.LoC.reset(v_pid=CS.vEgo)
if not self.joystick_mode:
# Gas/Brake PID loop
actuators.gas, actuators.brake, self.v_target, self.a_target = self.LoC.update(self.active and CS.cruiseState.enabledAcc,
CS, self.CP, long_plan)
# Steering PID loop and lateral MPC
desired_curvature, desired_curvature_rate = get_lag_adjusted_curvature(self.CP, CS.vEgo,
lat_plan.psis,
lat_plan.curvatures,
lat_plan.curvatureRates)
actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(self.active, CS, self.CP, self.VM, params,
desired_curvature, desired_curvature_rate)
actuators.steeringAngleDeg = (math.degrees(self.VM.get_steer_from_curvature(-desired_curvature, CS.vEgo)) * 180) / 200
actuators.steeringAngleDeg += params.angleOffsetDeg
else:
lac_log = log.ControlsState.LateralDebugState.new_message()
if self.sm.rcv_frame['testJoystick'] > 0 and self.active:
gb = clip(self.sm['testJoystick'].axes[0], -1, 1)
actuators.gas, actuators.brake = max(gb, 0), max(-gb, 0)
steer = clip(self.sm['testJoystick'].axes[1], -1, 1)
# max angle is 45 for angle-based cars
actuators.steer, actuators.steeringAngleDeg = steer, steer * 45.
lac_log.active = True
lac_log.steeringAngleDeg = CS.steeringAngleDeg
lac_log.output = steer
lac_log.saturated = abs(steer) >= 0.9
# Check for difference between desired angle and angle for angle based control
angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \
abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD
if angle_control_saturated and not CS.steeringPressed and self.active:
self.saturated_count += 1
else:
self.saturated_count = 0
# Send a "steering required alert" if saturation count has reached the limit
if (lac_log.saturated and not CS.steeringPressed) or \
(self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT):
if len(lat_plan.dPathPoints):
# Check if we deviated from the path
left_deviation = actuators.steer > 0 and lat_plan.dPathPoints[0] < -0.1
right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.1
if left_deviation or right_deviation:
self.events.add(EventName.steerSaturated)
self.steerSaturated = True
# Ensure no NaNs/Infs
for p in ACTUATOR_FIELDS:
if not math.isfinite(getattr(actuators, p)):
cloudlog.error(f"actuators.{p} not finite {actuators.to_dict()}")
setattr(actuators, p, 0.0)
return actuators, lac_log
def publish_logs(self, CS, start_time, actuators, lac_log):
"""Send actuators and hud commands to the car, send controlsstate and MPC logging"""
CC = car.CarControl.new_message()
CC.enabled = self.enabled
CC.actuators = actuators
CC.cruiseControl.override = True
CC.cruiseControl.cancel = self.CP.pcmCruise and not self.enabled and CS.cruiseState.enabled
if self.joystick_mode and self.sm.rcv_frame['testJoystick'] > 0 and self.sm['testJoystick'].buttons[0]:
CC.cruiseControl.cancel = True
# TODO remove car specific stuff in controls
# Some override values for Honda
# brake discount removes a sharp nonlinearity
brake_discount = (1.0 - clip(actuators.brake * 3., 0.0, 1.0))
speed_override = max(0.0, (self.LoC.v_pid + CS.cruiseState.speedOffset) * brake_discount)
CC.cruiseControl.speedOverride = float(speed_override if self.CP.pcmCruise else 0.0)
CC.cruiseControl.accelOverride = float(self.CI.calc_accel_override(CS.aEgo, self.a_target,
CS.vEgo, self.v_target))
CC.hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
CC.hudControl.speedVisible = self.enabled
CC.hudControl.lanesVisible = self.enabled
CC.hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead
right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
if self.sm.frame % 100 == 0:
self.right_lane_visible = right_lane_visible
self.left_lane_visible = left_lane_visible
CC.hudControl.rightLaneVisible = self.right_lane_visible
CC.hudControl.leftLaneVisible = self.left_lane_visible
recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0 # 5s blinker cooldown
ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED
meta = self.sm['modelV2'].meta
if len(meta.desirePrediction) and ldw_allowed:
l_lane_change_prob = meta.desirePrediction[Desire.laneChangeLeft - 1]
r_lane_change_prob = meta.desirePrediction[Desire.laneChangeRight - 1]
cameraOffset = ntune_common_get("cameraOffset") + 0.08 if self.wide_camera else ntune_common_get("cameraOffset")
l_lane_close = left_lane_visible and (self.sm['modelV2'].laneLines[1].y[0] > -(1.08 + cameraOffset))
r_lane_close = right_lane_visible and (self.sm['modelV2'].laneLines[2].y[0] < (1.08 - cameraOffset))
CC.hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
CC.hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)
if CC.hudControl.rightLaneDepart or CC.hudControl.leftLaneDepart:
self.events.add(EventName.ldw)
clear_event = ET.WARNING if ET.WARNING not in self.current_alert_types else None
alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric])
self.AM.add_many(self.sm.frame, alerts, self.enabled)
self.AM.process_alerts(self.sm.frame, clear_event)
CC.hudControl.visualAlert = self.AM.visual_alert
if not self.read_only and self.initialized:
# send car controls over can
can_sends = self.CI.apply(CC, self)
self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
(self.state == State.softDisabling)
# Curvature & Steering angle
params = self.sm['liveParameters']
steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetAverageDeg)
curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo)
# controlsState
dat = messaging.new_message('controlsState')
dat.valid = CS.canValid
controlsState = dat.controlsState
controlsState.alertText1 = self.AM.alert_text_1
controlsState.alertText2 = self.AM.alert_text_2
controlsState.alertSize = self.AM.alert_size
controlsState.alertStatus = self.AM.alert_status
controlsState.alertBlinkingRate = self.AM.alert_rate
controlsState.alertType = self.AM.alert_type
controlsState.alertSound = self.AM.audible_alert
controlsState.canMonoTimes = list(CS.canMonoTimes)
controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
controlsState.enabled = self.enabled
controlsState.active = self.active
controlsState.curvature = curvature
controlsState.state = self.state
controlsState.engageable = not self.events.any(ET.NO_ENTRY)
controlsState.longControlState = self.LoC.long_control_state
controlsState.vPid = float(self.LoC.v_pid)
controlsState.vCruise = float(self.applyMaxSpeed if self.CP.openpilotLongitudinalControl else self.v_cruise_kph)
controlsState.upAccelCmd = float(self.LoC.pid.p)
controlsState.uiAccelCmd = float(self.LoC.pid.i)
controlsState.ufAccelCmd = float(self.LoC.pid.f)
controlsState.cumLagMs = -self.rk.remaining * 1000.
controlsState.startMonoTime = int(start_time * 1e9)
controlsState.forceDecel = bool(force_decel)
controlsState.canErrorCounter = self.can_error_counter
controlsState.angleSteers = steer_angle_without_offset * CV.RAD_TO_DEG
controlsState.cluSpeedMs = self.clu_speed_ms
controlsState.applyAccel = self.apply_accel
controlsState.aReqValue = self.aReqValue
controlsState.aReqValueMin = self.aReqValueMin
controlsState.aReqValueMax = self.aReqValueMax
controlsState.sccStockCamAct = self.sccStockCamAct
controlsState.sccStockCamStatus = self.sccStockCamStatus
controlsState.steerRatio = self.VM.sR
controlsState.steerRateCost = ntune_common_get('steerRateCost')
controlsState.steerActuatorDelay = ntune_common_get('steerActuatorDelay')
controlsState.sccGasFactor = ntune_scc_get('sccGasFactor')
controlsState.sccBrakeFactor = ntune_scc_get('sccBrakeFactor')
controlsState.sccCurvatureFactor = ntune_scc_get('sccCurvatureFactor')
if self.joystick_mode:
controlsState.lateralControlState.debugState = lac_log
elif self.CP.steerControlType == car.CarParams.SteerControlType.angle:
controlsState.lateralControlState.angleState = lac_log
elif self.CP.lateralTuning.which() == 'pid':
controlsState.lateralControlState.pidState = lac_log
elif self.CP.lateralTuning.which() == 'lqr':
controlsState.lateralControlState.lqrState = lac_log
elif self.CP.lateralTuning.which() == 'indi':
controlsState.lateralControlState.indiState = lac_log
self.pm.send('controlsState', dat)
# carState
car_events = self.events.to_msg()
cs_send = messaging.new_message('carState')
cs_send.valid = CS.canValid
cs_send.carState = CS
cs_send.carState.events = car_events
self.pm.send('carState', cs_send)
# carEvents - logged every second or on change
if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
ce_send = messaging.new_message('carEvents', len(self.events))
ce_send.carEvents = car_events
self.pm.send('carEvents', ce_send)
self.events_prev = self.events.names.copy()
# carParams - logged every 50 seconds (> 1 per segment)
if (self.sm.frame % int(50. / DT_CTRL) == 0):
cp_send = messaging.new_message('carParams')
cp_send.carParams = self.CP
self.pm.send('carParams', cp_send)
# carControl
cc_send = messaging.new_message('carControl')
cc_send.valid = CS.canValid
cc_send.carControl = CC
self.pm.send('carControl', cc_send)
# copy CarControl to pass to CarInterface on the next iteration
self.CC = CC
def step(self):
start_time = sec_since_boot()
self.prof.checkpoint("Ratekeeper", ignore=True)
# Sample data from sockets and get a carState
CS = self.data_sample()
self.prof.checkpoint("Sample")
self.update_events(CS)
if not self.read_only and self.initialized:
# Update control state
self.state_transition(CS)
self.prof.checkpoint("State transition")
# Compute actuators (runs PID loops and lateral MPC)
actuators, lac_log = self.state_control(CS)
self.prof.checkpoint("State Control")
# Publish data
self.publish_logs(CS, start_time, actuators, lac_log)
self.prof.checkpoint("Sent")
def controlsd_thread(self):
while True:
self.step()
self.rk.monitor_time()
self.prof.display()
def main(sm=None, pm=None, logcan=None):
controls = Controls(sm, pm, logcan)
controls.controlsd_thread()
if __name__ == "__main__":
main()
| 42.459103 | 138 | 0.703859 |
59bdf952ecf56f6b5e2c5e89f6068302650eb90b | 1,388 | py | Python | poom/records.py | xDiaym/poom | 8f0e59bc0acc39b77fe761f9c1e2386e37bc6d78 | ["MIT"] | 3 | 2022-01-01T10:28:17.000Z | 2022-02-06T19:06:24.000Z | poom/records.py | xDiaym/poom | 8f0e59bc0acc39b77fe761f9c1e2386e37bc6d78 | ["MIT"] | 4 | 2022-01-09T13:01:20.000Z | 2022-02-11T14:55:01.000Z | poom/records.py | xDiaym/poom | 8f0e59bc0acc39b77fe761f9c1e2386e37bc6d78 | ["MIT"] | null | null | null |
import json
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Optional
@dataclass
class Record:
game_time: float # UNIX delta time
health: float
def merge_best(self, other: "Record") -> None:
self.game_time = min(self.game_time, other.game_time)
self.health = max(self.health, other.health)
class JsonRecordEncoder(json.JSONEncoder):
def default(self, o: Any) -> Any:
if isinstance(o, Record):
return {
"game_time": o.game_time, # Time in s
"health": o.health,
}
return super().default(o)
def has_record(path: Path) -> bool:
return path.exists()
def load_record(path: Path) -> Optional[Record]:
if not has_record(path):
return None
with open(path, "r") as fp:
raw_record = json.load(fp)
return Record(
game_time=raw_record["game_time"],
health=raw_record["health"],
)
def save_record(path: Path, record: Record) -> None:
with open(path, "w") as fp:
json.dump(record, fp, cls=JsonRecordEncoder)
def update_record(path: Path, new_record: Record) -> None:
if has_record(path): # TODO: TypeGuard
old_record = load_record(path)
assert old_record, "Old record can't be None here."
new_record.merge_best(old_record)
save_record(path, new_record)
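A minimal usage sketch for the record helpers above (the file name and the numbers are illustrative, not part of the module): `update_record` merges a new run into any existing record, keeping the lowest `game_time` and the highest `health`.

```python
from pathlib import Path

records_path = Path("records.json")  # hypothetical location

# First run: no file yet, so the record is saved as-is.
update_record(records_path, Record(game_time=93.4, health=40.0))

# A later, faster but more damaging run: merge_best keeps the best of both.
update_record(records_path, Record(game_time=88.1, health=25.0))

print(load_record(records_path))  # Record(game_time=88.1, health=40.0)
```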
| 26.188679 | 61 | 0.636888 |
05e6c5fb6a589448ec23fa6709b7514a3013b16f | 1,266 | py | Python | recipe_scrapers/cookstr.py | IsaiahData/recipe-scrapers | d9ffc2e066af1d82f244806cff6f7bdd1a5a38d0 | ["MIT"] | 1 | 2021-01-18T01:43:36.000Z | 2021-01-18T01:43:36.000Z | recipe_scrapers/cookstr.py | IsaiahData/recipe-scrapers | d9ffc2e066af1d82f244806cff6f7bdd1a5a38d0 | ["MIT"] | null | null | null | recipe_scrapers/cookstr.py | IsaiahData/recipe-scrapers | d9ffc2e066af1d82f244806cff6f7bdd1a5a38d0 | ["MIT"] | null | null | null |
from ._abstract import AbstractScraper
from ._utils import get_minutes, normalize_string
class Cookstr(AbstractScraper):
@classmethod
def host(self):
return 'cookstr.com'
def title(self):
return normalize_string(self.soup.find(
'h1',
{'class': 'articleHeadline'}
).get_text())
def total_time(self):
sections = self.soup.findAll(
'div',
{'class': 'articleAttrSection'}
)
total_time = 0
for section in sections:
time = section.find(text='Total Time')
if time:
total_time += get_minutes(time.parent.parent)
return total_time
def ingredients(self):
ingredients = self.soup.find(
'div',
{'class': "recipeIngredients"}
)
return [
normalize_string(ingredient.get_text())
for ingredient in ingredients.findAll('li')
]
def instructions(self):
instructions = self.soup.find(
'div',
{'class': 'stepByStepInstructionsDiv'}
)
return '\n'.join([
normalize_string(instruction.get_text())
for instruction in instructions.findAll('p')
])
| 25.32 | 61 | 0.553712 |
b81f9fa904a1956fb6e1a16659a824608ab7f516 | 1,182 | py | Python | RecoTracker/IterativeTracking/python/iterativeTkUtils.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | ["Apache-2.0"] | 1 | 2020-08-12T08:37:04.000Z | 2020-08-12T08:37:04.000Z | RecoTracker/IterativeTracking/python/iterativeTkUtils.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | ["Apache-2.0"] | null | null | null | RecoTracker/IterativeTracking/python/iterativeTkUtils.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | ["Apache-2.0"] | null | null | null |
# This file provides additional helpers for getting information of
# iterations in automated way.
import RecoTracker.IterativeTracking.iterativeTkConfig as _cfg
import RecoTracker.IterativeTracking.iterativeTk_cff as _iterativeTk_cff
def getMVASelectors(postfix):
# assume naming convention that the iteration name (when first
# letter in lower case) is the selector name
ret = {}
for iterName, seqName in _cfg.iterationAlgos(postfix, includeSequenceName=True):
if hasattr(_iterativeTk_cff, iterName):
mod = getattr(_iterativeTk_cff, iterName)
seq = getattr(_iterativeTk_cff, seqName)
# Ignore iteration if the MVA selector module is not in the sequence
if not seq.contains(mod):
continue
typeName = mod._TypedParameterizable__type
classifiers = []
if typeName == "ClassifierMerger":
classifiers = mod.inputClassifiers.value()
elif "TrackMVAClassifier" in typeName:
classifiers = [iterName]
if len(classifiers) > 0:
ret[iterName] = (iterName+"Tracks", classifiers)
return ret
| 38.129032 | 84 | 0.668359 |
8191e25c052b557395199f8f501788a34c2bdbc5 | 3,204 | py | Python | phoneme_encoder.py | ushikado/yuyuyui-speech-synthesis-sever | 91f7177be6f933d7152db28f75c3101c090a1a5e | ["MIT"] | 1 | 2022-01-30T19:39:14.000Z | 2022-01-30T19:39:14.000Z | phoneme_encoder.py | ushikado/yuyuyui-speech-synthesis-sever | 91f7177be6f933d7152db28f75c3101c090a1a5e | ["MIT"] | null | null | null | phoneme_encoder.py | ushikado/yuyuyui-speech-synthesis-sever | 91f7177be6f933d7152db28f75c3101c090a1a5e | ["MIT"] | null | null | null |
import pyopenjtalk
import regex
from urllib.request import urlretrieve
import os
import tarfile
user_dict = [
("友奈", "ゆうな"),
("犬吠埼風", "いぬぼうざきふう"),
("風先輩", "ふうせんぱい"),
("風殿", "ふうどの"),
("風様", "ふうさま"),
("風達", "ふうたち"),
("風以外", "ふう以外"),
("風さん", "ふうさん"),
("つむじ風", "つむじかぜ"),
("よい風", "よいかぜ"),
("すっごい風", "すっごいかぜ"),
("風を感じ", "かぜを感じ"),
("激しい風", "激しいかぜ"),
("激しい風", "激しいかぜ"),
("強い風", "強いかぜ"),
("果樹", "かじゅ"),
("神樹", "しんじゅ"),
("樹海", "じゅかい"),
("樹木", "じゅもく"),
("樹", "いつき"),
("夏凜", "かりん"),
("芽吹き", "めぶき"),
("芽吹く", "めぶく"),
("芽吹い", "めぶい"),
("芽吹", "めぶき"),
("伊予島", "いよじま"),
("杏", "あんず"),
("夕海子", "ゆみこ"),
("上里", "うえさと"),
("美森", "みもり"),
("秋原", "あきはら"),
("雪花", "せっか"),
("古波蔵", "こはぐら"),
("棗", "なつめ"),
("須美", "すみ"),
("水都", "みと"),
("真鈴", "ますず"),
("美佳", "よしか"),
("法花堂", "ほっけどう"),
("天の神", "てんのかみ"),
("象頭", "ぞーず"),
("五岳", "ごがく"),
("~", "ー"),
("〜", "ー"),
("...", "…"),
("..", "…"),
(".", "…"),
("、", ","),
("。", "."),
("!", "!"),
("?", "?"),
]
regex_dict = [
(regex.compile(r"風([ぁ-ゖ])吹"), "かぜ{1}吹"),
# Keep [kanji]風 as-is when the preceding character is a kanji.
# Keep 風[kanji] as-is when the following character is a kanji.
# Otherwise read 風 as "ふう".
(regex.compile(r"(^|[^\p{Script=Han}])風([^\p{Script=Han}]|$)"), "{1}ふう{2}"),
# Unneeded symbols
(regex.compile(r"[「」『』(){}]"), ""),
# Control codes like @s(60)
(regex.compile(r"@[a-z]\(.*?\)"), ""),
]
def init():
# Download the dictionary ourselves.
# If this is left to pyopenjtalk's default behaviour, it tries to download into a read-only location.
# https://github.com/r9y9/pyopenjtalk/blob/master/pyopenjtalk/__init__.py
dict_dir = os.environ.get("OPEN_JTALK_DICT_DIR")
dict_url = "https://github.com/r9y9/open_jtalk/releases/download/v1.11.1/open_jtalk_dic_utf_8-1.11.tar.gz"
download_path = "/tmp/dic.tar.gz"
extract_path = os.path.abspath(os.path.join(dict_dir, "../"))
print('Downloading {} to {}'.format(dict_url, download_path))
urlretrieve(dict_url, download_path)
print("Extracting {} to {}".format(download_path, extract_path))
with tarfile.open(download_path, mode="r|gz") as tar:
tar.extractall(path=extract_path)
os.remove(download_path)
def preprocess(text):
for kanji, kana in user_dict:
text = text.replace(kanji, kana)
for before, after in regex_dict:
text = regex.subf(before, after, text)
return text
def encode(text, reject_nonverbal=True):
text = preprocess(text)
phones = ""
while 0 < len(text):
symbol = ""
match = regex.search(r"[,.!?…♪]", text)
if match:
length = match.span()[0]
sub_text = text[:length]
symbol = text[length]
if 0 < len(sub_text):
phones += pyopenjtalk.g2p(sub_text, kana=False) + " "
symbol = text[length]
phones += symbol + " "
text = text[length+1:]
else:
length = len(text)
phones += pyopenjtalk.g2p(text, kana=False)
text = ""
phones = phones.strip().replace(" ", "-").replace("--", "-")
if reject_nonverbal and len(phones.strip(",.!?…♪- ")) == 0:
return None
return phones
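A rough driver sketch for the encoder above; the sample sentence is only an illustration, `init()` needs network access and a writable `OPEN_JTALK_DICT_DIR` (the path set below is a placeholder), and the exact phoneme output depends on pyopenjtalk.

```python
import os

if __name__ == "__main__":
    os.environ.setdefault("OPEN_JTALK_DICT_DIR", "/tmp/open_jtalk_dic_utf_8-1.11")
    init()                   # fetch and extract the Open JTalk dictionary once
    text = "友奈、今日はいい風だね!"
    print(preprocess(text))  # user_dict / regex rewrites applied first
    print(encode(text))      # hyphen-joined phoneme string, or None if non-verbal
```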
| 25.632 | 110 | 0.488452 |
ec1371755aedec2515668ba2cb8817ff9e981391 | 10,635 | py | Python | aiida/orm/implementation/querybuilder.py | borellim/aiida_core | eebef392c81e8b130834a92e1d7abf5e2e30b3ce | ["BSD-2-Clause"] | null | null | null | aiida/orm/implementation/querybuilder.py | borellim/aiida_core | eebef392c81e8b130834a92e1d7abf5e2e30b3ce | ["BSD-2-Clause"] | null | null | null | aiida/orm/implementation/querybuilder.py | borellim/aiida_core | eebef392c81e8b130834a92e1d7abf5e2e30b3ce | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Backend query implementation classes"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import abc
import six
from aiida.common import exceptions
from aiida.common.lang import abstractclassmethod, type_check
from aiida.common.exceptions import InputValidationError
__all__ = ('BackendQueryBuilder',)
@six.add_metaclass(abc.ABCMeta)
class BackendQueryBuilder(object):
"""Backend query builder interface"""
# pylint: disable=invalid-name,too-many-public-methods,useless-object-inheritance
outer_to_inner_schema = None
inner_to_outer_schema = None
def __init__(self, backend):
"""
:param backend: the backend
"""
from . import backends
type_check(backend, backends.Backend)
self._backend = backend
self.inner_to_outer_schema = dict()
self.outer_to_inner_schema = dict()
@abc.abstractmethod
def Node(self):
"""
Decorated as a property, returns the implementation for DbNode.
It needs to return a subclass of sqlalchemy.Base, which means that for different ORMs
a corresponding dummy-model must be written.
"""
@abc.abstractmethod
def Link(self):
"""
A property, decorated with @property. Returns the implementation for the DbLink
"""
@abc.abstractmethod
def Computer(self):
"""
A property, decorated with @property. Returns the implementation for the Computer
"""
@abc.abstractmethod
def User(self):
"""
A property, decorated with @property. Returns the implementation for the User
"""
@abc.abstractmethod
def Group(self):
"""
A property, decorated with @property. Returns the implementation for the Group
"""
@abc.abstractmethod
def AuthInfo(self):
"""
A property, decorated with @property. Returns the implementation for the AuthInfo
"""
@abc.abstractmethod
def Comment(self):
"""
A property, decorated with @property. Returns the implementation for the Comment
"""
@abc.abstractmethod
def Log(self):
"""
A property, decorated with @property. Returns the implementation for the Log
"""
@abc.abstractmethod
def table_groups_nodes(self):
"""
A property, decorated with @property. Returns the implementation for the many-to-many
relationship between group and nodes.
"""
@property
def AiidaNode(self):
"""
A property, decorated with @property. Returns the implementation for the AiiDA-class for Node
"""
from aiida.orm import Node
return Node
@abc.abstractmethod
def get_session(self):
"""
:returns: a valid session, an instance of sqlalchemy.orm.session.Session
"""
@abc.abstractmethod
def modify_expansions(self, alias, expansions):
"""
Modify names of projections if ** was specified.
This is important for the schema having attributes in a different table.
"""
@abstractclassmethod
def get_filter_expr_from_attributes(cls, operator, value, attr_key, column=None, column_name=None, alias=None): # pylint: disable=too-many-arguments
"""
Returns a valid SQLAlchemy expression.
:param operator: The operator provided by the user ('==', '>', ...)
:param value: The value to compare with, e.g. (5.0, 'foo', ['a','b'])
:param str attr_key:
The path to that attribute as a tuple of values.
I.e. if that attribute I want to filter by is the 2nd element in a list stored under the
key 'mylist', this is ('mylist', '2').
:param column: Optional, an instance of sqlalchemy.orm.attributes.InstrumentedAttribute or
:param str column_name: The name of the column, and the backend should get the InstrumentedAttribute.
:param alias: The aliased class.
:returns: An instance of sqlalchemy.sql.elements.BinaryExpression
"""
@classmethod
def get_corresponding_properties(cls, entity_table, given_properties, mapper):
"""
This method returns a list of updated properties for a given list of properties.
If there is no update for the property, the given property is returned in the list.
"""
if entity_table in mapper.keys():
res = list()
for given_property in given_properties:
res.append(cls.get_corresponding_property(entity_table, given_property, mapper))
return res
return given_properties
@classmethod
def get_corresponding_property(cls, entity_table, given_property, mapper):
"""
This method returns an updated property for a given a property.
If there is no update for the property, the given property is returned.
"""
try:
# Get the mapping for the specific entity_table
property_mapping = mapper[entity_table]
try:
# Get the mapping for the specific property
return property_mapping[given_property]
except KeyError:
# If there is no mapping, the property remains unchanged
return given_property
except KeyError:
# If it doesn't exist, it means that the given_property remains unchanged
return given_property
@classmethod
def get_filter_expr_from_column(cls, operator, value, column):
"""
A method that returns a valid SQLAlchemy expression.
:param operator: The operator provided by the user ('==', '>', ...)
:param value: The value to compare with, e.g. (5.0, 'foo', ['a','b'])
:param column: an instance of sqlalchemy.orm.attributes.InstrumentedAttribute or
:returns: An instance of sqlalchemy.sql.elements.BinaryExpression
"""
# Label is used because it is what is returned for the
# 'state' column by the hybrid_column construct
# Remove when https://github.com/PyCQA/pylint/issues/1931 is fixed
# pylint: disable=no-name-in-module,import-error
from sqlalchemy.sql.elements import Cast, Label
from sqlalchemy.orm.attributes import InstrumentedAttribute, QueryableAttribute
from sqlalchemy.sql.expression import ColumnClause
from sqlalchemy.types import String
if not isinstance(column, (Cast, InstrumentedAttribute, QueryableAttribute, Label, ColumnClause)):
raise TypeError('column ({}) {} is not a valid column'.format(type(column), column))
database_entity = column
if operator == '==':
expr = database_entity == value
elif operator == '>':
expr = database_entity > value
elif operator == '<':
expr = database_entity < value
elif operator == '>=':
expr = database_entity >= value
elif operator == '<=':
expr = database_entity <= value
elif operator == 'like':
# the like operator expects a string, so we cast to avoid problems
# with fields like UUID, which don't support the like operator
expr = database_entity.cast(String).like(value)
elif operator == 'ilike':
expr = database_entity.ilike(value)
elif operator == 'in':
expr = database_entity.in_(value)
else:
raise InputValidationError('Unknown operator {} for filters on columns'.format(operator))
return expr
@abc.abstractmethod
def get_projectable_attribute(self, alias, column_name, attrpath, cast=None, **kwargs):
pass
@abc.abstractmethod
def get_aiida_res(self, key, res):
"""
Some instances returned by the ORM (Django or SA) need to be converted
to AiiDA instances (e.g. nodes)
:param key: the key that this entry would be returned with
:param res: the result returned by the query
:returns: an aiida-compatible instance
"""
@abc.abstractmethod
def yield_per(self, query, batch_size):
"""
:param int batch_size: Number of rows to yield per step
Yields *batch_size* rows at a time
:returns: a generator
"""
@abc.abstractmethod
def count(self, query):
"""
:returns: the number of results
"""
@abc.abstractmethod
def first(self, query):
"""
Executes query in the backend asking for one instance.
:returns: One row of aiida results
"""
@abc.abstractmethod
def iterall(self, query, batch_size, tag_to_index_dict):
"""
:return: An iterator over all the results of a list of lists.
"""
@abc.abstractmethod
def iterdict(self, query, batch_size, tag_to_projected_properties_dict, tag_to_alias_map):
"""
:returns: An iterator over all the results of a list of dictionaries.
"""
@abc.abstractmethod
def get_column_names(self, alias):
"""
Return the column names of the given table (alias).
"""
def get_column(self, colname, alias): # pylint: disable=no-self-use
"""
Return the column for a given projection.
"""
try:
return getattr(alias, colname)
except AttributeError:
raise exceptions.InputValidationError("{} is not a column of {}\n"
"Valid columns are:\n"
"{}".format(
colname,
alias,
'\n'.join(alias._sa_class_manager.mapper.c.keys()) # pylint: disable=protected-access
))
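A small sketch of the pure-Python mapping helpers defined above; the `mapper` contents here are invented for illustration, real backends supply their own outer/inner schema dictionaries.

```python
mapper = {"db_dbnode": {"type": "node_type"}}  # hypothetical mapping

BackendQueryBuilder.get_corresponding_property("db_dbnode", "type", mapper)        # -> "node_type"
BackendQueryBuilder.get_corresponding_property("db_dbnode", "uuid", mapper)        # -> "uuid" (no mapping, unchanged)
BackendQueryBuilder.get_corresponding_properties("db_dbgroup", ["label"], mapper)  # -> ["label"] (unknown table)
```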
| 36.672414 | 153 | 0.601787 |
09154bf7b5d1616cf9d1c7006c88f910ae3c964f | 82 | py | Python | enthought/chaco/barplot.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | ["BSD-3-Clause"] | 3 | 2016-12-09T06:05:18.000Z | 2018-03-01T13:00:29.000Z | enthought/chaco/barplot.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | ["BSD-3-Clause"] | 1 | 2020-12-02T00:51:32.000Z | 2020-12-02T08:48:55.000Z | enthought/chaco/barplot.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | ["BSD-3-Clause"] | null | null | null |
# proxy module
from __future__ import absolute_import
from chaco.barplot import *
| 20.5 | 38 | 0.829268 |
daafeeedcb3ade81ae30e3bc875e78ccb0507a8c | 11,639 | py | Python | sdk/python/pulumi_rabbitmq/shovel.py | pulumi/pulumi-rabbitmq | f380f679fa512a40106ac1ae0d8bbf1766ff4ecf | ["ECL-2.0", "Apache-2.0"] | 3 | 2020-04-14T15:37:05.000Z | 2021-12-01T20:57:40.000Z | sdk/python/pulumi_rabbitmq/shovel.py | pulumi/pulumi-rabbitmq | f380f679fa512a40106ac1ae0d8bbf1766ff4ecf | ["ECL-2.0", "Apache-2.0"] | 25 | 2019-12-06T13:36:32.000Z | 2022-03-31T15:41:19.000Z | sdk/python/pulumi_rabbitmq/shovel.py | pulumi/pulumi-rabbitmq | f380f679fa512a40106ac1ae0d8bbf1766ff4ecf | ["ECL-2.0", "Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ShovelArgs', 'Shovel']
@pulumi.input_type
class ShovelArgs:
def __init__(__self__, *,
info: pulumi.Input['ShovelInfoArgs'],
vhost: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Shovel resource.
:param pulumi.Input['ShovelInfoArgs'] info: The settings of the dynamic shovel. The structure is
described below.
:param pulumi.Input[str] vhost: The vhost to create the resource in.
:param pulumi.Input[str] name: The shovel name.
"""
pulumi.set(__self__, "info", info)
pulumi.set(__self__, "vhost", vhost)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def info(self) -> pulumi.Input['ShovelInfoArgs']:
"""
The settings of the dynamic shovel. The structure is
described below.
"""
return pulumi.get(self, "info")
@info.setter
def info(self, value: pulumi.Input['ShovelInfoArgs']):
pulumi.set(self, "info", value)
@property
@pulumi.getter
def vhost(self) -> pulumi.Input[str]:
"""
The vhost to create the resource in.
"""
return pulumi.get(self, "vhost")
@vhost.setter
def vhost(self, value: pulumi.Input[str]):
pulumi.set(self, "vhost", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The shovel name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _ShovelState:
def __init__(__self__, *,
info: Optional[pulumi.Input['ShovelInfoArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
vhost: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Shovel resources.
:param pulumi.Input['ShovelInfoArgs'] info: The settings of the dynamic shovel. The structure is
described below.
:param pulumi.Input[str] name: The shovel name.
:param pulumi.Input[str] vhost: The vhost to create the resource in.
"""
if info is not None:
pulumi.set(__self__, "info", info)
if name is not None:
pulumi.set(__self__, "name", name)
if vhost is not None:
pulumi.set(__self__, "vhost", vhost)
@property
@pulumi.getter
def info(self) -> Optional[pulumi.Input['ShovelInfoArgs']]:
"""
The settings of the dynamic shovel. The structure is
described below.
"""
return pulumi.get(self, "info")
@info.setter
def info(self, value: Optional[pulumi.Input['ShovelInfoArgs']]):
pulumi.set(self, "info", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The shovel name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def vhost(self) -> Optional[pulumi.Input[str]]:
"""
The vhost to create the resource in.
"""
return pulumi.get(self, "vhost")
@vhost.setter
def vhost(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vhost", value)
class Shovel(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
info: Optional[pulumi.Input[pulumi.InputType['ShovelInfoArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
vhost: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The ``Shovel`` resource creates and manages a dynamic shovel.
## Example Usage
```python
import pulumi
import pulumi_rabbitmq as rabbitmq
test_v_host = rabbitmq.VHost("testVHost")
test_exchange = rabbitmq.Exchange("testExchange",
settings=rabbitmq.ExchangeSettingsArgs(
auto_delete=True,
durable=False,
type="fanout",
),
vhost=test_v_host.name)
test_queue = rabbitmq.Queue("testQueue",
settings=rabbitmq.QueueSettingsArgs(
auto_delete=True,
durable=False,
),
vhost=test_v_host.name)
shovel_test = rabbitmq.Shovel("shovelTest",
info=rabbitmq.ShovelInfoArgs(
destination_queue=test_queue.name,
destination_uri="amqp:///test",
source_exchange=test_exchange.name,
source_exchange_key="test",
source_uri="amqp:///test",
),
vhost=test_v_host.name)
```
## Import
Shovels can be imported using the `name` and `vhost` E.g.
```sh
$ pulumi import rabbitmq:index/shovel:Shovel test shovelTest@test
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ShovelInfoArgs']] info: The settings of the dynamic shovel. The structure is
described below.
:param pulumi.Input[str] name: The shovel name.
:param pulumi.Input[str] vhost: The vhost to create the resource in.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ShovelArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The ``Shovel`` resource creates and manages a dynamic shovel.
## Example Usage
```python
import pulumi
import pulumi_rabbitmq as rabbitmq
test_v_host = rabbitmq.VHost("testVHost")
test_exchange = rabbitmq.Exchange("testExchange",
settings=rabbitmq.ExchangeSettingsArgs(
auto_delete=True,
durable=False,
type="fanout",
),
vhost=test_v_host.name)
test_queue = rabbitmq.Queue("testQueue",
settings=rabbitmq.QueueSettingsArgs(
auto_delete=True,
durable=False,
),
vhost=test_v_host.name)
shovel_test = rabbitmq.Shovel("shovelTest",
info=rabbitmq.ShovelInfoArgs(
destination_queue=test_queue.name,
destination_uri="amqp:///test",
source_exchange=test_exchange.name,
source_exchange_key="test",
source_uri="amqp:///test",
),
vhost=test_v_host.name)
```
## Import
Shovels can be imported using the `name` and `vhost` E.g.
```sh
$ pulumi import rabbitmq:index/shovel:Shovel test shovelTest@test
```
:param str resource_name: The name of the resource.
:param ShovelArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ShovelArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
info: Optional[pulumi.Input[pulumi.InputType['ShovelInfoArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
vhost: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ShovelArgs.__new__(ShovelArgs)
if info is None and not opts.urn:
raise TypeError("Missing required property 'info'")
__props__.__dict__["info"] = info
__props__.__dict__["name"] = name
if vhost is None and not opts.urn:
raise TypeError("Missing required property 'vhost'")
__props__.__dict__["vhost"] = vhost
super(Shovel, __self__).__init__(
'rabbitmq:index/shovel:Shovel',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
info: Optional[pulumi.Input[pulumi.InputType['ShovelInfoArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
vhost: Optional[pulumi.Input[str]] = None) -> 'Shovel':
"""
Get an existing Shovel resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ShovelInfoArgs']] info: The settings of the dynamic shovel. The structure is
described below.
:param pulumi.Input[str] name: The shovel name.
:param pulumi.Input[str] vhost: The vhost to create the resource in.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ShovelState.__new__(_ShovelState)
__props__.__dict__["info"] = info
__props__.__dict__["name"] = name
__props__.__dict__["vhost"] = vhost
return Shovel(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def info(self) -> pulumi.Output['outputs.ShovelInfo']:
"""
The settings of the dynamic shovel. The structure is
described below.
"""
return pulumi.get(self, "info")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The shovel name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def vhost(self) -> pulumi.Output[str]:
"""
The vhost to create the resource in.
"""
return pulumi.get(self, "vhost")
| 35.163142 | 134 | 0.592147 |
848b7325a684e0dbbbfad1d6a027a87463ca5738 | 2,444 | py | Python | tests/conftest.py | cnheider/pyro | 60bcab73ada30c2b3f05d525690c9664ff6fc22e | ["MIT"] | null | null | null | tests/conftest.py | cnheider/pyro | 60bcab73ada30c2b3f05d525690c9664ff6fc22e | ["MIT"] | null | null | null | tests/conftest.py | cnheider/pyro | 60bcab73ada30c2b3f05d525690c9664ff6fc22e | ["MIT"] | null | null | null |
from __future__ import absolute_import, division, print_function
import warnings
import pyro
def pytest_configure(config):
config.addinivalue_line("markers",
"init(rng_seed): initialize the RNG using the seed provided.")
config.addinivalue_line("markers",
"stage(NAME): mark test to run when testing stage matches NAME.")
def pytest_runtest_setup(item):
pyro.clear_param_store()
test_initialize_marker = item.get_marker("init")
if test_initialize_marker:
rng_seed = test_initialize_marker.kwargs["rng_seed"]
pyro.set_rng_seed(rng_seed)
def pytest_addoption(parser):
parser.addoption("--stage",
action="append",
metavar="NAME",
default=[],
help="Only run tests matching the stage NAME.")
def _get_highest_specificity_marker(stage_marker):
"""
Get the most specific stage marker corresponding to the test. Specificity
of test function marker is the highest, followed by test class marker and
module marker.
:return: List of most specific stage markers for the test.
"""
is_test_collected = False
selected_stages = []
try:
for marker in stage_marker:
selected_stages = list(marker.args)
is_test_collected = True
break
except TypeError:
selected_stages = list(stage_marker.args)
is_test_collected = True
if not is_test_collected:
raise RuntimeError("stage marker needs at least one stage to be specified.")
return selected_stages
def pytest_collection_modifyitems(config, items):
test_stages = set(config.getoption("--stage"))
if not test_stages or "all" in test_stages:
return
selected_items = []
deselected_items = []
for item in items:
stage_marker = item.get_marker("stage")
if not stage_marker:
selected_items.append(item)
warnings.warn("No stage associated with the test {}. Will run on each stage invocation.".format(item.name))
continue
item_stage_markers = _get_highest_specificity_marker(stage_marker)
if test_stages.isdisjoint(item_stage_markers):
deselected_items.append(item)
else:
selected_items.append(item)
config.hook.pytest_deselected(items=deselected_items)
items[:] = selected_items
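For context, a test module using these hooks might look like the sketch below (the stage names and test body are arbitrary examples); running `pytest --stage unit` would then collect it, while tests without a matching stage marker are deselected.

```python
import pytest

@pytest.mark.stage("unit")       # collected only when --stage unit (or "all") is given
@pytest.mark.init(rng_seed=123)  # pytest_runtest_setup seeds pyro's RNG with this value
def test_linear_regression_smoke():
    assert True
```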
| 33.479452 | 119 | 0.66162 |
0f963dd4e42a6a35bbacb43cad11e34231393fc3 | 6,796 | py | Python | src/datadog_api_client/v1/model/ip_prefixes_process.py | rchenzheng/datadog-api-client-python | 2e86ac098c6f0c7fdd90ed218224587c0f8eafef | ["Apache-2.0"] | null | null | null | src/datadog_api_client/v1/model/ip_prefixes_process.py | rchenzheng/datadog-api-client-python | 2e86ac098c6f0c7fdd90ed218224587c0f8eafef | ["Apache-2.0"] | null | null | null | src/datadog_api_client/v1/model/ip_prefixes_process.py | rchenzheng/datadog-api-client-python | 2e86ac098c6f0c7fdd90ed218224587c0f8eafef | ["Apache-2.0"] | null | null | null |
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class IPPrefixesProcess(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"prefixes_ipv4": ([str],), # noqa: E501
"prefixes_ipv6": ([str],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"prefixes_ipv4": "prefixes_ipv4", # noqa: E501
"prefixes_ipv6": "prefixes_ipv6", # noqa: E501
}
_composed_schemas = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""IPPrefixesProcess - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
prefixes_ipv4 ([str]): List of IPv4 prefixes.. [optional] # noqa: E501
prefixes_ipv6 ([str]): List of IPv6 prefixes.. [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
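A minimal construction sketch for the generated model above; the prefix values are placeholders, not real Datadog data.

```python
prefixes = IPPrefixesProcess(
    prefixes_ipv4=["192.0.2.0/24"],
    prefixes_ipv6=["2001:db8::/32"],
)
print(prefixes.prefixes_ipv4)  # ['192.0.2.0/24']
```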
| 40.694611 | 108 | 0.583873 |
4930cac47ddd0de2de4e536448e3330e81fc0850 | 25 | py | Python | pytest_order/__init__.py | Joacchim/pytest-order | 1e873d5c2b67211ed3bc578ee72bb3d986906532 | ["MIT"] | 41 | 2021-03-16T07:57:00.000Z | 2022-03-01T10:02:10.000Z | pytest_order/__init__.py | Joacchim/pytest-order | 1e873d5c2b67211ed3bc578ee72bb3d986906532 | ["MIT"] | 39 | 2021-03-04T16:50:04.000Z | 2022-02-18T18:51:14.000Z | pytest_order/__init__.py | Joacchim/pytest-order | 1e873d5c2b67211ed3bc578ee72bb3d986906532 | ["MIT"] | 9 | 2021-03-04T18:27:12.000Z | 2021-12-16T06:46:13.000Z |
__version__ = "1.1.dev0"
| 12.5 | 24 | 0.68 |
2bd062c855d2fdace5e28fb1ff7354eb6abff089 | 2,459 | py | Python | src/tidygraphtool/pipes.py | jstonge/tidygraphtool | 6bf0a0e11d667e7cd1cb8f0ff1f61cb930536ce1 | ["MIT"] | null | null | null | src/tidygraphtool/pipes.py | jstonge/tidygraphtool | 6bf0a0e11d667e7cd1cb8f0ff1f61cb930536ce1 | ["MIT"] | null | null | null | src/tidygraphtool/pipes.py | jstonge/tidygraphtool | 6bf0a0e11d667e7cd1cb8f0ff1f61cb930536ce1 | ["MIT"] | null | null | null |
class Pipe():
"""Making functions pipeable"""
def __init__(self, base_object=None, *args, unpack_input=False, use_first_arg_only=False, try_normal_first=False, **kwargs):
self.__doc__ = base_object.__doc__
self.base_object = base_object
self.args = args
self.kwargs = kwargs
self.unpack_input = unpack_input
self.use_first_arg_only = use_first_arg_only
self.try_normal_first = try_normal_first
def __rshift__(self, other):
return other.base_object(self.base_object, *other.args, **other.kwargs)
def __lshift__(self, other):
if self.unpack_input:
return self.base_object(*other, *self.args, **self.kwargs)
elif self.use_first_arg_only:
return self.base_object(other[0], *self.args, **self.kwargs)
else:
return self.base_object(other, *self.args, **self.kwargs)
def __rrshift__(self, other):
if self.unpack_input:
return self.base_object(*other, *self.args, **self.kwargs)
elif self.use_first_arg_only:
return self.base_object(other[0], *self.args, **self.kwargs)
else:
return self.base_object(other, *self.args, **self.kwargs)
def __llshift__(self, other):
return other.base_object(self.base_object, *other.args, **other.kwargs)
def __getattr__(self, attribute):
return getattr(self.base_object, attribute)
def __call__(self, *args, **kwargs):
if self.try_normal_first:
try:
return self.base_object(*args, **kwargs)
except:
pass
if self.base_object is not None:
# Typical behavior: a new Pipe is created when this Pipe object is called, binding any extra arguments for use when a value is piped in.
return Pipe(
self.base_object,
*args,
unpack_input=self.unpack_input,
use_first_arg_only=self.use_first_arg_only,
try_normal_first=self.try_normal_first,
**kwargs
)
else:
# Pipe was created with no base_object, so enable use of the Pipe object as a decorator.
return Pipe(
*args,
unpack_input=self.unpack_input,
use_first_arg_only=self.use_first_arg_only,
try_normal_first=self.try_normal_first,
**kwargs
)
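A tiny illustration of the operator overloads above (the wrapped functions are made up for the example): values flow left to right through `>>`, and calling a `Pipe` first binds extra arguments.

```python
inc = Pipe(lambda x: x + 1)
double = Pipe(lambda x: x * 2)

print(5 >> inc >> double)  # __rrshift__ feeds 5 into inc, then 6 into double -> 12

power = Pipe(pow)
print(2 >> power(5))       # pow(2, 5) -> 32; the call binds 5 as a positional argument
```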
| 39.66129 | 132 | 0.612444 |
890c6920873f19004956ce6768824dc034fc5d51 | 1,341 | py | Python | pinax/projects/intranet_project/urls.py | ericholscher/pinax | 6ba4585671c6a3d9ac154441296f8a403453469f | ["MIT"] | 1 | 2015-11-08T11:32:53.000Z | 2015-11-08T11:32:53.000Z | pinax/projects/intranet_project/urls.py | ericholscher/pinax | 6ba4585671c6a3d9ac154441296f8a403453469f | ["MIT"] | null | null | null | pinax/projects/intranet_project/urls.py | ericholscher/pinax | 6ba4585671c6a3d9ac154441296f8a403453469f | ["MIT"] | null | null | null |
from django.conf.urls.defaults import *
from django.conf import settings
from django.views.generic.simple import direct_to_template
from wiki import models as wiki_models
from django.contrib import admin
admin.autodiscover()
from account.openid_consumer import PinaxConsumer
urlpatterns = patterns('',
url(r'^$', direct_to_template, {
"template": "homepage.html",
}, name="home"),
url(r'^admin/invite_user/$', 'signup_codes.views.admin_invite_user', name="admin_invite_user"),
url(r'^account/signup/$', "signup_codes.views.signup", name="acct_signup"),
(r'^account/', include('account.urls')),
(r'^openid/(.*)', PinaxConsumer()),
(r'^profiles/', include('basic_profiles.urls')),
(r'^notices/', include('notification.urls')),
(r'^announcements/', include('announcements.urls')),
(r'^tagging_utils/', include('tagging_utils.urls')),
(r'^attachments/', include('attachments.urls')),
(r'^bookmarks/', include('bookmarks.urls')),
(r'^tasks/', include('tasks.urls')),
(r'^topics/', include('topics.urls')),
(r'^comments/', include('threadedcomments.urls')),
(r'^wiki/', include('wiki.urls')),
(r'^admin/(.*)', admin.site.root),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns('',
(r'^site_media/', include('staticfiles.urls')),
)
| 31.928571 | 99 | 0.655481 |
1a2331fb7164b3ef8af1f152019e2164e17eb1dd | 2,563 | py | Python | sdk/python/pulumi_azure_nextgen/resources/v20160201/get_deployment.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_nextgen/resources/v20160201/get_deployment.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_nextgen/resources/v20160201/get_deployment.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetDeploymentResult',
'AwaitableGetDeploymentResult',
'get_deployment',
]
@pulumi.output_type
class GetDeploymentResult:
"""
Deployment information.
"""
def __init__(__self__, name=None, properties=None):
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the deployment.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.DeploymentPropertiesExtendedResponse':
"""
Deployment properties.
"""
return pulumi.get(self, "properties")
class AwaitableGetDeploymentResult(GetDeploymentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDeploymentResult(
name=self.name,
properties=self.properties)
def get_deployment(deployment_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDeploymentResult:
"""
Use this data source to access information about an existing resource.
:param str deployment_name: The name of the deployment.
:param str resource_group_name: The name of the resource group to get. The name is case insensitive.
"""
__args__ = dict()
__args__['deploymentName'] = deployment_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:resources/v20160201:getDeployment', __args__, opts=opts, typ=GetDeploymentResult).value
return AwaitableGetDeploymentResult(
name=__ret__.name,
properties=__ret__.properties)
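A hedged usage sketch (the resource names are placeholders and the import path assumes the package's usual re-exports); it is only meaningful inside a Pulumi program with the Azure provider configured.

```python
import pulumi
from pulumi_azure_nextgen.resources import v20160201 as resources

dep = resources.get_deployment(
    deployment_name="my-deployment",
    resource_group_name="my-rg",
)
pulumi.export("deploymentName", dep.name)
```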
| 32.443038 | 138 | 0.673039 |
b01a3efca2c990007f51965f1908009ba89e5dc1 | 403 | py | Python | 1-asyncio/10.4.py | rcmgn/kts-school-backend | 8a895043b7f0156ec49554504198b631df41d2cd | ["MIT"] | 9 | 2021-02-04T07:00:59.000Z | 2022-03-21T06:28:27.000Z | 1-asyncio/10.4.py | rcmgn/kts-school-backend | 8a895043b7f0156ec49554504198b631df41d2cd | ["MIT"] | null | null | null | 1-asyncio/10.4.py | rcmgn/kts-school-backend | 8a895043b7f0156ec49554504198b631df41d2cd | ["MIT"] | 4 | 2021-10-20T18:44:22.000Z | 2022-02-16T19:11:49.000Z |
import asyncio
class AsyncManager:
def __init__(self):
pass
async def __aenter__(self):
await asyncio.sleep(1)
print('finished __aenter__')
return self
async def __aexit__(self, *args):
await asyncio.sleep(1)
print('finished __aexit__')
async def main():
async with AsyncManager():
print('inside manager')
asyncio.run(main())
| 19.190476 | 37 | 0.62531 |
4dcf80828bcea6580984e90bb248acf0fd7154c8 | 151 | py | Python | niio/__init__.py | kristianeschenburg/io | f5bd75e0ee082162bed3476b78cccb02af429ca6 | ["BSD-3-Clause"] | 2 | 2019-04-05T15:51:16.000Z | 2019-04-08T05:14:49.000Z | niio/__init__.py | kristianeschenburg/io | f5bd75e0ee082162bed3476b78cccb02af429ca6 | ["BSD-3-Clause"] | null | null | null | niio/__init__.py | kristianeschenburg/io | f5bd75e0ee082162bed3476b78cccb02af429ca6 | ["BSD-3-Clause"] | null | null | null |
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from . import (convert, loaded, structures, tables, write)
| 30.2 | 58 | 0.788079 |
0cfa6097555ee79860b333e30442933bab30c4e6 | 14,769 | py | Python | fastreid/utils/checkpoint.py | hanleiyu/prcv | df5ad9469b38b8176121357fe5de2b1cf30aae1c | ["MIT"] | 1 | 2021-03-28T15:03:58.000Z | 2021-03-28T15:03:58.000Z | fastreid/utils/checkpoint.py | hanleiyu/prcv | df5ad9469b38b8176121357fe5de2b1cf30aae1c | ["MIT"] | null | null | null | fastreid/utils/checkpoint.py | hanleiyu/prcv | df5ad9469b38b8176121357fe5de2b1cf30aae1c | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import collections
import copy
import logging
import os
from collections import defaultdict
from typing import Any
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from termcolor import colored
from torch.nn.parallel import DataParallel, DistributedDataParallel
from fastreid.utils.file_io import PathManager
class Checkpointer(object):
"""
A checkpointer that can save/load model as well as extra checkpointable
objects.
"""
def __init__(
self,
model: nn.Module,
dataset: Dataset = None,
save_dir: str = "",
*,
save_to_disk: bool = True,
**checkpointables: object,
):
"""
Args:
model (nn.Module): model.
save_dir (str): a directory to save and find checkpoints.
save_to_disk (bool): if True, save checkpoint to disk, otherwise
disable saving for this checkpointer.
checkpointables (object): any checkpointable objects, i.e., objects
that have the `state_dict()` and `load_state_dict()` method. For
example, it can be used like
`Checkpointer(model, "dir", optimizer=optimizer)`.
"""
if isinstance(model, (DistributedDataParallel, DataParallel)):
model = model.module
self.model = model
self.dataset = dataset
self.checkpointables = copy.copy(checkpointables)
self.logger = logging.getLogger(__name__)
self.save_dir = save_dir
self.save_to_disk = save_to_disk
def save(self, name: str, **kwargs: dict):
"""
Dump model and checkpointables to a file.
Args:
name (str): name of the file.
kwargs (dict): extra arbitrary data to save.
"""
if not self.save_dir or not self.save_to_disk:
return
data = {}
data["model"] = self.model.state_dict()
if self.dataset is not None:
data["pid_dict"] = self.dataset.pid_dict
for key, obj in self.checkpointables.items():
data[key] = obj.state_dict()
data.update(kwargs)
basename = "{}.pth".format(name)
save_file = os.path.join(self.save_dir, basename)
assert os.path.basename(save_file) == basename, basename
self.logger.info("Saving checkpoint to {}".format(save_file))
with PathManager.open(save_file, "wb") as f:
torch.save(data, f)
self.tag_last_checkpoint(basename)
def load(self, path: str):
"""
Load from the given checkpoint. When path points to network file, this
function has to be called on all ranks.
Args:
path (str): path or url to the checkpoint. If empty, will not load
anything.
Returns:
dict:
extra data loaded from the checkpoint that has not been
processed. For example, those saved with
:meth:`.save(**extra_data)`.
"""
if not path:
# no checkpoint provided
self.logger.info(
"No checkpoint found. Initializing model from scratch"
)
return {}
self.logger.info("Loading checkpoint from {}".format(path))
if not os.path.isfile(path):
path = PathManager.get_local_path(path)
assert os.path.isfile(path), "Checkpoint {} not found!".format(path)
checkpoint = self._load_file(path)
if self.dataset is None:
self.logger.info(
"No need to load dataset pid dictionary"
)
else:
self._load_dataset_pid_dict(checkpoint)
self._load_model(checkpoint)
for key, obj in self.checkpointables.items():
if key in checkpoint:
self.logger.info("Loading {} from {}".format(key, path))
obj.load_state_dict(checkpoint.pop(key))
# return any further checkpoint data
return checkpoint
def has_checkpoint(self):
"""
Returns:
bool: whether a checkpoint exists in the target directory.
"""
save_file = os.path.join(self.save_dir, "last_checkpoint")
return PathManager.exists(save_file)
def get_checkpoint_file(self):
"""
Returns:
str: The latest checkpoint file in target directory.
"""
save_file = os.path.join(self.save_dir, "last_checkpoint")
try:
with PathManager.open(save_file, "r") as f:
last_saved = f.read().strip()
except IOError:
# if file doesn't exist, maybe because it has just been
# deleted by a separate process
return ""
return os.path.join(self.save_dir, last_saved)
def get_all_checkpoint_files(self):
"""
Returns:
list: All available checkpoint files (.pth files) in target
directory.
"""
all_model_checkpoints = [
os.path.join(self.save_dir, file)
for file in PathManager.ls(self.save_dir)
if PathManager.isfile(os.path.join(self.save_dir, file))
and file.endswith(".pth")
]
return all_model_checkpoints
def resume_or_load(self, path: str, *, resume: bool = True):
"""
If `resume` is True, this method attempts to resume from the last
checkpoint, if exists. Otherwise, load checkpoint from the given path.
This is useful when restarting an interrupted training job.
Args:
path (str): path to the checkpoint.
resume (bool): if True, resume from the last checkpoint if it exists.
Returns:
same as :meth:`load`.
"""
if resume and self.has_checkpoint():
path = self.get_checkpoint_file()
return self.load(path)
def tag_last_checkpoint(self, last_filename_basename: str):
"""
Tag the last checkpoint.
Args:
last_filename_basename (str): the basename of the last filename.
"""
save_file = os.path.join(self.save_dir, "last_checkpoint")
with PathManager.open(save_file, "w") as f:
f.write(last_filename_basename)
def _load_file(self, f: str):
"""
Load a checkpoint file. Can be overwritten by subclasses to support
different formats.
Args:
f (str): a locally mounted file path.
Returns:
dict: with keys "model" and optionally others that are saved by
the checkpointer dict["model"] must be a dict which maps strings
to torch.Tensor or numpy arrays.
"""
return torch.load(f, map_location=torch.device("cpu"))
def _load_dataset_pid_dict(self, checkpoint: Any):
checkpoint_pid_dict = checkpoint.pop("pid_dict")
self.dataset.update_pid_dict(checkpoint_pid_dict)
def _load_model(self, checkpoint: Any):
"""
Load weights from a checkpoint.
Args:
checkpoint (Any): checkpoint contains the weights.
"""
checkpoint_state_dict = checkpoint.pop("model")
self._convert_ndarray_to_tensor(checkpoint_state_dict)
# if the state_dict comes from a model that was wrapped in a
# DataParallel or DistributedDataParallel during serialization,
# remove the "module" prefix before performing the matching.
_strip_prefix_if_present(checkpoint_state_dict, "module.")
# work around https://github.com/pytorch/pytorch/issues/24139
model_state_dict = self.model.state_dict()
for k in list(checkpoint_state_dict.keys()):
if k in model_state_dict:
shape_model = tuple(model_state_dict[k].shape)
shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
if shape_model != shape_checkpoint:
self.logger.warning(
"'{}' has shape {} in the checkpoint but {} in the "
"model! Skipped.".format(
k, shape_checkpoint, shape_model
)
)
checkpoint_state_dict.pop(k)
incompatible = self.model.load_state_dict(
checkpoint_state_dict, strict=False
)
if incompatible.missing_keys:
self.logger.info(
get_missing_parameters_message(incompatible.missing_keys)
)
if incompatible.unexpected_keys:
self.logger.info(
get_unexpected_parameters_message(incompatible.unexpected_keys)
)
def _convert_ndarray_to_tensor(self, state_dict: dict):
"""
In-place convert all numpy arrays in the state_dict to torch tensor.
Args:
state_dict (dict): a state-dict to be loaded to the model.
"""
# model could be an OrderedDict with _metadata attribute
# (as returned by Pytorch's state_dict()). We should preserve these
# properties.
for k in list(state_dict.keys()):
v = state_dict[k]
if not isinstance(v, np.ndarray) and not isinstance(
v, torch.Tensor
):
raise ValueError(
"Unsupported type found in checkpoint! {}: {}".format(
k, type(v)
)
)
if not isinstance(v, torch.Tensor):
state_dict[k] = torch.from_numpy(v)
class PeriodicCheckpointer:
"""
Save checkpoints periodically. When `.step(iteration)` is called, it will
execute `checkpointer.save` on the given checkpointer, if iteration is a
multiple of period or if `max_iter` is reached.
"""
def __init__(self, checkpointer: Any, period: int, max_iter: int = None):
"""
Args:
checkpointer (Any): the checkpointer object used to save
checkpoints.
period (int): the period to save checkpoint.
max_iter (int): maximum number of iterations. When it is reached,
a checkpoint named "model_final" will be saved.
"""
self.checkpointer = checkpointer
self.period = int(period)
self.max_iter = max_iter
def step(self, iteration: int, **kwargs: Any):
"""
Perform the appropriate action at the given iteration.
Args:
iteration (int): the current iteration, ranged in [0, max_iter-1].
kwargs (Any): extra data to save, same as in
:meth:`Checkpointer.save`.
"""
iteration = int(iteration)
additional_state = {"iteration": iteration}
additional_state.update(kwargs)
if (iteration + 1) % self.period == 0:
self.checkpointer.save(
"model_{:07d}".format(iteration), **additional_state
)
if iteration >= self.max_iter - 1:
self.checkpointer.save("model_final", **additional_state)
def save(self, name: str, **kwargs: Any):
"""
Same argument as :meth:`Checkpointer.save`.
Use this method to manually save checkpoints outside the schedule.
Args:
name (str): file name.
kwargs (Any): extra data to save, same as in
:meth:`Checkpointer.save`.
"""
self.checkpointer.save(name, **kwargs)
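# For example (illustrative numbers only), with period=1000 and max_iter=90000 a call
# to step(iteration) on every iteration writes model_0000999.pth, model_0001999.pth, ...
# and finally model_final.pth at iteration 89999.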
def get_missing_parameters_message(keys: list):
"""
Get a logging-friendly message to report parameter names (keys) that are in
the model but not found in a checkpoint.
Args:
keys (list[str]): List of keys that were not found in the checkpoint.
Returns:
str: message.
"""
groups = _group_checkpoint_keys(keys)
msg = "Some model parameters are not in the checkpoint:\n"
msg += "\n".join(
" " + colored(k + _group_to_str(v), "blue") for k, v in groups.items()
)
return msg
def get_unexpected_parameters_message(keys: list):
"""
Get a logging-friendly message to report parameter names (keys) that are in
the checkpoint but not found in the model.
Args:
keys (list[str]): List of keys that were not found in the model.
Returns:
str: message.
"""
groups = _group_checkpoint_keys(keys)
msg = "The checkpoint contains parameters not used by the model:\n"
msg += "\n".join(
" " + colored(k + _group_to_str(v), "magenta")
for k, v in groups.items()
)
return msg
def _strip_prefix_if_present(state_dict: collections.OrderedDict, prefix: str):
"""
Strip the prefix in metadata, if any.
Args:
state_dict (OrderedDict): a state-dict to be loaded to the model.
prefix (str): prefix.
"""
keys = sorted(state_dict.keys())
if not all(len(key) == 0 or key.startswith(prefix) for key in keys):
return
for key in keys:
newkey = key[len(prefix):]
state_dict[newkey] = state_dict.pop(key)
# also strip the prefix in metadata, if any..
try:
metadata = state_dict._metadata
except AttributeError:
pass
else:
for key in list(metadata.keys()):
# for the metadata dict, the key can be:
# '': for the DDP module, which we want to remove.
# 'module': for the actual model.
# 'module.xx.xx': for the rest.
if len(key) == 0:
continue
newkey = key[len(prefix):]
metadata[newkey] = metadata.pop(key)
def _group_checkpoint_keys(keys: list):
"""
Group keys based on common prefixes. A prefix is the string up to the final
"." in each key.
Args:
keys (list[str]): list of parameter names, i.e. keys in the model
checkpoint dict.
Returns:
dict[list]: keys with common prefixes are grouped into lists.
"""
groups = defaultdict(list)
for key in keys:
pos = key.rfind(".")
if pos >= 0:
head, tail = key[:pos], [key[pos + 1:]]
else:
head, tail = key, []
groups[head].extend(tail)
return groups
def _group_to_str(group: list):
"""
Format a group of parameter name suffixes into a loggable string.
Args:
group (list[str]): list of parameter name suffixes.
Returns:
        str: formatted string.
"""
if len(group) == 0:
return ""
if len(group) == 1:
return "." + group[0]
return ".{" + ", ".join(group) + "}"
| 35.24821
| 81
| 0.588869
|
70cf01e2cd377b85f10250f5688ec65b6699580c
| 8,883
|
py
|
Python
|
tests/h2o/test_h2o_model_export.py
|
freefrag/mlflow
|
b8e74cfc591397290e1c1ba2600be7daa64b4c05
|
[
"Apache-2.0"
] | 1,825
|
2018-06-05T17:30:39.000Z
|
2022-03-24T13:39:46.000Z
|
tests/h2o/test_h2o_model_export.py
|
freefrag/mlflow
|
b8e74cfc591397290e1c1ba2600be7daa64b4c05
|
[
"Apache-2.0"
] | 200
|
2018-06-06T02:30:57.000Z
|
2018-07-30T17:06:34.000Z
|
tests/h2o/test_h2o_model_export.py
|
freefrag/mlflow
|
b8e74cfc591397290e1c1ba2600be7daa64b4c05
|
[
"Apache-2.0"
] | 301
|
2018-06-05T17:47:48.000Z
|
2022-03-17T12:09:17.000Z
|
# pep8: disable=E501
from __future__ import print_function
import os
import pytest
import yaml
import json
import pandas as pd
import pandas.testing
from collections import namedtuple
import sklearn.datasets as datasets
import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator
import mlflow.h2o
import mlflow
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow import pyfunc
from mlflow.models import Model
from mlflow.tracking.utils import _get_model_log_dir
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.file_utils import TempDir
from mlflow.utils.model_utils import _get_flavor_configuration
from tests.helper_functions import pyfunc_serve_and_score_model
from tests.helper_functions import score_model_in_sagemaker_docker_container
ModelWithData = namedtuple("ModelWithData", ["model", "inference_data"])
@pytest.fixture
def h2o_iris_model():
h2o.init()
iris = datasets.load_iris()
data = h2o.H2OFrame({
'feature1': list(iris.data[:, 0]),
'feature2': list(iris.data[:, 1]),
'target': list(map(lambda i: "Flower %d" % i, iris.target))
})
train, test = data.split_frame(ratios=[.7])
h2o_gbm = H2OGradientBoostingEstimator(ntrees=10, max_depth=6)
h2o_gbm.train(['feature1', 'feature2'], 'target', training_frame=train)
return ModelWithData(model=h2o_gbm, inference_data=test)
@pytest.fixture
def model_path(tmpdir):
return os.path.join(str(tmpdir), "model")
@pytest.fixture
def h2o_custom_env(tmpdir):
conda_env = os.path.join(str(tmpdir), "conda_env.yml")
_mlflow_conda_env(
conda_env,
additional_conda_deps=["pytest"],
additional_pip_deps=["h2o"])
return conda_env
def test_model_save_load(h2o_iris_model, model_path):
h2o_model = h2o_iris_model.model
mlflow.h2o.save_model(h2o_model=h2o_model, path=model_path)
# Loading h2o model
h2o_model_loaded = mlflow.h2o.load_model(model_path)
assert all(
h2o_model_loaded.predict(h2o_iris_model.inference_data).as_data_frame() ==
h2o_model.predict(h2o_iris_model.inference_data).as_data_frame())
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path)
assert all(
pyfunc_loaded.predict(h2o_iris_model.inference_data.as_data_frame()) ==
h2o_model.predict(h2o_iris_model.inference_data).as_data_frame())
def test_model_log(h2o_iris_model):
h2o_model = h2o_iris_model.model
old_uri = mlflow.get_tracking_uri()
# should_start_run tests whether or not calling log_model() automatically starts a run.
for should_start_run in [False, True]:
with TempDir(chdr=True, remove_on_exit=True):
try:
artifact_path = "gbm_model"
mlflow.set_tracking_uri("test")
if should_start_run:
mlflow.start_run()
mlflow.h2o.log_model(h2o_model=h2o_model, artifact_path=artifact_path)
# Load model
h2o_model_loaded = mlflow.h2o.load_model(
path=artifact_path, run_id=mlflow.active_run().info.run_uuid)
assert all(
h2o_model_loaded.predict(h2o_iris_model.inference_data).as_data_frame() ==
h2o_model.predict(h2o_iris_model.inference_data).as_data_frame())
finally:
mlflow.end_run()
mlflow.set_tracking_uri(old_uri)
def test_model_load_succeeds_with_missing_data_key_when_data_exists_at_default_path(
h2o_iris_model, model_path):
"""
This is a backwards compatibility test to ensure that models saved in MLflow version <= 0.7.0
can be loaded successfully. These models are missing the `data` flavor configuration key.
"""
h2o_model = h2o_iris_model.model
mlflow.h2o.save_model(h2o_model=h2o_model, path=model_path)
model_conf_path = os.path.join(model_path, "MLmodel")
model_conf = Model.load(model_conf_path)
flavor_conf = model_conf.flavors.get(mlflow.h2o.FLAVOR_NAME, None)
assert flavor_conf is not None
del flavor_conf['data']
model_conf.save(model_conf_path)
h2o_model_loaded = mlflow.h2o.load_model(model_path)
assert all(
h2o_model_loaded.predict(h2o_iris_model.inference_data).as_data_frame() ==
h2o_model.predict(h2o_iris_model.inference_data).as_data_frame())
def test_model_save_persists_specified_conda_env_in_mlflow_model_directory(
h2o_iris_model, model_path, h2o_custom_env):
mlflow.h2o.save_model(h2o_model=h2o_iris_model.model, path=model_path, conda_env=h2o_custom_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != h2o_custom_env
with open(h2o_custom_env, "r") as f:
h2o_custom_env_text = f.read()
with open(saved_conda_env_path, "r") as f:
saved_conda_env_text = f.read()
assert saved_conda_env_text == h2o_custom_env_text
def test_model_save_accepts_conda_env_as_dict(h2o_iris_model, model_path):
conda_env = dict(mlflow.h2o.DEFAULT_CONDA_ENV)
conda_env["dependencies"].append("pytest")
mlflow.h2o.save_model(h2o_model=h2o_iris_model.model, path=model_path, conda_env=conda_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == conda_env
def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(
h2o_iris_model, h2o_custom_env):
artifact_path = "model"
with mlflow.start_run():
mlflow.h2o.log_model(h2o_model=h2o_iris_model.model,
artifact_path=artifact_path,
conda_env=h2o_custom_env)
run_id = mlflow.active_run().info.run_uuid
model_path = _get_model_log_dir(artifact_path, run_id)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != h2o_custom_env
with open(h2o_custom_env, "r") as f:
h2o_custom_env_text = f.read()
with open(saved_conda_env_path, "r") as f:
saved_conda_env_text = f.read()
assert saved_conda_env_text == h2o_custom_env_text
def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies(
h2o_iris_model, model_path):
mlflow.h2o.save_model(h2o_model=h2o_iris_model.model, path=model_path, conda_env=None)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
with open(conda_env_path, "r") as f:
conda_env = yaml.safe_load(f)
assert conda_env == mlflow.h2o.DEFAULT_CONDA_ENV
def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(
h2o_iris_model):
artifact_path = "model"
with mlflow.start_run():
mlflow.h2o.log_model(h2o_model=h2o_iris_model.model, artifact_path=artifact_path)
run_id = mlflow.active_run().info.run_uuid
model_path = _get_model_log_dir(artifact_path, run_id)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
with open(conda_env_path, "r") as f:
conda_env = yaml.safe_load(f)
assert conda_env == mlflow.h2o.DEFAULT_CONDA_ENV
@pytest.mark.release
def test_sagemaker_docker_model_scoring_with_default_conda_env(h2o_iris_model, model_path):
mlflow.h2o.save_model(h2o_model=h2o_iris_model.model, path=model_path, conda_env=None)
reloaded_h2o_pyfunc = mlflow.pyfunc.load_pyfunc(model_path)
scoring_response = score_model_in_sagemaker_docker_container(
model_path=model_path,
data=h2o_iris_model.inference_data.as_data_frame(),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
flavor=mlflow.pyfunc.FLAVOR_NAME)
deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content))
pandas.testing.assert_frame_equal(
deployed_model_preds["predict"].to_frame(),
reloaded_h2o_pyfunc.predict(
h2o_iris_model.inference_data.as_data_frame())["predict"].to_frame(),
check_dtype=False,
check_less_precise=6)
| 39.30531
| 100
| 0.738827
|
a2462ba6133a02b0f21a5007596618c5eb968b1a
| 438
|
py
|
Python
|
seg/src/code/predict.py
|
Nick17t/UD-Net
|
fd162f1e495bee3edc43f34b3d892ac007d9f819
|
[
"MIT"
] | 2
|
2022-01-17T08:59:08.000Z
|
2022-02-22T03:37:23.000Z
|
seg/src/code/predict.py
|
Nick17t/UD-Net
|
fd162f1e495bee3edc43f34b3d892ac007d9f819
|
[
"MIT"
] | null | null | null |
seg/src/code/predict.py
|
Nick17t/UD-Net
|
fd162f1e495bee3edc43f34b3d892ac007d9f819
|
[
"MIT"
] | 1
|
2022-02-22T03:32:51.000Z
|
2022-02-22T03:32:51.000Z
|
'''
Created on 2019-04-03
@author: vcc
'''
import models
from reader import Reader
from process import Process
if __name__ == '__main__':
reader = Reader()
reader.scan_pic('../data/source/test')
    # predictmask starts out as an empty list here; it will hold the model's output
    imglist, predictmask = reader.read_boost()
unet = models.unet(pretrained_weights='../product/model/unet_weights_best.h5',batch_norm=True)
predictmask = unet.predict_class(imglist)
| 24.333333
| 98
| 0.723744
|
04fafcf9719039b9d742beeccb09ed87d40127d0
| 2,273
|
py
|
Python
|
src/labster/conftest.py
|
jean3108/labandco
|
4317e7d3875f10d76076ad5fc68c1ba3c12badba
|
[
"Apache-2.0"
] | 2
|
2019-11-11T22:09:58.000Z
|
2020-01-20T19:44:30.000Z
|
src/labster/conftest.py
|
jean3108/labandco
|
4317e7d3875f10d76076ad5fc68c1ba3c12badba
|
[
"Apache-2.0"
] | 15
|
2020-03-31T10:58:37.000Z
|
2022-01-22T09:14:49.000Z
|
src/labster/conftest.py
|
jean3108/labandco
|
4317e7d3875f10d76076ad5fc68c1ba3c12badba
|
[
"Apache-2.0"
] | 2
|
2021-05-28T12:20:24.000Z
|
2021-09-08T11:27:57.000Z
|
"""Configuration and injectable fixtures for Pytest."""
from __future__ import annotations
import logging
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from pytest import fixture
from sqlalchemy.exc import SQLAlchemyError
from typeguard import TypeChecker
from labster.app import create_app
from labster.domain.services.constants import get_initial_constants
from labster.extensions import db as _db
checker = TypeChecker("labster")
if "TYPECHECK" in os.environ:
logging.captureWarnings(True)
if not checker.active:
checker.start()
class TestConfig:
TESTING = True
CSRF_ENABLED = False
MAIL_SENDER = "test@example.com"
MAIL_SUPPRESS_SEND = True
SECRET_KEY = "changeme"
SERVER_NAME = "localhost.localdomain"
SQLALCHEMY_DATABASE_URI = "sqlite://"
@fixture(scope="session")
def app() -> Flask:
"""We usually only create an app once per session."""
return create_app(TestConfig)
@fixture
def app_context(app: Flask):
with app.app_context() as ctx:
yield ctx
@fixture
def request_context(app: Flask):
with app.test_request_context() as ctx:
yield ctx
@fixture
def db(app: Flask) -> SQLAlchemy:
"""Return a fresh db for each test."""
with app.app_context():
cleanup_db(_db)
_db.create_all()
yield _db
_db.session.remove()
cleanup_db(_db)
_db.session.flush()
@fixture
def db_session(db: SQLAlchemy):
"""Kept for historical reasons."""
return db.session
@fixture
def config():
from labster.domain.models.config import Config
DATA = get_initial_constants()
config = Config()
config.data = DATA
return config
@fixture
def client(app, db):
"""Return a Web client, used for testing, bound to a DB session."""
return app.test_client()
@fixture
def injector(app):
from labster.di import injector
return injector
#
# Cleanup utilities
#
def cleanup_db(db):
"""Drop all the tables, in a way that doesn't raise integrity errors."""
for table in reversed(db.metadata.sorted_tables):
try:
db.session.execute(table.delete())
except SQLAlchemyError:
print(f"Failed to delete table {table}")
pass
| 20.294643
| 76
| 0.688957
|
1478334485f12138fc2a2b07a640621cb36a1445
| 464
|
py
|
Python
|
src/utility_lib/number_utilities/calculations.py
|
DonalChilde/utility_lib
|
9cf1cc142e5fcbf99f9f2e9bf6099520cc3eb545
|
[
"MIT"
] | null | null | null |
src/utility_lib/number_utilities/calculations.py
|
DonalChilde/utility_lib
|
9cf1cc142e5fcbf99f9f2e9bf6099520cc3eb545
|
[
"MIT"
] | null | null | null |
src/utility_lib/number_utilities/calculations.py
|
DonalChilde/utility_lib
|
9cf1cc142e5fcbf99f9f2e9bf6099520cc3eb545
|
[
"MIT"
] | null | null | null |
from typing import Union
def percent_difference(foo: Union[int, float], bar: Union[int, float]) -> float:
if bar == 0:
return 0
diff = foo - bar
return diff / bar
def safe_div(num: Union[int, float], denom: Union[int, float]) -> float:
# https://stackoverflow.com/a/27317595/105844
num = float(num)
denom = float(denom)
if not denom:
return 0
# print(f"{num},{denom}")
result = num / denom
return result
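# Small worked examples for the two helpers above (illustrative values only):
if __name__ == "__main__":
    assert percent_difference(110, 100) == 0.1  # (110 - 100) / 100
    assert safe_div(1, 0) == 0                  # division by zero falls back to 0
    assert safe_div(3, 2) == 1.5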
| 23.2
| 80
| 0.612069
|
b7e63c3fa457625ba5b4b4fad0e5bc615734d0c2
| 129
|
py
|
Python
|
python-sdk/nuscenes/eval/panoptic/panoptic_seg_evaluator.py
|
tanjiangyuan/Classification_nuScence
|
b94c4b0b6257fc1c048a676e3fd9e71183108d53
|
[
"Apache-2.0"
] | null | null | null |
python-sdk/nuscenes/eval/panoptic/panoptic_seg_evaluator.py
|
tanjiangyuan/Classification_nuScence
|
b94c4b0b6257fc1c048a676e3fd9e71183108d53
|
[
"Apache-2.0"
] | null | null | null |
python-sdk/nuscenes/eval/panoptic/panoptic_seg_evaluator.py
|
tanjiangyuan/Classification_nuScence
|
b94c4b0b6257fc1c048a676e3fd9e71183108d53
|
[
"Apache-2.0"
] | null | null | null |
version https://git-lfs.github.com/spec/v1
oid sha256:dc0a79583dcec8c7dc03e423c5ab679dad899154d99153ca17dcf8c6c0dbf83e
size 7372
| 32.25
| 75
| 0.883721
|
5df244d995b88a581706a6753b29b07a9d0eb3e5
| 815
|
py
|
Python
|
test/test_environments.py
|
Ais105/course_project
|
a4ea3991756be2d12ae3fef9db6956f9d09c0c07
|
[
"MIT"
] | null | null | null |
test/test_environments.py
|
Ais105/course_project
|
a4ea3991756be2d12ae3fef9db6956f9d09c0c07
|
[
"MIT"
] | null | null | null |
test/test_environments.py
|
Ais105/course_project
|
a4ea3991756be2d12ae3fef9db6956f9d09c0c07
|
[
"MIT"
] | 1
|
2020-02-18T20:56:57.000Z
|
2020-02-18T20:56:57.000Z
|
import unittest
import os
class TestEnvironment(unittest.TestCase):
def setUp(self) -> None:
if os.environ.get("user_name1"):
del os.environ['user_name1']
if os.environ.get("password1"):
del os.environ['password1']
def test_environments_absent(self):
with self.assertRaises(KeyError):
user_name = os.environ['user_name1']
with self.assertRaises(KeyError):
password = os.environ['password1']
def test_environments_exist(self):
os.environ["user_name1"] = "cool_user"
os.environ["user_password1"] = "secure_password"
self.assertEqual(os.environ['user_name1'], "cool_user")
self.assertEqual(os.environ['user_password1'], "secure_password")
if __name__ == '__main__':
unittest.main()
| 28.103448
| 73
| 0.646626
|
af43ac8e5a3d55137d11d8405a00e5fa7dad92c1
| 3,579
|
py
|
Python
|
sel_links.py
|
xCodeR01/selenium_testing
|
9ec56db55714cc8392e43cfbbfaceee8b257ca8d
|
[
"MIT"
] | null | null | null |
sel_links.py
|
xCodeR01/selenium_testing
|
9ec56db55714cc8392e43cfbbfaceee8b257ca8d
|
[
"MIT"
] | null | null | null |
sel_links.py
|
xCodeR01/selenium_testing
|
9ec56db55714cc8392e43cfbbfaceee8b257ca8d
|
[
"MIT"
] | null | null | null |
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
# creating chromium driver with custom options
op = webdriver.ChromeOptions()
op.binary_location = "chrome-win\\chrome.exe"
# op.add_argument("--headless")
driver = webdriver.Chrome(
executable_path="drivers\\chromedriver.exe", options=op)
######### links #########
# driver.get("https://duckduckgo.com/?t=ffab&q=scrapy+image+pipelines&ia=web")
# # find all the links on a webpage
# links = driver.find_elements(by=By.TAG_NAME, value='a')
# print('Total number of links found: ', len(links))
# # find links with specific link text and click
# driver.find_element(By.LINK_TEXT, 'DuckDuckGo').click()
# # find links with specific link text using substring of text and click
# driver.back()
# driver.find_element(By.PARTIAL_LINK_TEXT, 'scrapy').click()
######## alerts #######
# driver.get('https://testautomationpractice.blogspot.com/')
# # open the alert using a button on the webpage
# # we can't perform actions on the alert window as it is not a webelement
# # first switch to the alert window and then accept the alert or cancel it
# driver.find_element(
# By.XPATH, '/html/body/div[4]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div/div[4]/div[2]/div/aside/div/div[2]/div[1]/button').click()
# time.sleep(5)
# driver.switch_to_alert().accept()
# driver.find_element(
# By.XPATH, '/html/body/div[4]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div/div[4]/div[2]/div/aside/div/div[2]/div[1]/button').click()
# time.sleep(5)
# driver.switch_to_alert().dismiss()
######## working with frames ######
# a frame is essentially a separate window on a webpage, with its own scroll bar if scrollable
# so we cannot directly interact with elements in different frames without switching to a specific frame first
# driver.get(
# "https://www.selenium.dev/selenium/docs/api/java/index.html?overview-summary.html")
# # switch_to.frame(name|id|index) - a driver method to switch between frames using different methods
# driver.switch_to.frame('packageListFrame')
# # now we can normally interact with elements in this frame
# driver.find_element(By.LINK_TEXT, 'org.openqa.selenium.chrome').click()
# time.sleep(3)
# driver.back()
# # now we cannot directly jump to another frame; we first need to go back to the main frame
# # and from there we can switch to some other frame. there is one convenience method for that
# # instead of manually remembering the default window
# driver.switch_to.default_content()
# driver.switch_to.frame("packageFrame")
# driver.find_element(By.LINK_TEXT, 'Alert').click()
# time.sleep(3)
####### multiple tabs/windows ######
# first, a window and a tab are the same thing; second, every window has a handle that can be used
# to switch to that window. we can perform actions only in the currently focused window
driver.get('http://demo.automationtesting.in/Windows.html')
# show the handle of our current focused window
print('current window handle', driver.current_window_handle)
time.sleep(3)
# show the handles of all windows in our browser, firstly let's open a new window
driver.find_element(By.XPATH, '//*[@id="Tabbed"]/a/button').click()
print('all handles', driver.window_handles)
# so once we have obtained the window handle, we can switch to it and fetch a new page or perform other actions
print("current window title:", driver.title)
sec_window_handle = driver.window_handles[1]
driver.switch_to.window(sec_window_handle)
print("switched to second window:", driver.title)
# closes only the focused window
driver.close()
| 42.105882
| 139
| 0.736239
|
fddff55e1672d1184fe35ae3c71ba1ce853d2d5b
| 347
|
py
|
Python
|
Algorithms/grading_students.py
|
Sudhir-Pawar/Hackerrank-Codes
|
6996a71a5dbd33e8d347d381f9c0c458717abc96
|
[
"MIT"
] | 70
|
2020-10-04T09:23:15.000Z
|
2022-02-01T09:44:39.000Z
|
Algorithms/grading_students.py
|
Sudhir-Pawar/Hackerrank-Codes
|
6996a71a5dbd33e8d347d381f9c0c458717abc96
|
[
"MIT"
] | 148
|
2020-06-05T15:32:12.000Z
|
2020-11-01T08:29:01.000Z
|
Algorithms/grading_students.py
|
Sudhir-Pawar/Hackerrank-Codes
|
6996a71a5dbd33e8d347d381f9c0c458717abc96
|
[
"MIT"
] | 298
|
2020-10-04T04:27:01.000Z
|
2022-03-07T04:02:59.000Z
|
"""
Name : Grading Students
Category : Implementation
Difficulty : Easy
Language : Python3
Question Link : https://www.hackerrank.com/challenges/grading/problem
"""
n = int(input().strip())
for _ in range(n):
x = int(input().strip())
if x >= 38 and x % 5 > 2:
while x % 5 != 0: x += 1
print(x)
| 20.411765
| 69
| 0.559078
|
7ba57923aa84795cb0a0d9bc17e0c955f8722364
| 343
|
py
|
Python
|
sdk/python/pulumi_gcp/folder/__init__.py
|
vrutkovs/pulumi-gcp
|
ced632feea265d95a38c7ae02826deaae77fb358
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/folder/__init__.py
|
vrutkovs/pulumi-gcp
|
ced632feea265d95a38c7ae02826deaae77fb358
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/folder/__init__.py
|
vrutkovs/pulumi-gcp
|
ced632feea265d95a38c7ae02826deaae77fb358
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .iam_binding import *
from .iam_member import *
from .iam_policy import *
from .organization_policy import *
| 34.3
| 87
| 0.731778
|
7392b6d5a18afb229a000af4e2eb8f767ab9afcd
| 2,691
|
py
|
Python
|
pybot/whatsapp/feature/BaseFeature.py
|
iren86/whatsapp-pybot
|
28f0d855f53090cb9a0c88bdbca29f6f93efc95e
|
[
"MIT"
] | 4
|
2017-08-15T10:04:11.000Z
|
2019-09-01T08:28:42.000Z
|
pybot/whatsapp/feature/BaseFeature.py
|
iren86/whatsapp-pybot
|
28f0d855f53090cb9a0c88bdbca29f6f93efc95e
|
[
"MIT"
] | null | null | null |
pybot/whatsapp/feature/BaseFeature.py
|
iren86/whatsapp-pybot
|
28f0d855f53090cb9a0c88bdbca29f6f93efc95e
|
[
"MIT"
] | 2
|
2019-08-27T19:14:03.000Z
|
2019-10-12T21:50:03.000Z
|
# -*- coding: utf-8 -*-
from random import uniform
from time import sleep
from selenium.common.exceptions import NoSuchElementException
from pybot.whatsapp.driver import ChromeFactory
from pybot.whatsapp.util.AppUtil import new_pagesource_path
from pybot.whatsapp.util.AppUtil import new_screenshot_path
RANDOM_SLEEP_BETWEEN_REQUESTS_START = 0.5
RANDOM_SLEEP_BETWEEN_REQUESTS_END = 2.0
RANDOM_SLEEP_BETWEEN_SEND_KEYS_START = 0.05
RANDOM_SLEEP_BETWEEN_SEND_KEYS_END = 0.15
class BaseFeature(object):
"""Base class to initialize the base feature that will be called from all features"""
def __init__(self, driver):
self.driver = driver
def disable_leave_page_popup(self):
'''
Disable popup saying "Are you sure?
The page is asking you to confirm that you want to leave - data entered will be lost."
with 2 buttons: Leave Page and Stay on Page
:return:
'''
# firefox version
self.driver.execute_script("window.onbeforeunload = function(e){};")
# chrome version
self.driver.execute_script('$(window).unbind("beforeunload");')
def is_valid(self, validator, message, *args):
screenshot_path = new_screenshot_path()
pagesource_path = new_pagesource_path()
try:
if args is not None:
assert validator(*args), message
else:
assert validator(), message
except Exception:
self.driver.make_screenshot(screenshot_path)
self.driver.save_page_source(pagesource_path)
raise
def is_element_exists(self, locator):
assert len(locator) == 2, "Locator must consists from By expression (0) and element locator (1)"
try:
self.driver.find_element(locator[0], locator[1])
except NoSuchElementException:
# driver.find_element raise only NoSuchElementException
return False
return True
def random_sleep_between_requests(self):
'''
Random delays between the requests to avoid getting blocked
'''
sleep(uniform(RANDOM_SLEEP_BETWEEN_REQUESTS_START, RANDOM_SLEEP_BETWEEN_REQUESTS_END))
def random_sleep_send_keys(self, field, text):
'''
Type letter by letter to avoid getting blocked
'''
assert field, "Field must be provided"
assert text, "Text must be provided"
for letter in text:
field.send_keys(letter)
sleep(uniform(RANDOM_SLEEP_BETWEEN_SEND_KEYS_START, RANDOM_SLEEP_BETWEEN_SEND_KEYS_END))
def get_request_timeout_in_sec(self):
return ChromeFactory.REQUEST_TIMEOUT_IN_SEC
| 32.421687
| 104
| 0.680045
|
64ab14d58f430b2f32607171289e7ad1bbaf7b81
| 576
|
py
|
Python
|
slack.py
|
jcooper-korg/talon_user
|
ef086f9890448f7d633a4f02b36a18de853581a8
|
[
"0BSD"
] | 1
|
2018-09-22T22:34:35.000Z
|
2018-09-22T22:34:35.000Z
|
slack.py
|
jcooper-korg/talon_user
|
ef086f9890448f7d633a4f02b36a18de853581a8
|
[
"0BSD"
] | null | null | null |
slack.py
|
jcooper-korg/talon_user
|
ef086f9890448f7d633a4f02b36a18de853581a8
|
[
"0BSD"
] | null | null | null |
# from https://github.com/JonathanNickerson/talon_voice_user_scripts
# jsc added smileys
from talon.voice import Context, Key
ctx = Context('slack', bundle='com.tinyspeck.slackmacgap')
keymap = {
#'channel': Key('cmd-k'),
'channel up': Key('alt-up'),
'channel down': Key('alt-down'),
'(highlight command | insert command)': ['``', Key('left')],
'(highlight code | insert code)': ['``````', Key('left left left')],
# jsc: added smileys
'thumbs up': ':+1:',
'smiley': ':slightly_smiling_face:',
'laugh out loud': ':joy:',
}
ctx.keymap(keymap)
| 26.181818
| 72
| 0.626736
|
1e8766665c804f994c02066e8487c88aaf7f3ab8
| 56
|
py
|
Python
|
server.py
|
jvdgoltz/mini-bot
|
ca51f37bfd8d53eaedac4df4ebba6be6ed0eeacf
|
[
"MIT"
] | null | null | null |
server.py
|
jvdgoltz/mini-bot
|
ca51f37bfd8d53eaedac4df4ebba6be6ed0eeacf
|
[
"MIT"
] | null | null | null |
server.py
|
jvdgoltz/mini-bot
|
ca51f37bfd8d53eaedac4df4ebba6be6ed0eeacf
|
[
"MIT"
] | null | null | null |
# Here we make the server for a remote controlled robot
| 28
| 55
| 0.785714
|
e00794b5dc2a2d1580aae20ee39377a162eafa4c
| 4,270
|
py
|
Python
|
tools/mqtt_bruteforce.py
|
galletitaoreo/PythonPentest
|
abd46cc251abfc3fba02d7f03ddd049803dc6047
|
[
"MIT"
] | 5
|
2019-08-07T08:59:53.000Z
|
2021-05-14T19:35:57.000Z
|
tools/mqtt_bruteforce.py
|
Harusenpai/PythonPentest
|
abd46cc251abfc3fba02d7f03ddd049803dc6047
|
[
"MIT"
] | null | null | null |
tools/mqtt_bruteforce.py
|
Harusenpai/PythonPentest
|
abd46cc251abfc3fba02d7f03ddd049803dc6047
|
[
"MIT"
] | 2
|
2021-03-31T21:20:19.000Z
|
2021-08-28T04:21:12.000Z
|
#!/usr/bin/env python2
# Stupid MQTT Brute Forcer
# Coded by Alex. Twitter: @_tmp0
# https://github.com/zombiesam/joffrey/blob/master/joffrey-BH-2017.py
# Shoutout to ch3rn0byl, arch4y, evlb, acidgen and h4v0k
# Todo: try on broker with tls. make stuff pretty?
try:
import paho.mqtt.client as mqtt
except:
print '[!] Could not import paho. Install it genius.'
quit(42)
from time import sleep
from random import randint
import optparse, threading
import sys
reload(sys)
sys.setdefaultencoding('utf8')
parser = optparse.OptionParser('Usage: python %s [ARGS]' % sys.argv[0])
parser.add_option('-t', dest='target', type='string', help='Target domain or ip to invade')
parser.add_option('-p', dest='port', type='int', help='Target port (optional)')
parser.add_option('--threads', dest='nrThreads', type='int', help='Amount of threads for the King to do as he please with')
parser.add_option('-u', dest='username', type='string', help='Specify username')
parser.add_option('-w', dest='wordlist', type='string', help='Path to wordlist')
(options, args) = parser.parse_args()
target = options.target
port = options.port
nrThreads = options.nrThreads
username = options.username
wordlist = options.wordlist
if target == None or username == None or wordlist == None:
print parser.print_help()
quit(42)
if nrThreads == None:
nrThreads = 1
print '[*] Thread argv not supplied, setting threads to 1'
if port == None:
# TLS@8883
port = 1883
print '[*] TARGET => ' + target
print '[*] PORT => ' + str(port)
print '[*] THREADS => ' + str(nrThreads)
print '[*] USERNAME => ' + username
print '[*] WORDLIST => ' + wordlist
class bigslap(threading.Thread):
def __init__(self, target, port, username, pArray):
threading.Thread.__init__(self)
self.target = target
self.port = port
self.target_username = username
self.pArray = pArray
self.hearteater = mqtt.Client('C%d' % (randint(1,1000)))
self.p_id = False
def on_connect(self, c, u, f, rc):
if rc == 0: # rc 0 equals successful login, rest equals trouble
self.p_id = True
def run(self):
global pFound
for passwd in self.pArray:
if pFound == True:
return
self.hearteater.username_pw_set(username=self.target_username, password=passwd)
self.hearteater.on_connect = self.on_connect
self.hearteater.connect(self.target)
self.hearteater.loop_start()
sleep(1)
try:
self.hearteater.disconnect()
self.hearteater.loop_stop()
except:
pass
if self.p_id == True:
print '[+] Username: %s\n[+] Password: %s' % (self.target_username, passwd)
pFound = True
break
del self.hearteater
with open(wordlist) as f:
lenWordlist = sum(1 for line in f)
print '[*] Parsed %d passwords from %s' %(lenWordlist, wordlist)
thread_counter = 0
i = 1
wList_counter = 1
wList_total = 0
wList = []
global pFound
pFound = False
bEOF = False
print '[*] Hearteater will try to strike true!'
with open(wordlist) as infile:
for line in infile:
if pFound == True:
break
wList.append(line.strip('\n'))
if wList_counter == 10:
wList_total += wList_counter
t = bigslap(target, port, username, wList)
# t.setDaemon(True)
t.start()
del wList
wList = []
thread_counter += 1
wList_counter = 0
if thread_counter == nrThreads and bEOF == False:
t.join()
thread_counter = 0
if i == lenWordlist:
bEOF = True
wList_total += wList_counter
t = bigslap(target, port, username, wList)
t.setDaemon(True)
t.start()
t.join()
sys.stdout.write(' > %d/%d stabs\r' % (wList_total, lenWordlist))
sys.stdout.flush()
i += 1
wList_counter += 1
t.join()
if not pFound:
print '[*] Argh.. shouldn\'t had drunk that wine...'
else:
print '[*] Took a good %d stabs to find the heart!' % wList_total
print '[*] Long live the king!'
| 30.283688
| 123
| 0.60726
|
1c4e01f194711f376c920698342f053e2c7946fb
| 5,066
|
py
|
Python
|
butterfly/measurement.py
|
bwprice/butterfly-wings
|
18b138e7a907e372029b9c8e927bd06882bac964
|
[
"BSD-3-Clause"
] | 1
|
2020-05-30T23:30:08.000Z
|
2020-05-30T23:30:08.000Z
|
butterfly/measurement.py
|
bwprice/butterfly-wings
|
18b138e7a907e372029b9c8e927bd06882bac964
|
[
"BSD-3-Clause"
] | null | null | null |
butterfly/measurement.py
|
bwprice/butterfly-wings
|
18b138e7a907e372029b9c8e927bd06882bac964
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from joblib import Memory
location = './cachedir'
memory = Memory(location, verbose=0)
@memory.cache()
def main(points_interest, T_space, axes=None):
''' Calculates the length and draws the lines for length
of the butterfly wings.
Parameters
----------
    axes: array, optional
        the array containing the intermediary Axes used for drawing (skipped when None).
points_interest: dictionary
dictionary containing the points of interest in the form [y, x],
keyed with "outer_pix_l", "inner_pix_l", "outer_pix_r", "inner_pix_r",
"body_center"
T_space: float
number of pixels between 2 ticks.
Returns
-------
    dist_pix: dictionary
        dictionary containing measurements in pixels, keyed with
        "dist_l", "dist_r", "dist_l_center",
        "dist_r_center", "dist_span"
    dist_mm: dictionary
        dictionary containing measurements in mm, keyed with
        the same keys as dist_pix
'''
# Extract points of interest
pix_out_l, pix_out_r = np.array(points_interest["outer_pix_l"]), np.array(points_interest["outer_pix_r"])
pix_in_l, pix_in_r = np.array(points_interest["inner_pix_l"]), np.array(points_interest["inner_pix_r"])
body_center = np.array(points_interest["body_center"])
# Distance measurements between points of interest
dist_r_pix = np.linalg.norm(pix_out_r - pix_in_r)
dist_l_pix = np.linalg.norm(pix_out_l - pix_in_l)
dist_r_center_pix = np.linalg.norm(pix_out_r - body_center)
dist_l_center_pix = np.linalg.norm(pix_out_l - body_center)
dist_span_pix = np.linalg.norm(pix_out_l - pix_out_r)
# Converting to millimeters
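    # (this assumes adjacent ruler ticks are 1 mm apart, so millimetres = pixels / T_space;
    #  e.g. a 253-pixel wing with T_space = 10 pixels per tick comes out as 25.3 mm)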
dist_l_mm = round(dist_l_pix / T_space, 2)
dist_r_mm = round(dist_r_pix / T_space, 2)
dist_l_center_mm = round(dist_l_center_pix / T_space, 2)
dist_r_center_mm = round(dist_r_center_pix / T_space, 2)
dist_span_mm = round(dist_span_pix / T_space, 2)
# Do we want to round these?
dist_l_pix = round(dist_l_pix, 2)
dist_r_pix = round(dist_r_pix, 2)
dist_l_center_pix = round(dist_l_center_pix, 2)
dist_r_center_pix = round(dist_r_center_pix, 2)
dist_span_pix = round(dist_span_pix, 2)
dist_pix = {
"dist_l": dist_l_pix,
"dist_r": dist_r_pix,
"dist_l_center": dist_l_center_pix,
"dist_r_center": dist_r_center_pix,
"dist_span": dist_span_pix
}
dist_mm = {
"dist_l": dist_l_mm,
"dist_r": dist_r_mm,
"dist_l_center": dist_l_center_mm,
"dist_r_center": dist_r_center_mm,
"dist_span": dist_span_mm
}
if axes and axes[0]:
textsize = 5
if axes[3]:
textsize = 3
axes[0].plot([pix_out_l[1], pix_in_l[1]],
[pix_out_l[0], pix_in_l[0]], color='r')
axes[0].plot([pix_out_r[1], pix_in_r[1]],
[pix_out_r[0], pix_in_r[0]], color='r')
axes[0].text(int((pix_out_l[1] + pix_in_l[1]) / 2) + 50,
int((pix_out_l[0] + pix_in_l[0]) / 2) - 50,
'left_wing = ' + str(round(dist_l_mm, 2)) + ' mm',
size=textsize,
color='r')
axes[0].text(int((pix_out_r[1] + pix_in_r[1]) / 2) + 50,
int((pix_out_r[0] + pix_in_r[0]) / 2) + 50,
'right_wing = ' + str(round(dist_r_mm, 2))
+ ' mm',
size=textsize, color='r')
axes[0].plot([pix_out_l[1], body_center[1]],
[pix_out_l[0], body_center[0]], color='orange', linestyle='dotted')
axes[0].plot([pix_out_r[1], body_center[1]],
[pix_out_r[0], body_center[0]], color='orange', linestyle='dotted')
axes[0].text(int((pix_out_l[1] + body_center[1]) / 2) + 50,
int((pix_out_l[0] + body_center[0]) / 2) - 50,
'left_wing_center = ' + str(round(dist_l_center_mm, 2)) + ' mm',
size=textsize,
color='orange')
axes[0].text(int((pix_out_r[1] + body_center[1]) / 2) + 50,
int((pix_out_r[0] + body_center[0]) / 2) + 50,
'right_wing_center = ' + str(round(dist_r_center_mm, 2))
+ ' mm',
size=textsize, color='orange')
axes[0].plot([pix_out_l[1], pix_out_r[1]],
[pix_out_l[0], pix_out_r[0]], color='orange', linestyle='dashed')
axes[0].text(int((pix_out_l[1] + pix_out_r[1]) / 2) - 50,
int((pix_out_l[0] + pix_out_r[0]) / 2) - 50,
'wing_span = ' + str(round(dist_span_mm, 2))
+ ' mm',
size=textsize, color='orange')
print(f'left_wing : {dist_mm["dist_l"]} mm')
print(f'right_wing : {dist_mm["dist_r"]} mm')
print(f'left_wing_center : {dist_mm["dist_l_center"]} mm')
print(f'right_wing_center : {dist_mm["dist_r_center"]} mm')
print(f'wing_span : {dist_mm["dist_span"]} mm')
return dist_pix, dist_mm
| 39.271318
| 109
| 0.580932
|
d63d7b88c7b4bc182b2a7534431d84d42e38cdba
| 1,974
|
py
|
Python
|
player.py
|
king2b3/ow_team_picker
|
1d1bdc638df7870d92eaaab7225f36bdaf1f06e8
|
[
"MIT"
] | null | null | null |
player.py
|
king2b3/ow_team_picker
|
1d1bdc638df7870d92eaaab7225f36bdaf1f06e8
|
[
"MIT"
] | null | null | null |
player.py
|
king2b3/ow_team_picker
|
1d1bdc638df7870d92eaaab7225f36bdaf1f06e8
|
[
"MIT"
] | null | null | null |
from main import roles
import random
class Player():
def __init__(self, name:str, roles:dict, heroes:dict):
"""
Parameters:
name: str
name of the player, which will be printed out
roles: dict
holds the role preferences for the hero
heroes: dict
holds the hero preferences for this player
"""
self.name = name
self.map1 = None
self.map2 = None
self.map3 = None
self.map4 = None
self.roles = roles
self.heroes = heroes
self.maps_played = 0
self.tank = roles["tank"]
self.dps = roles["dps"]
self.support = roles["support"]
# makes roulette wheel for heroes
self.heroes_list = []
for hero in self.heroes.keys():
for i in range(self.heroes[hero]):
self.heroes_list.append(hero)
# makes roulette wheel for roles
self.roles_list = []
for role in self.roles.keys():
for i in range(self.roles[role]):
self.roles_list.append(role)
def returnPlayer(self, role) -> str:
"""Returns a hero choice off of preference
"""
temp_hero = None
while temp_hero not in roles[role]:
temp_hero = random.choice(self.heroes_list)
return temp_hero
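# The repeated-entry lists built in __init__ form a simple roulette wheel: a hero with
# preference weight 3 appears three times, so random.choice picks it with probability
# proportional to its weight. An equivalent sketch (illustrative names only) would be
# random.choices(list(self.heroes), weights=self.heroes.values(), k=1)[0].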
def main():
import GOML
import random
from main import roles
    # chooses a role
t = Player("logro", GOML.LOGRO_ROLES, GOML.LOGRO_HEROES)
temp_role = random.choice(t.roles_list)
print(f"role picked {temp_role}")
print(t.returnPlayer(temp_role))
map1 = {
"MT": Player("logro", GOML.LOGRO_ROLES, GOML.LOGRO_HEROES),
"OT": None,
"HS": None,
"PJ": None,
"MS": None,
"FS": None
}
print(map1["MT"].name)
if __name__ == "__main__":
main()
| 24.37037
| 67
| 0.537487
|
534892435f2ee30a8f2d556124954249bbde3a42
| 6,529
|
py
|
Python
|
vnet_manager/environment/lxc.py
|
ppartarr/vnet-manager
|
e7e8dfc9014c98f34bce639f48e0baa603d83b67
|
[
"MIT"
] | null | null | null |
vnet_manager/environment/lxc.py
|
ppartarr/vnet-manager
|
e7e8dfc9014c98f34bce639f48e0baa603d83b67
|
[
"MIT"
] | null | null | null |
vnet_manager/environment/lxc.py
|
ppartarr/vnet-manager
|
e7e8dfc9014c98f34bce639f48e0baa603d83b67
|
[
"MIT"
] | null | null | null |
import shlex
from logging import getLogger
from time import sleep
from vnet_manager.operations.image import check_if_lxc_image_exists, create_lxc_image_from_container
from vnet_manager.operations.profile import check_if_lxc_profile_exists, create_vnet_lxc_profile, delete_vnet_lxc_profile
from vnet_manager.operations.storage import check_if_lxc_storage_pool_exists, create_lxc_storage_pool, delete_lxc_storage_pool
from vnet_manager.operations.machine import create_lxc_base_image_container, change_lxc_machine_status, destroy_lxc_machine
from vnet_manager.environment.host import check_for_supported_os, check_for_installed_packages
from vnet_manager.providers.lxc import get_lxd_client
from vnet_manager.conf import settings
from vnet_manager.utils.user import request_confirmation
logger = getLogger(__name__)
def ensure_vnet_lxc_environment(config):
"""
Checks and creates the LXC environment
    :param dict config: The config created by get_config()
:raises RuntimeError: If unsupported OS, or missing packages
"""
# Check if there are any LXC machines in the config
if "lxc" not in [settings.MACHINE_TYPE_PROVIDER_MAPPING[machine["type"]] for machine in config["machines"].values()]:
logger.debug("Skipping LXC environment creation, no LXC machines in config")
return
# Check if we are on a supported OS
if not check_for_supported_os(config, "lxc"):
logger.critical("Unable to create LXC environment on your machine, OS not supported")
raise RuntimeError("OS not supported for provider LXC")
# Check if all required packages have been installed
if not check_for_installed_packages(config, "lxc"):
logger.critical("Not all required host packages seem to be installed, please fix this before proceeding")
raise RuntimeError("Missing host packages")
# Check if the storage pool exists
if not check_if_lxc_storage_pool_exists(settings.LXC_STORAGE_POOL_NAME):
logger.info("VNet LXC storage pool does not exist, creating it")
create_lxc_storage_pool(name=settings.LXC_STORAGE_POOL_NAME, driver=settings.LXC_STORAGE_POOL_DRIVER)
else:
logger.debug("VNet LXC storage pool {} found".format(settings.LXC_STORAGE_POOL_NAME))
# Check if the profile exists
if not check_if_lxc_profile_exists(settings.LXC_VNET_PROFILE):
logger.info("VNet LXC profile does not exist, creating it")
create_vnet_lxc_profile(settings.LXC_VNET_PROFILE)
else:
logger.debug("VNet profile {} found".format(settings.LXC_VNET_PROFILE))
# Check if the base image exists
if not check_if_lxc_image_exists(settings.LXC_BASE_IMAGE_ALIAS, by_alias=True):
logger.info("Base image does not exist, creating it")
create_lxc_base_image_container(config)
change_lxc_machine_status(settings.LXC_BASE_IMAGE_MACHINE_NAME, status="start")
configure_lxc_base_machine(config)
create_lxc_image_from_container(settings.LXC_BASE_IMAGE_MACHINE_NAME, alias=settings.LXC_BASE_IMAGE_ALIAS)
destroy_lxc_machine(settings.LXC_BASE_IMAGE_MACHINE_NAME, wait=False)
else:
logger.debug("Base image {} found".format(settings.LXC_BASE_IMAGE_ALIAS))
def cleanup_vnet_lxc_environment():
"""
Cleans up specific VNet LXC configuration
No environments should be active when calling this function
"""
request_confirmation(message="Cleanup will delete the VNet LXC configurations, such as profile and storage pools")
logger.info("Cleaning up VNet LXC configuration")
delete_vnet_lxc_profile(settings.LXC_VNET_PROFILE)
delete_lxc_storage_pool(settings.LXC_STORAGE_POOL_NAME)
def configure_lxc_base_machine(config):
"""
Configure the LXC base machine to get a fully functional VNet base machine which we can make an image from
:param dict config: The config generated by get_config()
:raises RuntimeError: If the base machine is started without networking/dns
"""
logger.info("Configuring LXC base machine {}, this might take a while".format(settings.LXC_BASE_IMAGE_MACHINE_NAME))
client = get_lxd_client()
machine = client.containers.get(settings.LXC_BASE_IMAGE_MACHINE_NAME)
def execute_and_log(command, **kwargs):
result = machine.execute(shlex.split(command), **kwargs)
logger.debug(result)
return result
# Check for DNS
logger.debug("Checking for DNS connectivity")
dns = False
for _ in range(0, settings.LXC_MAX_STATUS_WAIT_ATTEMPTS):
if execute_and_log("host -t A google.com")[0] == 0:
dns = True
break
# No DNS connectivity (yet), try again
sleep(2)
if not dns:
# Shutdown base if DNS check fails
logger.debug("Stopping base machine")
machine.stop()
raise RuntimeError("Base machine started without working DNS, unable to continue")
# Set the FRR routing source and key
execute_and_log("bash -c 'curl -s https://deb.frrouting.org/frr/keys.asc | apt-key add'")
execute_and_log(
"bash -c 'echo deb https://deb.frrouting.org/frr $(lsb_release -s -c) {} | tee -a /etc/apt/sources.list.d/frr.list'".format(
settings.FRR_RELEASE
)
)
# Update and install packages
execute_and_log("apt-get update")
execute_and_log(
"apt-get upgrade -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold'",
environment={"DEBIAN_FRONTEND": "noninteractive"},
)
execute_and_log(
"apt-get install -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' {}".format(
" ".join(config["providers"]["lxc"]["guest_packages"])
),
environment={"DEBIAN_FRONTEND": "noninteractive"},
)
# Disable radvd by default
execute_and_log("systemctl disable radvd")
# Disable cloud init messing with our networking
execute_and_log("bash -c 'echo network: {config: disabled} > /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg'")
# Set the default VTYSH_PAGER
execute_and_log("bash -c 'export VTYSH_PAGER=more >> ~/.bashrc'")
# Make all files in the FRR dir owned by the frr user
execute_and_log(
"bash -c 'echo -e \"#!/bin/bash\nchown -R frr:frr /etc/frr\nsystemctl restart frr\" > /etc/rc.local; chmod +x /etc/rc.local'"
)
# All done, stop the container
machine.stop(wait=True)
logger.debug("LXC base machine {} successfully configured".format(settings.LXC_BASE_IMAGE_MACHINE_NAME))
| 46.971223
| 133
| 0.730433
|
3458440e7755a643a13050e863d2f373f9ac62d3
| 1,116
|
py
|
Python
|
bff.py
|
jsabak/advanved_python_for_testers
|
fc55ca4d5d5ad7beebee489193e251a16c3e956b
|
[
"MIT"
] | 1
|
2020-04-21T20:47:07.000Z
|
2020-04-21T20:47:07.000Z
|
bff.py
|
mkusz/advanced_python_for_testers
|
fc55ca4d5d5ad7beebee489193e251a16c3e956b
|
[
"MIT"
] | null | null | null |
bff.py
|
mkusz/advanced_python_for_testers
|
fc55ca4d5d5ad7beebee489193e251a16c3e956b
|
[
"MIT"
] | 1
|
2019-11-27T21:32:51.000Z
|
2019-11-27T21:32:51.000Z
|
(lambda _:[{1:lambda:[0for _['l']in[[i for(i,x)in enumerate(_['P'])if x=='[']]]and[0for _['i']in[_['i']+1]],2:lambda:[0for _['r']in[[i for(i,x)in enumerate(_['P']) if x==']'][::-1]]]and[0for _['i']in[_['i']+1]],3:lambda:_['c'].close(),'>':lambda:[0for _['p']in[_['p']+1]]and[0for _['i']in[_['i']+1]],'<':lambda:[0for _['p']in[_['p']-1]]and[0for _['i']in[_['i']+1]],'+':lambda:[0for _['m'][_['p']]in[(_['m'][_['p']]+1)]]and[0for _['i']in[_['i']+1]],'-':lambda:[0for _['m'][_['p']]in[(_['m'][_['p']]-1)]]and[0for _['i']in[_['i']+1]],'.':lambda:print(chr(_['m'][_['p']]),end='') or [0for _['i']in[_['i']+1]],',':lambda:[0for _['m'][_['p']]in[ord(input())]]and[0for _['i']in[_['i']+1]],'[':lambda:[0for _['i']in[_['i']+1 if _['m'][_['p']] else _['r'][_['l'].index(_['i'])]+1]],']':lambda:[0for _['i']in[_['l'][_['r'].index(_['i'])]]]}[_['P'][_['i']]]() for c in _['c'] if _['P'][_['i']]in[1,2,3,'>','<','+','-','.',',','[',']']])({'p':0,'m':[0]*100,'P':[1,2]+list('++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.+++++++..+++.>++.<<+++++++++++++++.>.+++.------.--------.>+.>.')+[3],'i':0,'c':(c for c in range(10**8))})
| 1,116
| 1,116
| 0.401434
|
852c7fa73251d81111ad4f68bd8ab53c34c7d3ac
| 2,738
|
py
|
Python
|
api/tests/test_auth.py
|
I-am-vishalmaurya/django_movie_recommender
|
7c4bbdc7cc8ee2b279c3fb1b3213c92f9d030bd7
|
[
"Apache-2.0"
] | null | null | null |
api/tests/test_auth.py
|
I-am-vishalmaurya/django_movie_recommender
|
7c4bbdc7cc8ee2b279c3fb1b3213c92f9d030bd7
|
[
"Apache-2.0"
] | null | null | null |
api/tests/test_auth.py
|
I-am-vishalmaurya/django_movie_recommender
|
7c4bbdc7cc8ee2b279c3fb1b3213c92f9d030bd7
|
[
"Apache-2.0"
] | null | null | null |
from .tests_setup import TestSetup
from ..models import CustomUser
class TestAuth(TestSetup):
def test_user_cannot_register_with_no_data(self):
response = self.client.post(self.register_url)
self.assertEqual(response.status_code, 400)
def test_user_can_register_successfully(self):
response = self.client.post(self.register_url, data=self.user_data, format='json')
self.assertEqual(response.data['email'], self.user_data['email'])
self.assertEqual(response.data['username'], self.user_data['username'])
self.assertEqual(response.status_code, 201)
def test_user_cannot_register_with_existing_email(self):
response = self.client.post(self.register_url, data=self.user_data, format='json')
self.assertEqual(response.status_code, 201)
response2 = self.client.post(self.register_url, data=self.user_data, format='json')
self.assertEqual(response2.status_code, 400)
def test_user_cannot_register_with_existing_username(self):
response = self.client.post(self.register_url, data=self.user_data, format='json')
self.assertEqual(response.status_code, 201)
response2 = self.client.post(self.register_url, data=self.user_data, format='json')
self.assertEqual(response2.status_code, 400)
def test_user_cannot_login_with_no_data(self):
response = self.client.post(self.login_url)
self.assertEqual(response.status_code, 400)
def test_user_cannot_login_with_wrong_data(self):
register = self.client.post(self.register_url, data=self.user_data, format='json')
self.assertEqual(register.status_code, 201)
wrong_email = self.client.post(self.login_url, data=self.wrong_email_data, format='json')
self.assertEqual(wrong_email.status_code, 400)
wrong_password = self.client.post(self.login_url, data=self.wrong_password_data, format='json')
self.assertEqual(wrong_password.status_code, 400)
def test_user_cannot_login_with_unverified_email(self):
register = self.client.post(self.register_url, data=self.user_data, format='json')
self.assertEqual(register.status_code, 201)
response = self.client.post(self.login_url, data=self.user_data, format='json')
self.assertEqual(response.status_code, 400)
def test_user_can_login_successfully(self):
self.client.post(self.register_url, data=self.user_data, format='json')
# Verify the email
user = CustomUser.objects.get(email=self.user_data['email'])
user.is_active = True
user.save()
response = self.client.post(self.login_url, data=self.user_data, format='json')
self.assertEqual(response.status_code, 200)
| 49.781818
| 103
| 0.728269
|
236e168eaf442d1432e3a0fcc9d16cc12205d251
| 380
|
py
|
Python
|
composition/__init__.py
|
wilsonify/music21-tools
|
78cf5404c1bf5e4ab8b4d5b7b6c76e253d48c8ee
|
[
"BSD-3-Clause"
] | 29
|
2018-02-07T09:08:22.000Z
|
2021-06-14T07:38:25.000Z
|
composition/__init__.py
|
wilsonify/music21-tools
|
78cf5404c1bf5e4ab8b4d5b7b6c76e253d48c8ee
|
[
"BSD-3-Clause"
] | 2
|
2018-05-09T16:45:11.000Z
|
2019-08-07T20:23:03.000Z
|
composition/__init__.py
|
wilsonify/music21-tools
|
78cf5404c1bf5e4ab8b4d5b7b6c76e253d48c8ee
|
[
"BSD-3-Clause"
] | 12
|
2018-06-07T14:36:46.000Z
|
2021-09-01T11:17:06.000Z
|
# -*- coding: utf-8 -*-
"""
Files in this package relate to aiding in composition
"""
__all__ = ['arvo', 'phasing', 'seeger'] # leave off aug30 for now
from music21.demos.composition import arvo
from music21.demos.composition import phasing
from music21.demos.composition import seeger
#------------------------------------------------------------------------------
# eof
21.111111 | 79 | 0.571053

fff65161dee82c74f9f91cf30b8ee934853da4fc | 1,539 | py | Python
fluentcms_emailtemplates/tests/test_replace_fields.py | django-fluent/fluentcms-emailtemplates | 29f032dab9f60d05db852d2a1adcbd16e18017d1 | ["Apache-2.0"] | 4 | 2018-01-08T11:00:32.000Z | 2020-05-13T15:21:03.000Z
fluentcms_emailtemplates/tests/test_replace_fields.py | django-fluent/fluentcms-emailtemplates | 29f032dab9f60d05db852d2a1adcbd16e18017d1 | ["Apache-2.0"] | 2 | 2018-01-11T04:20:50.000Z | 2018-01-22T21:42:22.000Z
fluentcms_emailtemplates/tests/test_replace_fields.py | django-fluent/fluentcms-emailtemplates | 29f032dab9f60d05db852d2a1adcbd16e18017d1 | ["Apache-2.0"] | 2 | 2018-01-08T08:18:54.000Z | 2018-01-19T14:02:15.000Z
from unittest import TestCase
from fluentcms_emailtemplates.rendering import replace_fields
class ReplaceFieldsTests(TestCase):
"""
Test how replace fields syntax works.
It emulates a subset of str.format()
and supports missing fields
"""
def test_replace_scalar(self):
result = replace_fields('Hello {subject}!', {'subject': "TEST"}, errors='raise')
self.assertEqual("Hello TEST!", result)
result = replace_fields('Hello {aa} or {bb} and others', {'aa': 11, 'bb': 22}, errors='raise')
self.assertEqual("Hello 11 or 22 and others", result)
def test_replace_scalar_format(self):
result = replace_fields('Hello {subject:s}!', {'subject': "TEST"}, errors='raise')
self.assertEqual("Hello TEST!", result)
result = replace_fields('Hello {aa:.02f}', {'aa': 1.5}, errors='raise')
self.assertEqual("Hello 1.50", result)
def test_replace_object(self):
class Foo(object):
def __init__(self):
self.x = 2
result = replace_fields('test {foo.x}!', {'foo': Foo()}, errors='raise')
self.assertEqual("test 2!", result)
def test_replace_invalid(self):
result = replace_fields('Hello {aa} or {bb} and others', {'aa': 11}, errors='inline')
self.assertEqual("Hello 11 or !!missing bb!! and others", result)
def test_replace_unicode(self):
result = replace_fields(u'Hello {aa} \xe9', {'aa': u'\xf6'}, errors='inline')
self.assertEqual(u"Hello \xf6 \xe9", result)
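
# --- Editor's note: illustrative sketch, not the fluentcms_emailtemplates implementation ---
# The tests above pin down the contract of replace_fields(): it supports a subset of
# str.format() (scalar fields, format specs, attribute access) and either raises on a
# missing field or inlines a "!!missing <name>!!" marker. A minimal stand-in with that
# behaviour can be built on string.Formatter, as sketched here.
import string


class _LenientFormatter(string.Formatter):
    """Formatter that inlines a marker for missing fields instead of always raising."""

    def __init__(self, errors="inline"):
        super().__init__()
        self.errors = errors

    def get_value(self, key, args, kwargs):
        try:
            return super().get_value(key, args, kwargs)
        except (KeyError, IndexError):
            if self.errors == "raise":
                raise
            return "!!missing {0}!!".format(key)


def replace_fields_sketch(template, context, errors="inline"):
    """Hedged stand-in mirroring the behaviour exercised by ReplaceFieldsTests."""
    return _LenientFormatter(errors).vformat(template, (), context)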
37.536585 | 102 | 0.630929

ff0dfb2c88db47e9a0a7eb8e88409370f4c412a3 | 33,133 | py | Python
python/ccxt/async/bitfinex.py | bilibilihuangyifan/ccxt | e3e058067808014c7110813d626d8a076ce96edb | ["MIT"] | 73 | 2018-05-15T00:53:50.000Z | 2022-03-07T14:45:11.000Z
python/ccxt/async/bitfinex.py | bilibilihuangyifan/ccxt | e3e058067808014c7110813d626d8a076ce96edb | ["MIT"] | 20 | 2018-05-15T08:46:45.000Z | 2018-06-19T08:49:27.000Z
python/ccxt/async/bitfinex.py | bilibilihuangyifan/ccxt | e3e058067808014c7110813d626d8a076ce96edb | ["MIT"] | 11 | 2018-05-15T00:09:30.000Z | 2022-03-07T14:45:27.000Z
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async.base.exchange import Exchange
import base64
import hashlib
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitfinex (Exchange):
def describe(self):
return self.deep_extend(super(bitfinex, self).describe(), {
'id': 'bitfinex',
'name': 'Bitfinex',
'countries': 'VG',
'version': 'v1',
'rateLimit': 1500,
# new metainfo interface
'has': {
'CORS': False,
'createDepositAddress': True,
'deposit': True,
'fetchClosedOrders': True,
'fetchDepositAddress': True,
'fetchTradingFees': True,
'fetchFundingFees': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchTickers': True,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'3h': '3h',
'6h': '6h',
'12h': '12h',
'1d': '1D',
'1w': '7D',
'2w': '14D',
'1M': '1M',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766244-e328a50c-5ed2-11e7-947b-041416579bb3.jpg',
'api': 'https://api.bitfinex.com',
'www': 'https://www.bitfinex.com',
'doc': [
'https://bitfinex.readme.io/v1/docs',
'https://github.com/bitfinexcom/bitfinex-api-node',
],
},
'api': {
'v2': {
'get': [
'candles/trade:{timeframe}:{symbol}/{section}',
'candles/trade:{timeframe}:{symbol}/last',
'candles/trade:{timeframe}:{symbol}/hist',
],
},
'public': {
'get': [
'book/{symbol}',
# 'candles/{symbol}',
'lendbook/{currency}',
'lends/{currency}',
'pubticker/{symbol}',
'stats/{symbol}',
'symbols',
'symbols_details',
'tickers',
'today',
'trades/{symbol}',
],
},
'private': {
'post': [
'account_fees',
'account_infos',
'balances',
'basket_manage',
'credits',
'deposit/new',
'funding/close',
'history',
'history/movements',
'key_info',
'margin_infos',
'mytrades',
'mytrades_funding',
'offer/cancel',
'offer/new',
'offer/status',
'offers',
'offers/hist',
'order/cancel',
'order/cancel/all',
'order/cancel/multi',
'order/cancel/replace',
'order/new',
'order/new/multi',
'order/status',
'orders',
'orders/hist',
'position/claim',
'position/close',
'positions',
'summary',
'taken_funds',
'total_taken_funds',
'transfer',
'unused_taken_funds',
'withdraw',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'maker': 0.1 / 100,
'taker': 0.2 / 100,
'tiers': {
'taker': [
[0, 0.2 / 100],
[500000, 0.2 / 100],
[1000000, 0.2 / 100],
[2500000, 0.2 / 100],
[5000000, 0.2 / 100],
[7500000, 0.2 / 100],
[10000000, 0.18 / 100],
[15000000, 0.16 / 100],
[20000000, 0.14 / 100],
[25000000, 0.12 / 100],
[30000000, 0.1 / 100],
],
'maker': [
[0, 0.1 / 100],
[500000, 0.08 / 100],
[1000000, 0.06 / 100],
[2500000, 0.04 / 100],
[5000000, 0.02 / 100],
[7500000, 0],
[10000000, 0],
[15000000, 0],
[20000000, 0],
[25000000, 0],
[30000000, 0],
],
},
},
'funding': {
'tierBased': False, # True for tier-based/progressive
'percentage': False, # fixed commission
# Actually deposit fees are free for larger deposits(> $1000 USD equivalent)
# these values below are deprecated, we should not hardcode fees and limits anymore
# to be reimplemented with bitfinex funding fees from their API or web endpoints
'deposit': {
'BTC': 0.0004,
'IOTA': 0.5,
'ETH': 0.0027,
'BCH': 0.0001,
'LTC': 0.001,
'EOS': 0.24279,
'XMR': 0.04,
'SAN': 0.99269,
'DASH': 0.01,
'ETC': 0.01,
'XRP': 0.02,
'YYW': 16.915,
'NEO': 0,
'ZEC': 0.001,
'BTG': 0,
'OMG': 0.14026,
'DATA': 20.773,
'QASH': 1.9858,
'ETP': 0.01,
'QTUM': 0.01,
'EDO': 0.95001,
'AVT': 1.3045,
'USDT': 0,
'TRX': 28.184,
'ZRX': 1.9947,
'RCN': 10.793,
'TNB': 31.915,
'SNT': 14.976,
'RLC': 1.414,
'GNT': 5.8952,
'SPK': 10.893,
'REP': 0.041168,
'BAT': 6.1546,
'ELF': 1.8753,
'FUN': 32.336,
'SNG': 18.622,
'AID': 8.08,
'MNA': 16.617,
'NEC': 1.6504,
},
'withdraw': {
'BTC': 0.0004,
'IOTA': 0.5,
'ETH': 0.0027,
'BCH': 0.0001,
'LTC': 0.001,
'EOS': 0.24279,
'XMR': 0.04,
'SAN': 0.99269,
'DASH': 0.01,
'ETC': 0.01,
'XRP': 0.02,
'YYW': 16.915,
'NEO': 0,
'ZEC': 0.001,
'BTG': 0,
'OMG': 0.14026,
'DATA': 20.773,
'QASH': 1.9858,
'ETP': 0.01,
'QTUM': 0.01,
'EDO': 0.95001,
'AVT': 1.3045,
'USDT': 20,
'TRX': 28.184,
'ZRX': 1.9947,
'RCN': 10.793,
'TNB': 31.915,
'SNT': 14.976,
'RLC': 1.414,
'GNT': 5.8952,
'SPK': 10.893,
'REP': 0.041168,
'BAT': 6.1546,
'ELF': 1.8753,
'FUN': 32.336,
'SNG': 18.622,
'AID': 8.08,
'MNA': 16.617,
'NEC': 1.6504,
},
},
},
'commonCurrencies': {
'BCC': 'CST_BCC',
'BCU': 'CST_BCU',
'DAT': 'DATA',
'DSH': 'DASH', # Bitfinex names Dash as DSH, instead of DASH
'IOS': 'IOST',
'IOT': 'IOTA',
'MNA': 'MANA',
'QSH': 'QASH',
'QTM': 'QTUM',
'SNG': 'SNGLS',
'SPK': 'SPANK',
'YYW': 'YOYOW',
},
'exceptions': {
'exact': {
'temporarily_unavailable': ExchangeNotAvailable, # Sorry, the service is temporarily unavailable. See https://www.bitfinex.com/ for more info.
'Order could not be cancelled.': OrderNotFound, # non-existent order
'No such order found.': OrderNotFound, # ?
'Order price must be positive.': InvalidOrder, # on price <= 0
'Could not find a key matching the given X-BFX-APIKEY.': AuthenticationError,
'This API key does not have permission for self action': AuthenticationError, # authenticated but not authorized
'Key price should be a decimal number, e.g. "123.456"': InvalidOrder, # on isNaN(price)
'Key amount should be a decimal number, e.g. "123.456"': InvalidOrder, # on isNaN(amount)
'ERR_RATE_LIMIT': DDoSProtection,
'Nonce is too small.': InvalidNonce,
},
'broad': {
'Invalid order: not enough exchange balance for ': InsufficientFunds, # when buying cost is greater than the available quote currency
'Invalid order: minimum size for ': InvalidOrder, # when amount below limits.amount.min
'Invalid order': InvalidOrder, # ?
'The available balance is only': InsufficientFunds, # {"status":"error","message":"Cannot withdraw 1.0027 ETH from your exchange wallet. The available balance is only 0.0 ETH. If you have limit orders, open positions, unused or active margin funding, self will decrease your available balance. To increase it, you can cancel limit orders or reduce/close your positions.","withdrawal_id":0,"fees":"0.0027"}
},
},
'precisionMode': SIGNIFICANT_DIGITS,
})
async def fetch_funding_fees(self, params={}):
await self.load_markets()
response = await self.privatePostAccountFees(params)
fees = response['withdraw']
withdraw = {}
ids = list(fees.keys())
for i in range(0, len(ids)):
id = ids[i]
code = id
if id in self.currencies_by_id:
currency = self.currencies_by_id[id]
code = currency['code']
withdraw[code] = self.safe_float(fees, id)
return {
'info': response,
'withdraw': withdraw,
'deposit': withdraw, # only for deposits of less than $1000
}
async def fetch_trading_fees(self, params={}):
await self.load_markets()
response = await self.privatePostSummary(params)
return {
'info': response,
'maker': self.safe_float(response, 'maker_fee'),
'taker': self.safe_float(response, 'taker_fee'),
}
async def fetch_markets(self):
markets = await self.publicGetSymbolsDetails()
result = []
for p in range(0, len(markets)):
market = markets[p]
id = market['pair'].upper()
baseId = id[0:3]
quoteId = id[3:6]
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'price': market['price_precision'],
'amount': market['price_precision'],
}
limits = {
'amount': {
'min': self.safe_float(market, 'minimum_order_size'),
'max': self.safe_float(market, 'maximum_order_size'),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
}
limits['cost'] = {
'min': limits['amount']['min'] * limits['price']['min'],
'max': None,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': limits,
'info': market,
})
return result
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
def price_to_precision(self, symbol, price):
return self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
def amount_to_precision(self, symbol, amount):
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], self.precisionMode)
def fee_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, ROUND, self.currencies[currency]['precision'], self.precisionMode)
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = amount * rate
key = 'quote'
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(self.fee_to_precision(market[key], cost)),
}
async def fetch_balance(self, params={}):
await self.load_markets()
balanceType = self.safe_string(params, 'type', 'exchange')
balances = await self.privatePostBalances()
result = {'info': balances}
for i in range(0, len(balances)):
balance = balances[i]
if balance['type'] == balanceType:
currency = balance['currency']
uppercase = currency.upper()
uppercase = self.common_currency_code(uppercase)
account = self.account()
account['free'] = float(balance['available'])
account['total'] = float(balance['amount'])
account['used'] = account['total'] - account['free']
result[uppercase] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
if limit is not None:
request['limit_bids'] = limit
request['limit_asks'] = limit
orderbook = await self.publicGetBookSymbol(self.extend(request, params))
return self.parse_order_book(orderbook, None, 'bids', 'asks', 'price', 'amount')
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
tickers = await self.publicGetTickers(params)
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
parsedTicker = self.parse_ticker(ticker)
symbol = parsedTicker['symbol']
result[symbol] = parsedTicker
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
ticker = await self.publicGetPubtickerSymbol(self.extend({
'symbol': market['id'],
}, params))
return self.parse_ticker(ticker, market)
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_float(ticker, 'timestamp') * 1000
symbol = None
if market is not None:
symbol = market['symbol']
elif 'pair' in ticker:
id = ticker['pair']
if id in self.markets_by_id:
market = self.markets_by_id[id]
if market is not None:
symbol = market['symbol']
else:
baseId = id[0:3]
quoteId = id[3:6]
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
symbol = base + '/' + quote
last = self.safe_float(ticker, 'last_price')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': self.safe_float(ticker, 'mid'),
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': None,
'info': ticker,
}
def parse_trade(self, trade, market):
timestamp = int(float(trade['timestamp'])) * 1000
side = trade['type'].lower()
orderId = self.safe_string(trade, 'order_id')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'amount')
cost = price * amount
fee = None
if 'fee_amount' in trade:
feeCost = -self.safe_float(trade, 'fee_amount')
feeCurrency = self.safe_string(trade, 'fee_currency')
if feeCurrency in self.currencies_by_id:
feeCurrency = self.currencies_by_id[feeCurrency]['code']
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
return {
'id': str(trade['tid']),
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'order': orderId,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=50, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'limit_trades': limit,
}
if since is not None:
request['timestamp'] = int(since / 1000)
response = await self.publicGetTradesSymbol(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {'symbol': market['id']}
if limit is not None:
request['limit_trades'] = limit
if since is not None:
request['timestamp'] = int(since / 1000)
response = await self.privatePostMytrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
orderType = type
if (type == 'limit') or (type == 'market'):
orderType = 'exchange ' + type
# amount = self.amount_to_precision(symbol, amount)
order = {
'symbol': self.market_id(symbol),
'amount': str(amount),
'side': side,
'type': orderType,
'ocoorder': False,
'buy_price_oco': 0,
'sell_price_oco': 0,
}
if type == 'market':
order['price'] = str(self.nonce())
else:
# price = self.price_to_precision(symbol, price)
order['price'] = str(price)
result = await self.privatePostOrderNew(self.extend(order, params))
return self.parse_order(result)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
return await self.privatePostOrderCancel({'order_id': int(id)})
def parse_order(self, order, market=None):
side = order['side']
open = order['is_live']
canceled = order['is_cancelled']
status = None
if open:
status = 'open'
elif canceled:
status = 'canceled'
else:
status = 'closed'
symbol = None
if not market:
exchange = order['symbol'].upper()
if exchange in self.markets_by_id:
market = self.markets_by_id[exchange]
if market:
symbol = market['symbol']
orderType = order['type']
exchange = orderType.find('exchange ') >= 0
if exchange:
parts = order['type'].split(' ')
orderType = parts[1]
timestamp = int(float(order['timestamp']) * 1000)
result = {
'info': order,
'id': str(order['id']),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': orderType,
'side': side,
'price': self.safe_float(order, 'price'),
'average': self.safe_float(order, 'avg_execution_price'),
'amount': self.safe_float(order, 'original_amount'),
'remaining': self.safe_float(order, 'remaining_amount'),
'filled': self.safe_float(order, 'executed_amount'),
'status': status,
'fee': None,
}
return result
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
response = await self.privatePostOrders(params)
orders = self.parse_orders(response, None, since, limit)
if symbol:
orders = self.filter_by(orders, 'symbol', symbol)
return orders
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if limit is not None:
request['limit'] = limit
response = await self.privatePostOrdersHist(self.extend(request, params))
orders = self.parse_orders(response, None, since, limit)
if symbol is not None:
orders = self.filter_by(orders, 'symbol', symbol)
orders = self.filter_by(orders, 'status', 'closed')
return orders
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
response = await self.privatePostOrderStatus(self.extend({
'order_id': int(id),
}, params))
return self.parse_order(response)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv[0],
ohlcv[1],
ohlcv[3],
ohlcv[4],
ohlcv[2],
ohlcv[5],
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
if limit is None:
limit = 100
market = self.market(symbol)
v2id = 't' + market['id']
request = {
'symbol': v2id,
'timeframe': self.timeframes[timeframe],
'sort': 1,
'limit': limit,
}
if since is not None:
request['start'] = since
response = await self.v2GetCandlesTradeTimeframeSymbolHist(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def get_currency_name(self, currency):
names = {
'AGI': 'agi',
'AID': 'aid',
'AIO': 'aio',
'ANT': 'ant',
'AVT': 'aventus', # #1811
'BAT': 'bat',
'BCH': 'bcash', # undocumented
'BCI': 'bci',
'BFT': 'bft',
'BTC': 'bitcoin',
'BTG': 'bgold',
'CFI': 'cfi',
'DAI': 'dai',
'DASH': 'dash',
'DATA': 'datacoin',
'DTH': 'dth',
'EDO': 'eidoo', # #1811
'ELF': 'elf',
'EOS': 'eos',
'ETC': 'ethereumc',
'ETH': 'ethereum',
'ETP': 'metaverse',
'FUN': 'fun',
'GNT': 'golem',
'IOST': 'ios',
'IOTA': 'iota',
'LRC': 'lrc',
'LTC': 'litecoin',
'MANA': 'mna',
'MIT': 'mit',
'MTN': 'mtn',
'NEO': 'neo',
'ODE': 'ode',
'OMG': 'omisego',
'OMNI': 'mastercoin',
'QASH': 'qash',
'QTUM': 'qtum', # #1811
'RCN': 'rcn',
'RDN': 'rdn',
'REP': 'rep',
'REQ': 'req',
'RLC': 'rlc',
'SAN': 'santiment',
'SNGLS': 'sng',
'SNT': 'status',
'SPANK': 'spk',
'STJ': 'stj',
'TNB': 'tnb',
'TRX': 'trx',
'USD': 'wire',
'USDT': 'tetheruso', # undocumented
'WAX': 'wax',
'XLM': 'xlm',
'XMR': 'monero',
'XRP': 'ripple',
'XVG': 'xvg',
'YOYOW': 'yoyow',
'ZEC': 'zcash',
'ZRX': 'zrx',
}
if currency in names:
return names[currency]
raise NotSupported(self.id + ' ' + currency + ' not supported for withdrawal')
async def create_deposit_address(self, currency, params={}):
response = await self.fetch_deposit_address(currency, self.extend({
'renew': 1,
}, params))
address = self.safe_string(response, 'address')
self.check_address(address)
return {
'currency': currency,
'address': address,
'status': 'ok',
'info': response['info'],
}
async def fetch_deposit_address(self, currency, params={}):
name = self.get_currency_name(currency)
request = {
'method': name,
'wallet_name': 'exchange',
'renew': 0, # a value of 1 will generate a new address
}
response = await self.privatePostDepositNew(self.extend(request, params))
address = response['address']
tag = None
if 'address_pool' in response:
tag = address
address = response['address_pool']
self.check_address(address)
return {
'currency': currency,
'address': address,
'tag': tag,
'status': 'ok',
'info': response,
}
async def withdraw(self, currency, amount, address, tag=None, params={}):
self.check_address(address)
name = self.get_currency_name(currency)
request = {
'withdraw_type': name,
'walletselected': 'exchange',
'amount': str(amount),
'address': address,
}
if tag:
request['payment_id'] = tag
responses = await self.privatePostWithdraw(self.extend(request, params))
response = responses[0]
id = response['withdrawal_id']
message = response['message']
errorMessage = self.find_broadly_matched_key(self.exceptions['broad'], message)
if id == 0:
if errorMessage is not None:
Exception = self.exceptions['broad'][errorMessage]
raise Exception(self.id + ' ' + message)
raise ExchangeError(self.id + ' withdraw returned an id of zero: ' + self.json(response))
return {
'info': response,
'id': id,
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + self.implode_params(path, params)
if api == 'v2':
request = '/' + api + request
else:
request = '/' + self.version + request
query = self.omit(params, self.extract_params(path))
url = self.urls['api'] + request
if (api == 'public') or (path.find('/hist') >= 0):
if query:
suffix = '?' + self.urlencode(query)
url += suffix
request += suffix
if api == 'private':
self.check_required_credentials()
nonce = self.nonce()
query = self.extend({
'nonce': str(nonce),
'request': request,
}, query)
query = self.json(query)
query = self.encode(query)
payload = base64.b64encode(query)
secret = self.encode(self.secret)
signature = self.hmac(payload, secret, hashlib.sha384)
headers = {
'X-BFX-APIKEY': self.apiKey,
'X-BFX-PAYLOAD': self.decode(payload),
'X-BFX-SIGNATURE': signature,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def find_broadly_matched_key(self, map, broadString):
partialKeys = list(map.keys())
for i in range(0, len(partialKeys)):
partialKey = partialKeys[i]
if broadString.find(partialKey) >= 0:
return partialKey
return None
def handle_errors(self, code, reason, url, method, headers, body):
if len(body) < 2:
return
if code >= 400:
if body[0] == '{':
response = json.loads(body)
feedback = self.id + ' ' + self.json(response)
message = None
if 'message' in response:
message = response['message']
elif 'error' in response:
message = response['error']
else:
raise ExchangeError(feedback) # malformed(to our knowledge) response
exact = self.exceptions['exact']
if message in exact:
raise exact[message](feedback)
broad = self.exceptions['broad']
broadKey = self.find_broadly_matched_key(broad, message)
if broadKey is not None:
raise broad[broadKey](feedback)
raise ExchangeError(feedback) # unknown message
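
# --- Editor's note: illustrative sketch, not part of the generated ccxt module ---
# The sign() method above implements Bitfinex v1 authentication: the request
# parameters plus the request path and a nonce are JSON-encoded, base64-encoded,
# and signed with HMAC-SHA384 using the API secret; the result is sent in the
# X-BFX-* headers. The standalone helper below reproduces that construction with
# plain stdlib calls; the key and secret are placeholders, not real credentials.
import base64
import hashlib
import hmac
import json
import time


def build_bitfinex_v1_headers(api_key, api_secret, request_path, params=None):
    body = dict(params or {})
    body["request"] = request_path  # e.g. "/v1/balances"
    body["nonce"] = str(int(time.time() * 1000))
    payload = base64.b64encode(json.dumps(body).encode("utf-8"))
    signature = hmac.new(api_secret.encode("utf-8"), payload, hashlib.sha384).hexdigest()
    return {
        "X-BFX-APIKEY": api_key,
        "X-BFX-PAYLOAD": payload.decode("utf-8"),
        "X-BFX-SIGNATURE": signature,
    }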
38.61655 | 426 | 0.458425

37202290daab94626433aabecd3843d51fd57e23 | 7,231 | py | Python
python/ray/tune/examples/experiments/cifar10/pbt_tune_cifar10_with_keras.py | verystrongjoe/ray | 9f598230ebd9b84af8dce6a1b144d60f384e9266 | ["Apache-2.0"] | null | null | null
python/ray/tune/examples/experiments/cifar10/pbt_tune_cifar10_with_keras.py | verystrongjoe/ray | 9f598230ebd9b84af8dce6a1b144d60f384e9266 | ["Apache-2.0"] | null | null | null
python/ray/tune/examples/experiments/cifar10/pbt_tune_cifar10_with_keras.py | verystrongjoe/ray | 9f598230ebd9b84af8dce6a1b144d60f384e9266 | ["Apache-2.0"] | null | null | null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Train keras CNN on the CIFAR10 small images dataset.
The model comes from: https://zhuanlan.zhihu.com/p/29214791,
and it gets to about 87% validation accuracy in 100 epochs.
Note that the script requires a machine with 4 GPUs. You
can set {"gpu": 0} to use CPUs for training, although
it is less efficient.
"""
from __future__ import print_function
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.datasets import cifar10
from tensorflow.python.keras.layers import Input, Dense, Dropout, Flatten
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D
from tensorflow.python.keras.models import Model, load_model
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
import ray
from ray.tune import grid_search, run, sample_from
from ray.tune import Trainable
from ray.tune.schedulers import PopulationBasedTraining
import random
num_classes = 10
NUM_SAMPLES = 128
class Cifar10Model(Trainable):
def _read_data(self):
# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Convert class vectors to binary class matrices.
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.astype("float32")
x_train /= 255
x_test = x_test.astype("float32")
x_test /= 255
return (x_train, y_train), (x_test, y_test)
def _build_model(self, input_shape):
x = Input(shape=(32, 32, 3))
y = x
y = Convolution2D(
filters=64,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
kernel_initializer="he_normal")(y)
y = Convolution2D(
filters=64,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
kernel_initializer="he_normal")(y)
y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)
y = Convolution2D(
filters=128,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
kernel_initializer="he_normal")(y)
y = Convolution2D(
filters=128,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
kernel_initializer="he_normal")(y)
y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)
y = Convolution2D(
filters=256,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
kernel_initializer="he_normal")(y)
y = Convolution2D(
filters=256,
kernel_size=3,
strides=1,
padding="same",
activation="relu",
kernel_initializer="he_normal")(y)
y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)
y = Flatten()(y)
y = Dropout(self.config.get("dropout", 0.5))(y)
y = Dense(
units=10, activation="softmax", kernel_initializer="he_normal")(y)
model = Model(inputs=x, outputs=y, name="model1")
return model
def setup(self, config):
self.train_data, self.test_data = self._read_data()
x_train = self.train_data[0]
model = self._build_model(x_train.shape[1:])
opt = tf.keras.optimizers.Adadelta(
lr=self.config.get("lr", 1e-4),
decay=self.config.get("decay", 1e-4))
model.compile(
loss="categorical_crossentropy",
optimizer=opt,
metrics=["accuracy"])
self.model = model
def step(self):
x_train, y_train = self.train_data
x_train, y_train = x_train[:NUM_SAMPLES], y_train[:NUM_SAMPLES]
x_test, y_test = self.test_data
x_test, y_test = x_test[:NUM_SAMPLES], y_test[:NUM_SAMPLES]
aug_gen = ImageDataGenerator(
# set input mean to 0 over the dataset
featurewise_center=False,
# set each sample mean to 0
samplewise_center=False,
# divide inputs by dataset std
featurewise_std_normalization=False,
# divide each input by its std
samplewise_std_normalization=False,
# apply ZCA whitening
zca_whitening=False,
# randomly rotate images in the range (degrees, 0 to 180)
rotation_range=0,
# randomly shift images horizontally (fraction of total width)
width_shift_range=0.1,
# randomly shift images vertically (fraction of total height)
height_shift_range=0.1,
# randomly flip images
horizontal_flip=True,
# randomly flip images
vertical_flip=False,
)
aug_gen.fit(x_train)
batch_size = self.config.get("batch_size", 64)
gen = aug_gen.flow(x_train, y_train, batch_size=batch_size)
self.model.fit_generator(
generator=gen,
epochs=self.config.get("epochs", 1),
validation_data=None)
# loss, accuracy
_, accuracy = self.model.evaluate(x_test, y_test, verbose=0)
return {"mean_accuracy": accuracy}
def save_checkpoint(self, checkpoint_dir):
file_path = checkpoint_dir + "/model"
self.model.save(file_path)
return file_path
def load_checkpoint(self, path):
# See https://stackoverflow.com/a/42763323
del self.model
self.model = load_model(path)
def cleanup(self):
        # If needed, save your model on exit.
# saved_path = self.model.save(self.logdir)
# print("save model at: ", saved_path)
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
train_spec = {
"resources_per_trial": {
"cpu": 1,
"gpu": 0
},
"stop": {
"mean_accuracy": 0.80,
"training_iteration": 30,
},
"config": {
"epochs": 1,
"batch_size": 64,
"lr": grid_search([10**-4, 10**-5]),
"decay": sample_from(lambda spec: spec.config.lr / 100.0),
"dropout": grid_search([0.25, 0.5]),
},
"num_samples": 20,
}
if args.smoke_test:
train_spec["config"]["lr"] = 10**-4
train_spec["config"]["dropout"] = 0.5
ray.init()
pbt = PopulationBasedTraining(
time_attr="training_iteration",
metric="mean_accuracy",
mode="max",
perturbation_interval=10,
hyperparam_mutations={
"dropout": lambda : np.random.uniform(0, 1),
"lr": lambda : np.random.uniform(0.0003, 0.003),
"batch_size": lambda : random.choice([64, 128, 256, 512])
})
run(Cifar10Model, name="pbt_cifar10", scheduler=pbt, **train_spec)
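
# --- Editor's note: illustrative sketch, not part of the original Ray example ---
# The train_spec above mixes grid_search over lr and dropout with a sample_from()
# entry that derives decay from whichever lr a trial received. The helper below
# simply enumerates the four resulting starting configurations (decay = lr / 100);
# it is a conceptual illustration, not how Ray Tune expands search spaces internally.
def _expanded_trial_configs():
    configs = []
    for lr in (10 ** -4, 10 ** -5):  # grid_search values for lr
        for dropout in (0.25, 0.5):  # grid_search values for dropout
            configs.append({
                "epochs": 1,
                "batch_size": 64,
                "lr": lr,
                "decay": lr / 100.0,  # mirrors the sample_from lambda
                "dropout": dropout,
            })
    return configs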
32.137778 | 79 | 0.592034

bfb2f5ae67d19fc1b659b7167825ee5111952b89 | 28,355 | py | Python
scaper_concept/venv/lib/python3.9/site-packages/click/termui.py | edudbot/web-scraper | 12c07f805427699b2c3a35ed7c0d7efbc3673a7f | ["MIT"] | 1,394 | 2015-01-01T17:38:10.000Z | 2016-03-30T00:04:54.000Z
scaper_concept/venv/lib/python3.9/site-packages/click/termui.py | edudbot/web-scraper | 12c07f805427699b2c3a35ed7c0d7efbc3673a7f | ["MIT"] | 293 | 2015-01-02T08:19:41.000Z | 2016-03-24T00:01:14.000Z
scaper_concept/venv/lib/python3.9/site-packages/click/termui.py | edudbot/web-scraper | 12c07f805427699b2c3a35ed7c0d7efbc3673a7f | ["MIT"] | 188 | 2015-01-01T03:41:28.000Z | 2016-03-30T15:39:33.000Z
import inspect
import io
import itertools
import os
import sys
import typing as t
from gettext import gettext as _
from ._compat import isatty
from ._compat import strip_ansi
from ._compat import WIN
from .exceptions import Abort
from .exceptions import UsageError
from .globals import resolve_color_default
from .types import Choice
from .types import convert_type
from .types import ParamType
from .utils import echo
from .utils import LazyFile
if t.TYPE_CHECKING:
from ._termui_impl import ProgressBar
V = t.TypeVar("V")
# The prompt functions to use. The doc tools currently override these
# functions to customize how they work.
visible_prompt_func: t.Callable[[str], str] = input
_ansi_colors = {
"black": 30,
"red": 31,
"green": 32,
"yellow": 33,
"blue": 34,
"magenta": 35,
"cyan": 36,
"white": 37,
"reset": 39,
"bright_black": 90,
"bright_red": 91,
"bright_green": 92,
"bright_yellow": 93,
"bright_blue": 94,
"bright_magenta": 95,
"bright_cyan": 96,
"bright_white": 97,
}
_ansi_reset_all = "\033[0m"
def hidden_prompt_func(prompt: str) -> str:
import getpass
return getpass.getpass(prompt)
def _build_prompt(
text: str,
suffix: str,
show_default: bool = False,
default: t.Optional[t.Any] = None,
show_choices: bool = True,
type: t.Optional[ParamType] = None,
) -> str:
prompt = text
if type is not None and show_choices and isinstance(type, Choice):
prompt += f" ({', '.join(map(str, type.choices))})"
if default is not None and show_default:
prompt = f"{prompt} [{_format_default(default)}]"
return f"{prompt}{suffix}"
def _format_default(default: t.Any) -> t.Any:
if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, "name"):
return default.name # type: ignore
return default
def prompt(
text: str,
default: t.Optional[t.Any] = None,
hide_input: bool = False,
confirmation_prompt: t.Union[bool, str] = False,
type: t.Optional[t.Union[ParamType, t.Any]] = None,
value_proc: t.Optional[t.Callable[[str], t.Any]] = None,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
show_choices: bool = True,
) -> t.Any:
"""Prompts a user for input. This is a convenience function that can
be used to prompt a user for input later.
If the user aborts the input by sending an interrupt signal, this
function will catch it and raise a :exc:`Abort` exception.
:param text: the text to show for the prompt.
:param default: the default value to use if no input happens. If this
is not given it will prompt until it's aborted.
:param hide_input: if this is set to true then the input value will
be hidden.
:param confirmation_prompt: Prompt a second time to confirm the
value. Can be set to a string instead of ``True`` to customize
the message.
:param type: the type to use to check the value against.
:param value_proc: if this parameter is provided it's a function that
is invoked instead of the type conversion to
convert a value.
:param prompt_suffix: a suffix that should be added to the prompt.
:param show_default: shows or hides the default value in the prompt.
:param err: if set to true the file defaults to ``stderr`` instead of
``stdout``, the same as with echo.
:param show_choices: Show or hide choices if the passed type is a Choice.
For example if type is a Choice of either day or week,
show_choices is true and text is "Group by" then the
prompt will be "Group by (day, week): ".
.. versionadded:: 8.0
``confirmation_prompt`` can be a custom string.
.. versionadded:: 7.0
Added the ``show_choices`` parameter.
.. versionadded:: 6.0
Added unicode support for cmd.exe on Windows.
.. versionadded:: 4.0
Added the `err` parameter.
"""
def prompt_func(text: str) -> str:
f = hidden_prompt_func if hide_input else visible_prompt_func
try:
# Write the prompt separately so that we get nice
# coloring through colorama on Windows
echo(text.rstrip(" "), nl=False, err=err)
# Echo a space to stdout to work around an issue where
# readline causes backspace to clear the whole line.
return f(" ")
except (KeyboardInterrupt, EOFError):
# getpass doesn't print a newline if the user aborts input with ^C.
# Allegedly this behavior is inherited from getpass(3).
# A doc bug has been filed at https://bugs.python.org/issue24711
if hide_input:
echo(None, err=err)
raise Abort() from None
if value_proc is None:
value_proc = convert_type(type, default)
prompt = _build_prompt(
text, prompt_suffix, show_default, default, show_choices, type
)
if confirmation_prompt:
if confirmation_prompt is True:
confirmation_prompt = _("Repeat for confirmation")
confirmation_prompt = _build_prompt(confirmation_prompt, prompt_suffix)
while True:
while True:
value = prompt_func(prompt)
if value:
break
elif default is not None:
value = default
break
try:
result = value_proc(value)
except UsageError as e:
if hide_input:
echo(_("Error: The value you entered was invalid."), err=err)
else:
echo(_("Error: {e.message}").format(e=e), err=err) # noqa: B306
continue
if not confirmation_prompt:
return result
while True:
value2 = prompt_func(confirmation_prompt)
is_empty = not value and not value2
if value2 or is_empty:
break
if value == value2:
return result
echo(_("Error: The two entered values do not match."), err=err)
def confirm(
text: str,
default: t.Optional[bool] = False,
abort: bool = False,
prompt_suffix: str = ": ",
show_default: bool = True,
err: bool = False,
) -> bool:
"""Prompts for confirmation (yes/no question).
    If the user aborts the input by sending an interrupt signal, this
function will catch it and raise a :exc:`Abort` exception.
:param text: the question to ask.
:param default: The default value to use when no input is given. If
``None``, repeat until input is given.
:param abort: if this is set to `True` a negative answer aborts the
exception by raising :exc:`Abort`.
:param prompt_suffix: a suffix that should be added to the prompt.
:param show_default: shows or hides the default value in the prompt.
:param err: if set to true the file defaults to ``stderr`` instead of
``stdout``, the same as with echo.
.. versionchanged:: 8.0
Repeat until input is given if ``default`` is ``None``.
.. versionadded:: 4.0
Added the ``err`` parameter.
"""
prompt = _build_prompt(
text,
prompt_suffix,
show_default,
"y/n" if default is None else ("Y/n" if default else "y/N"),
)
while True:
try:
# Write the prompt separately so that we get nice
# coloring through colorama on Windows
echo(prompt.rstrip(" "), nl=False, err=err)
# Echo a space to stdout to work around an issue where
# readline causes backspace to clear the whole line.
value = visible_prompt_func(" ").lower().strip()
except (KeyboardInterrupt, EOFError):
raise Abort() from None
if value in ("y", "yes"):
rv = True
elif value in ("n", "no"):
rv = False
elif default is not None and value == "":
rv = default
else:
echo(_("Error: invalid input"), err=err)
continue
break
if abort and not rv:
raise Abort()
return rv
def echo_via_pager(
text_or_generator: t.Union[t.Iterable[str], t.Callable[[], t.Iterable[str]], str],
color: t.Optional[bool] = None,
) -> None:
"""This function takes a text and shows it via an environment specific
pager on stdout.
.. versionchanged:: 3.0
Added the `color` flag.
:param text_or_generator: the text to page, or alternatively, a
generator emitting the text to page.
:param color: controls if the pager supports ANSI colors or not. The
default is autodetection.
"""
color = resolve_color_default(color)
if inspect.isgeneratorfunction(text_or_generator):
i = t.cast(t.Callable[[], t.Iterable[str]], text_or_generator)()
elif isinstance(text_or_generator, str):
i = [text_or_generator]
else:
i = iter(t.cast(t.Iterable[str], text_or_generator))
# convert every element of i to a text type if necessary
text_generator = (el if isinstance(el, str) else str(el) for el in i)
from ._termui_impl import pager
return pager(itertools.chain(text_generator, "\n"), color)
def progressbar(
iterable: t.Optional[t.Iterable[V]] = None,
length: t.Optional[int] = None,
label: t.Optional[str] = None,
show_eta: bool = True,
show_percent: t.Optional[bool] = None,
show_pos: bool = False,
item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None,
fill_char: str = "#",
empty_char: str = "-",
bar_template: str = "%(label)s [%(bar)s] %(info)s",
info_sep: str = " ",
width: int = 36,
file: t.Optional[t.TextIO] = None,
color: t.Optional[bool] = None,
update_min_steps: int = 1,
) -> "ProgressBar[V]":
"""This function creates an iterable context manager that can be used
to iterate over something while showing a progress bar. It will
either iterate over the `iterable` or `length` items (that are counted
up). While iteration happens, this function will print a rendered
progress bar to the given `file` (defaults to stdout) and will attempt
to calculate remaining time and more. By default, this progress bar
will not be rendered if the file is not a terminal.
The context manager creates the progress bar. When the context
manager is entered the progress bar is already created. With every
iteration over the progress bar, the iterable passed to the bar is
advanced and the bar is updated. When the context manager exits,
a newline is printed and the progress bar is finalized on screen.
Note: The progress bar is currently designed for use cases where the
total progress can be expected to take at least several seconds.
Because of this, the ProgressBar class object won't display
progress that is considered too fast, and progress where the time
between steps is less than a second.
No printing must happen or the progress bar will be unintentionally
destroyed.
Example usage::
with progressbar(items) as bar:
for item in bar:
do_something_with(item)
Alternatively, if no iterable is specified, one can manually update the
progress bar through the `update()` method instead of directly
iterating over the progress bar. The update method accepts the number
of steps to increment the bar with::
with progressbar(length=chunks.total_bytes) as bar:
for chunk in chunks:
process_chunk(chunk)
bar.update(chunks.bytes)
The ``update()`` method also takes an optional value specifying the
``current_item`` at the new position. This is useful when used
together with ``item_show_func`` to customize the output for each
manual step::
with click.progressbar(
length=total_size,
label='Unzipping archive',
item_show_func=lambda a: a.filename
) as bar:
for archive in zip_file:
archive.extract()
bar.update(archive.size, archive)
:param iterable: an iterable to iterate over. If not provided the length
is required.
:param length: the number of items to iterate over. By default the
progressbar will attempt to ask the iterator about its
length, which might or might not work. If an iterable is
also provided this parameter can be used to override the
length. If an iterable is not provided the progress bar
will iterate over a range of that length.
:param label: the label to show next to the progress bar.
:param show_eta: enables or disables the estimated time display. This is
automatically disabled if the length cannot be
determined.
:param show_percent: enables or disables the percentage display. The
default is `True` if the iterable has a length or
`False` if not.
:param show_pos: enables or disables the absolute position display. The
default is `False`.
:param item_show_func: A function called with the current item which
can return a string to show next to the progress bar. If the
function returns ``None`` nothing is shown. The current item can
be ``None``, such as when entering and exiting the bar.
:param fill_char: the character to use to show the filled part of the
progress bar.
:param empty_char: the character to use to show the non-filled part of
the progress bar.
:param bar_template: the format string to use as template for the bar.
The parameters in it are ``label`` for the label,
``bar`` for the progress bar and ``info`` for the
info section.
:param info_sep: the separator between multiple info items (eta etc.)
:param width: the width of the progress bar in characters, 0 means full
terminal width
:param file: The file to write to. If this is not a terminal then
only the label is printed.
:param color: controls if the terminal supports ANSI colors or not. The
default is autodetection. This is only needed if ANSI
codes are included anywhere in the progress bar output
which is not the case by default.
:param update_min_steps: Render only when this many updates have
completed. This allows tuning for very fast iterators.
.. versionchanged:: 8.0
Output is shown even if execution time is less than 0.5 seconds.
.. versionchanged:: 8.0
``item_show_func`` shows the current item, not the previous one.
.. versionchanged:: 8.0
Labels are echoed if the output is not a TTY. Reverts a change
in 7.0 that removed all output.
.. versionadded:: 8.0
Added the ``update_min_steps`` parameter.
.. versionchanged:: 4.0
Added the ``color`` parameter. Added the ``update`` method to
the object.
.. versionadded:: 2.0
"""
from ._termui_impl import ProgressBar
color = resolve_color_default(color)
return ProgressBar(
iterable=iterable,
length=length,
show_eta=show_eta,
show_percent=show_percent,
show_pos=show_pos,
item_show_func=item_show_func,
fill_char=fill_char,
empty_char=empty_char,
bar_template=bar_template,
info_sep=info_sep,
file=file,
label=label,
width=width,
color=color,
update_min_steps=update_min_steps,
)
def clear() -> None:
"""Clears the terminal screen. This will have the effect of clearing
the whole visible space of the terminal and moving the cursor to the
top left. This does not do anything if not connected to a terminal.
.. versionadded:: 2.0
"""
if not isatty(sys.stdout):
return
if WIN:
os.system("cls")
else:
sys.stdout.write("\033[2J\033[1;1H")
def _interpret_color(
color: t.Union[int, t.Tuple[int, int, int], str], offset: int = 0
) -> str:
if isinstance(color, int):
return f"{38 + offset};5;{color:d}"
if isinstance(color, (tuple, list)):
r, g, b = color
return f"{38 + offset};2;{r:d};{g:d};{b:d}"
return str(_ansi_colors[color] + offset)
def style(
text: t.Any,
fg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,
bg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,
bold: t.Optional[bool] = None,
dim: t.Optional[bool] = None,
underline: t.Optional[bool] = None,
overline: t.Optional[bool] = None,
italic: t.Optional[bool] = None,
blink: t.Optional[bool] = None,
reverse: t.Optional[bool] = None,
strikethrough: t.Optional[bool] = None,
reset: bool = True,
) -> str:
"""Styles a text with ANSI styles and returns the new string. By
default the styling is self contained which means that at the end
of the string a reset code is issued. This can be prevented by
passing ``reset=False``.
Examples::
click.echo(click.style('Hello World!', fg='green'))
click.echo(click.style('ATTENTION!', blink=True))
click.echo(click.style('Some things', reverse=True, fg='cyan'))
click.echo(click.style('More colors', fg=(255, 12, 128), bg=117))
Supported color names:
* ``black`` (might be a gray)
* ``red``
* ``green``
* ``yellow`` (might be an orange)
* ``blue``
* ``magenta``
* ``cyan``
* ``white`` (might be light gray)
* ``bright_black``
* ``bright_red``
* ``bright_green``
* ``bright_yellow``
* ``bright_blue``
* ``bright_magenta``
* ``bright_cyan``
* ``bright_white``
* ``reset`` (reset the color code only)
If the terminal supports it, color may also be specified as:
- An integer in the interval [0, 255]. The terminal must support
8-bit/256-color mode.
- An RGB tuple of three integers in [0, 255]. The terminal must
support 24-bit/true-color mode.
See https://en.wikipedia.org/wiki/ANSI_color and
https://gist.github.com/XVilka/8346728 for more information.
:param text: the string to style with ansi codes.
:param fg: if provided this will become the foreground color.
:param bg: if provided this will become the background color.
:param bold: if provided this will enable or disable bold mode.
:param dim: if provided this will enable or disable dim mode. This is
badly supported.
:param underline: if provided this will enable or disable underline.
:param overline: if provided this will enable or disable overline.
:param italic: if provided this will enable or disable italic.
:param blink: if provided this will enable or disable blinking.
:param reverse: if provided this will enable or disable inverse
rendering (foreground becomes background and the
other way round).
:param strikethrough: if provided this will enable or disable
striking through text.
:param reset: by default a reset-all code is added at the end of the
string which means that styles do not carry over. This
can be disabled to compose styles.
.. versionchanged:: 8.0
A non-string ``message`` is converted to a string.
.. versionchanged:: 8.0
Added support for 256 and RGB color codes.
.. versionchanged:: 8.0
Added the ``strikethrough``, ``italic``, and ``overline``
parameters.
.. versionchanged:: 7.0
Added support for bright colors.
.. versionadded:: 2.0
"""
if not isinstance(text, str):
text = str(text)
bits = []
if fg:
try:
bits.append(f"\033[{_interpret_color(fg)}m")
except KeyError:
raise TypeError(f"Unknown color {fg!r}") from None
if bg:
try:
bits.append(f"\033[{_interpret_color(bg, 10)}m")
except KeyError:
raise TypeError(f"Unknown color {bg!r}") from None
if bold is not None:
bits.append(f"\033[{1 if bold else 22}m")
if dim is not None:
bits.append(f"\033[{2 if dim else 22}m")
if underline is not None:
bits.append(f"\033[{4 if underline else 24}m")
if overline is not None:
bits.append(f"\033[{53 if overline else 55}m")
if italic is not None:
bits.append(f"\033[{3 if italic else 23}m")
if blink is not None:
bits.append(f"\033[{5 if blink else 25}m")
if reverse is not None:
bits.append(f"\033[{7 if reverse else 27}m")
if strikethrough is not None:
bits.append(f"\033[{9 if strikethrough else 29}m")
bits.append(text)
if reset:
bits.append(_ansi_reset_all)
return "".join(bits)
def unstyle(text: str) -> str:
"""Removes ANSI styling information from a string. Usually it's not
necessary to use this function as Click's echo function will
automatically remove styling if necessary.
.. versionadded:: 2.0
:param text: the text to remove style information from.
"""
return strip_ansi(text)
def secho(
message: t.Optional[t.Any] = None,
file: t.Optional[t.IO[t.AnyStr]] = None,
nl: bool = True,
err: bool = False,
color: t.Optional[bool] = None,
**styles: t.Any,
) -> None:
"""This function combines :func:`echo` and :func:`style` into one
call. As such the following two calls are the same::
click.secho('Hello World!', fg='green')
click.echo(click.style('Hello World!', fg='green'))
All keyword arguments are forwarded to the underlying functions
depending on which one they go with.
Non-string types will be converted to :class:`str`. However,
:class:`bytes` are passed directly to :meth:`echo` without applying
style. If you want to style bytes that represent text, call
:meth:`bytes.decode` first.
.. versionchanged:: 8.0
A non-string ``message`` is converted to a string. Bytes are
passed through without style applied.
.. versionadded:: 2.0
"""
if message is not None and not isinstance(message, (bytes, bytearray)):
message = style(message, **styles)
return echo(message, file=file, nl=nl, err=err, color=color)
def edit(
text: t.Optional[t.AnyStr] = None,
editor: t.Optional[str] = None,
env: t.Optional[t.Mapping[str, str]] = None,
require_save: bool = True,
extension: str = ".txt",
filename: t.Optional[str] = None,
) -> t.Optional[t.AnyStr]:
r"""Edits the given text in the defined editor. If an editor is given
(should be the full path to the executable but the regular operating
system search path is used for finding the executable) it overrides
the detected editor. Optionally, some environment variables can be
used. If the editor is closed without changes, `None` is returned. In
case a file is edited directly the return value is always `None` and
`require_save` and `extension` are ignored.
If the editor cannot be opened a :exc:`UsageError` is raised.
Note for Windows: to simplify cross-platform usage, the newlines are
automatically converted from POSIX to Windows and vice versa. As such,
the message here will have ``\n`` as newline markers.
:param text: the text to edit.
:param editor: optionally the editor to use. Defaults to automatic
detection.
:param env: environment variables to forward to the editor.
:param require_save: if this is true, then not saving in the editor
will make the return value become `None`.
:param extension: the extension to tell the editor about. This defaults
to `.txt` but changing this might change syntax
highlighting.
:param filename: if provided it will edit this file instead of the
provided text contents. It will not use a temporary
file as an indirection in that case.
"""
from ._termui_impl import Editor
ed = Editor(editor=editor, env=env, require_save=require_save, extension=extension)
if filename is None:
return ed.edit(text)
ed.edit_file(filename)
return None
def launch(url: str, wait: bool = False, locate: bool = False) -> int:
"""This function launches the given URL (or filename) in the default
viewer application for this file type. If this is an executable, it
might launch the executable in a new session. The return value is
the exit code of the launched application. Usually, ``0`` indicates
success.
Examples::
click.launch('https://click.palletsprojects.com/')
click.launch('/my/downloaded/file', locate=True)
.. versionadded:: 2.0
:param url: URL or filename of the thing to launch.
:param wait: Wait for the program to exit before returning. This
only works if the launched program blocks. In particular,
``xdg-open`` on Linux does not block.
:param locate: if this is set to `True` then instead of launching the
application associated with the URL it will attempt to
launch a file manager with the file located. This
might have weird effects if the URL does not point to
the filesystem.
"""
from ._termui_impl import open_url
return open_url(url, wait=wait, locate=locate)
# If this is provided, getchar() calls into this instead. This is used
# for unittesting purposes.
_getchar: t.Optional[t.Callable[[bool], str]] = None
def getchar(echo: bool = False) -> str:
"""Fetches a single character from the terminal and returns it. This
will always return a unicode character and under certain rare
circumstances this might return more than one character. The
    situations in which more than one character is returned are when, for
    whatever reason, multiple characters end up in the terminal buffer or
    standard input was not actually a terminal.
Note that this will always read from the terminal, even if something
is piped into the standard input.
Note for Windows: in rare cases when typing non-ASCII characters, this
function might wait for a second character and then return both at once.
This is because certain Unicode characters look like special-key markers.
.. versionadded:: 2.0
:param echo: if set to `True`, the character read will also show up on
the terminal. The default is to not show it.
"""
global _getchar
if _getchar is None:
from ._termui_impl import getchar as f
_getchar = f
return _getchar(echo)
def raw_terminal() -> t.ContextManager[int]:
from ._termui_impl import raw_terminal as f
return f()
def pause(info: t.Optional[str] = None, err: bool = False) -> None:
"""This command stops execution and waits for the user to press any
key to continue. This is similar to the Windows batch "pause"
command. If the program is not run through a terminal, this command
will instead do nothing.
.. versionadded:: 2.0
.. versionadded:: 4.0
Added the `err` parameter.
:param info: The message to print before pausing. Defaults to
``"Press any key to continue..."``.
    :param err: if set to true the message goes to ``stderr`` instead of
                ``stdout``, the same as with echo.
"""
if not isatty(sys.stdin) or not isatty(sys.stdout):
return
if info is None:
info = _("Press any key to continue...")
try:
if info:
echo(info, nl=False, err=err)
try:
getchar()
except (KeyboardInterrupt, EOFError):
pass
finally:
if info:
echo(err=err)
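
# --- Editor's note: illustrative usage sketch, not part of the click source ---
# The functions documented above are normally called through the top-level click
# package. The helper below shows prompt(), confirm(), secho() and progressbar()
# together; it only defines a function, so appending it here has no side effects.
def _termui_usage_demo() -> None:
    """Hedged usage sketch of the APIs documented in this module."""
    import click  # local import: illustrative only

    name = click.prompt("Your name", default="world")
    if click.confirm("Greet loudly?", default=True):
        click.secho(f"HELLO {name.upper()}!", fg="green", bold=True)
    with click.progressbar(range(100), label="Working") as bar:
        for _ in bar:
            pass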
35.983503 | 87 | 0.63502

b2f5da5ad6530be6b3e368b41a31324a162c09ef | 4,982 | py | Python
lib/laser_helper.py | jzx-gooner/Structured-Light-Scanner | a5762e193ef040e1d88393c390c3afa1be43b175 | ["MIT"] | 6 | 2021-11-11T02:32:55.000Z | 2022-02-28T12:38:54.000Z
lib/laser_helper.py | jzx-gooner/Structured-Light-Scanner | a5762e193ef040e1d88393c390c3afa1be43b175 | ["MIT"] | null | null | null
lib/laser_helper.py | jzx-gooner/Structured-Light-Scanner | a5762e193ef040e1d88393c390c3afa1be43b175 | ["MIT"] | null | null | null
# Steger algorithm for edge/line extraction
# Author : Munch Quentin, 2020
# General and computer vision lib
import numpy as np
import cv2
from matplotlib import pyplot as plt
from matplotlib import pyplot
def computeDerivative(img, sigmaX, sigmaY):
# blurr the image
img = cv2.GaussianBlur(img, ksize=(0,0), sigmaX=sigmaX, sigmaY=sigmaY)
    # create filters for derivative calculation
dxFilter = np.array([[1],[0],[-1]])
dyFilter = np.array([[1,0,-1]])
dxxFilter = np.array([[1],[-2],[1]])
dyyFilter = np.array([[1,-2,1]])
dxyFilter = np.array([[1,-1],[-1,1]])
# compute derivative
dx = cv2.filter2D(img,-1, dxFilter)
dy = cv2.filter2D(img,-1, dyFilter)
dxx = cv2.filter2D(img,-1, dxxFilter)
dyy = cv2.filter2D(img,-1, dyyFilter)
dxy = cv2.filter2D(img,-1, dxyFilter)
return dx, dy, dxx, dyy, dxy
def computeMagnitude(dxx, dyy):
# convert to float
dxx = dxx.astype(float)
dyy = dyy.astype(float)
    # calculate magnitude and orientation (in degrees)
    mag = cv2.magnitude(dxx, dyy)
    phase = cv2.phase(dxx, dyy, angleInDegrees=True)
return mag, phase
def nonMaxSuppression(det, phase):
# gradient max init
gmax = np.zeros(det.shape)
    # thin out every edge for angle in {0, 45, 90, 135} degrees
for i in range(gmax.shape[0]):
for j in range(gmax.shape[1]):
if phase[i][j] < 0:
phase[i][j] += 360
if ((j+1) < gmax.shape[1]) and ((j-1) >= 0) and ((i+1) < gmax.shape[0]) and ((i-1) >= 0):
# 0 degrees
if (phase[i][j] >= 337.5 or phase[i][j] < 22.5) or (phase[i][j] >= 157.5 and phase[i][j] < 202.5):
if det[i][j] >= det[i][j + 1] and det[i][j] >= det[i][j - 1]:
gmax[i][j] = det[i][j]
# 45 degrees
if (phase[i][j] >= 22.5 and phase[i][j] < 67.5) or (phase[i][j] >= 202.5 and phase[i][j] < 247.5):
if det[i][j] >= det[i - 1][j + 1] and det[i][j] >= det[i + 1][j - 1]:
gmax[i][j] = det[i][j]
# 90 degrees
if (phase[i][j] >= 67.5 and phase[i][j] < 112.5) or (phase[i][j] >= 247.5 and phase[i][j] < 292.5):
if det[i][j] >= det[i - 1][j] and det[i][j] >= det[i + 1][j]:
gmax[i][j] = det[i][j]
# 135 degrees
if (phase[i][j] >= 112.5 and phase[i][j] < 157.5) or (phase[i][j] >= 292.5 and phase[i][j] < 337.5):
if det[i][j] >= det[i - 1][j - 1] and det[i][j] >= det[i + 1][j + 1]:
gmax[i][j] = det[i][j]
return gmax
def computeHessian(dx, dy, dxx, dyy, dxy):
# create empty list
point=[]
direction=[]
value=[]
    # for the whole image (use dx shape instead of relying on the global img)
    for x in range(0, dx.shape[1]): # column
        for y in range(0, dx.shape[0]): # line
            # if the thinned gradient magnitude is above zero
            if dxy[y,x] > 0:
# compute local hessian
hessian = np.zeros((2,2))
hessian[0,0] = dxx[y,x]
hessian[0,1] = dxy[y,x]
hessian[1,0] = dxy[y,x]
hessian[1,1] = dyy[y,x]
                # compute eigenvectors and eigenvalues
ret, eigenVal, eigenVect = cv2.eigen(hessian)
if np.abs(eigenVal[0,0]) >= np.abs(eigenVal[1,0]):
nx = eigenVect[0,0]
ny = eigenVect[0,1]
else:
nx = eigenVect[1,0]
ny = eigenVect[1,1]
                # calculate denominator for the Taylor polynomial expansion
denom = dxx[y,x]*nx*nx + dyy[y,x]*ny*ny + 2*dxy[y,x]*nx*ny
# verify non zero denom
if denom != 0:
T = -(dx[y,x]*nx + dy[y,x]*ny)/denom
# update point
if np.abs(T*nx) <= 0.5 and np.abs(T*ny) <= 0.5:
point.append((x,y))
direction.append((nx,ny))
value.append(np.abs(dxy[y,x]+dxy[y,x]))
return point, direction, value
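# Note on computeHessian above (descriptive comment, not in the original
# source): (nx, ny) is the Hessian eigenvector with the largest absolute
# eigenvalue, i.e. the direction across the line. The sub-pixel offset along
# that direction is
#   T = -(dx*nx + dy*ny) / (dxx*nx**2 + 2*dxy*nx*ny + dyy*ny**2)
# which is where the first directional derivative of the local second-order
# Taylor expansion vanishes. A point is kept only when |T*nx| <= 0.5 and
# |T*ny| <= 0.5, i.e. the estimated line centre falls inside the current pixel.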
# resize, grayscale and blur
img = cv2.imread("../aha1.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (240,240))
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# compute derivative
dx, dy, dxx, dyy, dxy = computeDerivative(gray_img, 1.1, 1.1)
normal, phase = computeMagnitude(dxx, dyy)
# compute thin-out image normal
dxy = nonMaxSuppression(normal, phase)
pt, dir, val = computeHessian(dx, dy, dxx, dyy, dxy)
# take the first n max values
nMax = 1000
idx = np.argsort(val)
idx = idx[::-1][:nMax]
# plot resulting point
for i in range(0, len(idx)):
img = cv2.circle(img, (pt[idx[i]][0], pt[idx[i]][1]), 1, (255, 0, 0), 1)
# plot the result
plt.imshow(dx)
plt.show()
plt.imshow(dy)
plt.show()
plt.imshow(dxx)
plt.show()
plt.imshow(dyy)
plt.show()
plt.imshow(dxy)
plt.show()
plt.imshow(normal)
plt.show()
plt.imshow(phase)
plt.show()
plt.imshow(img)
plt.show()
| 36.364964
| 116
| 0.520072
|
4f5672351c7d2b20c4c09d185b944b77870ff4f8
| 11,938
|
py
|
Python
|
circus/config.py
|
asteven/circus
|
278ff49c1f941177465056350512282b233e8ccf
|
[
"Apache-2.0"
] | null | null | null |
circus/config.py
|
asteven/circus
|
278ff49c1f941177465056350512282b233e8ccf
|
[
"Apache-2.0"
] | null | null | null |
circus/config.py
|
asteven/circus
|
278ff49c1f941177465056350512282b233e8ccf
|
[
"Apache-2.0"
] | null | null | null |
import glob
import os
import signal
import warnings
from fnmatch import fnmatch
try:
import resource
except ImportError:
resource = None # NOQA
from circus import logger
from circus.py3compat import sort_by_field
from circus.util import (DEFAULT_ENDPOINT_DEALER, DEFAULT_ENDPOINT_SUB,
DEFAULT_ENDPOINT_MULTICAST, DEFAULT_ENDPOINT_STATS,
StrictConfigParser, replace_gnu_args, to_signum,
to_bool, papa)
def watcher_defaults():
return {
'name': '',
'cmd': '',
'args': '',
'numprocesses': 1,
'warmup_delay': 0,
'executable': None,
'working_dir': None,
'shell': False,
'uid': None,
'gid': None,
'send_hup': False,
'stop_signal': signal.SIGTERM,
'stop_children': False,
'max_retry': 5,
'graceful_timeout': 30,
'rlimits': dict(),
'stderr_stream': dict(),
'stdout_stream': dict(),
'priority': 0,
'use_sockets': False,
'singleton': False,
'copy_env': False,
'copy_path': False,
'hooks': dict(),
'respawn': True,
'autostart': True,
'use_papa': False}
class DefaultConfigParser(StrictConfigParser):
def __init__(self, *args, **kw):
StrictConfigParser.__init__(self, *args, **kw)
self._env = dict(os.environ)
def set_env(self, env):
self._env = dict(env)
def get(self, section, option, **kwargs):
res = StrictConfigParser.get(self, section, option, **kwargs)
return replace_gnu_args(res, env=self._env)
def items(self, section, noreplace=False):
items = StrictConfigParser.items(self, section)
if noreplace:
return items
return [(key, replace_gnu_args(value, env=self._env))
for key, value in items]
def dget(self, section, option, default=None, type=str):
if not self.has_option(section, option):
return default
value = self.get(section, option)
if type is int:
value = int(value)
elif type is bool:
value = to_bool(value)
elif type is float:
value = float(value)
elif type is not str:
raise NotImplementedError()
return value
def rlimit_value(val):
if resource is not None and (val is None or len(val) == 0):
return resource.RLIM_INFINITY
else:
return int(val)
def read_config(config_path):
cfg = DefaultConfigParser()
with open(config_path) as f:
if hasattr(cfg, 'read_file'):
cfg.read_file(f)
else:
cfg.readfp(f)
current_dir = os.path.dirname(config_path)
# load included config files
includes = []
def _scan(filename, includes):
if os.path.abspath(filename) != filename:
filename = os.path.join(current_dir, filename)
paths = glob.glob(filename)
if paths == []:
logger.warn('%r does not lead to any config. Make sure '
'include paths are relative to the main config '
'file' % filename)
includes += paths
for include_file in cfg.dget('circus', 'include', '').split():
_scan(include_file, includes)
for include_dir in cfg.dget('circus', 'include_dir', '').split():
_scan(os.path.join(include_dir, '*.ini'), includes)
logger.debug('Reading config files: %s' % includes)
return cfg, [config_path] + cfg.read(includes)
def get_config(config_file):
if not os.path.exists(config_file):
raise IOError("the configuration file %r does not exist\n" %
config_file)
cfg, cfg_files_read = read_config(config_file)
dget = cfg.dget
config = {}
# reading the global environ first
global_env = dict(os.environ.items())
local_env = dict()
# update environments with [env] section
if 'env' in cfg.sections():
local_env.update(dict(cfg.items('env')))
global_env.update(local_env)
# always set the cfg environment
cfg.set_env(global_env)
# main circus options
config['check_delay'] = dget('circus', 'check_delay', 5., float)
config['endpoint'] = dget('circus', 'endpoint', DEFAULT_ENDPOINT_DEALER)
config['endpoint_owner'] = dget('circus', 'endpoint_owner', None, str)
config['pubsub_endpoint'] = dget('circus', 'pubsub_endpoint',
DEFAULT_ENDPOINT_SUB)
config['multicast_endpoint'] = dget('circus', 'multicast_endpoint',
DEFAULT_ENDPOINT_MULTICAST)
config['stats_endpoint'] = dget('circus', 'stats_endpoint', None)
config['statsd'] = dget('circus', 'statsd', False, bool)
config['umask'] = dget('circus', 'umask', None)
if config['umask']:
config['umask'] = int(config['umask'], 8)
if config['stats_endpoint'] is None:
config['stats_endpoint'] = DEFAULT_ENDPOINT_STATS
elif not config['statsd']:
warnings.warn("You defined a stats_endpoint without "
"setting up statsd to True.",
DeprecationWarning)
config['statsd'] = True
config['warmup_delay'] = dget('circus', 'warmup_delay', 0, int)
config['httpd'] = dget('circus', 'httpd', False, bool)
config['httpd_host'] = dget('circus', 'httpd_host', 'localhost', str)
config['httpd_port'] = dget('circus', 'httpd_port', 8080, int)
config['debug'] = dget('circus', 'debug', False, bool)
config['debug_gc'] = dget('circus', 'debug_gc', False, bool)
config['pidfile'] = dget('circus', 'pidfile')
config['loglevel'] = dget('circus', 'loglevel')
config['logoutput'] = dget('circus', 'logoutput')
config['loggerconfig'] = dget('circus', 'loggerconfig', None)
config['fqdn_prefix'] = dget('circus', 'fqdn_prefix', None, str)
    config['papa_endpoint'] = dget('circus', 'papa_endpoint', None, str)
# Initialize watchers, plugins & sockets to manage
watchers = []
plugins = []
sockets = []
for section in cfg.sections():
if section.startswith("socket:"):
sock = dict(cfg.items(section))
sock['name'] = section.split("socket:")[-1].lower()
sock['so_reuseport'] = dget(section, "so_reuseport", False, bool)
sock['replace'] = dget(section, "replace", False, bool)
sockets.append(sock)
if section.startswith("plugin:"):
plugin = dict(cfg.items(section))
plugin['name'] = section
if 'priority' in plugin:
plugin['priority'] = int(plugin['priority'])
plugins.append(plugin)
if section.startswith("watcher:"):
watcher = watcher_defaults()
watcher['name'] = section.split("watcher:", 1)[1]
# create watcher options
for opt, val in cfg.items(section, noreplace=True):
if opt in ('cmd', 'args', 'working_dir', 'uid', 'gid'):
watcher[opt] = val
elif opt == 'numprocesses':
watcher['numprocesses'] = dget(section, 'numprocesses', 1,
int)
elif opt == 'warmup_delay':
watcher['warmup_delay'] = dget(section, 'warmup_delay', 0,
int)
elif opt == 'executable':
watcher['executable'] = dget(section, 'executable', None,
str)
# default bool to False
elif opt in ('shell', 'send_hup', 'stop_children',
'close_child_stderr', 'use_sockets', 'singleton',
'copy_env', 'copy_path', 'close_child_stdout'):
watcher[opt] = dget(section, opt, False, bool)
elif opt == 'stop_signal':
watcher['stop_signal'] = to_signum(val)
elif opt == 'max_retry':
watcher['max_retry'] = dget(section, "max_retry", 5, int)
elif opt == 'graceful_timeout':
watcher['graceful_timeout'] = dget(
section, "graceful_timeout", 30, int)
elif opt.startswith('stderr_stream') or \
opt.startswith('stdout_stream'):
stream_name, stream_opt = opt.split(".", 1)
watcher[stream_name][stream_opt] = val
elif opt.startswith('rlimit_'):
limit = opt[7:]
watcher['rlimits'][limit] = rlimit_value(val)
elif opt == 'priority':
watcher['priority'] = dget(section, "priority", 0, int)
elif opt == 'use_papa' and dget(section, 'use_papa', False,
bool):
if papa:
watcher['use_papa'] = True
else:
warnings.warn("Config file says use_papa but the papa "
"module is missing.",
ImportWarning)
elif opt.startswith('hooks.'):
hook_name = opt[len('hooks.'):]
val = [elmt.strip() for elmt in val.split(',', 1)]
if len(val) == 1:
val.append(False)
else:
val[1] = to_bool(val[1])
watcher['hooks'][hook_name] = val
# default bool to True
elif opt in ('check_flapping', 'respawn', 'autostart',
'close_child_stdin'):
watcher[opt] = dget(section, opt, True, bool)
else:
# freeform
watcher[opt] = val
if watcher['copy_env']:
watcher['env'] = dict(global_env)
else:
watcher['env'] = dict(local_env)
watchers.append(watcher)
# making sure we return consistent lists
sort_by_field(watchers)
sort_by_field(plugins)
sort_by_field(sockets)
# Second pass to make sure env sections apply to all watchers.
def _extend(target, source):
for name, value in source:
if name in target:
continue
target[name] = value
def _expand_vars(target, key, env):
if isinstance(target[key], str):
target[key] = replace_gnu_args(target[key], env=env)
elif isinstance(target[key], dict):
for k in target[key].keys():
_expand_vars(target[key], k, env)
def _expand_section(section, env, exclude=None):
if exclude is None:
exclude = ('name', 'env')
for option in section.keys():
if option in exclude:
continue
_expand_vars(section, option, env)
# build environment for watcher sections
for section in cfg.sections():
if section.startswith('env:'):
section_elements = section.split("env:", 1)[1]
watcher_patterns = [s.strip() for s in section_elements.split(',')]
env_items = dict(cfg.items(section, noreplace=True))
for pattern in watcher_patterns:
match = [w for w in watchers if fnmatch(w['name'], pattern)]
for watcher in match:
watcher['env'].update(env_items)
# expand environment for watcher sections
for watcher in watchers:
env = dict(global_env)
env.update(watcher['env'])
_expand_section(watcher, env)
config['watchers'] = watchers
config['plugins'] = plugins
config['sockets'] = sockets
return config
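# Hypothetical usage sketch (not part of the original module): a minimal ini
# file and how the parsed result is consumed. Section and option names mirror
# the parsing code above; the concrete values are illustrative only.
#
#   [circus]
#   check_delay = 5
#   endpoint = tcp://127.0.0.1:5555
#
#   [watcher:webapp]
#   cmd = python -m myapp
#   numprocesses = 2
#
#   config = get_config("circus.ini")
#   for watcher in config["watchers"]:
#       print(watcher["name"], watcher["numprocesses"])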
| 36.396341
| 79
| 0.544647
|
f0f06b6343742be6962e754ffc4ba99b89ad3d11
| 416
|
py
|
Python
|
twodimexamples/test.py
|
hpgit/HumanFoot
|
f9a1a341b7c43747bddcd5584b8c98a0d1ac2973
|
[
"Apache-2.0"
] | null | null | null |
twodimexamples/test.py
|
hpgit/HumanFoot
|
f9a1a341b7c43747bddcd5584b8c98a0d1ac2973
|
[
"Apache-2.0"
] | null | null | null |
twodimexamples/test.py
|
hpgit/HumanFoot
|
f9a1a341b7c43747bddcd5584b8c98a0d1ac2973
|
[
"Apache-2.0"
] | null | null | null |
import pygame
from pygame.locals import (QUIT, KEYDOWN, K_ESCAPE)
import Box2D
from Box2D.b2 import (world, polygonShape, staticBody, dynamicBody)
pygame.display.set_mode((1280, 960), 0, 32)
playing = True
while playing:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
playing = False
pygame.display.flip()
pygame.quit()
| 21.894737
| 83
| 0.692308
|
3f0f38b1ee8a1bf01c6fc45dc4b52e26c5e4aa10
| 797
|
py
|
Python
|
appengine/logging/writing_logs/main_test.py
|
117null/python-docs-samples
|
c77d0cf5ac860d37e6c7ffb38a089f54f6ec3aaa
|
[
"Apache-2.0"
] | 1
|
2018-09-24T04:54:26.000Z
|
2018-09-24T04:54:26.000Z
|
appengine/standard/logging/writing_logs/main_test.py
|
DalavanCloud/python-docs-samples
|
439ca4c552940284743f5f22a590cc4b6dae1bef
|
[
"Apache-2.0"
] | 2
|
2021-06-10T23:54:32.000Z
|
2021-06-10T23:54:33.000Z
|
appengine/standard/logging/writing_logs/main_test.py
|
DalavanCloud/python-docs-samples
|
439ca4c552940284743f5f22a590cc4b6dae1bef
|
[
"Apache-2.0"
] | 1
|
2018-09-24T04:53:12.000Z
|
2018-09-24T04:53:12.000Z
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import main
import webtest
def test_app(testbed):
app = webtest.TestApp(main.app)
response = app.get('/')
assert response.status_int == 200
assert 'Logging example' in response.text
| 33.208333
| 74
| 0.74655
|
89589df89b855bab6906a1cb39bf3b2e83fe0161
| 728
|
py
|
Python
|
test/test_trait_api.py
|
max-bytes/omnikeeper-client-python
|
0e74e7f86636e908ac0161d907892480a2998eda
|
[
"Apache-2.0"
] | null | null | null |
test/test_trait_api.py
|
max-bytes/omnikeeper-client-python
|
0e74e7f86636e908ac0161d907892480a2998eda
|
[
"Apache-2.0"
] | null | null | null |
test/test_trait_api.py
|
max-bytes/omnikeeper-client-python
|
0e74e7f86636e908ac0161d907892480a2998eda
|
[
"Apache-2.0"
] | null | null | null |
"""
Landscape omnikeeper REST API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import unittest
import okclient
from okclient.api.trait_api import TraitApi # noqa: E501
class TestTraitApi(unittest.TestCase):
"""TraitApi unit test stubs"""
def setUp(self):
self.api = TraitApi() # noqa: E501
def tearDown(self):
pass
def test_get_effective_traits_for_trait_name(self):
"""Test case for get_effective_traits_for_trait_name
"""
pass
if __name__ == '__main__':
unittest.main()
| 20.8
| 124
| 0.688187
|
fc8611483ec04bc5fc0f2a5c0cb93845e58ea78a
| 2,626
|
py
|
Python
|
bitbots_behavior/bitbots_blackboard/src/bitbots_blackboard/capsules/game_status_capsule.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | null | null | null |
bitbots_behavior/bitbots_blackboard/src/bitbots_blackboard/capsules/game_status_capsule.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | null | null | null |
bitbots_behavior/bitbots_blackboard/src/bitbots_blackboard/capsules/game_status_capsule.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | null | null | null |
"""
GameStatusCapsule
^^^^^^^^^^^^^^^^^
Provides information about the current game state.
"""
import rosparam
import rospy
from humanoid_league_msgs.msg import GameState
class GameStatusCapsule:
def __init__(self):
self.team_id = rospy.get_param("team_id", 8)
self.gamestate = GameState()
self.last_update = 0
self.unpenalized_since = 0
def is_game_state_equals(self, value):
assert value in [GameState.GAMESTATE_PLAYING, GameState.GAMESTATE_FINISHED, GameState.GAMESTATE_INITAL,
GameState.GAMESTATE_READY, GameState.GAMESTATE_SET]
return value == self.get_gamestate()
def get_gamestate(self):
return self.gamestate.gameState
def get_secondary_state(self):
return self.gamestate.secondaryState
def get_secondary_team(self):
return self.gamestate.secondaryStateTeam
def has_kickoff(self):
return self.gamestate.hasKickOff
def has_penalty_kick(self):
return (self.gamestate.secondaryState == GameState.STATE_PENALTYKICK or
self.gamestate.secondaryState == GameState.STATE_PENALTYSHOOT) and \
self.gamestate.secondaryStateTeam == self.team_id
def get_own_goals(self):
return self.gamestate.ownScore
def get_opp_goals(self):
return self.gamestate.rivalScore
def get_seconds_remaining(self):
# Time from the message minus time passed since receiving it
return max(self.gamestate.secondsRemaining - (rospy.get_time() - self.last_update), 0)
def get_secondary_seconds_remaining(self):
"""Seconds remaining for things like kickoff"""
# Time from the message minus time passed since receiving it
return max(self.gamestate.secondary_seconds_remaining - (rospy.get_time() - self.last_update), 0)
def get_seconds_since_last_drop_ball(self):
"""Returns the seconds since the last drop in"""
if self.gamestate.dropInTime == -1:
return None
else:
# Time from the message plus seconds passed since receiving it
return self.gamestate.dropInTime + (rospy.get_time() - self.last_update)
def get_seconds_since_unpenalized(self):
return rospy.get_time() - self.unpenalized_since
def is_allowed_to_move(self):
return self.gamestate.allowedToMove
def gamestate_callback(self, gs):
if self.gamestate.penalized and not gs.penalized:
print("update")
self.unpenalized_since = rospy.get_time()
self.last_update = rospy.get_time()
self.gamestate = gs
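# Hypothetical wiring sketch (not part of the original module): the capsule is
# meant to be fed by a ROS subscriber delivering GameState messages. The node
# and topic names below are illustrative assumptions.
if __name__ == "__main__":
    rospy.init_node("game_status_capsule_demo")
    capsule = GameStatusCapsule()
    rospy.Subscriber("gamestate", GameState, capsule.gamestate_callback)
    rospy.spin()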
| 33.666667
| 111
| 0.687357
|
f425a914430f8cb30cf5ba2e0c373c6c457699ec
| 437
|
py
|
Python
|
ws.py
|
FapTek/faopy-server
|
b112e465b6ebc8583884dabb185fefff6d9d47eb
|
[
"MIT"
] | 1
|
2017-11-13T05:35:18.000Z
|
2017-11-13T05:35:18.000Z
|
ws.py
|
FapTek/faopy-server
|
b112e465b6ebc8583884dabb185fefff6d9d47eb
|
[
"MIT"
] | null | null | null |
ws.py
|
FapTek/faopy-server
|
b112e465b6ebc8583884dabb185fefff6d9d47eb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import asyncio
import time
import websockets
async def hello(websocket, path):
name = await websocket.recv()
print("< {}".format(name))
greeting = "Hello {}!".format(name)
await websocket.send(greeting)
print("> {}".format(greeting))
start_server = websockets.serve(hello, 'localhost', 8765)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
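# Hypothetical client sketch (not part of the original script): connect to the
# server above, send a name and print the greeting that comes back. Run it in
# a separate process while the server is up.
#
#   import asyncio
#   import websockets
#
#   async def client():
#       async with websockets.connect('ws://localhost:8765') as ws:
#           await ws.send("World")
#           print(await ws.recv())
#
#   asyncio.get_event_loop().run_until_complete(client())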
| 23
| 57
| 0.718535
|
aec57b6da7d4f2b5517eb77ef8637bb73fee5746
| 2,479
|
py
|
Python
|
plugins/net/DNS_Latency.py
|
BH1SCW/lmp
|
2c054b22868af07a11439b785dfeb04e01d31c02
|
[
"Apache-2.0"
] | 159
|
2020-04-15T16:41:06.000Z
|
2022-03-30T08:12:00.000Z
|
plugins/net/DNS_Latency.py
|
BH1SCW/lmp
|
2c054b22868af07a11439b785dfeb04e01d31c02
|
[
"Apache-2.0"
] | 82
|
2020-04-16T10:42:42.000Z
|
2022-02-18T13:08:39.000Z
|
plugins/net/DNS_Latency.py
|
Teanix/lmp
|
94b4be742674f831df22120afe458c98c8349f35
|
[
"Apache-2.0"
] | 76
|
2020-04-14T07:39:52.000Z
|
2022-02-21T05:43:37.000Z
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from __future__ import print_function
from bcc import BPF
from time import strftime
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
struct val_t {
u32 pid;
char comm[TASK_COMM_LEN];
char host[80];
u64 ts;
};
struct data_t {
u32 pid;
u64 delta;
char comm[TASK_COMM_LEN];
char host[80];
};
BPF_HASH(start, u32, struct val_t);
BPF_PERF_OUTPUT(events);
int do_entry(struct pt_regs *ctx) {
if (!PT_REGS_PARM1(ctx))
return 0;
struct val_t val = {};
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 pid = pid_tgid >> 32;
u32 tid = (u32)pid_tgid;
if (bpf_get_current_comm(&val.comm, sizeof(val.comm)) == 0) {
bpf_probe_read_user(&val.host, sizeof(val.host),
(void *)PT_REGS_PARM1(ctx));
val.pid = pid;
val.ts = bpf_ktime_get_ns();
start.update(&tid, &val);
}
return 0;
}
int do_return(struct pt_regs *ctx) {
struct val_t *valp;
struct data_t data = {};
u64 delta;
u64 pid_tgid = bpf_get_current_pid_tgid();
u32 tid = (u32)pid_tgid;
u64 tsp = bpf_ktime_get_ns();
valp = start.lookup(&tid);
if (valp == 0)
return 0;
bpf_probe_read_kernel(&data.comm, sizeof(data.comm), valp->comm);
bpf_probe_read_kernel(&data.host, sizeof(data.host), (void *)valp->host);
data.pid = valp->pid;
data.delta = tsp - valp->ts;
events.perf_submit(ctx, &data, sizeof(data));
start.delete(&tid);
return 0;
}
"""
b = BPF(text=bpf_text)
b.attach_uprobe(name="c", sym="getaddrinfo", fn_name="do_entry")
b.attach_uprobe(name="c", sym="gethostbyname", fn_name="do_entry")
b.attach_uprobe(name="c", sym="gethostbyname2", fn_name="do_entry")
b.attach_uretprobe(name="c", sym="getaddrinfo", fn_name="do_return")
b.attach_uretprobe(name="c", sym="gethostbyname", fn_name="do_return")
b.attach_uretprobe(name="c", sym="gethostbyname2", fn_name="do_return")
print("%-9s %-7s %-16s %10s %s" % ("TIME", "PID", "COMM", "LATms", "HOST"))
def print_event(cpu, data, size):
event = b["events"].event(data)
print("%-9s %-7d %-16s %10.2f %s" % (strftime("%H:%M:%S"), event.pid,
event.comm.decode('utf-8', 'replace'), (float(event.delta) / 1000000),
event.host.decode('utf-8', 'replace')))
b["events"].open_perf_buffer(print_event)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
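# How the measurement works (descriptive comment, not in the original script):
# the uprobes on getaddrinfo/gethostbyname* store a timestamp keyed by thread
# id on entry; the matching uretprobes look that record up, compute
# delta = now - entry_ts and push it to user space through the perf buffer,
# which print_event() above formats as a latency in milliseconds. Attaching
# uprobes with BCC requires root privileges.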
| 26.37234
| 78
| 0.631706
|
146fe4ecbfcde9cd0896d45a14edece5f6e92fc0
| 517
|
py
|
Python
|
votesystem/users/admin.py
|
majaeseong/votesystem
|
624fadca0251a81c0417f3a3a23f3d6c38b1cf33
|
[
"MIT"
] | null | null | null |
votesystem/users/admin.py
|
majaeseong/votesystem
|
624fadca0251a81c0417f3a3a23f3d6c38b1cf33
|
[
"MIT"
] | null | null | null |
votesystem/users/admin.py
|
majaeseong/votesystem
|
624fadca0251a81c0417f3a3a23f3d6c38b1cf33
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from votesystem.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
form = UserChangeForm
add_form = UserCreationForm
fieldsets = (("User", {"fields": ("name",)}),) + auth_admin.UserAdmin.fieldsets
list_display = ["username", "name", "is_superuser"]
search_fields = ["name"]
| 28.722222
| 83
| 0.748549
|
994c95cc9bcef98f253065f4cc045712da9c5f72
| 5,273
|
py
|
Python
|
pw_package/py/pw_package/packages/arduino_core.py
|
antmicro/pigweed
|
a308c3354a6131425e3f484f07f05a1813948860
|
[
"Apache-2.0"
] | null | null | null |
pw_package/py/pw_package/packages/arduino_core.py
|
antmicro/pigweed
|
a308c3354a6131425e3f484f07f05a1813948860
|
[
"Apache-2.0"
] | null | null | null |
pw_package/py/pw_package/packages/arduino_core.py
|
antmicro/pigweed
|
a308c3354a6131425e3f484f07f05a1813948860
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Install and check status of teensy-core."""
import json
import logging
import re
import subprocess
import tempfile
from pathlib import Path
from typing import Sequence
from pw_arduino_build import core_installer
import pw_package.package_manager
_LOG: logging.Logger = logging.getLogger(__name__)
class ArduinoCore(pw_package.package_manager.Package):
"""Install and check status of arduino cores."""
def __init__(self, core_name, *args, **kwargs):
super().__init__(*args, name=core_name, **kwargs)
def status(self, path: Path) -> bool:
return (path / 'hardware').is_dir()
def populate_download_cache_from_cipd(self, path: Path) -> None:
"""Check for arduino core availability in pigweed_internal cipd."""
package_path = path.parent.resolve()
core_name = self.name
core_cache_path = package_path / ".cache" / core_name
core_cache_path.mkdir(parents=True, exist_ok=True)
cipd_package_subpath = "pigweed_internal/third_party/"
cipd_package_subpath += core_name
cipd_package_subpath += "/${platform}"
# Check if teensy cipd package is readable
with tempfile.NamedTemporaryFile(prefix='cipd',
delete=True) as temp_json:
cipd_acl_check_command = [
"cipd",
"acl-check",
cipd_package_subpath,
"-reader",
"-json-output",
temp_json.name,
]
subprocess.run(cipd_acl_check_command, capture_output=True)
# Return if no packages are readable.
if not json.load(temp_json)['result']:
return
def _run_command(command):
_LOG.debug("Running: `%s`", " ".join(command))
result = subprocess.run(command, capture_output=True)
_LOG.debug("Output:\n%s",
result.stdout.decode() + result.stderr.decode())
_run_command(["cipd", "init", "-force", core_cache_path.as_posix()])
_run_command([
"cipd", "install", cipd_package_subpath, "-root",
core_cache_path.as_posix(), "-force"
])
_LOG.debug(
"Available Cache Files:\n%s",
"\n".join([p.as_posix() for p in core_cache_path.glob("*")]))
def install(self, path: Path) -> None:
self.populate_download_cache_from_cipd(path)
if self.status(path):
return
# Otherwise delete current version and reinstall
core_installer.install_core(path.parent.resolve().as_posix(),
self.name)
def info(self, path: Path) -> Sequence[str]:
packages_root = path.parent.resolve()
arduino_package_path = path
arduino_package_name = None
message = [
f'{self.name} currently installed in: {path}',
]
# Make gn args sample copy/paste-able by omitting the starting timestamp
# and INF log on each line.
message_gn_args = [
'Enable by running "gn args out" and adding these lines:',
f' pw_arduino_build_CORE_PATH = "{packages_root}"',
f' pw_arduino_build_CORE_NAME = "{self.name}"'
]
# Search for first valid 'package/version' directory
for hardware_dir in [
path for path in (path / 'hardware').iterdir()
if path.is_dir()
]:
if path.name in ["arduino", "tools"]:
continue
for subdir in [
path for path in hardware_dir.iterdir() if path.is_dir()
]:
if subdir.name == 'avr' or re.match(r'[0-9.]+', subdir.name):
arduino_package_name = f'{hardware_dir.name}/{subdir.name}'
break
if arduino_package_name:
message_gn_args += [
f' pw_arduino_build_PACKAGE_NAME = "{arduino_package_name}"',
' pw_arduino_build_BOARD = "BOARD_NAME"'
]
message += ["\n".join(message_gn_args)]
message += [
'Where BOARD_NAME is any supported board.',
            # Have the arduino_builder command appear on its own line.
'List available boards by running:\n'
' arduino_builder '
f'--arduino-package-path {arduino_package_path} '
f'--arduino-package-name {arduino_package_name} list-boards'
]
return message
for arduino_core_name in core_installer.supported_cores():
pw_package.package_manager.register(ArduinoCore, name=arduino_core_name)
| 37.397163
| 80
| 0.605158
|
92fffccbb6976be1d36b3af88ba231d08d4b7c0a
| 2,005
|
py
|
Python
|
convert-images-to-mnist-format.py
|
Kraton99/JPG-PNG-to-MNIST-NN-Format
|
26521143fa002ee5b14f38e4f11a083aaf050aa0
|
[
"Apache-2.0"
] | null | null | null |
convert-images-to-mnist-format.py
|
Kraton99/JPG-PNG-to-MNIST-NN-Format
|
26521143fa002ee5b14f38e4f11a083aaf050aa0
|
[
"Apache-2.0"
] | null | null | null |
convert-images-to-mnist-format.py
|
Kraton99/JPG-PNG-to-MNIST-NN-Format
|
26521143fa002ee5b14f38e4f11a083aaf050aa0
|
[
"Apache-2.0"
] | null | null | null |
import os
from PIL import Image
from array import *
from random import shuffle
# Load from and save to
Names = [['./training-images','train'], ['./test-images','test']]
for name in Names:
data_image = array('B')
data_label = array('B')
FileList = []
    for dirname in [d for d in os.listdir(name[0]) if not d.startswith('.')]: # skip hidden entries such as .DS_Store
path = os.path.join(name[0],dirname)
for filename in os.listdir(path):
if filename.endswith(".png"):
FileList.append(os.path.join(name[0],dirname,filename))
    shuffle(FileList) # Useful for further segmenting the validation set
for filename in FileList:
label = int(filename.split('/')[2])
Im = Image.open(filename)
Im = Im.convert('1')
pixel = Im.load()
width, height = Im.size
for x in range(0,width):
for y in range(0,height):
data_image.append(pixel[y,x])
data_label.append(label) # labels start (one unsigned byte each)
hexval = "{0:#0{1}x}".format(len(FileList),6) # number of files in HEX
# header for label array
header = array('B')
header.extend([0,0,8,1,0,0])
header.append(int('0x'+hexval[2:][:2],16))
header.append(int('0x'+hexval[2:][2:],16))
data_label = header + data_label
# additional header for images array
    if max([width, height]) <= 255:
        header.extend([0, 0, 0, width, 0, 0, 0, height])
    else:
        raise ValueError('Image exceeds maximum size: 255x255 pixels')
header[3] = 3 # Changing MSB for image data (0x00000803)
data_image = header + data_image
output_file = open(name[1]+'-images-idx3-ubyte', 'wb')
data_image.tofile(output_file)
output_file.close()
output_file = open(name[1]+'-labels-idx1-ubyte', 'wb')
data_label.tofile(output_file)
output_file.close()
# gzip resulting files
for name in Names:
os.system('gzip '+name[1]+'-images-idx3-ubyte')
os.system('gzip '+name[1]+'-labels-idx1-ubyte')
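# Note on the IDX headers built above (descriptive comment, not in the original
# script): both output files start with a big-endian 32-bit magic number
# followed by 32-bit dimension sizes. Labels use magic 0x00000801 (one
# dimension: item count); images use magic 0x00000803 (three dimensions: item
# count, rows, columns), which is why header[3] is switched from 1 to 3 before
# the header is prepended to the image data.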
| 27.847222
| 81
| 0.61995
|