Dataset schema (column: type, observed range or class count):
hexsha: string (length 40)
size: int64 (4 - 1.02M)
ext: string (8 classes)
lang: string (1 class)
max_stars_repo_path: string (length 4-209)
max_stars_repo_name: string (length 5-121)
max_stars_repo_head_hexsha: string (length 40)
max_stars_repo_licenses: list (length 1-10)
max_stars_count: int64 (1 - 191k, nullable)
max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 4-209)
max_issues_repo_name: string (length 5-121)
max_issues_repo_head_hexsha: string (length 40)
max_issues_repo_licenses: list (length 1-10)
max_issues_count: int64 (1 - 67k, nullable)
max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 4-209)
max_forks_repo_name: string (length 5-121)
max_forks_repo_head_hexsha: string (length 40)
max_forks_repo_licenses: list (length 1-10)
max_forks_count: int64 (1 - 105k, nullable)
max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 4 - 1.02M)
avg_line_length: float64 (1.07 - 66.1k)
max_line_length: int64 (4 - 266k)
alphanum_fraction: float64 (0.01 - 1)
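The columns above are what a Hugging Face `datasets` loader would expose for a corpus of this shape. Below is a minimal sketch of streaming such a corpus and filtering on a few of these columns; the dataset identifier "org/source-code-corpus" is a placeholder for illustration, not the actual name of this dataset.

from datasets import load_dataset

# "org/source-code-corpus" is a placeholder identifier, not the real dataset name.
ds = load_dataset("org/source-code-corpus", split="train", streaming=True)

for row in ds:
    # Column names follow the schema listed above.
    if row["ext"] != "py" or row["size"] > 100_000:
        continue
    if (row["max_stars_count"] or 0) < 10:  # nullable column, guard against None
        continue
    print(row["max_stars_repo_path"], row["avg_line_length"], row["alphanum_fraction"])
    break  # inspect only the first matching record in this sketch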
hexsha: 2f51beb9590d4b8e11e5a1f72441ffb7a8829482 | size: 3,276 | ext: py | lang: Python
max_stars_repo_path: src/robot/parsing/parser/fileparser.py | max_stars_repo_name: Global19/robotframework | max_stars_repo_head_hexsha: c908c15951bf6fcb193828d8b93e0f5bd42382d4 | max_stars_repo_licenses: ["ECL-2.0", "Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: src/robot/parsing/parser/fileparser.py | max_issues_repo_name: Global19/robotframework | max_issues_repo_head_hexsha: c908c15951bf6fcb193828d8b93e0f5bd42382d4 | max_issues_repo_licenses: ["ECL-2.0", "Apache-2.0"] | max_issues_count: 40 | max_issues_repo_issues_event_min_datetime: 2020-11-06T08:30:26.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-02T10:06:51.000Z
max_forks_repo_path: src/robot/parsing/parser/fileparser.py | max_forks_repo_name: Mattlk13/robotframework | max_forks_repo_head_hexsha: bdd3a303e56050803c02229d8c394a29c3a51362 | max_forks_repo_licenses: ["ECL-2.0", "Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-11-23T03:44:31.000Z | max_forks_repo_forks_event_max_datetime: 2020-11-23T03:44:31.000Z
content:
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from robot.utils import is_pathlike, is_string
from ..lexer import Token
from ..model import (File, CommentSection, SettingSection, VariableSection,
TestCaseSection, KeywordSection)
from .blockparsers import Parser, TestCaseParser, KeywordParser
class FileParser(Parser):
def __init__(self, source=None):
Parser.__init__(self, File(source=self._get_path(source)))
def _get_path(self, source):
if not source:
return None
if is_string(source) and '\n' not in source and os.path.isfile(source):
return source
if is_pathlike(source) and source.is_file():
return str(source)
return None
def handles(self, statement):
return True
def parse(self, statement):
parser_class = {
Token.SETTING_HEADER: SettingSectionParser,
Token.VARIABLE_HEADER: VariableSectionParser,
Token.TESTCASE_HEADER: TestCaseSectionParser,
Token.KEYWORD_HEADER: KeywordSectionParser,
Token.COMMENT_HEADER: CommentSectionParser,
Token.COMMENT: ImplicitCommentSectionParser,
Token.ERROR: ImplicitCommentSectionParser,
Token.EOL: ImplicitCommentSectionParser
}[statement.type]
parser = parser_class(statement)
self.model.sections.append(parser.model)
return parser
class SectionParser(Parser):
subsection_parsers = {}
section_class = None
def __init__(self, header):
Parser.__init__(self, self.section_class(header))
def handles(self, statement):
return statement.type not in Token.HEADER_TOKENS
def parse(self, statement):
parser_class = self.subsection_parsers.get(statement.type)
if parser_class:
parser = parser_class(statement)
self.model.body.append(parser.model)
return parser
self.model.body.append(statement)
class SettingSectionParser(SectionParser):
section_class = SettingSection
class VariableSectionParser(SectionParser):
section_class = VariableSection
class CommentSectionParser(SectionParser):
section_class = CommentSection
class ImplicitCommentSectionParser(SectionParser):
def section_class(self, statement):
return CommentSection(body=[statement])
class TestCaseSectionParser(SectionParser):
subsection_parsers = {Token.TESTCASE_NAME: TestCaseParser}
section_class = TestCaseSection
class KeywordSectionParser(SectionParser):
subsection_parsers = {Token.KEYWORD_NAME: KeywordParser}
section_class = KeywordSection
avg_line_length: 31.2 | max_line_length: 79 | alphanum_fraction: 0.711233
hexsha: bd3af3115e8d49f430758993048a508fe563f588 | size: 6,002 | ext: py | lang: Python
max_stars_repo_path: models/impl/se_vector_fields.py | max_stars_repo_name: spetrescu/sesn | max_stars_repo_head_hexsha: 43ecc5da7083364eea2c66742c17231c18465973 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 55 | max_stars_repo_stars_event_min_datetime: 2020-02-13T17:44:25.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-08T04:26:34.000Z
max_issues_repo_path: models/impl/se_vector_fields.py | max_issues_repo_name: spetrescu/sesn | max_issues_repo_head_hexsha: 43ecc5da7083364eea2c66742c17231c18465973 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2021-07-09T21:49:17.000Z | max_issues_repo_issues_event_max_datetime: 2021-07-10T15:12:33.000Z
max_forks_repo_path: models/impl/se_vector_fields.py | max_forks_repo_name: spetrescu/sesn | max_forks_repo_head_hexsha: 43ecc5da7083364eea2c66742c17231c18465973 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 7 | max_forks_repo_forks_event_min_datetime: 2020-02-13T19:41:46.000Z | max_forks_repo_forks_event_max_datetime: 2021-09-20T17:23:27.000Z
content:
'''It is a reimplementation of "Scale equivariant CNNs with vector fields"
Paper: https://arxiv.org/pdf/1807.11783.pdf
Code: https://github.com/dmarcosg/ScaleEqNet
This reimplementation is slightly faster than the original one
MIT License
Copyright (c) 2020 Ivan Sosnovik
'''
import torch
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
class ScaleConvScalar(nn.Module):
'''Scalar to Vector fields scale-convolution'''
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1,
n_scales_small=5, n_scales_big=3, angle_range=2 * np.pi / 3, base=1.26):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
log_scales = np.linspace(-n_scales_small + 1, n_scales_big, n_scales_small + n_scales_big)
self.scales = base ** log_scales
self.angles = log_scales * angle_range / (n_scales_small + n_scales_big - 1)
self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels,
kernel_size, kernel_size))
nn.init.kaiming_uniform_(self.weight, a=5**0.5)
def forward(self, x):
x = [conv_scale(x, self.weight, s, self.padding, self.stride) for s in self.scales]
vals, args = torch.stack(x, 2).max(2)
angles = torch.Tensor(self.angles)[args].to(args.device)
return F.relu(vals) * angles.cos(), F.relu(vals) * angles.sin()
class ScaleConvVector(nn.Module):
'''Vector to Vector fields scale-convolution'''
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1,
n_scales_small=5, n_scales_big=3, angle_range=2 * np.pi / 3, base=1.26):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
log_scales = np.linspace(-n_scales_small + 1, n_scales_big, n_scales_small + n_scales_big)
self.scales = base ** log_scales
self.angles = log_scales * angle_range / (n_scales_small + n_scales_big - 1)
self.weight_u = nn.Parameter(torch.Tensor(out_channels, in_channels,
kernel_size, kernel_size))
self.weight_v = nn.Parameter(torch.Tensor(out_channels, in_channels,
kernel_size, kernel_size))
nn.init.kaiming_uniform_(self.weight_u, a=5**0.5)
nn.init.kaiming_uniform_(self.weight_v, a=5**0.5)
def forward(self, u, v):
outputs = []
for scale, angle in zip(self.scales, self.angles):
weight_u = self.weight_u * np.cos(angle) + self.weight_v * np.sin(angle)
weight_v = -self.weight_u * np.sin(angle) + self.weight_v * np.cos(angle)
u_out = conv_scale(u, weight_u, scale, self.padding, self.stride)
v_out = conv_scale(v, weight_v, scale, self.padding, self.stride)
outputs.append(u_out + v_out)
#
vals, args = torch.stack(outputs, 2).max(2)
angles = torch.Tensor(self.angles)[args].to(args.device)
return F.relu(vals) * angles.cos(), F.relu(vals) * angles.sin()
class VectorBatchNorm(nn.Module):
def __init__(self, num_features, eps=1e-5, momentum=0.5):
super().__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.weight = nn.Parameter(torch.ones(1, num_features, 1, 1))
self.register_buffer('running_var', torch.ones(1, num_features, 1, 1))
def forward(self, u, v):
if self.training:
var = vector2scalar(u, v).var(dim=(0, 2, 3), unbiased=False, keepdims=True)
n = u.nelement() / u.size(1)
with torch.no_grad():
self.running_var *= 1 - self.momentum
self.running_var += self.momentum * var * n / (n - 1)
else:
var = self.running_var
u = u / (self.eps + var).sqrt()
v = v / (self.eps + var).sqrt()
return u, v
class VectorMaxPool(nn.Module):
def __init__(self, kernel_size, stride=None, padding=0):
super().__init__()
self.kernel_size = kernel_size
self.stride = stride or kernel_size
self.padding = padding
def forward(self, u, v):
B, C, _, _ = u.shape
x = vector2scalar(u, v)
_, idx = F.max_pool2d_with_indices(x, kernel_size=self.kernel_size,
stride=self.stride, padding=self.padding)
u = torch.gather(u.view(B, C, -1), 2, idx.view(B, C, -1)).view_as(idx)
v = torch.gather(v.view(B, C, -1), 2, idx.view(B, C, -1)).view_as(idx)
return u, v
class VectorDropout(nn.Module):
'''Dropout with synchronized masks
'''
def __init__(self, p=0.5):
assert p < 1.0
super().__init__()
self.p = p
def forward(self, input):
u, v = input
probs = u.data.new(u.data.size()).fill_(1 - self.p)
mask = torch.bernoulli(probs) / (1 - self.p)
return u * mask, v * mask
# FUNCTIONS
def vector2scalar(u, v):
'''Vector field to Scalar field projection
(u, v) --> sqrt(u**2 + v**2)
'''
return (u**2 + v**2)**0.5
def conv_scale(x, weight, scale, padding, stride):
original_size = x.shape[-1]
kernel_size = weight.shape[-1]
output_size = (original_size + 1 + padding * 2 - kernel_size) // stride
size = int(round(original_size * scale))
x = F.interpolate(x, size=size, align_corners=False, mode='bilinear')
x = F.conv2d(x, weight, stride=stride, padding=padding)
x = F.interpolate(x, size=output_size, align_corners=False, mode='bilinear')
return x
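# --- Usage sketch (an assumption, not part of the original file) ---
# Minimal illustration of chaining the layers above on a batch of single-channel
# 32x32 images; the shapes and hyperparameters here are illustrative only.
if __name__ == '__main__':
    x = torch.randn(2, 1, 32, 32)                             # batch of 2 grayscale images
    conv1 = ScaleConvScalar(1, 8, kernel_size=5, padding=2)
    u, v = conv1(x)                                           # scalar field -> vector field
    conv2 = ScaleConvVector(8, 16, kernel_size=5, padding=2)
    u, v = conv2(u, v)                                        # vector field -> vector field
    u, v = VectorMaxPool(2)(u, v)                             # synchronized max pooling
    print(u.shape, v.shape)                                   # torch.Size([2, 16, 16, 16]) for both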
avg_line_length: 37.748428 | max_line_length: 98 | alphanum_fraction: 0.611296
hexsha: 4d2090e620433b5a2c806849e43556875786d170 | size: 8,632 | ext: py | lang: Python
max_stars_repo_path: kubernetes_asyncio/client/models/v1_horizontal_pod_autoscaler_status.py | max_stars_repo_name: aK0nshin/kubernetes_asyncio | max_stars_repo_head_hexsha: aef9edcc1f8671a5b1bba9f4684bde890176b19c | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: kubernetes_asyncio/client/models/v1_horizontal_pod_autoscaler_status.py | max_issues_repo_name: aK0nshin/kubernetes_asyncio | max_issues_repo_head_hexsha: aef9edcc1f8671a5b1bba9f4684bde890176b19c | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: kubernetes_asyncio/client/models/v1_horizontal_pod_autoscaler_status.py | max_forks_repo_name: aK0nshin/kubernetes_asyncio | max_forks_repo_head_hexsha: aef9edcc1f8671a5b1bba9f4684bde890176b19c | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.14.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1HorizontalPodAutoscalerStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'current_cpu_utilization_percentage': 'int',
'current_replicas': 'int',
'desired_replicas': 'int',
'last_scale_time': 'datetime',
'observed_generation': 'int'
}
attribute_map = {
'current_cpu_utilization_percentage': 'currentCPUUtilizationPercentage',
'current_replicas': 'currentReplicas',
'desired_replicas': 'desiredReplicas',
'last_scale_time': 'lastScaleTime',
'observed_generation': 'observedGeneration'
}
def __init__(self, current_cpu_utilization_percentage=None, current_replicas=None, desired_replicas=None, last_scale_time=None, observed_generation=None): # noqa: E501
"""V1HorizontalPodAutoscalerStatus - a model defined in OpenAPI""" # noqa: E501
self._current_cpu_utilization_percentage = None
self._current_replicas = None
self._desired_replicas = None
self._last_scale_time = None
self._observed_generation = None
self.discriminator = None
if current_cpu_utilization_percentage is not None:
self.current_cpu_utilization_percentage = current_cpu_utilization_percentage
self.current_replicas = current_replicas
self.desired_replicas = desired_replicas
if last_scale_time is not None:
self.last_scale_time = last_scale_time
if observed_generation is not None:
self.observed_generation = observed_generation
@property
def current_cpu_utilization_percentage(self):
"""Gets the current_cpu_utilization_percentage of this V1HorizontalPodAutoscalerStatus. # noqa: E501
current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU. # noqa: E501
:return: The current_cpu_utilization_percentage of this V1HorizontalPodAutoscalerStatus. # noqa: E501
:rtype: int
"""
return self._current_cpu_utilization_percentage
@current_cpu_utilization_percentage.setter
def current_cpu_utilization_percentage(self, current_cpu_utilization_percentage):
"""Sets the current_cpu_utilization_percentage of this V1HorizontalPodAutoscalerStatus.
current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU. # noqa: E501
:param current_cpu_utilization_percentage: The current_cpu_utilization_percentage of this V1HorizontalPodAutoscalerStatus. # noqa: E501
:type: int
"""
self._current_cpu_utilization_percentage = current_cpu_utilization_percentage
@property
def current_replicas(self):
"""Gets the current_replicas of this V1HorizontalPodAutoscalerStatus. # noqa: E501
current number of replicas of pods managed by this autoscaler. # noqa: E501
:return: The current_replicas of this V1HorizontalPodAutoscalerStatus. # noqa: E501
:rtype: int
"""
return self._current_replicas
@current_replicas.setter
def current_replicas(self, current_replicas):
"""Sets the current_replicas of this V1HorizontalPodAutoscalerStatus.
current number of replicas of pods managed by this autoscaler. # noqa: E501
:param current_replicas: The current_replicas of this V1HorizontalPodAutoscalerStatus. # noqa: E501
:type: int
"""
if current_replicas is None:
raise ValueError("Invalid value for `current_replicas`, must not be `None`") # noqa: E501
self._current_replicas = current_replicas
@property
def desired_replicas(self):
"""Gets the desired_replicas of this V1HorizontalPodAutoscalerStatus. # noqa: E501
desired number of replicas of pods managed by this autoscaler. # noqa: E501
:return: The desired_replicas of this V1HorizontalPodAutoscalerStatus. # noqa: E501
:rtype: int
"""
return self._desired_replicas
@desired_replicas.setter
def desired_replicas(self, desired_replicas):
"""Sets the desired_replicas of this V1HorizontalPodAutoscalerStatus.
desired number of replicas of pods managed by this autoscaler. # noqa: E501
:param desired_replicas: The desired_replicas of this V1HorizontalPodAutoscalerStatus. # noqa: E501
:type: int
"""
if desired_replicas is None:
raise ValueError("Invalid value for `desired_replicas`, must not be `None`") # noqa: E501
self._desired_replicas = desired_replicas
@property
def last_scale_time(self):
"""Gets the last_scale_time of this V1HorizontalPodAutoscalerStatus. # noqa: E501
last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed. # noqa: E501
:return: The last_scale_time of this V1HorizontalPodAutoscalerStatus. # noqa: E501
:rtype: datetime
"""
return self._last_scale_time
@last_scale_time.setter
def last_scale_time(self, last_scale_time):
"""Sets the last_scale_time of this V1HorizontalPodAutoscalerStatus.
last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed. # noqa: E501
:param last_scale_time: The last_scale_time of this V1HorizontalPodAutoscalerStatus. # noqa: E501
:type: datetime
"""
self._last_scale_time = last_scale_time
@property
def observed_generation(self):
"""Gets the observed_generation of this V1HorizontalPodAutoscalerStatus. # noqa: E501
most recent generation observed by this autoscaler. # noqa: E501
:return: The observed_generation of this V1HorizontalPodAutoscalerStatus. # noqa: E501
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""Sets the observed_generation of this V1HorizontalPodAutoscalerStatus.
most recent generation observed by this autoscaler. # noqa: E501
:param observed_generation: The observed_generation of this V1HorizontalPodAutoscalerStatus. # noqa: E501
:type: int
"""
self._observed_generation = observed_generation
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1HorizontalPodAutoscalerStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
avg_line_length: 37.694323 | max_line_length: 187 | alphanum_fraction: 0.673772
hexsha: b156ed1ceed1ee2291a718d1b04c28870adf6c5f | size: 1,624 | ext: py | lang: Python
max_stars_repo_path: mchgeo/_api.py | max_stars_repo_name: nantes-m2-rps-exp/mchgeo | max_stars_repo_head_hexsha: 7c044f3d425fe11e9ee8d9fd3111286fafb8c858 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: mchgeo/_api.py | max_issues_repo_name: nantes-m2-rps-exp/mchgeo | max_issues_repo_head_hexsha: 7c044f3d425fe11e9ee8d9fd3111286fafb8c858 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: mchgeo/_api.py | max_forks_repo_name: nantes-m2-rps-exp/mchgeo | max_forks_repo_head_hexsha: 7c044f3d425fe11e9ee8d9fd3111286fafb8c858 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2022-02-01T08:57:40.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-01T08:57:40.000Z
content:
from ._constants import LIST_OF_DEIDS
import json
import math
from importlib.resources import open_text
def _asJSON():
geomFile= open_text('.'.join((__package__,'data')),'de-geometry.json')
g = json.load(geomFile)
return g
def feature(deid: int):
assert(deid in LIST_OF_DEIDS), "{} is not a valid detection element ID".format(deid)
feat = [f for f in JSON if f["properties"]["deid"] == deid]
return feat[0]
def polygon(deid: int):
feat = feature(deid)
return feat["geometry"]
def transformation(deid: int):
feat = feature(deid)
props = feat["properties"]
trans = {k: v for k, v in props.items() if k in ["tx", "ty", "tz",
"yaw", "pitch", "roll"]}
return trans
def offset(deid: int):
feat = feature(deid)
props = feat["properties"]
return {k: v for k, v in props.items() if k in ["x", "y"]}
def angles2matrix(yaw, pitch, roll, degrees=True):
if degrees == True:
yaw = math.radians(yaw)
pitch = math.radians(pitch)
roll = math.radians(roll)
sinpsi = math.sin(roll)
cospsi = math.cos(roll)
sinthe = math.sin(pitch)
costhe = math.cos(pitch)
sinphi = math.sin(yaw)
cosphi = math.cos(yaw)
return [
costhe * cosphi,
-costhe * sinphi,
sinthe,
sinpsi * sinthe * cosphi + cospsi * sinphi,
-sinpsi * sinthe * sinphi + cospsi * cosphi,
-costhe * sinpsi,
-cospsi * sinthe * cosphi + sinpsi * sinphi,
cospsi * sinthe * sinphi + sinpsi * cosphi,
costhe * cospsi
]
JSON = _asJSON()
avg_line_length: 26.193548 | max_line_length: 88 | alphanum_fraction: 0.586207
hexsha: db544f810fcac429cd11422d183eb0a3606e3817 | size: 3,776 | ext: py | lang: Python
max_stars_repo_path: magpie/test.py | max_stars_repo_name: FertileFragrance/data_not_scientific | max_stars_repo_head_hexsha: 316495f923277ba1f1f3fe70e15b8688797f644f | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 6 | max_stars_repo_stars_event_min_datetime: 2021-01-25T17:36:17.000Z | max_stars_repo_stars_event_max_datetime: 2021-05-18T12:14:02.000Z
max_issues_repo_path: magpie/test.py | max_issues_repo_name: FertileFragrance/data_not_scientific | max_issues_repo_head_hexsha: 316495f923277ba1f1f3fe70e15b8688797f644f | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: magpie/test.py | max_forks_repo_name: FertileFragrance/data_not_scientific | max_forks_repo_head_hexsha: 316495f923277ba1f1f3fe70e15b8688797f644f | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@Author: njuselhx
@Time: 2021/1/21 2:04 PM
@File: test.py
@Software: PyCharm
"""
import pymongo
import datetime
from magpie import Magpie
# labels = ['Gravitation and Cosmology', 'Experiment-HEP', 'Theory-HEP']
# labels = ['军事', '旅游', '政治']
# magpie = Magpie(
# keras_model='save/keras_model_zh.h5',
# word2vec_model='save/word2vec_model_zh',
# scaler='save/scaler_zh',
# labels=labels
# )
# # print(magpie.predict_from_file('data/hep-categories/1002413.txt'))
# print(magpie.predict_from_text('特朗普在联合国大会发表演讲谈到这届美国政府成绩时,称他已经取得了美国历史上几乎最大的成就。随后大会现场\
# 传出了嘲笑声,特朗普立即回应道:“这是真的。”此外,美军方也有专门的低轨甚至超低轨小型卫星星座计划,这些卫星不仅可用于通信和侦察,还可用于支援反高超音速导弹作战。'))
# print(magpie.predict_from_text('此外,美军方也有专门的低轨甚至超低轨小型卫星星座计划,这些卫星不仅可用于通信和侦察,还可用于支援反高超\
# 音速导弹作战。特朗普在联合国大会发表演讲谈到这届美国政府成绩时,称他已经取得了美国历史上几乎最大的成就。随后大会现场传出了嘲笑声,特朗普立即回应道:“这是真的。”'))
labels = ['满意', '喜悦', '乐观', '愤怒', '悲哀', '恐惧', '厌恶', '焦虑', '怀疑']
magpie = Magpie(
keras_model='save/emotion_keras_model.h5',
word2vec_model='save/emotion_word2vec_model',
scaler='save/emotion_scaler',
labels=labels
)
# print(magpie.predict_from_text('害怕,恐怖如斯'))
# print(magpie.predict_from_text('气死我了'))
# print(magpie.predict_from_text('加油,很快就会好的'))
# print(magpie.predict_from_text('希望早日康复'))
# print(magpie.predict_from_text('英国航母战斗群已于1月达到初始作战能力,这标志着英国海军投射力量能力的一个阶段性变化。'))
# print(magpie.predict_from_text('近年来伊朗、叙利亚、缅甸正逐渐成为朝鲜核技术和导弹技术出口的主要客户,其中伊朗所占的比重较高。'))
emotion_dict = {
'满意': 0,
'喜悦': 0,
'乐观': 0,
'愤怒': 0,
'悲哀': 0,
'恐惧': 0,
'厌恶': 0,
'焦虑': 0,
'怀疑': 0
}
client = pymongo.MongoClient(host='124.70.84.12', port=27017, username="pkun", password="lcyyds")
db = client['weibo_keyword_epidemic']
date = '2019-12-08'
with open('data/emotion_frequency.csv', 'a+', encoding='utf-8') as f:
f.write('日期,满意,喜悦,乐观,愤怒,悲哀,恐惧,厌恶,焦虑,怀疑' + '\n')
while datetime.datetime.strptime(date, '%Y-%m-%d') <= datetime.datetime.strptime('2020-01-08', '%Y-%m-%d'):
print(date)
collection = db[date]
documents_obj = collection.find({})
for i in range(0, min(collection.count_documents({}), 3000)):
# print(documents_obj[i]['text'])
# Get the sentiment analysis result for each Weibo post
res = magpie.predict_from_text(documents_obj[i]['text'])
# If the largest score is below 0.75, there is no clear emotion; skip this post
if res[0][1] < 0.75:
continue
# If the second-largest score is more than 0.05 below the largest, keep only the top one
if res[0][1] - res[1][1] > 0.05:
emotion_dict[res[0][0]] = emotion_dict[res[0][0]] + 1
continue
# If the third-largest score is more than 0.03 below the second-largest, keep only the top two
if res[1][1] - res[2][1] > 0.03:
emotion_dict[res[0][0]] = emotion_dict[res[0][0]] + 1 / 2
emotion_dict[res[1][0]] = emotion_dict[res[1][0]] + 1 / 2
continue
# Keep the top three
emotion_dict[res[0][0]] = emotion_dict[res[0][0]] + 1 / 3
emotion_dict[res[1][0]] = emotion_dict[res[1][0]] + 1 / 3
emotion_dict[res[2][0]] = emotion_dict[res[2][0]] + 1 / 3
with open('data/emotion_frequency.csv', 'a+', encoding='utf-8') as f:
f.write(date + ',')
f.write(str(round(emotion_dict['满意'], 2)) + ',')
f.write(str(round(emotion_dict['喜悦'], 2)) + ',')
f.write(str(round(emotion_dict['乐观'], 2)) + ',')
f.write(str(round(emotion_dict['愤怒'], 2)) + ',')
f.write(str(round(emotion_dict['悲哀'], 2)) + ',')
f.write(str(round(emotion_dict['恐惧'], 2)) + ',')
f.write(str(round(emotion_dict['厌恶'], 2)) + ',')
f.write(str(round(emotion_dict['焦虑'], 2)) + ',')
f.write(str(round(emotion_dict['怀疑'], 2)) + '\n')
for key in emotion_dict.keys():
emotion_dict[key] = 0
date = datetime.datetime.strptime(date, '%Y-%m-%d')
date = date + datetime.timedelta(days=1)
date = str(date).split(' ')[0]
avg_line_length: 37.76 | max_line_length: 107 | alphanum_fraction: 0.622617
hexsha: 4357046cb7ec870cfe1dff142366f80f2b4f1879 | size: 34,324 | ext: py | lang: Python
max_stars_repo_path: pytorch_lightning/callbacks/model_checkpoint.py | max_stars_repo_name: tchaton/pytorch-lightning | max_stars_repo_head_hexsha: e19f7b43192bcd02ae778ee68b196b2284eec848 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: pytorch_lightning/callbacks/model_checkpoint.py | max_issues_repo_name: tchaton/pytorch-lightning | max_issues_repo_head_hexsha: e19f7b43192bcd02ae778ee68b196b2284eec848 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2020-12-08T09:00:55.000Z | max_issues_repo_issues_event_max_datetime: 2020-12-08T09:00:55.000Z
max_forks_repo_path: pytorch_lightning/callbacks/model_checkpoint.py | max_forks_repo_name: tchaton/pytorch-lightning | max_forks_repo_head_hexsha: e19f7b43192bcd02ae778ee68b196b2284eec848 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Model Checkpointing
===================
Automatically save model checkpoints during training.
"""
import logging
import os
import re
import time
from copy import deepcopy
from datetime import timedelta
from typing import Any, Dict, Optional
from weakref import proxy
import numpy as np
import torch
import yaml
import pytorch_lightning as pl
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.cloud_io import get_filesystem
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.types import _METRIC, _PATH, STEP_OUTPUT
from pytorch_lightning.utilities.warnings import WarningCache
log = logging.getLogger(__name__)
warning_cache = WarningCache()
class ModelCheckpoint(Callback):
r"""
Save the model periodically by monitoring a quantity. Every metric logged with
:meth:`~pytorch_lightning.core.lightning.log` or :meth:`~pytorch_lightning.core.lightning.log_dict` in
LightningModule is a candidate for the monitor key. For more information, see
:ref:`weights_loading`.
After training finishes, use :attr:`best_model_path` to retrieve the path to the
best checkpoint file and :attr:`best_model_score` to retrieve its score.
Args:
dirpath: directory to save the model file.
Example::
# custom path
# saves a file like: my/path/epoch=0-step=10.ckpt
>>> checkpoint_callback = ModelCheckpoint(dirpath='my/path/')
By default, dirpath is ``None`` and will be set at runtime to the location
specified by :class:`~pytorch_lightning.trainer.trainer.Trainer`'s
:paramref:`~pytorch_lightning.trainer.trainer.Trainer.default_root_dir` or
:paramref:`~pytorch_lightning.trainer.trainer.Trainer.weights_save_path` arguments,
and if the Trainer uses a logger, the path will also contain logger name and version.
filename: checkpoint filename. Can contain named formatting options to be auto-filled.
Example::
# save any arbitrary metrics like `val_loss`, etc. in name
# saves a file like: my/path/epoch=2-val_loss=0.02-other_metric=0.03.ckpt
>>> checkpoint_callback = ModelCheckpoint(
... dirpath='my/path',
... filename='{epoch}-{val_loss:.2f}-{other_metric:.2f}'
... )
By default, filename is ``None`` and will be set to ``'{epoch}-{step}'``.
monitor: quantity to monitor. By default it is ``None`` which saves a checkpoint only for the last epoch.
verbose: verbosity mode. Default: ``False``.
save_last: When ``True``, always saves the model at the end of the epoch to
a file `last.ckpt`. Default: ``None``.
save_top_k: if ``save_top_k == k``,
the best k models according to
the quantity monitored will be saved.
if ``save_top_k == 0``, no models are saved.
if ``save_top_k == -1``, all models are saved.
Please note that the monitors are checked every ``every_n_epochs`` epochs.
if ``save_top_k >= 2`` and the callback is called multiple
times inside an epoch, the name of the saved file will be
appended with a version count starting with ``v1``.
mode: one of {min, max}.
If ``save_top_k != 0``, the decision to overwrite the current save file is made
based on either the maximization or the minimization of the monitored quantity.
For ``'val_acc'``, this should be ``'max'``, for ``'val_loss'`` this should be ``'min'``, etc.
auto_insert_metric_name: When ``True``, the checkpoints filenames will contain the metric name.
For example, ``filename='checkpoint_{epoch:02d}-{acc:02d}`` with epoch 1 and acc 80 will resolve to
``checkpoint_epoch=01-acc=80.ckp``. Is useful to set it to ``False`` when metric names contain ``/``
as this will result in extra folders.
save_weights_only: if ``True``, then only the model's weights will be
saved (``model.save_weights(filepath)``), else the full model
is saved (``model.save(filepath)``).
every_n_train_steps: Number of training steps between checkpoints.
If ``every_n_train_steps == None or every_n_train_steps == 0``, we skip saving during training.
To disable, set ``every_n_train_steps = 0``. This value must be ``None`` or non-negative.
This must be mutually exclusive with ``train_time_interval`` and ``every_n_epochs``.
train_time_interval: Checkpoints are monitored at the specified time interval.
For all practical purposes, this cannot be smaller than the amount
of time it takes to process a single training batch. This is not
guaranteed to execute at the exact time specified, but should be close.
This must be mutually exclusive with ``every_n_train_steps`` and ``every_n_epochs``.
every_n_epochs: Number of epochs between checkpoints.
If ``every_n_epochs == None or every_n_epochs == 0``, we skip saving when the epoch ends.
To disable, set ``every_n_epochs = 0``. This value must be ``None`` or non-negative.
This must be mutually exclusive with ``every_n_train_steps`` and ``train_time_interval``.
Setting both ``ModelCheckpoint(..., every_n_epochs=V, save_on_train_epoch_end=False)`` and
``Trainer(max_epochs=N, check_val_every_n_epoch=M)``
will only save checkpoints at epochs 0 < E <= N
where both values for ``every_n_epochs`` and ``check_val_every_n_epoch`` evenly divide E.
save_on_train_epoch_end: Whether to run checkpointing at the end of the training epoch.
If this is ``False``, then the check runs at the end of the validation.
Note:
For extra customization, ModelCheckpoint includes the following attributes:
- ``CHECKPOINT_JOIN_CHAR = "-"``
- ``CHECKPOINT_NAME_LAST = "last"``
- ``FILE_EXTENSION = ".ckpt"``
- ``STARTING_VERSION = 1``
For example, you can change the default last checkpoint name by doing
``checkpoint_callback.CHECKPOINT_NAME_LAST = "{epoch}-last"``
If you want to checkpoint every N hours, every M train batches, and/or every K val epochs,
then you should create multiple ``ModelCheckpoint`` callbacks.
Raises:
MisconfigurationException:
If ``save_top_k`` is smaller than ``-1``,
if ``monitor`` is ``None`` and ``save_top_k`` is none of ``None``, ``-1``, and ``0``, or
if ``mode`` is none of ``"min"`` or ``"max"``.
ValueError:
If ``trainer.save_checkpoint`` is ``None``.
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import ModelCheckpoint
# saves checkpoints to 'my/path/' at every epoch
>>> checkpoint_callback = ModelCheckpoint(dirpath='my/path/')
>>> trainer = Trainer(callbacks=[checkpoint_callback])
# save epoch and val_loss in name
# saves a file like: my/path/sample-mnist-epoch=02-val_loss=0.32.ckpt
>>> checkpoint_callback = ModelCheckpoint(
... monitor='val_loss',
... dirpath='my/path/',
... filename='sample-mnist-{epoch:02d}-{val_loss:.2f}'
... )
# save epoch and val_loss in name, but specify the formatting yourself (e.g. to avoid problems with Tensorboard
# or Neptune, due to the presence of characters like '=' or '/')
# saves a file like: my/path/sample-mnist-epoch02-val_loss0.32.ckpt
>>> checkpoint_callback = ModelCheckpoint(
... monitor='val/loss',
... dirpath='my/path/',
... filename='sample-mnist-epoch{epoch:02d}-val_loss{val/loss:.2f}',
... auto_insert_metric_name=False
... )
# retrieve the best checkpoint after training
checkpoint_callback = ModelCheckpoint(dirpath='my/path/')
trainer = Trainer(callbacks=[checkpoint_callback])
model = ...
trainer.fit(model)
checkpoint_callback.best_model_path
.. tip:: Saving and restoring multiple checkpoint callbacks at the same time is supported under variation in the
following arguments:
*monitor, mode, every_n_train_steps, every_n_epochs, train_time_interval, save_on_train_epoch_end*
Read more: :ref:`Persisting Callback State`
"""
CHECKPOINT_JOIN_CHAR = "-"
CHECKPOINT_NAME_LAST = "last"
FILE_EXTENSION = ".ckpt"
STARTING_VERSION = 1
def __init__(
self,
dirpath: Optional[_PATH] = None,
filename: Optional[str] = None,
monitor: Optional[str] = None,
verbose: bool = False,
save_last: Optional[bool] = None,
save_top_k: int = 1,
save_weights_only: bool = False,
mode: str = "min",
auto_insert_metric_name: bool = True,
every_n_train_steps: Optional[int] = None,
train_time_interval: Optional[timedelta] = None,
every_n_epochs: Optional[int] = None,
save_on_train_epoch_end: Optional[bool] = None,
):
super().__init__()
self.monitor = monitor
self.verbose = verbose
self.save_last = save_last
self.save_top_k = save_top_k
self.save_weights_only = save_weights_only
self.auto_insert_metric_name = auto_insert_metric_name
self._save_on_train_epoch_end = save_on_train_epoch_end
self._last_global_step_saved = -1
self._last_time_checked: Optional[float] = None
self.current_score = None
self.best_k_models = {}
self.kth_best_model_path = ""
self.best_model_score = None
self.best_model_path = ""
self.last_model_path = ""
self.__init_monitor_mode(mode)
self.__init_ckpt_dir(dirpath, filename)
self.__init_triggers(every_n_train_steps, every_n_epochs, train_time_interval)
self.__validate_init_configuration()
@property
def state_key(self) -> str:
return self._generate_state_key(
monitor=self.monitor,
mode=self.mode,
every_n_train_steps=self._every_n_train_steps,
every_n_epochs=self._every_n_epochs,
train_time_interval=self._train_time_interval,
save_on_train_epoch_end=self._save_on_train_epoch_end,
)
def on_init_end(self, trainer: "pl.Trainer") -> None:
if self._save_on_train_epoch_end is None:
# if the user runs validation multiple times per training epoch or multiple training epochs without
# validation, then we run after validation instead of on train epoch end
self._save_on_train_epoch_end = trainer.val_check_interval == 1.0 and trainer.check_val_every_n_epoch == 1
def on_pretrain_routine_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""When pretrain routine starts we build the ckpt dir on the fly."""
self.__resolve_ckpt_dir(trainer)
if trainer.is_global_zero:
self.__warn_if_dir_not_empty(self.dirpath)
def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._last_time_checked = time.monotonic()
def on_train_batch_end(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
outputs: STEP_OUTPUT,
batch: Any,
batch_idx: int,
) -> None:
"""Save checkpoint on train batch end if we meet the criteria for `every_n_train_steps`"""
if self._should_skip_saving_checkpoint(trainer):
return
step = trainer.global_step
skip_batch = self._every_n_train_steps < 1 or ((step + 1) % self._every_n_train_steps != 0)
train_time_interval = self._train_time_interval
skip_time = True
now = time.monotonic()
if train_time_interval:
prev_time_check = self._last_time_checked
skip_time = prev_time_check is None or (now - prev_time_check) < train_time_interval.total_seconds()
# in case we have time differences across ranks
# broadcast the decision on whether to checkpoint from rank 0 to avoid possible hangs
skip_time = trainer.training_type_plugin.broadcast(skip_time)
if skip_batch and skip_time:
return
if not skip_time:
self._last_time_checked = now
self.save_checkpoint(trainer)
def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Save a checkpoint at the end of the training epoch."""
# as we advance one step at end of training, we use `global_step - 1` to avoid saving duplicates
trainer.fit_loop.global_step -= 1
if (
not self._should_skip_saving_checkpoint(trainer)
and self._save_on_train_epoch_end
and self._every_n_epochs > 0
and (trainer.current_epoch + 1) % self._every_n_epochs == 0
):
self.save_checkpoint(trainer)
trainer.fit_loop.global_step += 1
def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Save a checkpoint at the end of the validation stage."""
if (
self._should_skip_saving_checkpoint(trainer)
or self._save_on_train_epoch_end
or self._every_n_epochs < 1
or (trainer.current_epoch + 1) % self._every_n_epochs != 0
):
return
self.save_checkpoint(trainer)
def on_train_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""Save a checkpoint when training stops.
This will only save a checkpoint if `save_last` is also enabled as the monitor metrics logged during
training/validation steps or end of epochs are not guaranteed to be available at this stage.
"""
if self._should_skip_saving_checkpoint(trainer) or not self.save_last:
return
if self.verbose:
rank_zero_info("Saving latest checkpoint...")
# as we advance one step at end of training, we use `global_step - 1` to avoid saving duplicates
monitor_candidates = self._monitor_candidates(trainer, trainer.current_epoch, trainer.global_step - 1)
trainer.fit_loop.global_step -= 1
self._save_last_checkpoint(trainer, monitor_candidates)
trainer.fit_loop.global_step += 1
def on_save_checkpoint(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: Dict[str, Any]
) -> Dict[str, Any]:
return {
"monitor": self.monitor,
"best_model_score": self.best_model_score,
"best_model_path": self.best_model_path,
"current_score": self.current_score,
"dirpath": self.dirpath,
}
def on_load_checkpoint(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", callback_state: Dict[str, Any]
) -> None:
self.best_model_score = callback_state["best_model_score"]
self.best_model_path = callback_state["best_model_path"]
def save_checkpoint(self, trainer: "pl.Trainer") -> None:
"""Performs the main logic around saving a checkpoint.
This method runs on all ranks. It is the responsibility of `trainer.save_checkpoint` to correctly handle the
behaviour in distributed training, i.e., saving only on rank 0 for data parallel use cases.
"""
epoch = trainer.current_epoch
global_step = trainer.global_step
self._validate_monitor_key(trainer)
# track epoch when ckpt was last checked
self._last_global_step_saved = global_step
# what can be monitored
monitor_candidates = self._monitor_candidates(trainer, epoch=epoch, step=global_step)
# callback supports multiple simultaneous modes
# here we call each mode sequentially
# Mode 1: save the top k checkpoints
self._save_top_k_checkpoint(trainer, monitor_candidates)
# Mode 2: save monitor=None checkpoints
self._save_none_monitor_checkpoint(trainer, monitor_candidates)
# Mode 3: save last checkpoints
self._save_last_checkpoint(trainer, monitor_candidates)
# notify loggers
if trainer.is_global_zero and trainer.logger:
trainer.logger.after_save_checkpoint(proxy(self))
def _should_skip_saving_checkpoint(self, trainer: "pl.Trainer") -> bool:
from pytorch_lightning.trainer.states import TrainerFn
return (
trainer.fast_dev_run # disable checkpointing with fast_dev_run
or trainer.state.fn != TrainerFn.FITTING # don't save anything during non-fit
or trainer.sanity_checking # don't save anything during sanity check
or self._last_global_step_saved == trainer.global_step # already saved at the last step
)
def __validate_init_configuration(self) -> None:
if self.save_top_k < -1:
raise MisconfigurationException(f"Invalid value for save_top_k={self.save_top_k}. Must be >= -1")
if self._every_n_train_steps < 0:
raise MisconfigurationException(
f"Invalid value for every_n_train_steps={self._every_n_train_steps}. Must be >= 0"
)
if self._every_n_epochs < 0:
raise MisconfigurationException(f"Invalid value for every_n_epochs={self._every_n_epochs}. Must be >= 0")
every_n_train_steps_triggered = self._every_n_train_steps >= 1
every_n_epochs_triggered = self._every_n_epochs >= 1
train_time_interval_triggered = self._train_time_interval is not None
if every_n_train_steps_triggered + every_n_epochs_triggered + train_time_interval_triggered > 1:
raise MisconfigurationException(
f"Combination of parameters every_n_train_steps={self._every_n_train_steps}, "
f"every_n_epochs={self._every_n_epochs} and train_time_interval={self._train_time_interval} "
"should be mutually exclusive."
)
if self.monitor is None:
# -1: save all epochs, 0: nothing is saved, 1: save last epoch
if self.save_top_k not in (-1, 0, 1):
raise MisconfigurationException(
f"ModelCheckpoint(save_top_k={self.save_top_k}, monitor=None) is not a valid"
" configuration. No quantity for top_k to track."
)
if self.save_top_k == -1 and self.save_last:
rank_zero_info(
"ModelCheckpoint(save_last=True, save_top_k=-1, monitor=None)"
" will duplicate the last checkpoint saved."
)
def __init_ckpt_dir(self, dirpath: Optional[_PATH], filename: Optional[str]) -> None:
self._fs = get_filesystem(dirpath if dirpath else "")
if dirpath and self._fs.protocol == "file":
dirpath = os.path.realpath(dirpath)
self.dirpath = dirpath
self.filename = filename
def __init_monitor_mode(self, mode: str) -> None:
torch_inf = torch.tensor(np.Inf)
mode_dict = {"min": (torch_inf, "min"), "max": (-torch_inf, "max")}
if mode not in mode_dict:
raise MisconfigurationException(f"`mode` can be {', '.join(mode_dict.keys())} but got {mode}")
self.kth_value, self.mode = mode_dict[mode]
def __init_triggers(
self,
every_n_train_steps: Optional[int],
every_n_epochs: Optional[int],
train_time_interval: Optional[timedelta],
) -> None:
# Default to running once after each validation epoch if neither
# every_n_train_steps nor every_n_epochs is set
if every_n_train_steps is None and every_n_epochs is None and train_time_interval is None:
every_n_epochs = 1
every_n_train_steps = 0
log.debug("Both every_n_train_steps and every_n_epochs are not set. Setting every_n_epochs=1")
else:
every_n_epochs = every_n_epochs or 0
every_n_train_steps = every_n_train_steps or 0
self._train_time_interval: Optional[timedelta] = train_time_interval
self._every_n_epochs: int = every_n_epochs
self._every_n_train_steps: int = every_n_train_steps
@property
def every_n_epochs(self) -> Optional[int]:
return self._every_n_epochs
def check_monitor_top_k(self, trainer: "pl.Trainer", current: Optional[torch.Tensor] = None) -> bool:
if current is None:
return False
if self.save_top_k == -1:
return True
less_than_k_models = len(self.best_k_models) < self.save_top_k
if less_than_k_models:
return True
if not isinstance(current, torch.Tensor):
rank_zero_warn(
f"{current} is supposed to be a `torch.Tensor`. Saving checkpoint may not work correctly."
f" HINT: check the value of {self.monitor} in your validation loop",
RuntimeWarning,
)
current = torch.tensor(current)
monitor_op = {"min": torch.lt, "max": torch.gt}[self.mode]
should_update_best_and_save = monitor_op(current, self.best_k_models[self.kth_best_model_path])
# If using multiple devices, make sure all processes are unanimous on the decision.
should_update_best_and_save = trainer.training_type_plugin.reduce_boolean_decision(should_update_best_and_save)
return should_update_best_and_save
@classmethod
def _format_checkpoint_name(
cls,
filename: Optional[str],
metrics: Dict[str, _METRIC],
prefix: str = "",
auto_insert_metric_name: bool = True,
) -> str:
if not filename:
# filename is not set, use default name
filename = "{epoch}" + cls.CHECKPOINT_JOIN_CHAR + "{step}"
# check and parse user passed keys in the string
groups = re.findall(r"(\{.*?)[:\}]", filename)
if len(groups) >= 0:
for group in groups:
name = group[1:]
if auto_insert_metric_name:
filename = filename.replace(group, name + "={" + name)
if name not in metrics:
metrics[name] = 0
filename = filename.format(**metrics)
if prefix:
filename = cls.CHECKPOINT_JOIN_CHAR.join([prefix, filename])
return filename
def format_checkpoint_name(
self, metrics: Dict[str, _METRIC], filename: Optional[str] = None, ver: Optional[int] = None
) -> str:
"""Generate a filename according to the defined template.
Example::
>>> tmpdir = os.path.dirname(__file__)
>>> ckpt = ModelCheckpoint(dirpath=tmpdir, filename='{epoch}')
>>> os.path.basename(ckpt.format_checkpoint_name(dict(epoch=0)))
'epoch=0.ckpt'
>>> ckpt = ModelCheckpoint(dirpath=tmpdir, filename='{epoch:03d}')
>>> os.path.basename(ckpt.format_checkpoint_name(dict(epoch=5)))
'epoch=005.ckpt'
>>> ckpt = ModelCheckpoint(dirpath=tmpdir, filename='{epoch}-{val_loss:.2f}')
>>> os.path.basename(ckpt.format_checkpoint_name(dict(epoch=2, val_loss=0.123456)))
'epoch=2-val_loss=0.12.ckpt'
>>> os.path.basename(ckpt.format_checkpoint_name(dict(epoch=2, val_loss=0.12), filename='{epoch:d}'))
'epoch=2.ckpt'
>>> ckpt = ModelCheckpoint(dirpath=tmpdir,
... filename='epoch={epoch}-validation_loss={val_loss:.2f}',
... auto_insert_metric_name=False)
>>> os.path.basename(ckpt.format_checkpoint_name(dict(epoch=2, val_loss=0.123456)))
'epoch=2-validation_loss=0.12.ckpt'
>>> ckpt = ModelCheckpoint(dirpath=tmpdir, filename='{missing:d}')
>>> os.path.basename(ckpt.format_checkpoint_name({}))
'missing=0.ckpt'
>>> ckpt = ModelCheckpoint(filename='{step}')
>>> os.path.basename(ckpt.format_checkpoint_name(dict(step=0)))
'step=0.ckpt'
"""
filename = filename or self.filename
filename = self._format_checkpoint_name(filename, metrics, auto_insert_metric_name=self.auto_insert_metric_name)
if ver is not None:
filename = self.CHECKPOINT_JOIN_CHAR.join((filename, f"v{ver}"))
ckpt_name = f"{filename}{self.FILE_EXTENSION}"
return os.path.join(self.dirpath, ckpt_name) if self.dirpath else ckpt_name
def __resolve_ckpt_dir(self, trainer: "pl.Trainer") -> None:
"""Determines model checkpoint save directory at runtime. References attributes from the trainer's logger
to determine where to save checkpoints. The base path for saving weights is set in this priority:
1. Checkpoint callback's path (if passed in)
2. The default_root_dir from trainer if trainer has no logger
3. The weights_save_path from trainer, if user provides it
4. User provided weights_saved_path
The base path gets extended with logger name and version (if these are available)
and subfolder "checkpoints".
"""
# Todo: required argument `pl_module` is not used
if self.dirpath is not None:
return # short circuit
if trainer.logger is not None:
if trainer.weights_save_path != trainer.default_root_dir:
# the user has changed weights_save_path, it overrides anything
save_dir = trainer.weights_save_path
else:
save_dir = trainer.logger.save_dir or trainer.default_root_dir
version = (
trainer.logger.version
if isinstance(trainer.logger.version, str)
else f"version_{trainer.logger.version}"
)
ckpt_path = os.path.join(save_dir, str(trainer.logger.name), version, "checkpoints")
else:
ckpt_path = os.path.join(trainer.weights_save_path, "checkpoints")
ckpt_path = trainer.training_type_plugin.broadcast(ckpt_path)
self.dirpath = ckpt_path
if not trainer.fast_dev_run and trainer.training_type_plugin.should_rank_save_checkpoint:
self._fs.makedirs(self.dirpath, exist_ok=True)
def __warn_if_dir_not_empty(self, dirpath: _PATH) -> None:
if self.save_top_k != 0 and self._fs.isdir(dirpath) and len(self._fs.ls(dirpath)) > 0:
rank_zero_warn(f"Checkpoint directory {dirpath} exists and is not empty.")
def _validate_monitor_key(self, trainer: "pl.Trainer") -> None:
metrics = trainer.callback_metrics
# validate metric
if self.monitor is not None and not self._is_valid_monitor_key(metrics):
m = (
f"ModelCheckpoint(monitor='{self.monitor}') not found in the returned metrics:"
f" {list(metrics.keys())}. "
f"HINT: Did you call self.log('{self.monitor}', value) in the LightningModule?"
)
if not trainer.fit_loop.epoch_loop.val_loop._has_run:
warning_cache.warn(m)
else:
raise MisconfigurationException(m)
def _get_metric_interpolated_filepath_name(
self, monitor_candidates: Dict[str, _METRIC], trainer: "pl.Trainer", del_filepath: Optional[str] = None
) -> str:
filepath = self.format_checkpoint_name(monitor_candidates)
version_cnt = self.STARTING_VERSION
while self.file_exists(filepath, trainer) and filepath != del_filepath:
filepath = self.format_checkpoint_name(monitor_candidates, ver=version_cnt)
version_cnt += 1
return filepath
def _monitor_candidates(self, trainer: "pl.Trainer", epoch: int, step: int) -> Dict[str, _METRIC]:
monitor_candidates = deepcopy(trainer.callback_metrics)
monitor_candidates.update(epoch=epoch, step=step)
return monitor_candidates
def _save_last_checkpoint(self, trainer: "pl.Trainer", monitor_candidates: Dict[str, _METRIC]) -> None:
if not self.save_last:
return
filepath = self.format_checkpoint_name(monitor_candidates, self.CHECKPOINT_NAME_LAST)
trainer.save_checkpoint(filepath, self.save_weights_only)
if self.last_model_path and self.last_model_path != filepath:
trainer.training_type_plugin.remove_checkpoint(self.last_model_path)
self.last_model_path = filepath
def _save_top_k_checkpoint(self, trainer: "pl.Trainer", monitor_candidates: Dict[str, _METRIC]) -> None:
if self.monitor is None or self.save_top_k == 0:
return
current = monitor_candidates.get(self.monitor)
if self.check_monitor_top_k(trainer, current):
self._update_best_and_save(current, trainer, monitor_candidates)
elif self.verbose:
epoch = monitor_candidates.get("epoch")
step = monitor_candidates.get("step")
rank_zero_info(f"Epoch {epoch:d}, global step {step:d}: {self.monitor} was not in top {self.save_top_k}")
def _save_none_monitor_checkpoint(self, trainer: "pl.Trainer", monitor_candidates: Dict[str, _METRIC]) -> None:
if self.monitor is not None or self.save_top_k == 0:
return
filepath = self._get_metric_interpolated_filepath_name(monitor_candidates, trainer)
trainer.save_checkpoint(filepath, self.save_weights_only)
if self.save_top_k == 1 and self.best_model_path and self.best_model_path != filepath:
trainer.training_type_plugin.remove_checkpoint(self.best_model_path)
self.best_model_path = filepath
def _is_valid_monitor_key(self, metrics: Dict[str, _METRIC]) -> bool:
return self.monitor in metrics or len(metrics) == 0
def _update_best_and_save(
self, current: torch.Tensor, trainer: "pl.Trainer", monitor_candidates: Dict[str, _METRIC]
) -> None:
k = len(self.best_k_models) + 1 if self.save_top_k == -1 else self.save_top_k
del_filepath = None
if len(self.best_k_models) == k and k > 0:
del_filepath = self.kth_best_model_path
self.best_k_models.pop(del_filepath)
# do not save nan, replace with +/- inf
if isinstance(current, torch.Tensor) and torch.isnan(current):
current = torch.tensor(float("inf" if self.mode == "min" else "-inf"), device=current.device)
filepath = self._get_metric_interpolated_filepath_name(monitor_candidates, trainer, del_filepath)
# save the current score
self.current_score = current
self.best_k_models[filepath] = current
if len(self.best_k_models) == k:
# monitor dict has reached k elements
_op = max if self.mode == "min" else min
self.kth_best_model_path = _op(self.best_k_models, key=self.best_k_models.get)
self.kth_value = self.best_k_models[self.kth_best_model_path]
_op = min if self.mode == "min" else max
self.best_model_path = _op(self.best_k_models, key=self.best_k_models.get)
self.best_model_score = self.best_k_models[self.best_model_path]
if self.verbose:
epoch = monitor_candidates.get("epoch")
step = monitor_candidates.get("step")
rank_zero_info(
f"Epoch {epoch:d}, global step {step:d}: {self.monitor} reached {current:0.5f}"
f' (best {self.best_model_score:0.5f}), saving model to "{filepath}" as top {k}'
)
trainer.save_checkpoint(filepath, self.save_weights_only)
if del_filepath is not None and filepath != del_filepath:
trainer.training_type_plugin.remove_checkpoint(del_filepath)
def to_yaml(self, filepath: Optional[_PATH] = None) -> None:
"""Saves the `best_k_models` dict containing the checkpoint paths with the corresponding scores to a YAML
file."""
best_k = {k: v.item() for k, v in self.best_k_models.items()}
if filepath is None:
filepath = os.path.join(self.dirpath, "best_k_models.yaml")
with self._fs.open(filepath, "w") as fp:
yaml.dump(best_k, fp)
def file_exists(self, filepath: _PATH, trainer: "pl.Trainer") -> bool:
"""Checks if a file exists on rank 0 and broadcasts the result to all other ranks, preventing the internal
state to diverge between ranks."""
exists = self._fs.exists(filepath)
return trainer.training_type_plugin.broadcast(exists)
avg_line_length: 45.522546 | max_line_length: 120 | alphanum_fraction: 0.653595
hexsha: 8a2127c6408bf2ab24fb1c90d2fa17cd92bab788 | size: 1,279 | ext: py | lang: Python
max_stars_repo_path: leiaapi/helpers/scheduler.py | max_stars_repo_name: labinnovationdocapost/leia-api-python-sdk | max_stars_repo_head_hexsha: 6001dce68362d4e836b57e52d4da17710f25ed12 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: leiaapi/helpers/scheduler.py | max_issues_repo_name: labinnovationdocapost/leia-api-python-sdk | max_issues_repo_head_hexsha: 6001dce68362d4e836b57e52d4da17710f25ed12 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: leiaapi/helpers/scheduler.py | max_forks_repo_name: labinnovationdocapost/leia-api-python-sdk | max_forks_repo_head_hexsha: 6001dce68362d4e836b57e52d4da17710f25ed12 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import logging
import sched
import time
from threading import Thread, Lock
logger = logging.getLogger(__name__)
class Scheduler:
def __call__(self, func, *args, **kwargs):
logger.info(f'Scheduling method {func.__name__} every {self.interval} seconds')
self.func = func
self.run()
return self
def __init__(self, interval, *args, **kwargs):
self.interval = interval
self.func = None
self.args = args
self.kwargs = kwargs
self.scheduler = sched.scheduler(time.time, time.sleep)
logger.info(f'Setup Scheduler every {self.interval} seconds')
self.last_id = None
self.thread = None
def periodic(self):
self.last_id = self.scheduler.enter(self.interval, 1, self.periodic, ())
self.func(*self.args, **self.kwargs)
def local_run(self):
self.periodic()
self.scheduler.run()
def run(self):
self.thread = Thread(target=self.local_run, name=f'schedule-{self.func.__name__}')
self.thread.daemon = True
self.thread.start()
def cancel(self):
logger.info(f'Cancel Scheduler for {self.func.__name__}')
self.scheduler.cancel(self.last_id)
def scheduled(interval):
return Scheduler(interval)
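# --- Usage sketch (an assumption, not part of the original module) ---
# Illustrative example: run a function every 2 seconds in a daemon thread, then cancel it.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    @scheduled(2)
    def heartbeat():
        logger.info('heartbeat tick')

    time.sleep(7)        # the decorated function fires immediately, then every 2 seconds
    heartbeat.cancel()   # `heartbeat` is now the Scheduler instance returned by __call__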
avg_line_length: 27.212766 | max_line_length: 90 | alphanum_fraction: 0.64269
hexsha: 78fe7180e6b717a45c85ffc1271d81f42f3ab236 | size: 4,080 | ext: py | lang: Python
max_stars_repo_path: frontends/PyCDE/src/pycde/instance.py | max_stars_repo_name: jopperm/circt | max_stars_repo_head_hexsha: 56d7fa356d6662111cfc5026c4288e4755ae1393 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: frontends/PyCDE/src/pycde/instance.py | max_issues_repo_name: jopperm/circt | max_issues_repo_head_hexsha: 56d7fa356d6662111cfc5026c4288e4755ae1393 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2021-11-25T14:12:00.000Z | max_issues_repo_issues_event_max_datetime: 2021-11-26T17:09:53.000Z
max_forks_repo_path: frontends/PyCDE/src/pycde/instance.py | max_forks_repo_name: jopperm/circt | max_forks_repo_head_hexsha: 56d7fa356d6662111cfc5026c4288e4755ae1393 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
from __future__ import annotations
from typing import Union
from pycde.devicedb import PhysLocation, PrimitiveDB, PlacementDB
from .appid import AppID
from circt.dialects import hw, msft
import mlir.ir as ir
# TODO: bug: holds an Operation* without releasing it. Use a level of
# indirection.
class Instance:
"""Represents a _specific_ instance, unique in a design. This is in contrast
to a module instantiation within another module."""
import pycde.system as system
def __init__(self,
module: type,
instOp: msft.InstanceOp,
parent: Instance,
sys: system.System,
primdb: PrimitiveDB = None):
assert module is not None
self.module = module
self.instOp = instOp
self.parent = parent
if parent is None:
self.placedb = PlacementDB(sys._get_circt_mod(module), primdb)
assert isinstance(sys, Instance.system.System)
self.sys = sys
@property
def path(self) -> list[Instance]:
if self.parent is None:
return []
return self.parent.path + [self]
@property
def root_module(self) -> hw.HWModuleOp:
if self.parent is None:
return self.module
return self.parent.root_module
@property
def root_instance(self) -> Instance:
if self.parent is None:
return self
return self.parent.root_instance
@property
def path_attr(self) -> msft.RootedInstancePathAttr:
return msft.RootedInstancePathAttr.get(
ir.FlatSymbolRefAttr.get(self.sys._get_module_symbol(self.root_module)),
[x.name_attr for x in self.path[:-1]])
@property
def name(self):
return ir.StringAttr(self.instOp.sym_name).value
@property
def name_attr(self):
return ir.StringAttr(self.instOp.sym_name)
@property
def is_root(self):
return self.parent is None
@property
def appid(self):
return AppID(*[i.name for i in self.path])
def __repr__(self):
path_names = map(lambda i: i.name, self.path)
return "<instance: [" + ", ".join(path_names) + "]>"
def walk(self, callback):
"""Descend the instance hierarchy, calling back on each instance."""
circt_mod = self.sys._get_circt_mod(self.module)
if isinstance(circt_mod, msft.MSFTModuleExternOp):
return
for op in circt_mod.entry_block:
if not isinstance(op, msft.InstanceOp):
continue
assert "moduleName" in op.attributes
tgt_modname = ir.FlatSymbolRefAttr(op.attributes["moduleName"]).value
tgt_mod = self.sys._get_symbol_module(tgt_modname).modcls
assert tgt_mod is not None
inst = Instance(tgt_mod, op, self, self.sys)
callback(inst)
inst.walk(callback)
def _attach_attribute(self, attr_key: str, attr: ir.Attribute):
if isinstance(attr, PhysLocation):
assert attr_key.startswith("loc:")
attr = attr._loc
db = self.root_instance.placedb._db
rc = db.add_placement(attr, self.path_attr, attr_key[4:],
self.instOp.operation)
if not rc:
raise ValueError("Failed to place")
if attr_key not in self.instOp.attributes:
cases = []
else:
existing_attr = self.instOp.attributes[attr_key]
try:
inst_switch = msft.SwitchInstanceAttr(existing_attr)
cases = inst_switch.cases
except TypeError:
raise ValueError(
f"Existing attribute ({existing_attr}) is not msft.switch.inst.")
cases.append((self.path_attr, attr))
self.instOp.attributes[attr_key] = msft.SwitchInstanceAttr.get(cases)
def place(self,
subpath: Union[str, list[str]],
devtype: msft.PrimitiveType,
x: int,
y: int,
num: int = 0):
loc = msft.PhysLocationAttr.get(devtype, x, y, num)
if isinstance(subpath, list):
subpath = "|".join(subpath)
self._attach_attribute(f"loc:{subpath}", loc)
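# A minimal usage sketch: given a root Instance (obtained from a PyCDE System,
# whose construction is omitted here), `walk` visits every instance below it.
# The helper name `_example_list_appids` is illustrative only.
def _example_list_appids(root: Instance):
  """Collect the AppID of every instance under `root`."""
  appids = []
  root.walk(lambda inst: appids.append(inst.appid))
  return appids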
| 30.676692
| 80
| 0.670588
|
3ca1417f35abd930e43b8586aa2e4965e09b76c8
| 4,197
|
py
|
Python
|
tools/replay_dir/metrics.py
|
umn-cris/hfplayer
|
c35a0d0f4a42b673b8ef78c9e7069da30c69a9c3
|
[
"BSD-4-Clause-UC"
] | 16
|
2017-02-13T15:35:20.000Z
|
2022-01-26T13:52:05.000Z
|
tools/replay_dir/metrics.py
|
umn-cris/hfplayer
|
c35a0d0f4a42b673b8ef78c9e7069da30c69a9c3
|
[
"BSD-4-Clause-UC"
] | 3
|
2017-07-05T02:02:17.000Z
|
2017-07-21T03:03:01.000Z
|
tools/replay_dir/metrics.py
|
umn-cris/hfplayer
|
c35a0d0f4a42b673b8ef78c9e7069da30c69a9c3
|
[
"BSD-4-Clause-UC"
] | 7
|
2017-03-01T18:26:31.000Z
|
2018-11-14T07:08:17.000Z
|
#!/usr/bin/python
# metrics to calculate:
# - Avg Latency: accumulate latency value, divide it by number of IOs at the end
# - Execution Time: take start time, subtract it from last IO time at the end
# - IOPS: divide number of IOs by execution time
# - Avg queue depth: accumulate queue depth for each request, divide it by number of IOs
# - Type-P-Reordered: https://tools.ietf.org/html/rfc4737#section-4.1.1
# 1. read original trace file, create dic with <LBA> as key and <order> as value
# 2. read replay trace file, for each IO, maintain IO order number locally. read <LBA> and
# then lookup in the dic for that LBA:
# if (replay order >= expected order):
# package received in order
# else:
# reorder += 1
# - SequenceDiscontinuty: https://tools.ietf.org/html/rfc4737#section-4.1.1
# if package received in order:
#        seqDiscontinuitySize += replay_order - expected_order
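# Worked example of the two reorder metrics above (values are illustrative):
#   original trace: LBA 10 -> order 1, LBA 20 -> order 2, LBA 30 -> order 3
#   replay trace:   LBA 20 (order 1), LBA 10 (order 2), LBA 30 (order 3)
#   LBA 20 arrives at order 1 but was expected at order 2 (1 < 2) -> reorder += 1
#   LBA 10 arrives at order 2, expected at order 1 (2 >= 1, in order)
#     -> sequence discontinuity size += 2 - 1 = 1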
import sys
_lat_col = 19
_seqID_col = 7
_time_col = 2
_queue_col = 5
_lba_col = 14
_acc_latency = 0
_start_time = 0
_end_time = 0
_num_io = 0
_acc_queue = 0
_reorder = 0
_acc_seq_dist = 0
_orig_order = {}
_compare_with_orig = False
def initialize():
global _acc_latency
global _start_time
global _end_time
global _num_io
global _acc_queue
global _reorder
global _acc_seq_dist
global _orig_order
global _compare_with_orig
_acc_latency = 0
_start_time = 0
_end_time = 0
_num_io = 0
_acc_queue = 0
_reorder = 0
_acc_seq_dist = 0
def prepare_orig( trace_file ) :
global _orig_order
f = open(trace_file)
header = f.readline()
order = 1
for line in f :
records = line.split(",")
lba = int(records[_lba_col])
        _orig_order[lba] = order  # map each LBA to its original arrival order
order += 1
def prepare_ds(trace_file):
global _acc_latency
global _start_time
global _end_time
global _num_io
global _acc_queue
global _reorder
global _acc_seq_dist
global _orig_order
order = 1
#import pdb; pdb.set_trace()
f = open(trace_file)
header = f.readline()
firstline = f.readline()
firstlineStrs = firstline.split(",")
_start_time = float(firstlineStrs[_time_col])
f.close()
f = open(trace_file)
header = f.readline()
for line in f :
records = line.split(",")
_acc_latency += float(records[_lat_col])
_num_io += 1
_acc_queue += int(records[_queue_col])
if( _compare_with_orig ):
lba = int(records[_lba_col] )
expected_order = _orig_order[lba]
if(order < expected_order ):
_reorder+=1
else: #inorder
_acc_seq_dist += (order - expected_order)
io_time = float(records[_time_col])
if(io_time < _end_time):
print("Error, IO time is smaller than end_time, timing is wrong")
else:
_end_time = io_time
order += 1
def print_ds(trace_name):
f = open("results.csv","a")
#_acc_latency = 0
#_start_time = 0
#_num_io = 0
#_acc_queue = 0
#_reorder = 0
#_acc_seq_dist = 0
if ( print_ds.header_written == False ) :
header = "Trace Name, Execution Time (s), IO Counts, Avg Latency (us), Avg Queue Depth, OoO IOs, OoO IO\%, Avg Seq Distance\n"
f.write(header)
print_ds.header_written = True
exec_time = "%.2f" % ( (_end_time - _start_time)/1000000 )
num_io = str(_num_io)
avg_latency = "%.2f" % ( _acc_latency / float(_num_io) )
avg_queue = "%.2f" % ( float(_acc_queue) / float(_num_io) )
ooo_ios = str(_reorder)
ooo_ios_prc = "%.2f" % ( float(_reorder) / float(_num_io) )
avg_seq_dist = "%.2f" % ( float(_acc_seq_dist) / float(_num_io) )
f.write( trace_name + ","
+ exec_time + ","
+ num_io + ","
+ avg_latency + ","
+ avg_queue + ","
+ ooo_ios + ","
+ ooo_ios_prc + ","
+ avg_seq_dist
+"\n" )
def pre_initialize():
    global _compare_with_orig
    print_ds.header_written = False
    _orig_order.clear()
    _compare_with_orig = False
def main(argv):
    #import pdb; pdb.set_trace();
    global _compare_with_orig
    pre_initialize()
    for trace_file in argv[1:]:
        if "orig" in trace_file:
            prepare_orig(trace_file)
            _compare_with_orig = True
for trace_file in argv[1:] :
initialize()
prepare_ds(trace_file)
print_ds(trace_file)
print("Processing %s file is done, Processed IOs: %d"%(trace_file,_num_io) )
exit(0)
if __name__ == "__main__":
main(sys.argv)
| 24.54386
| 128
| 0.683584
|
4778ab0d849b3d529e0f5fdc175616d3f416ae48
| 1,240
|
py
|
Python
|
Jarvis_general.py
|
VishalShenoy2002/J.A.R.V.I.S-Computer-Automation
|
f0cc4109f4f34f258e0a2ec2102ff8651b9c308a
|
[
"MIT"
] | 1
|
2022-01-13T07:28:33.000Z
|
2022-01-13T07:28:33.000Z
|
Jarvis_general.py
|
VishalShenoy2002/J.A.R.V.I.S-Computer-Automation
|
f0cc4109f4f34f258e0a2ec2102ff8651b9c308a
|
[
"MIT"
] | null | null | null |
Jarvis_general.py
|
VishalShenoy2002/J.A.R.V.I.S-Computer-Automation
|
f0cc4109f4f34f258e0a2ec2102ff8651b9c308a
|
[
"MIT"
] | null | null | null |
import pyautogui
class OS_Functions:
def __init__(self):
pass
def increase_volume(self):
pyautogui.press('volumeup',presses=3)
def decrease_volume(self):
pyautogui.press('volumedown',presses=3)
def mute(self):
pyautogui.press('volumemute',presses=3)
def capslock(self):
pyautogui.press('capslock')
def pgup(self):
pyautogui.press('pgup')
def page_down(self):
pyautogui.press('pgdn')
def screen_record(self):
pyautogui.hotkey('win','alt','r')
def screenshot(self):
pyautogui.hotkey('win','alt','prtsc')
def show_gamebar(self):
pyautogui.hotkey('win','g')
class Other_General_Functions:
def __init__(self):
pass
def take_printout(self):
pyautogui.hotkey('ctrl','p')
def copy_text(self):
pyautogui.hotkey('ctrl','c')
def paste_text(self):
pyautogui.hotkey('ctrl','v')
def save_file(self):
pyautogui.hotkey('ctrl','s')
def saveas_file(self):
pyautogui.hotkey('ctrl','shift','s')
def minmax_screen(self):
pyautogui.hotkey('win','d')
def switch_tab(self):
pyautogui.hotkey('alt','tab')
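# A minimal usage sketch; both classes are thin pyautogui wrappers, so usage is
# just instantiating a class and calling the shortcut you need.
if __name__ == "__main__":
    os_controls = OS_Functions()
    os_controls.increase_volume()   # presses 'volumeup' three times

    general = Other_General_Functions()
    general.copy_text()             # sends Ctrl+C
    general.paste_text()            # sends Ctrl+V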
| 19.375
| 47
| 0.595968
|
f5d22f1760ffe14329d633239b5d11fd39faea58
| 1,575
|
py
|
Python
|
mlflow/entities/experiment.py
|
drorata/mlflow
|
dd8610ec9457087eb29c57bbba2f0ddbe3e00f9b
|
[
"Apache-2.0"
] | 1
|
2019-01-26T22:56:33.000Z
|
2019-01-26T22:56:33.000Z
|
mlflow/entities/experiment.py
|
drorata/mlflow
|
dd8610ec9457087eb29c57bbba2f0ddbe3e00f9b
|
[
"Apache-2.0"
] | null | null | null |
mlflow/entities/experiment.py
|
drorata/mlflow
|
dd8610ec9457087eb29c57bbba2f0ddbe3e00f9b
|
[
"Apache-2.0"
] | null | null | null |
from mlflow.entities._mlflow_object import _MLflowObject
from mlflow.protos.service_pb2 import Experiment as ProtoExperiment
class Experiment(_MLflowObject):
"""
Experiment object.
"""
DEFAULT_EXPERIMENT_ID = 0
def __init__(self, experiment_id, name, artifact_location, lifecycle_stage):
super(Experiment, self).__init__()
self._experiment_id = experiment_id
self._name = name
self._artifact_location = artifact_location
self._lifecycle_stage = lifecycle_stage
@property
def experiment_id(self):
"""Integer ID of the experiment."""
return self._experiment_id
@property
def name(self):
"""String name of the experiment."""
return self._name
def _set_name(self, new_name):
self._name = new_name
@property
def artifact_location(self):
"""String corresponding to the root artifact URI for the experiment."""
return self._artifact_location
@property
def lifecycle_stage(self):
"""Lifecycle stage of the experiment. Can either be 'active' or 'deleted'."""
return self._lifecycle_stage
@classmethod
def from_proto(cls, proto):
return cls(proto.experiment_id, proto.name, proto.artifact_location, proto.lifecycle_stage)
def to_proto(self):
proto = ProtoExperiment()
proto.experiment_id = self.experiment_id
proto.name = self.name
proto.artifact_location = self.artifact_location
proto.lifecycle_stage = self.lifecycle_stage
return proto
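# A minimal usage sketch; the experiment name and artifact location below are
# placeholder values, and the round trip uses only the methods defined above.
if __name__ == "__main__":
    exp = Experiment(experiment_id=Experiment.DEFAULT_EXPERIMENT_ID,
                     name="my-experiment",
                     artifact_location="s3://bucket/artifacts",
                     lifecycle_stage="active")
    restored = Experiment.from_proto(exp.to_proto())
    assert restored.name == exp.name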
| 30.288462
| 99
| 0.684444
|
c09e1144780641d5f13ece79d3b92aab41ba75bf
| 4,200
|
py
|
Python
|
submit/vec_feat_xgb_test/mt_metrics/bleu.py
|
ubuntu733/SentencePairs
|
367139cdd94be36c04899c3ea01df6f58e796241
|
[
"Apache-2.0"
] | null | null | null |
submit/vec_feat_xgb_test/mt_metrics/bleu.py
|
ubuntu733/SentencePairs
|
367139cdd94be36c04899c3ea01df6f58e796241
|
[
"Apache-2.0"
] | null | null | null |
submit/vec_feat_xgb_test/mt_metrics/bleu.py
|
ubuntu733/SentencePairs
|
367139cdd94be36c04899c3ea01df6f58e796241
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python implementation of BLEU and smooth-BLEU.
This module provides a Python implementation of BLEU and smooth-BLEU.
Smooth BLEU is computed following the method outlined in the paper:
Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
evaluation metrics for machine translation. COLING 2004.
"""
import collections
import math
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i+order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
  Returns:
    2-tuple with the BLEU score and the list of per-order n-gram precisions
    (the geometric mean and brevity penalty are already folded into the score).
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
# return (bleu, precisions, bp, ratio, translation_length, reference_length)
return bleu, precisions
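# A minimal usage sketch; the sentences are placeholders. Each translation has a
# list of references, and every sentence is pre-tokenized into a list of tokens.
if __name__ == "__main__":
  references = [[["the", "cat", "sat", "on", "the", "mat"]]]
  translations = [["the", "cat", "is", "on", "the", "mat"]]
  bleu, precisions = compute_bleu(references, translations, max_order=4,
                                  smooth=True)
  print("BLEU: %.4f, per-order precisions: %s" % (bleu, precisions))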
| 38.888889
| 81
| 0.67381
|
a907ef634dda5de5122ef138a7ff1632cd99d839
| 2,165
|
py
|
Python
|
abp/examples/open_ai/pong/dqn.py
|
osu-xai/abp
|
cd83eaa2810a1c5350c849303d61639576c0bb0d
|
[
"MIT"
] | null | null | null |
abp/examples/open_ai/pong/dqn.py
|
osu-xai/abp
|
cd83eaa2810a1c5350c849303d61639576c0bb0d
|
[
"MIT"
] | 9
|
2018-11-14T23:35:05.000Z
|
2019-05-22T18:31:30.000Z
|
abp/examples/open_ai/pong/dqn.py
|
osu-xai/abp
|
cd83eaa2810a1c5350c849303d61639576c0bb0d
|
[
"MIT"
] | 1
|
2018-11-14T22:34:09.000Z
|
2018-11-14T22:34:09.000Z
|
import gym
import numpy as np
from abp import DQNAdaptive
from tensorboardX import SummaryWriter
from tqdm import tqdm
def run_task(evaluation_config, network_config, reinforce_config):
env = gym.make(evaluation_config.env)
max_episode_steps = env._max_episode_steps
state = env.reset()
UP, DOWN = [2, 3]
choices = [UP, DOWN]
agent = DQNAdaptive(name="Pong",
choices=choices,
network_config=network_config,
reinforce_config=reinforce_config)
training_summaries_path = evaluation_config.summaries_path + "/train"
train_summary_writer = SummaryWriter(training_summaries_path)
test_summaries_path = evaluation_config.summaries_path + "/test"
test_summary_writer = SummaryWriter(test_summaries_path)
# Training Episodes
for episode in tqdm(range(evaluation_config.training_episodes)):
state = env.reset()
total_reward = 0
for steps in range(max_episode_steps):
action, q_values = agent.predict(np.rollaxis(state, 2))
state, reward, done, info = env.step(action)
agent.reward(reward) # Reward for every step
total_reward += reward
if done:
agent.end_episode(np.rollaxis(state, 2))
train_summary_writer.add_scalar(tag="Episode Reward", scalar_value=total_reward,
global_step=episode + 1)
break
agent.disable_learning()
for episode in (range(evaluation_config.test_episodes)):
state = env.reset()
total_reward = 0
for step in range(max_episode_steps):
if evaluation_config.render:
env.render()
action, q_values = agent.predict(np.rollaxis(state, 2))
state, reward, done, info = env.step(action)
total_reward += reward
if done:
test_summary_writer.add_scalar(tag="Episode Reward", scalar_value=total_reward,
global_step=episode + 1)
break
env.close()
| 32.313433
| 96
| 0.612009
|
18b30a8cc17409205caa77b06e555fab9e053d54
| 1,699
|
py
|
Python
|
uspy/postprocessing/stack_image_tiles.py
|
jwarndt/uspy
|
ab5bb73f9243a1d7978c83ccb63e7189fc18cd8a
|
[
"MIT"
] | null | null | null |
uspy/postprocessing/stack_image_tiles.py
|
jwarndt/uspy
|
ab5bb73f9243a1d7978c83ccb63e7189fc18cd8a
|
[
"MIT"
] | null | null | null |
uspy/postprocessing/stack_image_tiles.py
|
jwarndt/uspy
|
ab5bb73f9243a1d7978c83ccb63e7189fc18cd8a
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
from osgeo import gdal
from osgeo import osr
from ..utilities.io import *
def get_sorted_tiles(image_dir):
image_names = [os.path.join(image_dir, n) for n in os.listdir(image_dir)]
feature_types = ["_LBP_", "_MBI_", "_GLCM_", "_PANTEX_", "_HOG_", "_GAB_", "_LAC_", "_SIFT_"]
    # sort the tiles that need to be stacked into their own lists
sorted_files = {}
for filename in image_names:
for ftype in feature_types:
if ftype in filename:
tilename = filename[:filename.index(ftype)]
if tilename not in sorted_files:
sorted_files[tilename] = [filename]
else:
sorted_files[tilename].append(filename)
return sorted_files
def stack_image_tiles(image_dir, outdir, remove_old=False):
files = get_sorted_tiles(image_dir)
for f in files:
out_name = os.path.join(outdir, os.path.basename(f)) + "_features.tif"
arr = []
out_geotran = None
out_srs_wkt = None
for image in files[f]:
ds = gdal.Open(image)
            if out_geotran is None:
                out_geotran = ds.GetGeoTransform()
            if out_srs_wkt is None:
out_srs = osr.SpatialReference()
out_srs.ImportFromEPSG(4326)
out_srs_wkt = out_srs.ExportToWkt()
im = ds.ReadAsArray()
if len(im.shape) == 3:
for b in im:
arr.append(b)
else:
arr.append(im)
ds = None
write_geotiff(out_name, np.array(arr), out_geotran, out_srs_wkt)
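# A minimal usage sketch; the directory paths are placeholders. Every feature
# tile sharing a tile name is stacked into one multi-band GeoTIFF in `outdir`.
if __name__ == "__main__":
    stack_image_tiles("/data/feature_tiles", "/data/stacked_tiles")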
| 36.934783
| 98
| 0.563273
|
4ea5611b3b745d51cb2a94a5bd56acfbc1620b8a
| 508
|
py
|
Python
|
app/db/session.py
|
ergo-pad/paideia-api
|
7ffc78366567c72722d107f06ad37aa7557b05be
|
[
"MIT"
] | null | null | null |
app/db/session.py
|
ergo-pad/paideia-api
|
7ffc78366567c72722d107f06ad37aa7557b05be
|
[
"MIT"
] | 23
|
2022-03-09T11:31:32.000Z
|
2022-03-31T08:53:27.000Z
|
app/db/session.py
|
ergo-pad/paideia-api
|
7ffc78366567c72722d107f06ad37aa7557b05be
|
[
"MIT"
] | 2
|
2022-02-16T03:40:05.000Z
|
2022-02-16T22:40:15.000Z
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from config import Config, Network # api specific config
CFG = Config[Network]
engine = create_engine(CFG.connectionString)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
# Dependency
def get_db():
db = SessionLocal()
try:
yield db
except:
db.rollback()
finally:
db.close()
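# Usage sketch: get_db is written as a generator dependency. Assuming the API
# layer uses FastAPI (not shown in this file), it would typically be wired up as:
#
#     from fastapi import Depends
#
#     @app.get("/items")
#     def list_items(db=Depends(get_db)):
#         return db.query(Item).all()   # `Item` is an illustrative model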
| 22.086957
| 75
| 0.73622
|
226630f3307dc64459de55d5afca6c92b8238a02
| 21,606
|
py
|
Python
|
strategies/Treway.py
|
Mai-Te-Pora/Demex-Trading-Bot
|
4b9a034ec00e7aca8cdc9e4ddd0221ebb2820513
|
[
"MIT"
] | 6
|
2021-09-17T01:14:23.000Z
|
2022-01-16T15:14:30.000Z
|
strategies/Treway.py
|
Mai-Te-Pora/Demex-Trading-Bot
|
4b9a034ec00e7aca8cdc9e4ddd0221ebb2820513
|
[
"MIT"
] | null | null | null |
strategies/Treway.py
|
Mai-Te-Pora/Demex-Trading-Bot
|
4b9a034ec00e7aca8cdc9e4ddd0221ebb2820513
|
[
"MIT"
] | 2
|
2021-11-25T22:23:45.000Z
|
2022-01-16T15:14:33.000Z
|
import pandas as pd
import json
import itertools
import asyncio
import time
import os, sys, logging
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from authenticated_client import demex_auth
from data_processing import SavingRecords
loop = 0
balances = []
swth_usdc_orderbook = []
swth_eth_orderbook = []
eth_usdc_orderbook = []
eth_wbtc_orderbook = []
wbtc_usdc_orderbook = []
usdc_max_quantity = 400
wbtc_max_quantity = 0.01
swth_max_quantity = 50000
eth_max_quantity = 0.125
swth_min_quantity_extra = 180
eth_min_quantity_extra = 0.00025
dem_client = demex_auth.auth_client()
logger = logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
p = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def analyze_wbtc(wbtc_max_quantity, over):
wbtc_max_quantity = wbtc_max_quantity
over = over
with open(p + r"/data_processing/storage/orderbooks/eth_usdc_orderbook.json", "r") as read_file:
eth_usdc_orderbook = pd.read_json(read_file)
with open(p + r"/data_processing/storage/orderbooks/eth_wbtc_orderbook.json", "r") as read_file:
eth_wbtc_orderbook = pd.read_json(read_file)
with open(p + r"/data_processing/storage/orderbooks/wbtc_usdc_orderbook.json", "r") as read_file:
wbtc_usdc_orderbook = pd.read_json(read_file)
eth_usdc_orderbook['total'] = eth_usdc_orderbook['quantity'] * eth_usdc_orderbook['price']
eth_wbtc_orderbook['total'] = eth_wbtc_orderbook['quantity'] * eth_wbtc_orderbook['price']
wbtc_usdc_orderbook['total'] = wbtc_usdc_orderbook['quantity'] * wbtc_usdc_orderbook['price']
#Checking WBTC-USDC (sell), ETH-USDC (Buy), ETH-WBTC (Sell) balance
#WBTC-USDC
logger.info("Starting WBTC-USDC, ETH-USDC, ETH-WBTC Imbalance Check")
logger.info("Starting WBTC Qty: " + str(wbtc_max_quantity))
hold_qty = wbtc_max_quantity
hold_price = 0
paid_percentage = .0025
df = wbtc_usdc_orderbook.loc[(wbtc_usdc_orderbook['side'] == 'buy')]
df = df.sort_values(by='price', ascending=False)
position = 0
while hold_qty > 0:
if df.iloc[position]['quantity'] <= hold_qty:
hold_qty -= df.iloc[position]['quantity']
hold_price += df.iloc[position]['total']
elif df.iloc[position]['quantity'] > hold_qty:
#Document prices for next order
hold_price += hold_qty*df.iloc[position]['price']
hold_qty = 0
position += 1
usdc_paid = hold_price*paid_percentage
total = hold_price-usdc_paid
wbtc_usdc_received = total
logger.info("Received USDC Qty: " + str(wbtc_usdc_received))
#ETH-USDC
hold_qty = wbtc_usdc_received
new_hold_qty = 0
hold_price = 0
df = eth_usdc_orderbook.loc[(eth_usdc_orderbook['side'] == 'sell')]
df = df.sort_values(by='price', ascending=True)
position = 0
while hold_qty > 0:
if df.iloc[position]['total'] <= hold_qty:
hold_qty -= df.iloc[position]['total']
new_hold_qty += df.iloc[position]['quantity']
hold_price += df.iloc[position]['total']
elif df.iloc[position]['total'] > hold_qty:
#Document prices for next order
new_hold_qty += hold_qty/df.iloc[position]['price']
hold_price += hold_qty
hold_qty = 0
position += 1
eth_paid = new_hold_qty*paid_percentage
total = new_hold_qty-eth_paid
eth_usdc_received = total
logger.info("Received ETH Qty: " + str(eth_usdc_received))
#ETH-WBTC
hold_qty = eth_usdc_received
hold_price = 0
df = eth_wbtc_orderbook.loc[(eth_wbtc_orderbook['side'] == 'buy')]
df = df.sort_values(by='price', ascending=False)
position = 0
while hold_qty > 0:
if df.iloc[position]['quantity'] <= hold_qty:
hold_qty -= df.iloc[position]['quantity']
hold_price += df.iloc[position]['total']
elif df.iloc[position]['quantity'] > hold_qty:
#Document prices for next order
hold_price += hold_qty*df.iloc[position]['price']
hold_qty = 0
position += 1
wbtc_paid = hold_price*paid_percentage
total = hold_price-wbtc_paid
eth_wbtc_received = total
logger.info("End Result WBTC Qty: " + str(eth_wbtc_received))
if (eth_wbtc_received-wbtc_max_quantity) > over:
logger.info("Trades Recommended")
logger.info("Performing Recommended Trades")
dem_client.market_sell(pair='wbtc1_usdc1', quantity=str(wbtc_max_quantity))
dem_client.market_buy(pair='eth1_usdc1', quantity=str(wbtc_usdc_received))
dem_client.market_sell(pair='eth1_wbtc1', quantity=str(eth_usdc_received))
else:
logger.info("No Trades Recommended")
#Checking ETH-WBTC (sell), ETH-USDC (Sell), WBTC-USDC (Buy) balance
#WBTC-USDC
logger.info("Starting ETH-WBTC, ETH-USDC, WBTC-USDC Imbalance Check")
logger.info("Starting WBTC Qty: " + str(wbtc_max_quantity))
hold_qty = wbtc_max_quantity
new_hold_qty = 0
hold_price = 0
df = eth_wbtc_orderbook.loc[(eth_wbtc_orderbook['side'] == 'sell')]
df = df.sort_values(by='price', ascending=True)
position = 0
while hold_qty > 0:
if df.iloc[position]['total'] <= hold_qty:
hold_qty -= df.iloc[position]['total']
new_hold_qty += df.iloc[position]['quantity']
#hold_price += df.iloc[position]['total']
elif df.iloc[position]['total'] > hold_qty:
#Document prices for next order
new_hold_qty += hold_qty/df.iloc[position]['price']
#hold_price += hold_qty*df.iloc[position]['price']
hold_qty = 0
position += 1
eth_paid = new_hold_qty*paid_percentage
total = new_hold_qty-eth_paid
eth_wbtc_received = total
logger.info("Received ETH Qty: " + str(eth_wbtc_received))
#ETH-USDC
hold_qty = eth_wbtc_received
new_hold_qty = 0
hold_price = 0
df = eth_usdc_orderbook.loc[(eth_usdc_orderbook['side'] == 'buy')]
df = df.sort_values(by='price', ascending=False)
position = 0
while hold_qty > 0:
if df.iloc[position]['quantity'] <= hold_qty:
hold_qty -= df.iloc[position]['quantity']
new_hold_qty += df.iloc[position]['total']
elif df.iloc[position]['quantity'] > hold_qty:
#Document prices for next order
new_hold_qty += hold_qty*df.iloc[position]['price']
hold_qty = 0
position += 1
eth_paid = new_hold_qty*paid_percentage
total = new_hold_qty-eth_paid
eth_usdc_received = total
logger.info("Received USDC Qty: " + str(eth_usdc_received))
#WBTC-USDC
hold_qty = eth_usdc_received
new_hold_qty = 0
hold_price = 0
df = wbtc_usdc_orderbook.loc[(wbtc_usdc_orderbook['side'] == 'sell')]
df = df.sort_values(by='price', ascending=True)
position = 0
while hold_qty > 0:
if df.iloc[position]['total'] <= hold_qty:
hold_qty -= df.iloc[position]['total']
new_hold_qty += df.iloc[position]['quantity']
elif df.iloc[position]['total'] > hold_qty:
#Document prices for next order
new_hold_qty += hold_qty/df.iloc[position]['price']
hold_qty = 0
position += 1
wbtc_paid = new_hold_qty*paid_percentage
total = new_hold_qty-wbtc_paid
wbtc_usdc_received = total
logger.info("Received WBTC Qty: " + str(wbtc_usdc_received))
if (wbtc_usdc_received - wbtc_max_quantity) > over:
logger.info("Trades Recommended")
logger.info("Performing Recommended Trades")
dem_client.market_buy(pair='eth1_wbtc1', quantity=str(wbtc_max_quantity))
dem_client.market_sell(pair='eth1_usdc1', quantity=str(eth_wbtc_received))
dem_client.market_buy(pair='wbtc1_usdc1', quantity=str(eth_usdc_received))
else:
logger.info("No Trades Recommended")
def analyze_swth(swth_max_quantity, over):
swth_max_quantity = swth_max_quantity
over = over
with open( p + r"/data_processing/storage/orderbooks/swth_usdc_orderbook.json", "r") as read_file:
swth_usdc_orderbook = pd.read_json(read_file)
with open(p + r"/data_processing/storage/orderbooks/swth_eth_orderbook.json", "r") as read_file:
swth_eth_orderbook = pd.read_json(read_file)
with open(p + r"/data_processing/storage/orderbooks/eth_usdc_orderbook.json", "r") as read_file:
eth_usdc_orderbook = pd.read_json(read_file)
swth_usdc_orderbook['total'] = swth_usdc_orderbook['quantity'] * swth_usdc_orderbook['price']
swth_eth_orderbook['total'] = swth_eth_orderbook['quantity'] * swth_eth_orderbook['price']
eth_usdc_orderbook['total'] = eth_usdc_orderbook['quantity'] * eth_usdc_orderbook['price']
#Checking SWTH-USDC (Sell), ETH-USDC (Buy), SWTH-ETH (Buy)
#SWTH-USDC
logger.info("Starting SWTH-USDC, ETH-USDC, SWTH-ETH Imbalance Check")
logger.info("Starting SWTH Qty: " + str(swth_max_quantity))
hold_qty = swth_max_quantity
new_hold_qty = 0
paid_percentage = 0.0025
paid_qty = 0
df = swth_usdc_orderbook.loc[(swth_usdc_orderbook['side'] == 'buy')]
df = df.sort_values(by='price', ascending=False)
position = 0
while hold_qty > 0:
if df.iloc[position]['quantity'] <= hold_qty:
hold_qty -= df.iloc[position]['quantity']
new_hold_qty += df.iloc[position]['total']
elif df.iloc[position]['quantity'] > hold_qty:
new_hold_qty += hold_qty*df.iloc[position]['price']
hold_qty = 0
position += 1
usdc_paid = new_hold_qty*paid_percentage
total = new_hold_qty-usdc_paid
swth_usdc_received = total
logger.info("Received USDC Qty: " + str(swth_usdc_received))
#ETH-USDC
hold_qty = swth_usdc_received
new_hold_qty = 0
df = eth_usdc_orderbook.loc[(eth_usdc_orderbook['side'] == 'sell')]
df = df.sort_values(by='price', ascending=True)
position = 0
while hold_qty > 0:
if df.iloc[position]['total'] <= hold_qty:
hold_qty -= df.iloc[position]['total']
new_hold_qty += df.iloc[position]['quantity']
elif df.iloc[position]['total'] > hold_qty:
new_hold_qty += hold_qty/df.iloc[position]['price']
hold_qty = 0
position += 1
eth_paid = new_hold_qty*paid_percentage
total = new_hold_qty-eth_paid
eth_usdc_received = total
logger.info("Received ETH Qty: " + str(eth_usdc_received))
#SWTH-ETH
hold_qty = eth_usdc_received
new_hold_qty = 0
df = swth_eth_orderbook.loc[(swth_eth_orderbook['side'] == 'sell')]
df = df.sort_values(by='price', ascending=True)
position = 0
while hold_qty > 0:
if df.iloc[position]['total'] <= hold_qty:
hold_qty -= df.iloc[position]['total']
new_hold_qty += df.iloc[position]['quantity']
elif df.iloc[position]['total'] > hold_qty:
new_hold_qty += hold_qty/df.iloc[position]['price']
hold_qty = 0
position += 1
swth_paid = new_hold_qty*paid_percentage
total = new_hold_qty-swth_paid
swth_eth_received = total
logger.info("Received SWTH Qty: " + str(swth_eth_received))
if (swth_eth_received - swth_max_quantity) > over:
logger.info("Trades Recommended")
logger.info("Performing Recommended Trades")
dem_client.market_sell(pair='swth_usdc1', quantity=str(swth_max_quantity))
dem_client.market_buy(pair='eth1_usdc1', quantity=str(swth_usdc_received))
dem_client.market_buy(pair='swth_eth1', quantity=str(eth_usdc_received))
else:
logger.info("No Trades Recommended")
#Checking SWTH-ETH, ETH-USDC, SWTH-USDC
#SWTH-ETH
logger.info("Starting SWTH-ETH, ETH-USDC, SWTH-USDC Imbalance Check")
logger.info("Starting SWTH Qty: " + str(swth_max_quantity))
hold_qty = swth_max_quantity
new_hold_qty = 0
df = swth_eth_orderbook.loc[(swth_eth_orderbook['side'] == 'buy')]
df = df.sort_values(by='price', ascending=False)
position = 0
while hold_qty > 0:
if df.iloc[position]['quantity'] <= hold_qty:
hold_qty -= df.iloc[position]['quantity']
new_hold_qty += df.iloc[position]['total']
elif df.iloc[position]['quantity'] > hold_qty:
new_hold_qty += hold_qty*df.iloc[position]['price']
hold_qty = 0
position += 1
eth_paid = new_hold_qty*paid_percentage
total = new_hold_qty-eth_paid
swth_eth_received = total
logger.info("Received ETH Qty: " + str(swth_eth_received))
#ETH-USDC
hold_qty = swth_eth_received
new_hold_qty = 0
df = eth_usdc_orderbook.loc[(eth_usdc_orderbook['side'] == 'buy')]
df = df.sort_values(by='price', ascending=False)
position = 0
while hold_qty > 0:
if df.iloc[position]['quantity'] <= hold_qty:
hold_qty -= df.iloc[position]['quantity']
new_hold_qty += df.iloc[position]['total']
elif df.iloc[position]['quantity'] > hold_qty:
new_hold_qty += hold_qty*df.iloc[position]['price']
hold_qty = 0
position += 1
usdc_paid = new_hold_qty*paid_percentage
total = new_hold_qty-usdc_paid
eth_usdc_received = total
logger.info("Received USDC Qty: " + str(eth_usdc_received))
#SWTH-USDC
hold_qty = eth_usdc_received
new_hold_qty = 0
df = swth_usdc_orderbook.loc[(swth_usdc_orderbook['side'] == 'sell')]
df = df.sort_values(by='price', ascending=True)
position = 0
while hold_qty > 0:
if df.iloc[position]['total'] <= hold_qty:
hold_qty -= df.iloc[position]['total']
new_hold_qty += df.iloc[position]['quantity']
elif df.iloc[position]['total'] > hold_qty:
new_hold_qty += hold_qty/df.iloc[position]['price']
hold_qty = 0
position += 1
swth_paid = new_hold_qty*paid_percentage
total = new_hold_qty-swth_paid
swth_usdc_received = total
logger.info("Received USDC Qty: " + str(swth_usdc_received))
if (swth_usdc_received - swth_max_quantity) > over:
logger.info("Trades Recommended")
logger.info("Performing Recommended Trades")
dem_client.market_sell(pair='swth_eth1', quantity=str(swth_max_quantity))
dem_client.market_sell(pair='eth1_usdc1', quantity=str(swth_eth_received))
dem_client.market_buy(pair='swth_usdc1', quantity=str(eth_usdc_received))
else:
logger.info("No Trades Recommended")
def analyze_eth(eth_max_quantity, over):
eth_max_quantity = eth_max_quantity
over = over
with open( p + r"/data_processing/storage/orderbooks/wbtc_usdc_orderbook.json", "r") as read_file:
wbtc_usdc_orderbook = pd.read_json(read_file)
with open(p + r"/data_processing/storage/orderbooks/eth_usdc_orderbook.json", "r") as read_file:
eth_usdc_orderbook = pd.read_json(read_file)
with open(p + r"/data_processing/storage/orderbooks/eth_wbtc_orderbook.json", "r") as read_file:
eth_wbtc_orderbook = pd.read_json(read_file)
wbtc_usdc_orderbook['total'] = wbtc_usdc_orderbook['quantity'] * wbtc_usdc_orderbook['price']
eth_wbtc_orderbook['total'] = eth_wbtc_orderbook['quantity'] * eth_wbtc_orderbook['price']
eth_usdc_orderbook['total'] = eth_usdc_orderbook['quantity'] * eth_usdc_orderbook['price']
#Checking ETH-WBTC (Sell), WBTC-USDC(Sell), ETH-USDC(Buy)
#ETH-WBTC
logger.info("Starting ETH-WBTC, WBTC-USDC, ETH-USDC Imbalance Check")
logger.info("Starting ETH Qty: " + str(eth_max_quantity))
hold_qty = eth_max_quantity
new_hold_qty = 0
paid_percentage = 0.0025
df = eth_wbtc_orderbook.loc[(eth_wbtc_orderbook['side'] == 'buy')]
df = df.sort_values(by='price', ascending=False)
position = 0
while hold_qty > 0:
if df.iloc[position]['quantity'] <= hold_qty:
hold_qty -= df.iloc[position]['quantity']
new_hold_qty += df.iloc[position]['total']
elif df.iloc[position]['quantity'] > hold_qty:
new_hold_qty += hold_qty*df.iloc[position]['price']
hold_qty = 0
position += 1
wbtc_paid = new_hold_qty*paid_percentage
total = new_hold_qty-wbtc_paid
eth_wbtc_received = total
logger.info("Received WBTC Qty: " + str(eth_wbtc_received))
hold_qty = eth_wbtc_received
new_hold_qty = 0
df = wbtc_usdc_orderbook.loc[(wbtc_usdc_orderbook['side'] == 'buy')]
df = df.sort_values(by='price', ascending=False)
position = 0
while hold_qty > 0:
if df.iloc[position]['quantity'] <= hold_qty:
hold_qty -= df.iloc[position]['quantity']
new_hold_qty += df.iloc[position]['total']
elif df.iloc[position]['quantity'] > hold_qty:
new_hold_qty += hold_qty*df.iloc[position]['price']
hold_qty = 0
position += 1
usdc_paid = new_hold_qty*paid_percentage
total = new_hold_qty-usdc_paid
wbtc_usdc_received = total
logger.info("Received WBTC Qty: " + str(wbtc_usdc_received))
hold_qty = wbtc_usdc_received
new_hold_qty = 0
df = eth_usdc_orderbook.loc[(eth_usdc_orderbook['side'] == 'sell')]
df = df.sort_values(by='price', ascending=True)
position = 0
while hold_qty > 0:
if df.iloc[position]['total'] <= hold_qty:
hold_qty -= df.iloc[position]['total']
new_hold_qty += df.iloc[position]['quantity']
elif df.iloc[position]['total'] > hold_qty:
new_hold_qty += hold_qty/df.iloc[position]['price']
hold_qty = 0
position += 1
eth_paid = new_hold_qty*paid_percentage
total = new_hold_qty-eth_paid
eth_usdc_received = total
logger.info("Received ETH Qty: " + str(eth_usdc_received))
if (eth_usdc_received - eth_max_quantity) > over:
logger.info("Trades Recommended")
logger.info("Performing Recommended Trades")
dem_client.market_sell(pair='eth1_wbtc1', quantity=str(eth_max_quantity))
dem_client.market_sell(pair='wbtc1_usdc1', quantity=str(eth_wbtc_received))
dem_client.market_buy(pair='eth1_usdc1', quantity=str(wbtc_usdc_received))
else:
logger.info("No Trades Recommended")
#Checking ETH-USDC (Sell), WBTC-USDC(Buy), ETH-WBTC(Buy)
#ETH-USDC
logger.info("Starting ETH-USDC, WBTC-USDC, ETH-WBTC Imbalance Check")
logger.info("Starting ETH Qty: " + str(eth_max_quantity))
hold_qty = eth_max_quantity
new_hold_qty = 0
df = eth_usdc_orderbook.loc[(eth_usdc_orderbook['side'] == 'buy')]
df = df.sort_values(by='price', ascending=False)
position = 0
while hold_qty > 0:
if df.iloc[position]['quantity'] <= hold_qty:
hold_qty -= df.iloc[position]['quantity']
new_hold_qty += df.iloc[position]['total']
elif df.iloc[position]['quantity'] > hold_qty:
new_hold_qty += hold_qty*df.iloc[position]['price']
hold_qty = 0
position += 1
usdc_paid = new_hold_qty*paid_percentage
total = new_hold_qty-usdc_paid
eth_usdc_received = total
logger.info("Received USDC Qty: " + str(eth_usdc_received))
#WBTC-USDC
hold_qty = eth_usdc_received
new_hold_qty = 0
df = wbtc_usdc_orderbook.loc[(wbtc_usdc_orderbook['side'] == 'sell')]
df = df.sort_values(by='price', ascending=True)
position = 0
while hold_qty > 0:
if df.iloc[position]['total'] <= hold_qty:
hold_qty -= df.iloc[position]['total']
new_hold_qty += df.iloc[position]['quantity']
elif df.iloc[position]['total'] > hold_qty:
new_hold_qty += hold_qty/df.iloc[position]['price']
hold_qty = 0
position += 1
wbtc_paid = new_hold_qty*paid_percentage
total = new_hold_qty-wbtc_paid
wbtc_usdc_received = total
logger.info("Received WBTC Qty: " + str(wbtc_usdc_received))
#ETH-WBTC
hold_qty = wbtc_usdc_received
new_hold_qty = 0
df = eth_wbtc_orderbook.loc[(eth_wbtc_orderbook['side'] == 'sell')]
df = df.sort_values(by='price', ascending=True)
position = 0
while hold_qty > 0:
if df.iloc[position]['total'] <= hold_qty:
hold_qty -= df.iloc[position]['total']
new_hold_qty += df.iloc[position]['quantity']
elif df.iloc[position]['total'] > hold_qty:
new_hold_qty += hold_qty/df.iloc[position]['price']
hold_qty = 0
position += 1
eth_paid = new_hold_qty*paid_percentage
total = new_hold_qty-eth_paid
eth_wbtc_received = total
logger.info("Received ETH Qty: " + str(eth_wbtc_received))
if (eth_wbtc_received - eth_max_quantity) > over:
logger.info("Trades Recommended")
logger.info("Performing Recommended Trades")
dem_client.market_sell(pair='eth1_usdc1', quantity=str(eth_max_quantity))
dem_client.market_buy(pair='wbtc1_usdc1', quantity=str(eth_usdc_received))
dem_client.market_buy(pair='eth1_wbtc1', quantity=str(wbtc_usdc_received))
else:
logger.info("No Trades Recommended")
| 41.390805
| 103
| 0.64732
|
1330f6cb4470e2455714e97279e56487075b0280
| 1,315
|
py
|
Python
|
s3upload/setup.py
|
chanzuckerberg/miniwdl-s3parcp
|
fa1baf802bf71b76dbcbc8922ad04d5e16c21b48
|
[
"MIT"
] | 1
|
2020-02-14T15:45:57.000Z
|
2020-02-14T15:45:57.000Z
|
s3upload/setup.py
|
chanzuckerberg/miniwdl-s3parcp
|
fa1baf802bf71b76dbcbc8922ad04d5e16c21b48
|
[
"MIT"
] | 1
|
2020-02-25T06:04:04.000Z
|
2020-03-02T19:37:12.000Z
|
s3upload/setup.py
|
chanzuckerberg/miniwdl-s3parcp
|
fa1baf802bf71b76dbcbc8922ad04d5e16c21b48
|
[
"MIT"
] | 1
|
2020-02-19T08:05:04.000Z
|
2020-02-19T08:05:04.000Z
|
#!/usr/bin/env python3
from setuptools import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(path.dirname(__file__), "README.md")) as f:
long_description = f.read()
setup(
name="miniwdl-s3upload",
version="0.0.8",
description="miniwdl plugin for progressive upload of task output files to Amazon S3",
url="https://github.com/chanzuckerberg/miniwdl-s3upload",
project_urls={
"Documentation": "https://github.com/chanzuckerberg/miniwdl-s3upload",
"Source Code": "https://github.com/chanzuckerberg/miniwdl-s3upload",
"Issue Tracker": "https://github.com/chanzuckerberg/miniwdl-s3upload/issues"
},
long_description=long_description,
long_description_content_type="text/markdown",
author="Mike Lin, Andrey Kislyuk",
py_modules=["miniwdl_s3upload"],
python_requires=">=3.6",
setup_requires=["reentry"],
install_requires=["boto3"],
reentry_register=True,
entry_points={
'miniwdl.plugin.task': ['s3_progressive_upload_task = miniwdl_s3upload:task'],
'miniwdl.plugin.workflow': ['s3_progressive_upload_workflow = miniwdl_s3upload:workflow'],
'miniwdl.plugin.cache_backend': ['s3_progressive_upload_call_cache_backend = miniwdl_s3upload:CallCache'],
}
)
| 39.848485
| 114
| 0.719392
|
53de1e7917a8a35854bd254949e100ea5f9465af
| 560
|
py
|
Python
|
resize.py
|
lorenzoferrante/DeepStar
|
90bacede200e1a3b53e779dd0bd07d7e6a4d088e
|
[
"MIT"
] | null | null | null |
resize.py
|
lorenzoferrante/DeepStar
|
90bacede200e1a3b53e779dd0bd07d7e6a4d088e
|
[
"MIT"
] | null | null | null |
resize.py
|
lorenzoferrante/DeepStar
|
90bacede200e1a3b53e779dd0bd07d7e6a4d088e
|
[
"MIT"
] | null | null | null |
from PIL import Image
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, required=True, help='Path to directory to resize') # example 'data/RaFD/train/clothes/'
parser.add_argument('--new_size', type=int, default=128, help='Size of output image')
args = parser.parse_args()
for filename in os.listdir(args.path):
if filename.endswith('.jpg'):
img = Image.open(args.path + filename)
img = img.resize((args.new_size, args.new_size), Image.ANTIALIAS)
img.save(args.path + filename)
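# Usage sketch: run from the command line, e.g.
#   python resize.py --path data/RaFD/train/clothes/ --new_size 128
# Note that the script concatenates --path and the filename directly, so the
# path should end with a trailing slash as in the example above.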
| 35
| 127
| 0.7125
|
c033bf828561bd3f575d9002ed1e02de6900b14f
| 32,027
|
py
|
Python
|
batchapps/test/unittest_config.py
|
Azure/azure-batch-apps-python
|
d7edf210d601137ba5ed187a58f4fe49e413ce1e
|
[
"MIT"
] | 16
|
2015-02-25T23:35:18.000Z
|
2021-06-10T23:58:49.000Z
|
batchapps/test/unittest_config.py
|
Azure/azure-batch-apps-python
|
d7edf210d601137ba5ed187a58f4fe49e413ce1e
|
[
"MIT"
] | 1
|
2021-02-24T04:11:17.000Z
|
2021-02-24T04:11:17.000Z
|
batchapps/test/unittest_config.py
|
Azure/azure-batch-apps-python
|
d7edf210d601137ba5ed187a58f4fe49e413ce1e
|
[
"MIT"
] | 9
|
2015-03-09T15:16:19.000Z
|
2021-06-08T19:00:53.000Z
|
#-------------------------------------------------------------------------
# The Azure Batch Apps Python Client
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#--------------------------------------------------------------------------
"""Unit tests for Configuration"""
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest import mock
except ImportError:
import mock
try:
import ConfigParser as configparser
except ImportError:
import configparser
try:
from builtins import open
BUILTIN_OPEN = "builtins.open"
except ImportError:
BUILTIN_OPEN = "__builtin__.open"
import os
import logging
import batchapps.config
from batchapps import Configuration
from batchapps.exceptions import InvalidConfigException
# pylint: disable=W0212
class TestConfiguration(unittest.TestCase):
"""Unit tests for Configuration"""
def setUp(self):
self.userdir = os.path.expanduser("~")
self.cwd = os.path.dirname(os.path.abspath(__file__))
self.test_dir = os.path.join(self.cwd, "test_assets", "test_config")
self.use_test_files = os.path.exists(self.test_dir)
return super(TestConfiguration, self).setUp()
@mock.patch.object(Configuration, '_check_directory')
@mock.patch.object(Configuration, '_configure_logging')
@mock.patch.object(Configuration, '_set_logging_level')
@mock.patch.object(Configuration, 'save_config')
@mock.patch.object(batchapps.config.os.path, 'isfile')
@mock.patch.object(batchapps.config.configparser.RawConfigParser, 'read')
def test_config_set_defaults(self,
mock_read,
mock_file,
mock_save,
mock_level,
mock_logging,
mock_dir):
"""Test _set_defaults"""
mock_dir.return_value = False
mock_logging.return_value = logging.getLogger("defaults")
mock_file.return_value = False
cfg = Configuration(default=True)
self.assertTrue(mock_save.called)
self.assertFalse(mock_read.called)
self.assertFalse(mock_file.called)
mock_logging.assert_called_with(
os.path.join(self.userdir, "BatchAppsData"))
mock_level.assert_called_with(30)
self.assertEqual(sorted(cfg._config.sections()),
sorted(["Authentication",
"Blender",
"Logging",
"Test"]))
cfg = Configuration()
self.assertTrue(mock_save.called)
self.assertFalse(mock_read.called)
self.assertTrue(mock_file.called)
mock_logging.assert_called_with(
os.path.join(self.userdir, "BatchAppsData"))
self.assertEqual(sorted(cfg._config.sections()),
sorted(["Authentication",
"Blender",
"Logging",
"Test"]))
cfg = Configuration(data_path="c:\\mypath",
log_level=10,
datadir="data")
self.assertFalse(mock_read.called)
mock_dir.assert_any_call("c:\\mypath")
mock_dir.assert_any_call(self.userdir)
mock_logging.assert_called_with(os.path.join(self.userdir, "data"))
mock_level.assert_called_with(10)
mock_file.return_value = True
cfg = Configuration(default=True)
self.assertTrue(mock_save.called)
self.assertFalse(mock_read.called)
mock_save.reset()
mock_read.side_effect = OSError("test")
cfg = Configuration(data_path=self.test_dir, application='Blender')
self.assertTrue(mock_save.called)
self.assertTrue(mock_read.called)
self.assertEqual(cfg.jobtype, "Blender")
self.assertEqual(cfg.job_type, "Blender")
cfg = Configuration(data_path=self.test_dir, jobtype=None)
self.assertEqual(cfg.jobtype, "Blender")
self.assertEqual(cfg.job_type, "Blender")
with self.assertRaises(InvalidConfigException):
Configuration(application='TestApp', default=True)
with self.assertRaises(InvalidConfigException):
Configuration(jobtype=42, default=True)
@mock.patch.object(Configuration, '_check_directory')
@mock.patch.object(Configuration, '_configure_logging')
@mock.patch.object(Configuration, '_set_logging_level')
@mock.patch.object(Configuration, 'save_config')
@mock.patch.object(batchapps.config.os.path, 'isfile')
def test_config_read_defaults(self,
mock_file,
mock_save,
mock_level,
mock_logging,
mock_dir):
"""Test read"""
if not self.use_test_files:
self.skipTest("No test files present")
mock_dir.return_value = True
mock_logging.return_value = logging.getLogger("read_defaults")
mock_file.return_value = True
cfg = Configuration(data_path=self.test_dir, datadir="")
self.assertFalse(mock_save.called)
mock_dir.assert_called_with(self.test_dir)
mock_file.assert_called_with(
os.path.join(self.test_dir, "batch_apps.ini"))
self.assertEqual(cfg.jobtype, "Blender")
@mock.patch.object(batchapps.config.os.path, 'isdir')
@mock.patch.object(batchapps.config.os, 'mkdir')
@mock.patch.object(batchapps.config.os, 'remove')
@mock.patch(BUILTIN_OPEN)
def test_config_check_directory_a(self,
mock_open,
mock_rem,
mock_mkdir,
mock_isdir):
"""Test _check_directory"""
cfg = mock.create_autospec(Configuration)
cfg._dir = "BatchAppsData"
mock_isdir.return_value = True
check = Configuration._check_directory(cfg, "c:\\my_dir")
self.assertFalse(mock_mkdir.called)
mock_isdir.return_value = False
check = Configuration._check_directory(cfg, "c:\\my_dir")
mock_isdir.assert_called_with("c:\\my_dir\\BatchAppsData")
mock_mkdir.assert_called_with("c:\\my_dir\\BatchAppsData")
mock_open.assert_called_with("c:\\my_dir\\BatchAppsData\\aba_test", 'w')
mock_rem.assert_called_with("c:\\my_dir\\BatchAppsData\\aba_test")
self.assertTrue(check)
@mock.patch.object(batchapps.config.os.path, 'isdir')
@mock.patch.object(batchapps.config.os, 'mkdir')
@mock.patch.object(batchapps.config.os, 'remove')
@mock.patch(BUILTIN_OPEN)
def test_config_check_directory_b(self,
mock_open,
mock_rem,
mock_mkdir,
mock_isdir):
"""Test _check_directory"""
cfg = mock.create_autospec(Configuration)
cfg._dir = "BatchAppsData"
mock_isdir.return_value = False
mock_mkdir.side_effect = OSError("boom!")
check = Configuration._check_directory(cfg, "c:\\my_dir")
self.assertFalse(mock_open.called)
self.assertFalse(mock_rem.called)
self.assertFalse(check)
mock_isdir.return_value = True
mock_open.side_effect = OSError("oops!")
check = Configuration._check_directory(cfg, "c:\\my_dir")
self.assertTrue(mock_open.called)
self.assertFalse(mock_rem.called)
self.assertFalse(check)
@mock.patch.object(batchapps.config.logging, 'Formatter')
@mock.patch.object(batchapps.config.logging, 'StreamHandler')
@mock.patch.object(batchapps.config.logging, 'FileHandler')
@mock.patch.object(batchapps.config.logging, 'getLogger')
@mock.patch.object(batchapps.config.os.path, 'isfile')
@mock.patch.object(batchapps.config.os.path, 'getsize')
@mock.patch.object(batchapps.config.shutil, 'move')
def test_config_configure_logging_a(self,
mock_move,
mock_size,
mock_isfile,
mock_logger,
mock_file,
mock_stream,
mock_format):
"""Test _configure_logging"""
_cfg = configparser.RawConfigParser()
cfg = mock.create_autospec(Configuration)
cfg._config = _cfg
mock_logger.return_value = logging.getLogger("configure_logging_a")
cfg._write_file = True
mock_isfile.return_value = True
mock_size.return_value = 20485760
Configuration._configure_logging(cfg, self.test_dir)
self.assertTrue(mock_format.called)
self.assertTrue(mock_move.called)
self.assertTrue(mock_size.called)
mock_file.assert_called_with(
os.path.join(self.test_dir, "batch_apps.log"))
@mock.patch.object(batchapps.config.logging, 'Formatter')
@mock.patch.object(batchapps.config.logging, 'StreamHandler')
@mock.patch.object(batchapps.config.logging, 'FileHandler')
@mock.patch.object(batchapps.config.logging, 'getLogger')
@mock.patch.object(batchapps.config.os.path, 'isfile')
@mock.patch.object(batchapps.config.os.path, 'getsize')
@mock.patch.object(batchapps.config.shutil, 'move')
def test_config_configure_logging_b(self,
mock_move,
mock_size,
mock_isfile,
mock_logger,
mock_file,
mock_stream,
mock_format):
"""Test _configure_logging"""
_cfg = configparser.RawConfigParser()
cfg = mock.create_autospec(Configuration)
cfg._config = _cfg
cfg._write_file = True
mock_logger.return_value = logging.getLogger("configure_logging_b")
mock_isfile.return_value = False
Configuration._configure_logging(cfg, self.test_dir)
self.assertFalse(mock_size.called)
self.assertFalse(mock_move.called)
self.assertFalse(mock_file.called)
@mock.patch.object(batchapps.config.logging, 'Formatter')
@mock.patch.object(batchapps.config.logging, 'StreamHandler')
@mock.patch.object(batchapps.config.logging, 'FileHandler')
@mock.patch.object(batchapps.config.logging, 'getLogger')
@mock.patch.object(batchapps.config.os.path, 'isfile')
@mock.patch.object(batchapps.config.os.path, 'getsize')
@mock.patch.object(batchapps.config.shutil, 'move')
def test_config_configure_logging_c(self,
mock_move,
mock_size,
mock_isfile,
mock_logger,
mock_file,
mock_stream,
mock_format):
"""Test _configure_logging"""
_cfg = configparser.RawConfigParser()
cfg = mock.create_autospec(Configuration)
cfg._config = _cfg
mock_logger.return_value = logging.getLogger("configure_logging_c")
cfg._write_file = False
Configuration._configure_logging(cfg, self.test_dir)
self.assertFalse(mock_file.called)
self.assertFalse(mock_size.called)
def test_config_set_logging_level(self):
"""Test _set_logging_level"""
_cfg = configparser.RawConfigParser()
cfg = mock.create_autospec(Configuration)
_cfg.add_section("Logging")
cfg._config = _cfg
cfg._log = logging.getLogger("set_logging_level")
lev = Configuration._set_logging_level(cfg, 10)
self.assertEqual(lev, 'DEBUG')
self.assertEqual(_cfg.get("Logging", "level"), 10)
self.assertEqual(cfg._log.level, 10)
lev = Configuration._set_logging_level(cfg, "deBug")
self.assertEqual(lev, 'DEBUG')
self.assertEqual(_cfg.get("Logging", "level"), 10)
self.assertEqual(cfg._log.level, 10)
for i in [23, "test", None, 0, "20"]:
lev = Configuration._set_logging_level(cfg, i)
self.assertEqual(lev, 'WARNING')
self.assertEqual(_cfg.get("Logging", "level"), 30)
self.assertEqual(cfg._log.level, 30)
@mock.patch.object(Configuration, 'save_config')
def test_config_set_default_application(self, mock_save):
"""Test deprecated method set_default_application"""
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("set_default_application")
Configuration.set_default_application(cfg)
self.assertTrue(cfg.set_default_jobtype.called)
@mock.patch.object(Configuration, 'save_config')
def test_config_set_default_jobtype(self, mock_save):
"""Test set_default_jobtype"""
if not self.use_test_files:
self.skipTest("No test files present")
_cfg = configparser.RawConfigParser()
_cfg.read(os.path.join(self.test_dir, "batch_apps.ini"))
cfg = mock.create_autospec(Configuration)
cfg._config = _cfg
cfg.jobtype = "Test"
cfg._write_file = True
cfg._log = logging.getLogger("set_default_jobtype")
Configuration.set_default_jobtype(cfg)
self.assertFalse(cfg._config.has_option('Blender', 'default_jobtype'))
self.assertTrue(cfg._config.has_option('Test', 'default_jobtype'))
cfg.jobtype = "Test"
Configuration.set_default_jobtype(cfg)
self.assertFalse(cfg._config.has_option('Blender', 'default_jobtype'))
self.assertTrue(cfg._config.has_option('Test', 'default_jobtype'))
@mock.patch(BUILTIN_OPEN)
def test_config_save_config(self, mock_open):
"""Test save_config"""
_cfg = configparser.RawConfigParser()
cfg = mock.create_autospec(Configuration)
cfg._config = _cfg
cfg._write_file = False
cfg._cfg_file = "my_file.ini"
cfg._log = logging.getLogger("save_config")
save = Configuration.save_config(cfg)
self.assertFalse(save)
cfg._write_file = True
save = Configuration.save_config(cfg)
mock_open.assert_called_with("my_file.ini", 'w')
self.assertTrue(save)
mock_open.side_effect = OSError("test")
save = Configuration.save_config(cfg)
self.assertFalse(save)
@mock.patch.object(batchapps.config.os, 'remove')
@mock.patch.object(batchapps.config.Configuration, 'save_config')
def test_config_clear_config(self, mock_save, mock_rem):
"""Test clear_config"""
_cfg = configparser.RawConfigParser()
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("clear_config")
cfg._config = _cfg
cfg._write_file = False
cfg._cfg_file = "my_file.ini"
clr = Configuration.clear_config(cfg)
self.assertTrue(clr)
mock_rem.side_effect = OSError("Boom!")
clr = Configuration.clear_config(cfg)
self.assertFalse(clr)
def test_config_endpoint(self):
"""Test endpoint"""
_cfg = configparser.RawConfigParser()
_cfg.add_section('TestApp')
_cfg.set('TestApp', 'endpoint', 'http://test')
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("endpoint")
cfg._config = _cfg
cfg.jobtype = "SomeApp"
with self.assertRaises(InvalidConfigException):
Configuration.endpoint(cfg)
_cfg.add_section('Authentication')
with self.assertRaises(InvalidConfigException):
Configuration.endpoint(cfg)
cfg.jobtype = "TestApp"
ept = Configuration.endpoint(cfg)
self.assertEqual(_cfg.get('TestApp', 'endpoint'), 'http://test')
self.assertEqual(ept, 'test')
ept = Configuration.endpoint(cfg, "https://new_test/")
self.assertEqual(_cfg.get('TestApp', 'endpoint'), 'http://test')
self.assertEqual(_cfg.get('Authentication', 'endpoint'), 'https://new_test/')
self.assertEqual(ept, 'new_test/')
def test_config_logging_level(self):
"""Test logging_level"""
_cfg = configparser.RawConfigParser()
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("logging_level")
cfg._config = _cfg
with self.assertRaises(InvalidConfigException):
Configuration.logging_level(cfg)
_cfg.add_section('Logging')
_cfg.set('Logging', 'level', '30')
ept = Configuration.logging_level(cfg)
self.assertEqual(ept, 'WARNING')
ept = Configuration.logging_level(cfg, None)
cfg._set_logging_level.assert_called_with("None")
ept = Configuration.logging_level(cfg, "warning")
cfg._set_logging_level.assert_called_with("warning")
def test_config_application(self):
"""Test depcrecated method application"""
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("application")
Configuration.application(cfg)
self.assertTrue(cfg.current_jobtype.called)
Configuration.application(cfg, "test")
cfg.current_jobtype.assert_called_with("test")
def test_config_current_jobtype(self):
"""Test current_jobtype"""
_cfg = configparser.RawConfigParser()
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("jobtype")
cfg._config = _cfg
cfg.jobtype = "TestApp"
app = Configuration.current_jobtype(cfg)
self.assertEqual(app, cfg.jobtype)
_cfg.add_section('TestApp2')
with self.assertRaises(InvalidConfigException):
Configuration.current_jobtype(cfg, 'DifferentApp')
app = Configuration.current_jobtype(cfg, "TestApp2")
self.assertEqual(app, 'TestApp2')
self.assertEqual(cfg.jobtype, 'TestApp2')
self.assertEqual(cfg.job_type, 'TestApp2')
def test_config_applications(self):
"""Test deprecated method applications"""
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("applications")
Configuration.applications(cfg)
self.assertTrue(cfg.list_jobtypes.called)
def test_config_list_jobtypes(self):
"""Test list_jobtypes"""
_cfg = configparser.RawConfigParser()
_cfg.add_section("Logging")
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("list_jobtypes")
cfg._config = _cfg
with self.assertRaises(InvalidConfigException):
apps = Configuration.list_jobtypes(cfg)
_cfg.add_section("Authentication")
apps = Configuration.list_jobtypes(cfg)
self.assertEqual(apps, [])
_cfg.add_section("Blender")
_cfg.add_section("NewTestApp")
apps = Configuration.list_jobtypes(cfg)
self.assertEqual(sorted(apps), sorted(['Blender', 'NewTestApp']))
def test_config_default_params(self):
"""Test default_params"""
_cfg = configparser.RawConfigParser()
_cfg.add_section("TestApp")
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("default_params")
cfg._config = _cfg
cfg.jobtype = "TestApp"
params = Configuration.default_params(cfg)
self.assertEqual(params, {})
cfg._config.set("TestApp", "1", "teST")
cfg._config.set("TestApp", "2", None)
cfg._config.set("TestApp", "3", [])
params = Configuration.default_params(cfg)
self.assertEqual(params, {'1':'teST', '2':None, '3':[]})
def test_config_add_application(self):
"""Testing deprecated method add_application"""
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("add_application")
Configuration.add_application(cfg, "1", "2", three="3")
cfg.add_jobtype.assert_called_with("1", three="3")
def test_config_add_jobtype(self):
"""Test add_jobtype"""
_cfg = configparser.RawConfigParser()
_cfg.add_section("TestApp")
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("add_jobtype")
cfg._config = _cfg
Configuration.add_jobtype(cfg,
"TestApp")
self.assertEqual(cfg._config.sections(), ['TestApp'])
self.assertEqual(dict(cfg._config.items('TestApp')), {})
Configuration.add_jobtype(cfg,
"TestApp2",
a="1",
b=2,
c=None)
self.assertEqual(cfg._config.sections(), ['TestApp', 'TestApp2'])
self.assertEqual(dict(cfg._config.items('TestApp2')),
{'a':'1',
'b':2,
'c':None})
def test_config_set(self):
"""Test set"""
_cfg = configparser.RawConfigParser()
_cfg.add_section("TestApp")
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("config_set")
cfg._config = _cfg
cfg.jobtype = "TestApp"
Configuration.set(cfg, "key", "value")
self.assertEqual(dict(cfg._config.items('TestApp')), {'key':'value'})
cfg.jobtype = "TestApp2"
with self.assertRaises(InvalidConfigException):
Configuration.set(cfg, "key", "value")
def test_config_get(self):
"""Test get"""
_cfg = configparser.RawConfigParser()
_cfg.add_section("TestApp")
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("config_get")
cfg._config = _cfg
cfg.jobtype = "TestApp2"
param = Configuration.get(cfg, "endpoint")
self.assertIsNone(param)
cfg.jobtype = "TestApp"
param = Configuration.get(cfg, "endpoint")
self.assertIsNone(param)
cfg._config.set("TestApp", "endpoint", "http://test")
param = Configuration.get(cfg, "endpoint")
self.assertEqual(param, "http://test")
param = Configuration.get(cfg, 42)
self.assertIsNone(param)
def test_config_remove(self):
"""Test remove"""
_cfg = configparser.RawConfigParser()
_cfg.add_section("TestApp")
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("config_remove")
cfg._config = _cfg
cfg.jobtype = "TestApp"
rem = Configuration.remove(cfg, "TestApp")
self.assertFalse(rem)
rem = Configuration.remove(cfg, "TestApp2")
self.assertFalse(rem)
rem = Configuration.remove(cfg, 42)
self.assertFalse(rem)
rem = Configuration.remove(cfg, None)
self.assertFalse(rem)
cfg._config.set("TestApp", "1", 1)
cfg._config.set("TestApp", "2", 2)
rem = Configuration.remove(cfg, "1")
self.assertTrue(rem)
self.assertEqual(dict(cfg._config.items('TestApp')), {'2':2})
_cfg.add_section("Logging")
rem = Configuration.remove(cfg, "Logging")
self.assertFalse(rem)
cfg.jobtype = "TestApp2"
rem = Configuration.remove(cfg, "TestApp")
self.assertTrue(rem)
self.assertEqual(cfg._config.sections(), ['Logging'])
def test_config_aad_config(self):
"""Test aad_config"""
_cfg = configparser.RawConfigParser()
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("aad")
cfg._config = _cfg
cfg._reformat_config.return_value = {"a":1, "b":2}
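        # replace _validate_auth with a stub that returns the Authentication section as a plain dict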
cfg._validate_auth = lambda a: dict(_cfg.items("Authentication"))
with self.assertRaises(InvalidConfigException):
Configuration.aad_config(cfg)
_cfg.add_section("Authentication")
aad = Configuration.aad_config(cfg)
self.assertEqual(aad, {"a":1, "b":2})
aad = Configuration.aad_config(cfg, client_id="a", tenant="b",
endpoint="c")
self.assertEqual(aad, {"a":1, "b":2, "client_id":"a", "tenant":"b",
"endpoint":"c"})
_cfg.remove_section("Authentication")
_cfg.add_section("Authentication")
_cfg.set("Authentication", "root", "test")
aad = Configuration.aad_config(cfg, key=3, redirect=4)
self.assertEqual(aad, {"root":"test", "unattended_key":"3",
"redirect_uri":"4"})
_cfg.remove_section("Authentication")
_cfg.add_section("Authentication")
_cfg.set("Authentication", "root", "test")
aad = Configuration.aad_config(cfg, account=3)
aad = Configuration.aad_config(cfg, account="test;test")
aad = Configuration.aad_config(cfg, account="ClientID=abc;TenantID=xyz")
self.assertEqual(aad, {"root":"test", "unattended_account":"ClientID=abc;TenantID=xyz"})
_cfg.remove_section("Authentication")
_cfg.add_section("Authentication")
_cfg.set("Authentication", "root", "test")
aad = Configuration.aad_config(cfg, account="ClientID=abc;TenantID=xyz",
client_id="foo", tenant="bar")
self.assertEqual(aad, {"root":"test", "client_id":"foo", "tenant":"bar",
"unattended_account":"ClientID=abc;TenantID=xyz"})
def test_config_validate_auth(self):
"""Test validate_auth"""
_cfg = configparser.RawConfigParser()
cfg = mock.create_autospec(Configuration)
cfg._invalid_data = lambda s: Configuration._invalid_data(cfg, s)
_cfg.add_section("Authentication")
cfg._config = _cfg
with self.assertRaises(InvalidConfigException):
Configuration._validate_auth(cfg, False)
_cfg.set("Authentication", "auth_uri", "a")
_cfg.set("Authentication", "resource", "b")
_cfg.set("Authentication", "token_uri", "c")
_cfg.set("Authentication", "endpoint", "d")
_cfg.set("Authentication", "client_id", "e")
_cfg.set("Authentication", "tenant", "f")
_cfg.set("Authentication", "root", "g")
_cfg.set("Authentication", "redirect_uri", "{redirect}")
with self.assertRaises(InvalidConfigException):
Configuration._validate_auth(cfg, False)
_cfg.set("Authentication", "redirect_uri", "h")
auth = Configuration._validate_auth(cfg, False)
with self.assertRaises(InvalidConfigException):
Configuration._validate_auth(cfg, True)
_cfg.set("Authentication", "unattended_account", None)
_cfg.set("Authentication", "unattended_key", "i")
with self.assertRaises(InvalidConfigException):
Configuration._validate_auth(cfg, True)
_cfg.set("Authentication", "unattended_account", "j")
auth = Configuration._validate_auth(cfg, True)
_cfg.remove_option("Authentication", "redirect_uri")
auth = Configuration._validate_auth(cfg, True)
def test_config_reformat_config(self):
"""Test reformat_config"""
old_cfg = configparser.RawConfigParser()
old_cfg.add_section("Authentication")
old_cfg.set("Authentication", "auth_uri",
"login.windows.net/common/oauth2/authorize")
old_cfg.set("Authentication", "resource", "https://batchapps.core.windows.net/")
old_cfg.set("Authentication", "token_uri", "login.windows.net/common/oauth2/token")
old_cfg.add_section("TestJob")
old_cfg.set("TestJob", "endpoint", "test.com")
old_cfg.set("TestJob", "client_id", "abc")
old_cfg.set("TestJob", "redirect_uri", "redirect.com")
cfg = mock.create_autospec(Configuration)
cfg._log = logging.getLogger("aad")
cfg._config = old_cfg
cfg.jobtype = "TestJob"
aad = Configuration._reformat_config(
cfg, dict(cfg._config.items("Authentication")))
self.assertEqual(aad, {"endpoint":"test.com", "client_id":"abc",
"auth_uri":"/oauth2/authorize",
"root":"login.windows.net/",
"token_uri":"/oauth2/token",
"redirect_uri":"redirect.com",
"unattended_account":None,
"unattended_key":None,
"tenant":"common",
"resource":"https://batchapps.core.windows.net/"})
old_cfg.set("Authentication", "service_principal", "")
old_cfg.set("Authentication", "service_principal_key", "")
aad = Configuration._reformat_config(
cfg, dict(cfg._config.items("Authentication")))
self.assertEqual(aad, {"endpoint":"test.com", "client_id":"abc",
"auth_uri":"/oauth2/authorize",
"root":"login.windows.net/",
"token_uri":"/oauth2/token",
"redirect_uri":"redirect.com",
"unattended_account":"",
"unattended_key":"",
"tenant":"common",
"resource":"https://batchapps.core.windows.net/"})
old_cfg.set("Authentication", "service_principal_key", "%$#")
old_cfg.set("Authentication", "service_principal",
"ClientId=xyz;TenantId=test_account.onmicrosoft.com")
aad = Configuration._reformat_config(
cfg, dict(cfg._config.items("Authentication")))
self.assertEqual(aad, {"endpoint":"test.com", "client_id":"abc",
"auth_uri":"/oauth2/authorize",
"root":"login.windows.net/",
"token_uri":"/oauth2/token",
"redirect_uri":"redirect.com",
"unattended_account":"ClientId=xyz;TenantId=test_account.onmicrosoft.com",
"unattended_key":"%$#",
"tenant":"common",
"resource":"https://batchapps.core.windows.net/"})
| 39.588381
| 105
| 0.604209
|
fe7bf77cb4f9ec979746db8548739de00655f724
| 8,628
|
py
|
Python
|
installer/dials_installer.py
|
graeme-winter/dials
|
78a696a6591e224e73204846f39771ebac0c2668
|
[
"BSD-3-Clause"
] | 2
|
2021-03-17T11:25:46.000Z
|
2021-11-18T04:20:54.000Z
|
installer/dials_installer.py
|
graeme-winter/dials
|
78a696a6591e224e73204846f39771ebac0c2668
|
[
"BSD-3-Clause"
] | null | null | null |
installer/dials_installer.py
|
graeme-winter/dials
|
78a696a6591e224e73204846f39771ebac0c2668
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import os
import shutil
import sys
import traceback
# This file needs to remain Python 2.7 compatible
# due to the underlying cctbx installer logic
installer_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
libtbx_path = os.path.join(installer_path, "lib")
if libtbx_path not in sys.path:
sys.path.append(libtbx_path)
from libtbx.auto_build import install_distribution
class installer(install_distribution.installer):
organization = "dials"
product_name = "DIALS"
dest_dir_prefix = "dials"
make_apps = []
configure_modules = ["dials", "xia2", "iota", "prime"]
include_gui_packages = True
base_package_options = ["--dials"]
installer_dir = installer_path
modules = [
# hot
"annlib",
"ccp4io",
# base
"cbflib",
"cctbx_project",
"gui_resources",
"ccp4io_adaptbx",
"annlib_adaptbx",
# dials
"dxtbx",
"dials",
"xia2",
"iota",
"prime",
]
flags = list(install_distribution.installer.flags)
try:
flags.remove("create_versioned_dispatchers")
except ValueError:
pass
def product_specific_preinstallation_hook(self):
prefix = os.path.abspath(self.options.prefix)
if prefix.startswith(installer_path):
sys.exit(
"Invalid installation option: --prefix={givenprefix}\n\n"
"Please install DIALS to a location outside of the installer directory.\n"
"Suggested alternative: --prefix={suggestedprefix}".format(
givenprefix=self.options.prefix,
suggestedprefix=os.path.dirname(prefix),
)
)
def reconfigure(self, log=None, *args, **kwargs):
"""Intercept any errors and print log excerpt"""
try:
return super(installer, self).reconfigure(log=log, *args, **kwargs)
except Exception:
if not self.options.verbose:
print("\n" + " -=-" * 20)
print("\nAn error occured during installation\n")
print("Excerpt from installation log:")
with open(log.name, "r") as fh:
for line in fh.readlines()[-30:]:
print(" :", line, end="")
print("\nThis led to ", end="")
sys.stdout.flush()
traceback.print_exc()
print("\n")
sys.exit(
"Please report this installation error to dials-support@lists.sourceforge.net"
)
def product_specific_prepackage_hook(self, directory):
"""
Remove irrelevant files from installer.
"""
self.print_header("Deflating installer")
suffixes = ["B", "KB", "MB", "GB", "TB", "PB"]
def humansize(nbytes):
if nbytes == 0:
return "0 B"
i = 0
while nbytes >= 1024 and i < len(suffixes) - 1:
nbytes /= 1024.0
i += 1
f = ("%.2f" % nbytes).rstrip("0").rstrip(".")
return "%s %s" % (f, suffixes[i])
self._cleaned_size, self._cleaned_files = 0, 0
def rmdir(subdir):
fullpath = os.path.join(directory, subdir)
if not os.path.exists(fullpath):
print("Skipping", " " * 26, subdir)
return
num_files, total_size = 0, 0
for dirpath, dirnames, filenames in os.walk(fullpath):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
num_files += 1
print(
"Removing %9s, %4d files from %s"
% (humansize(total_size), num_files, subdir)
)
shutil.rmtree(fullpath)
self._cleaned_size = self._cleaned_size + total_size
self._cleaned_files = self._cleaned_files + num_files
def rmext(subdir, extension):
fullpath = os.path.join(directory, subdir)
if not os.path.exists(fullpath):
print("Skipping *%s" % extension, " " * 26, subdir)
return
filelist, total_size = [], 0
for dirpath, dirnames, filenames in os.walk(fullpath):
for f in filenames:
if f.endswith(extension):
fp = os.path.join(dirpath, f)
filelist.append(fp)
total_size += os.path.getsize(fp)
print(
"Removing %9s, %4d %s files from %s"
% (humansize(total_size), len(filelist), extension, subdir)
)
for f in filelist:
os.remove(f)
self._cleaned_size = self._cleaned_size + total_size
self._cleaned_files = self._cleaned_files + len(filelist)
def rmfile(filename):
fullpath = os.path.join(directory, filename)
if not os.path.exists(fullpath):
print("Skipping", " " * 26, filename)
return
filesize = os.path.getsize(fullpath)
print("Removing %9s, file %s" % (humansize(filesize), filename))
os.remove(fullpath)
self._cleaned_size = self._cleaned_size + filesize
self._cleaned_files = self._cleaned_files + 1
# Deduce matplotlib path
# (base/lib/python2.??/site-packages/matplotlib-????/matplotlib)
# (base/Python.framework/Versions/?.?/lib/python?.?/site-packages/matplotlib-(...) on MacOS)
try:
import inspect
import matplotlib
matplotpath = os.path.dirname(
os.path.dirname(inspect.getsourcefile(matplotlib))
)
relpath = []
matplotpath, d = os.path.split(matplotpath)
relpath.append(d)
while d and (d != "base"):
matplotpath, d = os.path.split(matplotpath)
relpath.append(d)
if d == "base":
relpath.reverse()
# delete matplotlib tests
matplotpath = os.path.join(*relpath)
rmdir(os.path.join(matplotpath, "matplotlib", "tests"))
rmdir(os.path.join(matplotpath, "mpl_toolkits", "tests"))
# ...while we're here
sitepath = os.path.dirname(matplotpath)
rmdir(os.path.join(sitepath, "numpy/core/tests"))
rmdir(os.path.join(sitepath, "numpy/doc"))
rmdir(os.path.join(sitepath, "numpy/distutils/tests"))
rmdir(os.path.join(sitepath, "numpy/f2py/docs"))
pythonpath = os.path.dirname(sitepath)
rmdir(os.path.join(pythonpath, "test"))
except Exception:
print("Could not deduce python package paths")
rmdir("base/man")
rmdir("base/share/doc")
rmdir("base/share/gtk-doc")
rmdir("base/share/hdf5_examples")
rmdir("base/share/man")
rmdir("build/dials_data")
rmdir("build/precommitbx")
rmdir("build/regression_data")
rmdir("build/xia2_regression")
rmext("build", ".o")
for f in ("setpaths", "setpaths_debug", "setpaths_all", "unsetpaths"):
for ext in (".sh", ".csh"):
rmfile(os.path.join("build", f + ext))
for p in (
"chrono",
"date_time",
"detail",
"filesystem",
"program_options",
"python",
"system",
"thread",
"timer",
):
rmdir(os.path.join("modules/boost/libs", p, "example"))
rmdir(os.path.join("modules/boost/libs", p, "doc"))
rmdir(os.path.join("modules/boost/libs", p, "test"))
rmdir(os.path.join("modules/boost/libs", p, "tutorial"))
rmdir("modules/boost/libs/date_time/xmldoc")
rmdir("modules/cbflib/doc")
rmdir("modules/cbflib/examples")
rmdir("modules/cbflib/ply-3.2/doc")
rmdir("modules/cbflib/ply-3.2/example")
rmdir("modules/cbflib/ply-3.2/test")
rmext("modules/cbflib", ".cbf")
rmdir("modules/clipper/examples")
print("-" * 60)
print(
"Deleted %d files, decrufting installation by %s\n"
% (self._cleaned_files, humansize(self._cleaned_size))
)
if __name__ == "__main__":
installer(sys.argv[1:]).install()
| 36.714894
| 100
| 0.540102
|
dbb498b9546347a0ec6943ce26f33b85f416a606
| 2,707
|
py
|
Python
|
scraper.py
|
pconwell/covid-scraper
|
26b77619ba6c074915270c15e5735ebb4a1dbbe8
|
[
"MIT"
] | null | null | null |
scraper.py
|
pconwell/covid-scraper
|
26b77619ba6c074915270c15e5735ebb4a1dbbe8
|
[
"MIT"
] | null | null | null |
scraper.py
|
pconwell/covid-scraper
|
26b77619ba6c074915270c15e5735ebb4a1dbbe8
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import re
today = datetime.today()
today_url = f"https://www.asafenashville.org/updates/mphd-daily-covid-19-update-for-{today.strftime('%B').lower()}-{today.strftime('%d').lstrip('0')}/"
yesterday_url = f"https://www.asafenashville.org/updates/mphd-daily-covid-19-update-for-{(today - timedelta(1)).strftime('%B').lower()}-{(today - timedelta(1)).strftime('%d').lstrip('0')}/"
print(today_url)
today_page = requests.get(today_url)
yesterday_page = requests.get(yesterday_url)
data = []
for page in [today_page, yesterday_page]:
soup = BeautifulSoup(page.content, 'html.parser')
#print(soup)
table = soup.table
#print(table)
table_rows = table.find_all('tr')
for tr in table_rows:
td = tr.find_all('td')
row = [i.text for i in td]
if row[0] in ["Total", "Inactive/Recovered", "Deaths", "Total active cases"]:
# print(row)
data.append(int(row[1].replace(',', '')))
# print("\n")
total_delta = (data[0] - data[4])
inactive_delta = (data[1] - data[5])
deaths_delta = (data[2] - data[6])
active_delta = (data[3] - data[7])
print(f"Cases: {data[0]:,} ({total_delta:+d})\nRecovered/Inactive: {data[1]:,} ({inactive_delta:+d})\nDeaths: {data[2]:,} ({deaths_delta:+d})\nActive: {data[3]:,} ({active_delta:+d})")
##############################################################################################
##############################################################################################
##############################################################################################
key_metric_url = "https://www.asafenashville.org/reopening-key-metrics"
metric_page = requests.get(key_metric_url)
soup = BeautifulSoup(metric_page.content, 'html.parser')
info = soup(text=re.compile('Current: '))
metrics = []
for i in info:
metrics.append(i.split(":")[1].split(" ")[1])
print(f"Transmission Rate: {metrics[0]}\nHospital Floor Bed Capacity: {metrics[4]} (Goal: 20%)\nHospital ICU Bed Capacity: {metrics[5]} (Goal: 20%)\nNew Cases per 100K Residents: {metrics[6]} (Goal: <10)\n7-Day Positive Test Rate: {metrics[7]} (Goal: <10)")
##############################################################################################
##############################################################################################
##############################################################################################
state__pdf_url = "https://www.tn.gov/content/tn/health/cedep/ncov/data.html"
state_data_url = "https://www.tn.gov/health/cedep/ncov.html"
page = requests.get(key_metric_url)
soup = BeautifulSoup(page.content, 'html.parser')
| 41.015152
| 257
| 0.538604
|
06ce670924b1eb60b293c39cd7a2d01bf9f3229e
| 490
|
py
|
Python
|
web/migrations/0001_initial.py
|
turing0/cervical-cell-classification
|
5d01d7bab62356046e7cb5cc818fd73d00a1ae74
|
[
"MIT"
] | null | null | null |
web/migrations/0001_initial.py
|
turing0/cervical-cell-classification
|
5d01d7bab62356046e7cb5cc818fd73d00a1ae74
|
[
"MIT"
] | null | null | null |
web/migrations/0001_initial.py
|
turing0/cervical-cell-classification
|
5d01d7bab62356046e7cb5cc818fd73d00a1ae74
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2022-01-14 07:06
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('headimg', models.FileField(upload_to='img/')),
],
),
]
| 22.272727
| 117
| 0.573469
|
3cea52da8ac5bc1f024c704bbccfeafdd4f28264
| 2,612
|
py
|
Python
|
keyboards/inlinekb.py
|
vogelfenx/storagebot
|
64ab07b068bf645d7cdf5bb1cd5db91c0e2a9228
|
[
"MIT"
] | null | null | null |
keyboards/inlinekb.py
|
vogelfenx/storagebot
|
64ab07b068bf645d7cdf5bb1cd5db91c0e2a9228
|
[
"MIT"
] | null | null | null |
keyboards/inlinekb.py
|
vogelfenx/storagebot
|
64ab07b068bf645d7cdf5bb1cd5db91c0e2a9228
|
[
"MIT"
] | null | null | null |
from aiogram.types import InlineKeyboardMarkup
from aiogram.types import InlineKeyboardButton
from geopy import distance
from utils.get_nearest_storage_boxes import get_nearest_storage_boxes
def select_storage_kb(location):
keyboard = InlineKeyboardMarkup()
for box_id, box_location in get_nearest_storage_boxes(location).items():
box_address = box_location.get("address")
distance_to_user = box_location.get("distance_to_user")
if distance_to_user:
keyboard.add(
InlineKeyboardButton(
text=(f'{box_address}, {distance_to_user}'),
callback_data=box_id,
)
)
else:
keyboard.add(
InlineKeyboardButton(
text=box_address,
callback_data=box_id,
)
)
return keyboard
def what_to_store_kb():
keyboard = InlineKeyboardMarkup()
keyboard.add(
InlineKeyboardButton(
            text='сезонные вещи',  # "seasonal items"
callback_data='season_things'
)
)
keyboard.add(
InlineKeyboardButton(
            text='другое',  # "other"
callback_data='another_things'
)
)
return keyboard
def season_things_kb():
keyboard = InlineKeyboardMarkup()
keyboard.add(
InlineKeyboardButton(
            text='лыжи',  # "skis"
callback_data='ski'
)
)
keyboard.add(
InlineKeyboardButton(
            text='сноуборд',  # "snowboard"
callback_data='snowboard'
)
)
keyboard.add(
InlineKeyboardButton(
            text='велосипед',  # "bicycle"
callback_data='bicycle'
)
)
keyboard.add(
InlineKeyboardButton(
            text='комплект колес',  # "set of wheels"
callback_data='wheel'
)
)
return keyboard
def weeks_or_months_kb():
keyboard = InlineKeyboardMarkup()
keyboard.add(
InlineKeyboardButton(
            text='недели',  # "weeks"
callback_data='weeks'
)
)
keyboard.add(
InlineKeyboardButton(
            text='месяцы',  # "months"
callback_data='months'
)
)
return keyboard
def pay_kb():
keyboard = InlineKeyboardMarkup()
keyboard.add(
InlineKeyboardButton(
            text='забронировать',  # "book"
callback_data='book'
)
)
return keyboard
def back_kb():
keyboard = InlineKeyboardMarkup()
keyboard.add(
InlineKeyboardButton(
            text='Назад',  # "Back"
callback_data='back'
)
)
return keyboard
| 20.092308
| 76
| 0.563553
|
641a960901c93ce060e62d4aa064315e593c4bbb
| 7,163
|
py
|
Python
|
cpo_analysis/cpo_analysis/plot_from_file.py
|
utiasASRL/cpo
|
b60a14d641efb6a533322dd12b43bb37ddaacb2a
|
[
"Apache-2.0"
] | 2
|
2021-09-15T17:06:50.000Z
|
2022-03-29T09:32:56.000Z
|
cpo_analysis/cpo_analysis/plot_from_file.py
|
utiasASRL/cpo
|
b60a14d641efb6a533322dd12b43bb37ddaacb2a
|
[
"Apache-2.0"
] | null | null | null |
cpo_analysis/cpo_analysis/plot_from_file.py
|
utiasASRL/cpo
|
b60a14d641efb6a533322dd12b43bb37ddaacb2a
|
[
"Apache-2.0"
] | 2
|
2021-09-15T17:06:37.000Z
|
2022-03-29T09:06:55.000Z
|
#!/usr/bin/env python
import csv
import os.path as osp
import argparse
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from math import sqrt
from pyproj import Proj
import seaborn as sns
sns.set_style("whitegrid")
matplotlib.use("TkAgg") # Can change to 'Agg' for non-interactive mode
matplotlib.rcParams["pdf.fonttype"] = 42
matplotlib.rcParams["ps.fonttype"] = 42
# difference between starts of Unix time (Jan.1/70) and GPS time (Jan.6/80)
UNIX_GPS_OFFSET = 315964800
LEAP_SECONDS = 18
def safe_float(field):
try:
return float(field)
except ValueError:
return float('NaN')
def safe_int(field):
try:
return int(field)
except ValueError:
return 0
def read_gpgga(gga_path, gps_day, proj_origin, start_time=0.0, end_time=4999999999.9):
"""Read file of ASCII GPGGA messages and return measurements as array in UTM coordinates"""
# note: transverse mercator projection very slightly different from Euclidean ENU but negligible if position near
# origin (e.g. < 2km)
projection = Proj(
"+proj=etmerc +ellps=WGS84 +lat_0={0} +lon_0={1} +x_0=0 +y_0=0 +z_0={2} +k_0=1".format(proj_origin[0],
proj_origin[1],
proj_origin[2]))
day_seconds = UNIX_GPS_OFFSET + gps_day * 24 * 3600
with open(gga_path, newline='') as resultfile:
spamreader = csv.reader(resultfile, delimiter=',', quotechar='|')
tmp = []
distance_along_path = 0
for i, row in enumerate(spamreader):
if row[0] != "$GPGGA":
continue
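            # NMEA GPGGA encodes latitude as ddmm.mmmm and longitude as dddmm.mmmm;
            # convert both to decimal degrees before projecting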
lat_tmp = row[2]
lat = safe_float(lat_tmp[0:2]) + safe_float(lat_tmp[2:]) / 60.0
long_tmp = row[4]
long = safe_float(long_tmp[0:3]) + safe_float(long_tmp[3:]) / 60.0
if row[5] == 'W':
long = -long
z = safe_float(row[9])
x, y = projection(long, lat)
fix_type = safe_int(row[6])
time_of_day = row[1]
timestamp = day_seconds + safe_float(time_of_day[0:2]) * 3600.0 + safe_float(
time_of_day[2:4]) * 60.0 + safe_float(time_of_day[4:])
if start_time <= timestamp <= end_time:
if len(tmp) > 0:
prev_x = tmp[-1][1]
prev_y = tmp[-1][2]
dist_added = sqrt((x - prev_x) ** 2 + (y - prev_y) ** 2)
distance_along_path += dist_added
tmp.append([timestamp, x, y, z, fix_type, long, lat, distance_along_path])
return np.array(tmp)
def main():
parser = argparse.ArgumentParser(description='Plot integrated carrier phase odometry estimates.')
parser.add_argument('--dataset', '-d', type=str, help='Name of dataset to retrieve groundtruth file.',
default='feb15c')
parser.add_argument('--groundtruth_dir', '-g', type=str, help='Path to directory with RTK ground truth (optional)',
default='~/cpo_workspace/src/cpo/cpo_analysis/data/groundtruth/')
parser.add_argument('--estimates_path', '-e', type=str, help='Path to our TDCP estimates CSV file.',
default='~/cpo_workspace/src/cpo/cpo_analysis/data/estimates/cpo.csv')
args = parser.parse_args()
plt.rc('axes', labelsize=12, titlesize=14)
plt.rcParams["font.family"] = "serif"
dataset = args.dataset
trim_start_rows = 10 # optionally can be used to trim off part before robot begins driving
estimates_path = osp.expanduser(args.estimates_path)
enu_origin = np.genfromtxt(estimates_path, delimiter=',', max_rows=1)
estimates = np.genfromtxt(estimates_path, delimiter=',', skip_header=1 + trim_start_rows)
# get start and end time of SWF data to get correct section from Doppler, ground truth
start_time = safe_float(estimates[0, 0])
end_time = safe_float(estimates[-1, 0])
gt_dir = osp.expanduser(args.groundtruth_dir)
gt_file = dataset + "_gga.ASC"
# GPS day required to parse ground truth. We determine it here.
if dataset[:5] == "feb10":
day = 2144 * 7 + 3 # Feb.10/21
elif dataset[:5] == "feb15":
day = 2145 * 7 + 1 # Feb.15/21
else:
raise Exception("Unknown dataset - {0}".format(dataset))
r_gt = read_gpgga(osp.join(gt_dir, gt_file), day, enu_origin, start_time=start_time, end_time=end_time)
if not len(r_gt) > 0:
raise ValueError('Ground truth between start and end time empty. Check if using the correct ground truth file.')
# extract portion of GPS that had RTK-Fixed fix (the gold standard)
r_rtk = r_gt[r_gt[:, 4] == 4]
# overhead plot
fig1 = plt.figure(1, figsize=[9, 4.5])
fig1.subplots_adjust(left=0.10, bottom=0.10, right=0.97, top=0.92)
# plt.plot(r_gt[:, 1] - r_gt[0, 1], r_gt[:, 2] - r_gt[0, 2], label='GPS Ground Truth', c='C0', alpha=0.5)
plt.plot(r_rtk[:, 1] - r_gt[0, 1], r_rtk[:, 2] - r_gt[0, 2], label='RTK Ground Truth', c='C0')
plt.plot(estimates[:, 2] - estimates[0, 2], estimates[:, 3] - estimates[0, 3], label='Estimated', c='C1')
plt.axis('equal')
plt.title('Overhead View - {0} Dataset'.format(dataset))
plt.xlabel('Easting (m)')
plt.ylabel('Northing (m)')
plt.legend()
# error plot
tmp = []
for row in r_gt:
idx_np = np.where(estimates[:, 0] == row[0])
if idx_np[0].size != 0:
idx = safe_int(idx_np[0][0])
tmp.append([estimates[idx, 0], # GPS ref. timestamp
row[1], # ground truth x (down-sampled)
row[2], # "" y
row[3], # "" z
(estimates[idx, 2] - estimates[0, 2]) - (row[1] - r_gt[0, 1]), # estimator error x
(estimates[idx, 3] - estimates[0, 3]) - (row[2] - r_gt[0, 2]), # "" y
(estimates[idx, 4] - estimates[0, 4]) - (row[3] - r_gt[0, 3]), # "" z
row[7], # distance along path
])
relative_errors = np.array(tmp)
fig2, ax2 = plt.subplots(nrows=3, ncols=1, figsize=[8, 8])
fig2.subplots_adjust(left=0.10, bottom=0.06, right=0.96, top=0.93)
ax2[0].plot(relative_errors[:, 7] - relative_errors[0, 7], relative_errors[:, 4], c='C0') # x errors
ax2[1].plot(relative_errors[:, 7] - relative_errors[0, 7], relative_errors[:, 5], c='C0') # y errors
ax2[2].plot(relative_errors[:, 7] - relative_errors[0, 7],
np.sqrt(relative_errors[:, 4] ** 2 + relative_errors[:, 5] ** 2), c='C0') # planar errors
ax2[0].set_title('Position Errors wrt Ground Truth - {0}'.format(dataset))
ax2[2].set_xlabel('Distance Along Path (m)')
ax2[0].set_ylabel('x Error (m)')
ax2[0].set_ylim([-1.6, 1.6])
ax2[1].set_ylabel('y Error (m)')
ax2[1].set_ylim([-1.6, 1.6])
ax2[2].set_ylabel('2D Position Error (m)')
ax2[2].set_ylim([0, 2])
plt.show()
if __name__ == '__main__':
main()
| 40.01676
| 120
| 0.582577
|
108c6b821c4b722d5f2c2a204df349419002f76e
| 3,398
|
py
|
Python
|
export/tools.py
|
sigmacms/django-export
|
b581090ba35a06be72d93e077961d169f134af2e
|
[
"BSD-3-Clause"
] | null | null | null |
export/tools.py
|
sigmacms/django-export
|
b581090ba35a06be72d93e077961d169f134af2e
|
[
"BSD-3-Clause"
] | null | null | null |
export/tools.py
|
sigmacms/django-export
|
b581090ba35a06be72d93e077961d169f134af2e
|
[
"BSD-3-Clause"
] | null | null | null |
import mimetypes
from django import template
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import helpers
from django.http import HttpResponse
from django.shortcuts import render
from django.utils.translation import ugettext as _
import object_tools
from export import forms, tasks, utils
class Export(object_tools.ObjectTool):
name = 'export'
label = 'Export'
help_text = 'Export filtered objects for download.'
form_class = forms.Export
def serialize(self, format, queryset, fields=[]):
return utils.serialize(format, queryset, fields)
def gen_filename(self, format):
app_label = self.model._meta.app_label
object_name = self.model._meta.object_name.lower()
if format == 'python':
format = 'py'
return '%s-%s-%s.%s' % (self.name, app_label, object_name, format)
def order(self, queryset, by, direction):
return utils.order_queryset(queryset, by, direction)
def has_celery(self):
return 'djcelery' in getattr(settings, 'INSTALLED_APPS', [])
def get_queryset(self, form):
return utils.get_queryset(form, self.model)
def get_data(self, form):
queryset = self.get_queryset(form)
format = form.cleaned_data['export_format']
fields = form.cleaned_data['export_fields']
data = self.serialize(format, queryset, fields)
return format, data
def export_response(self, form):
format, data = self.get_data(form)
filename = self.gen_filename(format)
response = HttpResponse(
data, content_type=mimetypes.guess_type(filename)[0]
)
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
def mail_response(self, request, extra_context=None):
form = extra_context['form']
format = form.cleaned_data['export_format']
filename = self.gen_filename(format)
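        # bundle the serializer and queryset parameters so they can be handed to the (possibly Celery) mail export task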
serializer_kwargs = {
'fields': form.cleaned_data['export_fields'],
'format': format
}
query_kwargs = {
'form': form,
'model': self.model
}
# if celery is available send the task, else run as normal
if self.has_celery():
return tasks.mail_export.delay(
request.user.email, filename, serializer_kwargs, query_kwargs
)
return utils.mail_export(
request.user.email, filename, serializer_kwargs, query_kwargs
)
def view(self, request, extra_context=None, process_form=True):
form = extra_context['form']
if form.is_valid() and process_form:
if '_export_mail' in request.POST:
message = _('The export has been generated and will be emailed \
to %s.' % (request.user.email))
messages.add_message(request, messages.SUCCESS, message)
self.mail_response(request, extra_context)
else:
return self.export_response(form)
adminform = helpers.AdminForm(form, form.fieldsets, {})
context = {'adminform': adminform}
context.update(extra_context or {})
return render(
request,
'export/export_form.html',
context,
)
object_tools.tools.register(Export)
| 32.361905
| 80
| 0.633902
|
0f637d1396ca478388d4d7f0a4a7c4599907bd56
| 1,992
|
py
|
Python
|
boyd_bot/__init__.py
|
ineshbose/boyd_bot_messenger
|
90603a421b1819875672eff75f43c3ea356d5369
|
[
"MIT"
] | 7
|
2020-06-22T11:38:23.000Z
|
2020-09-02T14:56:15.000Z
|
boyd_bot/__init__.py
|
ineshbose/boyd_bot_messenger
|
90603a421b1819875672eff75f43c3ea356d5369
|
[
"MIT"
] | 24
|
2020-07-05T10:10:06.000Z
|
2021-11-17T14:37:14.000Z
|
boyd_bot/__init__.py
|
ineshbose/boyd_bot_messenger
|
90603a421b1819875672eff75f43c3ea356d5369
|
[
"MIT"
] | 2
|
2020-07-05T16:16:00.000Z
|
2020-09-21T22:53:18.000Z
|
# flake8: noqa
import os
import logging
from flask import Flask, Blueprint
app = Flask(__name__)
app.logger.setLevel(logging.INFO)
app_url = os.environ.get("APP_URL", "http://127.0.0.1:5000")
app.config["SECRET_KEY"] = os.environ.get("FLASK_KEY")
app.config["DEBUG"] = "127.0.0.1" in app_url
from . import _config
app.logger.handlers[0].setFormatter(logging.Formatter(app.config["LOG"]["FORMAT"]))
blueprint = Blueprint("boyd_bot", __name__, template_folder="templates")
from . import views
from .forms import RegisterForm
webhook_token = os.environ.get("VERIFY_TOKEN")
wb_arg_name = os.environ.get("WB_ARG_NAME")
from .timetable import Timetable
timetable = Timetable()
from .services.guard import Guard
guard = Guard(key=os.environ.get("GUARD_KEY"))
from .services.database import Database
db = Database(
db_token=os.environ.get("DB_MAIN_TOKEN"),
key1=os.environ.get("DB_KEY1", "key1"),
key2=os.environ.get("DB_KEY2", "key2"),
)
from .services.parser import Parser
parser = Parser()
from .services.platform import Platform
platform = Platform(platform_token=os.environ.get("PLATFORM_TOKEN"))
from .services.scheduler import Scheduler
if app.config["FEATURES"]["SCHEDULER"]:
scheduler = Scheduler()
scheduler.run()
def log(message):
app.logger.info(message)
from .bot import webhook, new_user_registration
app.register_blueprint(blueprint, url_prefix=app.config["URL_ROOT"])
@app.after_request
def secure_http_header(response):
response.headers["Strict-Transport-Security"] = "max-age=31536000"
response.headers["Content-Security-Policy"] = "default-src 'self' *"
response.headers["X-Frame-Options"] = "SAMEORIGIN"
response.headers["X-Content-Type-Options"] = "nosniff"
response.headers["Referrer-Policy"] = "same-origin"
response.headers["Feature-Policy"] = "geolocation 'none'"
response.headers["Permissions-Policy"] = "geolocation=()"
response.headers["Expect-CT"] = "max-age=0"
return response
| 24
| 83
| 0.730924
|
60fcf6b1131ae1ac025f5ac1def402ad1d0daa0d
| 6,805
|
py
|
Python
|
altair/utils/plugin_registry.py
|
hugovk/altair
|
a3c9f06790f7a8c5c7e2c98278d0f69e4630b5be
|
[
"BSD-3-Clause"
] | 1
|
2022-03-13T21:42:09.000Z
|
2022-03-13T21:42:09.000Z
|
altair/utils/plugin_registry.py
|
RoyMachineLearning/altair
|
74a765b373694776e63d224d99536975cc173810
|
[
"BSD-3-Clause"
] | null | null | null |
altair/utils/plugin_registry.py
|
RoyMachineLearning/altair
|
74a765b373694776e63d224d99536975cc173810
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Generic, TypeVar, cast
import entrypoints
from toolz import curry
PluginType = TypeVar('PluginType')
class PluginEnabler(object):
"""Context manager for enabling plugins
This object lets you use enable() as a context manager to
temporarily enable a given plugin::
with plugins.enable('name'):
do_something() # 'name' plugin temporarily enabled
# plugins back to original state
"""
def __init__(self, registry, name, **options):
self.registry = registry
self.name = name
self.options = options
self.original_state = registry._get_state()
self.registry._enable(name, **options)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.registry._set_state(self.original_state)
def __repr__(self):
return "{0}.enable({1!r})".format(self.registry.__class__.__name__, self.name)
class PluginRegistry(Generic[PluginType]):
"""A registry for plugins.
This is a plugin registry that allows plugins to be loaded/registered
in two ways:
1. Through an explicit call to ``.register(name, value)``.
2. By looking for other Python packages that are installed and provide
a setuptools entry point group.
When you create an instance of this class, provide the name of the
entry point group to use::
        reg = PluginRegistry('my_entrypoint_group')
"""
# this is a mapping of name to error message to allow custom error messages
# in case an entrypoint is not found
entrypoint_err_messages = {}
# global settings is a key-value mapping of settings that are stored globally
# in the registry rather than passed to the plugins
_global_settings = {}
def __init__(self, entry_point_group='', plugin_type=object):
# type: (str, Any) -> None
"""Create a PluginRegistry for a named entry point group.
Parameters
==========
entry_point_group: str
The name of the entry point group.
plugin_type: object
A type that will optionally be used for runtime type checking of
loaded plugins using isinstance.
"""
self.entry_point_group = entry_point_group
self.plugin_type = plugin_type
self._active = None # type: None
self._active_name = '' # type: str
self._plugins = {} # type: dict
self._options = {} # type: dict
self._global_settings = self.__class__._global_settings.copy() # type: dict
def register(self, name, value):
# type: (str, Union[PluginType, None]) -> PluginType
"""Register a plugin by name and value.
This method is used for explicit registration of a plugin and shouldn't be
used to manage entry point managed plugins, which are auto-loaded.
Parameters
==========
name: str
The name of the plugin.
value: PluginType or None
The actual plugin object to register or None to unregister that plugin.
Returns
=======
plugin: PluginType
The plugin that was registered or unregistered.
"""
if value is None and name in self._plugins:
return self._plugins.pop(name)
else:
assert isinstance(value, self.plugin_type)
self._plugins[name] = value
return value
def names(self):
# type: () -> List[str]
"""List the names of the registered and entry points plugins."""
exts = list(self._plugins.keys())
more_exts = [ep.name for ep in entrypoints.get_group_all(self.entry_point_group)]
exts.extend(more_exts)
return sorted(set(exts))
def _get_state(self):
"""Return a dictionary representing the current state of the registry"""
return {'_active': self._active,
'_active_name': self._active_name,
'_plugins': self._plugins.copy(),
'_options': self._options.copy(),
'_global_settings': self._global_settings.copy()}
def _set_state(self, state):
"""Reset the state of the registry"""
assert set(state.keys()) == {'_active', '_active_name',
'_plugins', '_options', '_global_settings'}
for key, val in state.items():
setattr(self, key, val)
def _enable(self, name, **options):
# type: (str, **Any) -> None
if name not in self._plugins:
try:
ep = entrypoints.get_single(self.entry_point_group, name)
except entrypoints.NoSuchEntryPoint:
if name in self.entrypoint_err_messages:
raise ValueError(self.entrypoint_err_messages[name])
else:
raise
value = cast(PluginType, ep.load())
assert isinstance(value, self.plugin_type)
self.register(name, value)
self._active_name = name
self._active = self._plugins[name]
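        # options that match known global settings are stored on the registry itself; the rest are passed to the plugin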
for key in set(options.keys()) & set(self._global_settings.keys()):
self._global_settings[key] = options.pop(key)
self._options = options
def enable(self, name=None, **options):
# type: (str, **Any) -> PluginEnabler
"""Enable a plugin by name.
This can be either called directly, or used as a context manager.
Parameters
----------
name : string (optional)
The name of the plugin to enable. If not specified, then use the
current active name.
**options :
Any additional parameters will be passed to the plugin as keyword
arguments
Returns
-------
PluginEnabler:
An object that allows enable() to be used as a context manager
"""
if name is None:
name = self.active
return PluginEnabler(self, name, **options)
@property
def active(self):
# type: () -> str
"""Return the name of the currently active plugin"""
return self._active_name
@property
def options(self):
# type: () -> str
"""Return the current options dictionary"""
return self._options
def get(self):
# type: () -> PluginType
"""Return the currently active plugin."""
if self._options:
return curry(self._active, **self._options)
else:
return self._active
def __repr__(self):
# type: () -> str
return ("{0}(active={1!r}, registered={2!r})"
"".format(self.__class__.__name__,
self._active_name,
list(self.names())))
| 34.025
| 89
| 0.597061
|
04aae694b6d0589ecc374e71bb4a85a32b868254
| 4,040
|
py
|
Python
|
networks/pose_cnn.py
|
xdr940/DeepSfM
|
c5d97fcbc075b80ff9c0117a9f89693707fb32f4
|
[
"MIT"
] | null | null | null |
networks/pose_cnn.py
|
xdr940/DeepSfM
|
c5d97fcbc075b80ff9c0117a9f89693707fb32f4
|
[
"MIT"
] | null | null | null |
networks/pose_cnn.py
|
xdr940/DeepSfM
|
c5d97fcbc075b80ff9c0117a9f89693707fb32f4
|
[
"MIT"
] | null | null | null |
# Copyright Niantic 2019. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the Monodepth2 licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
class PoseCNN(nn.Module):
def __init__(self, num_input_frames):
super(PoseCNN, self).__init__()
self.num_input_frames = num_input_frames
self.convs = {}
self.convs[0] = nn.Conv2d(3 * num_input_frames, 16, 7, 2, 3)
self.convs[1] = nn.Conv2d(16, 32, 5, 2, 2)
self.convs[2] = nn.Conv2d(32, 64, 3, 2, 1)
self.convs[3] = nn.Conv2d(64, 128, 3, 2, 1)
self.convs[4] = nn.Conv2d(128, 256, 3, 2, 1)
self.convs[5] = nn.Conv2d(256, 256, 3, 2, 1)
self.convs[6] = nn.Conv2d(256, 256, 3, 2, 1)
self.pose_conv = nn.Conv2d(256, 6 * (num_input_frames - 1), 1)
self.num_convs = len(self.convs)
self.relu = nn.ReLU(True)
self.net = nn.ModuleList(list(self.convs.values()))
def forward(self, out):
for i in range(self.num_convs):
out = self.convs[i](out)
out = self.relu(out)
out = self.pose_conv(out)
out = out.mean(3).mean(2)
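        # scale the raw regression output and reshape to (batch, num_frames - 1, 1, 6): axis-angle + translation per frame pair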
out = 0.01 * out.view(-1, self.num_input_frames - 1, 1, 6)
return out
#axisangle = out[..., :3]
#translation = out[..., 3:]
#return axisangle, translation
class PoseC3D(nn.Module):
def __init__(self, num_input_frames):
super(PoseC3D, self).__init__()
self.num_input_frames = num_input_frames
self.convs = {}
self.conv3d = nn.Conv3d(3, 16, kernel_size=(3, 7, 7), stride=(1, 1, 1), padding=(1, 3, 3))
self.bn3d = nn.BatchNorm3d(16)
self.relu = nn.ReLU(inplace=True)
self.maxpool3d = nn.MaxPool3d(kernel_size=(3, 2, 2), stride=(1, 2, 2))
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
#self.convs[0] = nn.Conv2d(3 * num_input_frames, 16, 7, 2, 3)
self.convs[1] = nn.Conv2d(16, 32, 5, 2, 2)
self.convs[2] = nn.Conv2d(32, 64, 3, 2, 1)
self.convs[3] = nn.Conv2d(64, 128, 3, 2, 1)
self.convs[4] = nn.Conv2d(128, 256, 3, 2, 1)
self.convs[5] = nn.Conv2d(256, 256, 3, 2, 1)
self.convs[6] = nn.Conv2d(256, 256, 3, 2, 1)
self.pose_conv = nn.Conv2d(256, 6 * (num_input_frames - 1), 1)
self.num_convs = len(self.convs)
self.relu = nn.ReLU(True)
self.net = nn.ModuleList(list(self.convs.values()))
def forward(self, x):
x = self.conv3d(x)
x = self.bn3d(x)
x = self.relu(x)
x = self.maxpool3d(x)
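        # the 3D max-pool reduces the temporal dimension to size 1; squeeze it so the 2D convs below apply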
x = torch.squeeze(x, dim=2)
for i in range(1,self.num_convs):#[8,64,320,96]
x = self.convs[i](x)
x = self.relu(x)
x = self.pose_conv(x)
out = x.mean(3).mean(2)
out = 0.01 * out.view(-1, self.num_input_frames - 1, 1, 6)
return out
def getPoseCNN(mode):
if mode=="3in-cnn":
return PoseCNN(3)
elif mode == "3din-cnn":
return PoseC3D(3)
elif mode =='2in-cnn':
return PoseCNN(2)
if __name__ == '__main__':
network = getPoseCNN("3d-in")
example_inputs = torch.rand(8, 3,3, 640, 192)
out = network(example_inputs)
print(out.shape)
#
encoder_out = torch.onnx.export(model=network,
args=example_inputs,
input_names=["input"],
f= "./pose_cnn_3in.onnx",
# output_names=['f0', 'f1', 'f2', 'f3', 'f4'],
verbose=True,
                                    export_params=True  # export the trained weights together with the graph
)
| 30.839695
| 99
| 0.52599
|
9fdd77b1c998f941ccfc56122488ac03acd9282e
| 293
|
py
|
Python
|
naeval/log.py
|
sdspieg/naeval
|
52c4a508bf212b95d4e610cfe1b5e23b8ca94d2f
|
[
"MIT"
] | 36
|
2020-03-22T09:37:10.000Z
|
2022-01-17T14:49:30.000Z
|
naeval/log.py
|
sdspieg/naeval
|
52c4a508bf212b95d4e610cfe1b5e23b8ca94d2f
|
[
"MIT"
] | 11
|
2020-03-25T09:39:45.000Z
|
2020-08-16T05:37:02.000Z
|
naeval/log.py
|
sdspieg/naeval
|
52c4a508bf212b95d4e610cfe1b5e23b8ca94d2f
|
[
"MIT"
] | 6
|
2020-05-16T05:52:04.000Z
|
2022-01-16T06:45:29.000Z
|
import sys
from datetime import datetime
def dot():
print('.', end='', file=sys.stderr, flush=True)
def log(format, *args):
message = format % args
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(
'[%s] %s' % (now, message),
file=sys.stderr
)
| 17.235294
| 54
| 0.552901
|
b2be507ab10eda881408db742029c1a7b1053ac4
| 332
|
py
|
Python
|
python_modules/dagster-graphql/dagster_graphql/schema/pipelines/status.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 1
|
2021-01-31T19:16:29.000Z
|
2021-01-31T19:16:29.000Z
|
python_modules/dagster-graphql/dagster_graphql/schema/pipelines/status.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-graphql/dagster_graphql/schema/pipelines/status.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 1
|
2019-09-11T03:02:27.000Z
|
2019-09-11T03:02:27.000Z
|
import graphene
class GrapheneRunStatus(graphene.Enum):
QUEUED = "QUEUED"
NOT_STARTED = "NOT_STARTED"
MANAGED = "MANAGED"
STARTING = "STARTING"
STARTED = "STARTED"
SUCCESS = "SUCCESS"
FAILURE = "FAILURE"
CANCELING = "CANCELING"
CANCELED = "CANCELED"
class Meta:
name = "RunStatus"
| 19.529412
| 39
| 0.63253
|
205859497ab0a81302eb932995eb86626abba718
| 6,446
|
py
|
Python
|
intropyproject-classify-pet-images/check_images.py
|
Eyongkevin/AIPND-revision
|
b28ff7c495726ca39109ba663f2cc75272a44733
|
[
"MIT"
] | null | null | null |
intropyproject-classify-pet-images/check_images.py
|
Eyongkevin/AIPND-revision
|
b28ff7c495726ca39109ba663f2cc75272a44733
|
[
"MIT"
] | null | null | null |
intropyproject-classify-pet-images/check_images.py
|
Eyongkevin/AIPND-revision
|
b28ff7c495726ca39109ba663f2cc75272a44733
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/check_images.py
#
# TODO 0: Add your information below for Programmer & Date Created.
# PROGRAMMER: Eyong Kevin Enowanyo
# DATE CREATED: Oct 19, 2019
# REVISED DATE:
# PURPOSE: Classifies pet images using a pretrained CNN model, compares these
# classifications to the true identity of the pets in the images, and
# summarizes how well the CNN performed on the image classification task.
# Note that the true identity of the pet (or object) in the image is
# indicated by the filename of the image. Therefore, your program must
# first extract the pet image label from the filename before
# classifying the images using the pretrained CNN model. With this
# program we will be comparing the performance of 3 different CNN model
# architectures to determine which provides the 'best' classification.
#
# Use argparse Expected Call with <> indicating expected user input:
# python check_images.py --dir <directory with images> --arch <model>
# --dogfile <file that contains dognames>
# Example call:
# python check_images.py --dir pet_images/ --arch vgg --dogfile dognames.txt
##
# Imports python modules
from time import time, sleep
# Imports print functions that check the lab
from print_functions_for_lab_checks import *
# Imports functions created for this program
from get_input_args import get_input_args
from get_pet_labels import get_pet_labels
from classify_images import classify_images
from adjust_results4_isadog import adjust_results4_isadog
from calculates_results_stats import calculates_results_stats
from print_results import print_results
# Main program function defined below
def main():
# TODO 0: Measures total program runtime by collecting start time
start_time = time()
# TODO 1: Define get_input_args function within the file get_input_args.py
    # This function retrieves 3 Command Line Arguments as input from the user
    # running the program from a terminal window. This function returns
# the collection of these command line arguments from the function call as
# the variable in_arg
in_arg = get_input_args()
# Function that checks command line arguments using in_arg
check_command_line_arguments(in_arg)
# TODO 2: Define get_pet_labels function within the file get_pet_labels.py
# Once the get_pet_labels function has been defined replace 'None'
# in the function call with in_arg.dir Once you have done the replacements
# your function call should look like this:
# get_pet_labels(in_arg.dir)
# This function creates the results dictionary that contains the results,
# this dictionary is returned from the function call as the variable results
results = get_pet_labels(in_arg.dir)
#print(len(results))
#print(results)
# Function that checks Pet Images in the results Dictionary using results
check_creating_pet_image_labels(results)
    # TODO 3: Define classify_images function within the file classify_images.py
# Once the classify_images function has been defined replace first 'None'
# in the function call with in_arg.dir and replace the last 'None' in the
# function call with in_arg.arch Once you have done the replacements your
# function call should look like this:
# classify_images(in_arg.dir, results, in_arg.arch)
# Creates Classifier Labels with classifier function, Compares Labels,
# and adds these results to the results dictionary - results
classify_images(in_arg.dir, results, in_arg.arch)
# Function that checks Results Dictionary using results
check_classifying_images(results)
# TODO 4: Define adjust_results4_isadog function within the file adjust_results4_isadog.py
# Once the adjust_results4_isadog function has been defined replace 'None'
# in the function call with in_arg.dogfile Once you have done the
# replacements your function call should look like this:
# adjust_results4_isadog(results, in_arg.dogfile)
# Adjusts the results dictionary to determine if classifier correctly
# classified images as 'a dog' or 'not a dog'. This demonstrates if
# model can correctly classify dog images as dogs (regardless of breed)
adjust_results4_isadog(results, in_arg.dogfile)
# Function that checks Results Dictionary for is-a-dog adjustment using results
check_classifying_labels_as_dogs(results)
# TODO 5: Define calculates_results_stats function within the file calculates_results_stats.py
# This function creates the results statistics dictionary that contains a
# summary of the results statistics (this includes counts & percentages). This
# dictionary is returned from the function call as the variable results_stats
# Calculates results of run and puts statistics in the Results Statistics
# Dictionary - called results_stats
results_stats = calculates_results_stats(results)
# Function that checks Results Statistics Dictionary using results_stats
check_calculating_results(results, results_stats)
# TODO 6: Define print_results function within the file print_results.py
# Once the print_results function has been defined replace 'None'
# in the function call with in_arg.arch Once you have done the
# replacements your function call should look like this:
# print_results(results, results_stats, in_arg.arch, True, True)
# Prints summary results, incorrect classifications of dogs (if requested)
# and incorrectly classified breeds (if requested)
print_results(results, results_stats, in_arg.arch, True, True)
# TODO 0: Measure total program runtime by collecting end time
end_time = time()
# TODO 0: Computes overall runtime in seconds & prints it in hh:mm:ss format
tot_time = end_time - start_time #calculate difference between end time and start time
print("\n** Total Elapsed Runtime:",
str(int((tot_time/3600)))+":"+str(int((tot_time%3600)/60))+":"
+str(int((tot_time%3600)%60)) )
# Call to main function to run the program
if __name__ == "__main__":
main()
| 48.104478
| 144
| 0.727583
|
56a1a4cd84d1cf025dec24e610e672a0b858d8fe
| 2,604
|
py
|
Python
|
test/test_ghidra_capa_features.py
|
L1NNA/JARV1S-Disassembler
|
6bc9d9459bd5142406fdda0ed88ba636934c94c6
|
[
"Apache-2.0"
] | 7
|
2020-12-19T18:56:23.000Z
|
2021-11-21T20:29:58.000Z
|
test/test_ghidra_capa_features.py
|
L1NNA/JARV1S-Disassembler
|
6bc9d9459bd5142406fdda0ed88ba636934c94c6
|
[
"Apache-2.0"
] | 1
|
2020-12-20T07:57:37.000Z
|
2020-12-28T18:10:11.000Z
|
test/test_ghidra_capa_features.py
|
L1NNA/JARV1S-Ghidra
|
84b551b2a1266b6bcb9454aaa01b97b21d7d4d4f
|
[
"Apache-2.0"
] | 2
|
2020-12-20T11:32:20.000Z
|
2021-03-17T15:36:16.000Z
|
# Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import sys
import test.test_capa.tests.fixtures as fixtures
from test.test_capa.tests.fixtures import *
from functools import lru_cache
def get_function_jvd(extractor, fva):
for f in extractor.get_functions():
if f.addr_start == fva:
return f
for f in extractor.get_functions():
for b in f.blocks:
for i in b.ins:
if i.ea == fva:
return f
raise ValueError("function not found")
fixtures.get_function = get_function_jvd
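# cache extractors per sample path so each binary is disassembled by Ghidra only once across tests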
@lru_cache()
def get_jvd_ghidra_extractor(path):
from jvd import get_disassembler
from jvd.capa.extractor import JVDExtractor
from jvd.disassembler import DisassemblerAbstract
disassembler = get_disassembler(disassembler='ghidra')
disassembler: DisassemblerAbstract
gz_file, logs = disassembler.disassemble(
path, cleanup=False, additional_ext='.ghr')
extractor = JVDExtractor(gz_file, path)
return extractor
@parametrize(
"sample,scope,feature,expected",
FEATURE_PRESENCE_TESTS,
indirect=["sample", "scope"],
)
def test_jvd_ghidra_features(sample, scope, feature, expected):
if '0x4556E5' in scope.__name__ and 'characteristic(recursive call)' in str(feature):
expected = True
if '0x4556E5' in scope.__name__ and 'characteristic(calls to)' in str(feature):
expected = True
with xfail(sys.version_info < (3, 0), reason="JVD only works on py3"):
do_test_feature_presence(
get_jvd_ghidra_extractor, sample, scope, feature, expected)
@parametrize(
"sample,scope,feature,expected",
FEATURE_COUNT_TESTS,
indirect=["sample", "scope"],
)
def test_jvd_ghidra_feature_counts(sample, scope, feature, expected):
if '0x4556E5' in scope.__name__ and 'characteristic(calls to)' in str(feature):
expected = 1
with xfail(sys.version_info < (3, 0), reason="JVD only works on py3"):
do_test_feature_count(get_jvd_ghidra_extractor,
sample, scope, feature, expected)
| 37.73913
| 111
| 0.710061
|
055a37b402b32565af4632a59de778a951268fdf
| 10,560
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/cisco/mso/plugins/modules/mso_schema_template_externalepg.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/cisco/mso/plugins/modules/mso_schema_template_externalepg.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/cisco/mso/plugins/modules/mso_schema_template_externalepg.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template_external_epg
short_description: Manage external EPGs in schema templates
description:
- Manage external EPGs in schema templates on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
options:
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
external_epg:
description:
- The name of the external EPG to manage.
type: str
aliases: [ name, externalepg ]
type:
description:
    - The type of external EPG.
    - An ANP must be associated with the external EPG when the type is cloud.
    - An L3Out can be associated with the external EPG when the type is on-premise.
type: str
choices: [ on-premise, cloud ]
default: on-premise
display_name:
description:
- The name as displayed on the MSO web interface.
type: str
vrf:
description:
    - The VRF associated with the external EPG.
type: dict
suboptions:
name:
description:
- The name of the VRF to associate with.
required: true
type: str
schema:
description:
- The schema that defines the referenced VRF.
- If this parameter is unspecified, it defaults to the current schema.
type: str
template:
description:
- The template that defines the referenced VRF.
- If this parameter is unspecified, it defaults to the current template.
type: str
l3out:
description:
    - The L3Out associated with the external EPG.
type: dict
suboptions:
name:
description:
- The name of the L3Out to associate with.
required: true
type: str
schema:
description:
- The schema that defines the referenced L3Out.
- If this parameter is unspecified, it defaults to the current schema.
type: str
template:
description:
- The template that defines the referenced L3Out.
- If this parameter is unspecified, it defaults to the current template.
type: str
anp:
description:
    - The ANP associated with the external EPG.
type: dict
suboptions:
name:
description:
- The name of the anp to associate with.
required: true
type: str
schema:
description:
- The schema that defines the referenced anp.
- If this parameter is unspecified, it defaults to the current schema.
type: str
template:
description:
- The template that defines the referenced anp.
- If this parameter is unspecified, it defaults to the current template.
type: str
preferred_group:
description:
    - Whether the preferred group is enabled for this external EPG.
type: bool
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: cisco.mso.modules
'''
EXAMPLES = r'''
- name: Add a new external EPG
cisco.mso.mso_schema_template_external_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
external_epg: External EPG 1
vrf:
name: VRF
schema: Schema 1
template: Template 1
state: present
delegate_to: localhost
- name: Add a new external EPG with external epg in cloud
cisco.mso.mso_schema_template_external_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
external_epg: External EPG 1
type: cloud
vrf:
name: VRF
schema: Schema 1
template: Template 1
anp:
name: ANP1
schema: Schema 1
template: Template 1
state: present
delegate_to: localhost
- name: Remove an external EPG
cisco.mso.mso_schema_template_external_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
external_epg: external EPG1
state: absent
delegate_to: localhost
- name: Query a specific external EPG
cisco.mso.mso_schema_template_external_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
external_epg: external EPG1
state: query
delegate_to: localhost
register: query_result
- name: Query all external EPGs
cisco.mso.mso_schema_template_external_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.mso.plugins.module_utils.mso import MSOModule, mso_argument_spec, mso_reference_spec
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
template=dict(type='str', required=True),
external_epg=dict(type='str', aliases=['name', 'externalepg']), # This parameter is not required for querying all objects
display_name=dict(type='str'),
vrf=dict(type='dict', options=mso_reference_spec()),
l3out=dict(type='dict', options=mso_reference_spec()),
anp=dict(type='dict', options=mso_reference_spec()),
preferred_group=dict(type='bool'),
type=dict(type='str', default='on-premise', choices=['on-premise', 'cloud']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['external_epg']],
['state', 'present', ['external_epg', 'vrf']],
['type', 'cloud', ['anp']],
],
)
schema = module.params.get('schema')
template = module.params.get('template').replace(' ', '')
external_epg = module.params.get('external_epg')
display_name = module.params.get('display_name')
vrf = module.params.get('vrf')
if vrf is not None and vrf.get('template') is not None:
vrf['template'] = vrf.get('template').replace(' ', '')
l3out = module.params.get('l3out')
if l3out is not None and l3out.get('template') is not None:
l3out['template'] = l3out.get('template').replace(' ', '')
anp = module.params.get('anp')
if anp is not None and anp.get('template') is not None:
anp['template'] = anp.get('template').replace(' ', '')
preferred_group = module.params.get('preferred_group')
type_ext_epg = module.params.get('type')
state = module.params.get('state')
mso = MSOModule(module)
# Get schema objects
schema_id, schema_path, schema_obj = mso.query_schema(schema)
# Get template
templates = [t.get('name') for t in schema_obj.get('templates')]
if template not in templates:
mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates)))
template_idx = templates.index(template)
# Get external EPGs
external_epgs = [e.get('name') for e in schema_obj.get('templates')[template_idx]['externalEpgs']]
if external_epg is not None and external_epg in external_epgs:
external_epg_idx = external_epgs.index(external_epg)
mso.existing = schema_obj.get('templates')[template_idx]['externalEpgs'][external_epg_idx]
if 'externalEpgRef' in mso.existing:
del mso.existing['externalEpgRef']
if 'vrfRef' in mso.existing:
mso.existing['vrfRef'] = mso.dict_from_ref(mso.existing.get('vrfRef'))
if 'l3outRef' in mso.existing:
mso.existing['l3outRef'] = mso.dict_from_ref(mso.existing.get('l3outRef'))
if 'anpRef' in mso.existing:
mso.existing['anpRef'] = mso.dict_from_ref(mso.existing.get('anpRef'))
if state == 'query':
if external_epg is None:
mso.existing = schema_obj.get('templates')[template_idx]['externalEpgs']
elif not mso.existing:
mso.fail_json(msg="External EPG '{external_epg}' not found".format(external_epg=external_epg))
mso.exit_json()
eepgs_path = '/templates/{0}/externalEpgs'.format(template)
eepg_path = '/templates/{0}/externalEpgs/{1}'.format(template, external_epg)
ops = []
mso.previous = mso.existing
if state == 'absent':
if mso.existing:
mso.sent = mso.existing = {}
ops.append(dict(op='remove', path=eepg_path))
elif state == 'present':
vrf_ref = mso.make_reference(vrf, 'vrf', schema_id, template)
l3out_ref = mso.make_reference(l3out, 'l3out', schema_id, template)
anp_ref = mso.make_reference(anp, 'anp', schema_id, template)
if display_name is None and not mso.existing:
display_name = external_epg
payload = dict(
name=external_epg,
displayName=display_name,
vrfRef=vrf_ref,
preferredGroup=preferred_group,
)
if type_ext_epg == 'cloud':
payload['extEpgType'] = 'cloud'
payload['anpRef'] = anp_ref
else:
payload['l3outRef'] = l3out_ref
mso.sanitize(payload, collate=True)
if mso.existing:
# clean contractRef to fix api issue
for contract in mso.sent.get('contractRelationships'):
contract['contractRef'] = mso.dict_from_ref(contract.get('contractRef'))
ops.append(dict(op='replace', path=eepg_path, value=mso.sent))
else:
ops.append(dict(op='add', path=eepgs_path + '/-', value=mso.sent))
mso.existing = mso.proposed
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
if __name__ == "__main__":
main()
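# --- Illustrative aside (not part of the module) ---
# A hedged sketch of how the 'present' branch above assembles the external EPG payload:
# the extEpgType/anpRef pair is used when type is cloud, and l3outRef otherwise. The
# build_payload helper and the literal reference dicts below are hypothetical stand-ins
# for what make_reference() returns.
def build_payload(name, display_name, vrf_ref, preferred_group, type_ext_epg, anp_ref=None, l3out_ref=None):
    payload = dict(
        name=name,
        displayName=display_name or name,
        vrfRef=vrf_ref,
        preferredGroup=preferred_group,
    )
    if type_ext_epg == 'cloud':
        payload['extEpgType'] = 'cloud'
        payload['anpRef'] = anp_ref
    else:
        payload['l3outRef'] = l3out_ref
    return payload

if __name__ == "__main__":
    # Example only: an on-premise external EPG with an L3Out reference.
    example = build_payload('External EPG 1', None,
                            {'schemaId': 'S1', 'templateName': 'T1', 'vrfName': 'VRF'},
                            False, 'on-premise', l3out_ref={'l3outName': 'L3Out1'})
    print(example)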
| 32.293578
| 131
| 0.647064
|
6c189c19845213d5ebddac08cae7004792258682
| 1,061
|
py
|
Python
|
tag_sentenze/serializers.py
|
AlbertoZerbinati/sentag
|
542c10e68372352cf1dcca056452220532ad81ed
|
[
"MIT"
] | 3
|
2021-12-11T12:20:07.000Z
|
2022-01-15T14:34:07.000Z
|
tag_sentenze/serializers.py
|
AlbertoZerbinati/sentag
|
542c10e68372352cf1dcca056452220532ad81ed
|
[
"MIT"
] | 1
|
2021-11-28T22:34:58.000Z
|
2021-11-28T22:35:43.000Z
|
tag_sentenze/serializers.py
|
AlbertoZerbinati/sentag
|
542c10e68372352cf1dcca056452220532ad81ed
|
[
"MIT"
] | 1
|
2022-01-26T16:41:12.000Z
|
2022-01-26T16:41:12.000Z
|
from rest_framework import serializers
from users.models import Tagging, TaggingTask
class TaggingSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='judgment.name', read_only=True)
initial_text = serializers.CharField(
source='judgment.initial_text', read_only=True)
tags = serializers.CharField(source='judgment.xsd.tags', read_only=True)
class Meta:
model = Tagging
fields = ['name', 'initial_text', 'xml_text',
'tags', 'token_manager', 'comments', 'completed']
class TaggingTaskSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='judgment.name', read_only=True)
initial_text = serializers.CharField(
source='judgment.initial_text', read_only=True)
tags = serializers.CharField(source='task.xsd.tags', read_only=True)
class Meta:
model = TaggingTask
fields = ['name', 'initial_text', 'xml_text',
'tags', 'token_manager', 'arguments_graph', 'relations_graph', 'comments', 'completed']
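# --- Illustrative aside (not used by the serializers above) ---
# A hedged sketch of how DRF resolves a dotted source= path such as 'judgment.xsd.tags':
# it walks attribute by attribute from the instance being serialized. resolve_source and
# the Dummy-style classes below are hypothetical stand-ins for the ORM objects.
def resolve_source(instance, source):
    value = instance
    for part in source.split('.'):
        value = getattr(value, part)
    return value

if __name__ == "__main__":
    class Xsd:
        tags = "<tag/>"
    class Judgment:
        name, xsd = "case-1", Xsd()
    class TaggingStub:
        judgment = Judgment()
    print(resolve_source(TaggingStub(), "judgment.xsd.tags"))  # -> "<tag/>"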
| 39.296296
| 105
| 0.69934
|
c15267d90bd97ad0221957252924b91865605f86
| 2,233
|
py
|
Python
|
dev/scripts/app_routes_gen.py
|
sephrat/mealie
|
1f35742d8bad8e9066cd40e7bff8f5a41c4cd2ba
|
[
"MIT"
] | null | null | null |
dev/scripts/app_routes_gen.py
|
sephrat/mealie
|
1f35742d8bad8e9066cd40e7bff8f5a41c4cd2ba
|
[
"MIT"
] | 1
|
2021-04-23T01:13:25.000Z
|
2021-04-23T01:13:25.000Z
|
dev/scripts/app_routes_gen.py
|
sephrat/mealie
|
1f35742d8bad8e9066cd40e7bff8f5a41c4cd2ba
|
[
"MIT"
] | null | null | null |
import json
import re
from pathlib import Path
from typing import Optional
import slugify
from jinja2 import Template
from mealie.app import app
from pydantic import BaseModel
CWD = Path(__file__).parent
OUT_FILE = CWD.joinpath("output", "app_routes.py")
code_template = """
class AppRoutes:
def __init__(self) -> None:
self.prefix = '{{paths.prefix}}'
{% for path in paths.static_paths %}
self.{{ path.router_slug }} = "{{path.prefix}}{{ path.route }}"{% endfor %}
{% for path in paths.function_paths %}
def {{path.router_slug}}(self, {{path.var|join(", ")}}):
return f"{self.prefix}{{ path.route }}"
{% endfor %}
"""
def get_variables(path):
path = path.replace("/", " ")
print(path)
var = re.findall(r" \{.*\}", path)
print(var)
if var:
return [v.replace("{", "").replace("}", "") for v in var]
else:
return None
class RouteObject:
def __init__(self, route_string) -> None:
self.prefix = "/" + route_string.split("/")[1]
self.route = route_string.replace(self.prefix, "")
self.parts = route_string.split("/")[1:]
self.var = re.findall(r"\{(.*?)\}", route_string)
self.is_function = "{" in self.route
self.router_slug = slugify.slugify("_".join(self.parts[1:]), separator="_")
def __repr__(self) -> str:
return f"""Route: {self.route}
Parts: {self.parts}
Function: {self.is_function}
Var: {self.var}
Slug: {self.router_slug}
"""
def get_paths(app):
paths = []
print(json.dumps(app.openapi()))
for key, value in app.openapi().items():
if key == "paths":
for key, value in value.items():
paths.append(key)
return paths
def generate_template(app):
paths = get_paths(app)
new_paths = [RouteObject(path) for path in paths]
static_paths = [p for p in new_paths if not p.is_function]
function_paths = [p for p in new_paths if p.is_function]
template = Template(code_template)
content = template.render(paths={"prefix": "/api", "static_paths": static_paths, "function_paths": function_paths})
with open(OUT_FILE, "w") as f:
f.write(content)
if __name__ == "__main__":
generate_template(app)
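# --- Illustrative aside (not part of the generator) ---
# A hedged sketch of what the Jinja template above emits: static paths become plain
# attributes, while paths containing {parameters} become methods that interpolate via
# f-strings. The two routes below are hand-written examples only; the real generated
# class depends on mealie's actual OpenAPI paths.
class AppRoutesExample:
    def __init__(self) -> None:
        self.prefix = '/api'
        self.recipes = "/api/recipes"          # static path -> attribute

    def recipes_slug(self, slug):              # '/recipes/{slug}' -> function path
        return f"{self.prefix}/recipes/{slug}"

if __name__ == "__main__":
    routes = AppRoutesExample()
    print(routes.recipes)                # /api/recipes
    print(routes.recipes_slug("pasta"))  # /api/recipes/pasta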
| 26.583333
| 119
| 0.623824
|
9e85b9f05b4fab47d47204543544d59f7847424a
| 4,185
|
py
|
Python
|
mtdata/index/other.py
|
kpu/mtdata
|
22775046b653484b25769ca8c087601a499f48db
|
[
"Apache-2.0"
] | null | null | null |
mtdata/index/other.py
|
kpu/mtdata
|
22775046b653484b25769ca8c087601a499f48db
|
[
"Apache-2.0"
] | null | null | null |
mtdata/index/other.py
|
kpu/mtdata
|
22775046b653484b25769ca8c087601a499f48db
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# All other single corpus
# Author: Thamme Gowda [tg (at) isi (dot) edu]
# Created: 5/23/20
from mtdata.index import Index, Entry
def load_all(index: Index):
# === IITB hin eng http://www.cfilt.iitb.ac.in/iitb_parallel/
cite = index.ref_db.get_bibtex('Kunchukuttan-etal-iitb')
l1, l2 = 'hi', 'en'
for version, prefix in [
#('v1_0', 'http://www.cfilt.iitb.ac.in/iitb_parallel/iitb_corpus_download'),
('v1_5', 'http://www.cfilt.iitb.ac.in/~moses/iitb_en_hi_parallel/iitb_corpus_download')]:
# they also have v2, but the link is broken http://www.cfilt.iitb.ac.in/iitb_parallel/
# version is not explicit, but guessed from file modification time and description
url = prefix + "/parallel.tgz"
ent = Entry(langs=(l1, l2), url=url, filename=f'IITB{version}-hin_eng-parallel.tar.gz',
name=f'IITB{version}_train', in_ext='txt', cite=cite,
in_paths=[f'parallel/IITB.en-hi.{l1}',
f'parallel/IITB.en-hi.{l2}'])
index.add_entry(ent)
url = prefix + "/dev_test.tgz"
for split in ['dev', 'test']:
f1 = f'dev_test/{split}.{l1}'
f2 = f'dev_test/{split}.{l2}'
ent = Entry(langs=(l1, l2), url=url, filename=f'IITB{version}-hin_eng-dev_test.tar.gz',
name=f'IITB{version}_{split}', in_ext='txt',
in_paths=[f1, f2], cite=cite)
index.add_entry(ent)
# == Japanese ==
cite = index.ref_db.get_bibtex('neubig11kftt')
url = "http://www.phontron.com/kftt/download/kftt-data-1.0.tar.gz"
l1, l2 = 'en', 'ja'
for split in ['train', 'test', 'dev', 'tune']:
f1 = f'kftt-data-1.0/data/orig/kyoto-{split}.{l1}'
f2 = f'kftt-data-1.0/data/orig/kyoto-{split}.{l2}'
ent = Entry(langs=(l1, l2), url=url, filename="kftt-data-1.0.tar.gz",
name=f'kftt_v1_{split}', in_ext='txt',
in_paths=[f1, f2], cite=cite)
index.add_entry(ent)
url = "http://lotus.kuee.kyoto-u.ac.jp/WAT/my-en-data/wat2020.my-en.zip"
cite = index.ref_db.get_bibtex('ding2020a')
for split in ['dev', 'test', 'train']:
ent = Entry(langs=('my', 'en'), url=url, name=f'WAT2020_ALT_{split}', in_ext='txt',
cite=cite, filename='wat2020.my-en.zip',
in_paths=[f'wat2020.my-en/alt/{split}.alt.my', f'wat2020.my-en/alt/{split}.alt.en'])
index.add_entry(ent)
l1, l2 = 'iu', 'en'
url="https://nrc-digital-repository.canada.ca/eng/view/dataset/?id=c7e34fa7-7629-43c2-bd6d-19b32bf64f60"
cite = index.ref_db.get_bibtex('joanis-etal-2020-nunavut')
for split in ['dev', 'devtest', 'test', 'train']:
path_pref = f'Nunavut-Hansard-Inuktitut-English-Parallel-Corpus-3.0/split/{split}'
if split != 'train':
path_pref += '-dedup'
ent = Entry(langs=(l1, l2), url=url, name=f'NunavutHansard_v3_{split}', in_ext='txt',
cite=cite, filename='NunavutHansard_iuen_v3.tgz',
in_paths=[f'{path_pref}.{l1}', f'{path_pref}.{l2}'])
index.add_entry(ent)
# https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-2122
url = "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-2122/khresmoi-summary-test-set-2.0.zip"
cite = index.ref_db.get_bibtex('Khresmoi')
langs = ["cs", "de", "en", "es", "fr", "hu", "pl", "sv"]
for i, l1 in enumerate(langs):
for l2 in langs[i+1:]:
ent = Entry(langs=(l1, l2), url=url, name='Khresmoi_Summary_Test_v2', filename='khresmoi-summary-test-set-2.0.zip', cite=cite, in_paths=[f"khresmoi-summary-test-set-2.0/khresmoi-summary-test.{l1}", f"khresmoi-summary-test-set-2.0/khresmoi-summary-test.{l2}"], in_ext='txt')
index.add_entry(ent)
ent = Entry(langs=(l1, l2), url=url, name='Khresmoi_Summary_Dev_v2', filename='khresmoi-summary-test-set-2.0.zip', cite=cite, in_paths=[f"khresmoi-summary-test-set-2.0/khresmoi-summary-dev.{l1}", f"khresmoi-summary-test-set-2.0/khresmoi-summary-dev.{l2}"], in_ext='txt')
index.add_entry(ent)
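# --- Illustrative aside (not part of mtdata) ---
# A hedged sketch of the Entry pattern used throughout load_all(): each dataset is a
# language pair plus a download URL and the in-archive paths to read. MiniEntry is a
# stand-in dataclass for mtdata.index.Entry, shown only to make the shape of the data
# explicit; the values mirror the IITB training entry declared above.
from dataclasses import dataclass, field
from typing import List, Tuple

@dataclass
class MiniEntry:
    langs: Tuple[str, str]
    url: str
    name: str
    in_ext: str
    in_paths: List[str] = field(default_factory=list)

if __name__ == "__main__":
    e = MiniEntry(langs=('hi', 'en'),
                  url='http://www.cfilt.iitb.ac.in/~moses/iitb_en_hi_parallel/iitb_corpus_download/parallel.tgz',
                  name='IITBv1_5_train', in_ext='txt',
                  in_paths=['parallel/IITB.en-hi.hi', 'parallel/IITB.en-hi.en'])
    print(e.name, e.langs)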
| 52.974684
| 285
| 0.605974
|
e1644a0ebf43f674b8250f30b7eec955acaba9b0
| 413
|
py
|
Python
|
docs/dev.py
|
healeycodes/adventlang
|
a894a007434c1c905f20d5697f1a8852406cb1c0
|
[
"MIT"
] | 28
|
2021-11-28T14:58:21.000Z
|
2022-03-02T04:31:04.000Z
|
docs/dev.py
|
healeycodes/adventlang
|
a894a007434c1c905f20d5697f1a8852406cb1c0
|
[
"MIT"
] | null | null | null |
docs/dev.py
|
healeycodes/adventlang
|
a894a007434c1c905f20d5697f1a8852406cb1c0
|
[
"MIT"
] | 1
|
2022-01-01T15:48:40.000Z
|
2022-01-01T15:48:40.000Z
|
import http.server
import socketserver
PORT = 8000
Handler = http.server.SimpleHTTPRequestHandler
Handler.extensions_map.update(
{".wasm": "application/wasm", ".js": "application/javascript"}
)
socketserver.TCPServer.allow_reuse_address = True
with socketserver.TCPServer(("", PORT), Handler) as httpd:
httpd.allow_reuse_address = True
print(f"🎅 http://localhost:{PORT}")
httpd.serve_forever()
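# --- Illustrative aside (never reached while the server above is running) ---
# The extensions_map override matters because browsers refuse
# WebAssembly.instantiateStreaming() unless .wasm files are served with the
# application/wasm content type. This snippet only inspects the mapping a handler
# would use; it does not start a server.
if __name__ == "__main__":
    import http.server as _hs
    handler_map = dict(_hs.SimpleHTTPRequestHandler.extensions_map)
    handler_map.update({".wasm": "application/wasm", ".js": "application/javascript"})
    print(handler_map[".wasm"])  # application/wasm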
| 25.8125
| 66
| 0.745763
|
27bc5d838ac7142dea50d53dfb88eaa050589b39
| 4,315
|
py
|
Python
|
PTML.py
|
Qkrisi/PTML-Framework
|
f46ac98a8653de66b18f5bfd740ec6124232fb6e
|
[
"MIT"
] | 1
|
2020-12-22T16:16:20.000Z
|
2020-12-22T16:16:20.000Z
|
PTML.py
|
Qkrisi/PTML-Framework
|
f46ac98a8653de66b18f5bfd740ec6124232fb6e
|
[
"MIT"
] | null | null | null |
PTML.py
|
Qkrisi/PTML-Framework
|
f46ac98a8653de66b18f5bfd740ec6124232fb6e
|
[
"MIT"
] | null | null | null |
from flask import Flask
from html.parser import HTMLParser
from os import path, listdir, remove
from PTML_Constants import *
import PTML_Models
import PTML_Tags
import PTML_Sockets
SelfModule = __import__(__name__)
WSPort = -1
def UpdateAttributes(attributes, Route):
attrib = []
PTML_Models.Element_Model.IDCounter+=1
cl = f"ptml-id-{PTML_Models.Element_Model.IDCounter}"
for attribute in attributes:
if attribute[0]=="class":cl+=" "+attribute[1]
for attribute in attributes:
if attribute[0]=="class":continue
add = True
for key in PTML_Tags.Functions[Route]:
if attribute[1]==f"{key}()":
cl+=f" ptml-attribute-{attribute[0]}-{key}"
add = False
break
if add:attrib.append(attribute)
if cl!="":attrib.append(("class", cl))
return attrib
class PTML_Parser(HTMLParser):
def __init__(self, file, RoutePath):
super().__init__()
self.File = file
self.CurrentTag = ""
self.Datas = {}
self.Route = RoutePath
self.ParentID = -1
def handle_starttag(self, tag, attrs):
attrs = UpdateAttributes(attrs, self.Route)
self.ParentID+=1
if not tag in PTML_Tags.Tags:
self.File.write(f"<{tag}{' '+' '.join(name+'='+QUOTE+value+QUOTE for name, value in attrs) if len(attrs)>0 else ''}>")
self.CurrentTag = ""
if tag=="head":self.File.write(f"\n<script>const WSPort={WSPort}</script>\n<script src={QUOTE}PTML.js{QUOTE}></script>\n")
return
if not tag in self.Datas:self.Datas[tag]=[]
self.CurrentTag = tag
self.Datas[tag].append([attrs, ""])
def handle_endtag(self, tag):
if not tag in PTML_Tags.Tags:
self.File.write(f"</{tag}>")
return
self.CurrentTag = ""
kwargs = {}
data = self.Datas[tag].pop(-1)
for attribute in data[0]:
kwargs[attribute[0]]=attribute[1]
kwargs["Route"]=self.Route
kwargs["ParentID"]=self.ParentID
kwargs["data"]=data[1]
self.File.write(PTML_Tags.Tags[tag](**kwargs) + "\n")
def handle_data(self, data):
if self.CurrentTag=="":
self.File.write(data)
return
self.Datas[self.CurrentTag][-1][1]+=data
def ReadFromStatic(_path = "index.html", _separator = "") -> str:
if not path.isfile(_path):raise NameError("Invalid path")
with open(_path,"r") as f:
content = f.readlines()
return _separator.join(content)
def Parse(input, out, RoutePath):
with open(out, "w") as WriteFile:
Parser = PTML_Parser(WriteFile, RoutePath)
with open(input, "r") as ReadFile:
Parser.feed("\n".join(ReadFile.readlines()))
def ListFiles(Directory):
for member in listdir(Directory):
_path = path.join(Directory, member)
if path.isfile(path.join(Directory, member)):yield _path
else:yield from ListFiles(_path)
def Run(Directory, AppName, ip="localhost", HTTPPort=5000, WebSocketPort=5001):
global WSPort
app = Flask(AppName)
Counter = 0
RemoveAfter = []
WSPort=WebSocketPort
@app.route("/PTML.js")
def PTML_Script():
return ReadFromStatic("PTML.js")
for _path in ListFiles(Directory):
Counter+=1
directory = path.dirname(_path)
file = path.basename(_path)
Parsed = directory+f"/{path.splitext(file)[0]}_ptml.html"
use_ptml = False
BasePath = "/"+directory.replace(Directory, "", 1)
RoutePath = BasePath+path.splitext(file)[0]
PTML_Tags.ExecuteOnLoad[RoutePath] = []
PTML_Tags.Functions[RoutePath] = {}
if path.splitext(file)[1]==".ptml":
use_ptml = True
Parse(_path, Parsed, RoutePath)
PTML_Models.IDStart = PTML_Models.Element_Model.IDCounter+1
RemoveAfter.append(Parsed)
exec(DYNAMIC_ROUTE.format(IndexPath = f'@app.route("{BasePath}")' if file in ["index.html", "index.ptml"] else "", Path=RoutePath, Counter=Counter, Parsed=Parsed if use_ptml else _path))
PTML_Sockets.Start(ip, WebSocketPort)
app.run(host=ip, port=HTTPPort)
for file in RemoveAfter:remove(file)
if __name__ == '__main__':
Run("Test", "Testing")
| 35.368852
| 194
| 0.616454
|
378a66e086f241189af5bd5f1d5c9f0c65c5da63
| 28,315
|
py
|
Python
|
SEQA/model_graph/predictor.py
|
nju-websoft/SR3
|
bb602926f8a260887326a812e19e23f50e7eb273
|
[
"Apache-2.0"
] | null | null | null |
SEQA/model_graph/predictor.py
|
nju-websoft/SR3
|
bb602926f8a260887326a812e19e23f50e7eb273
|
[
"Apache-2.0"
] | null | null | null |
SEQA/model_graph/predictor.py
|
nju-websoft/SR3
|
bb602926f8a260887326a812e19e23f50e7eb273
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
""" Translator Class and builder """
from __future__ import print_function
import codecs
import os
import math
import re
import torch
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from evaluate import *
from others.util import rouge_results_to_str,tile,test_rouge
from translate.beam import GNMTGlobalScorer
import copy
def build_predictor(args, tokenizer, symbols, model, logger=None):
scorer = GNMTGlobalScorer(args.alpha,length_penalty='wu')
translator = Translator(args, model, tokenizer, symbols, global_scorer=scorer, logger=logger)
return translator
class Translator(object):
"""
Uses a model to translate a batch of sentences.
Args:
model (:obj:`onmt.modules.NMTModel`):
NMT model to use for translation
fields (dict of Fields): data fields
beam_size (int): size of beam to use
n_best (int): number of translations produced
max_length (int): maximum length output to produce
        global_scorer (:obj:`GlobalScorer`):
object to rescore final translations
copy_attn (bool): use copy attention during translation
cuda (bool): use cuda
beam_trace (bool): trace beam search for debugging
logger(logging.Logger): logger.
"""
def __init__(self,
args,
model,
vocab,
symbols,
global_scorer=None,
logger=None,
dump_beam=""):
self.logger = logger
self.cuda = args.gpu != '-1'
self.args = args
self.model = model
# self.generator = self.model.generator
self.vocab = vocab
self.symbols = symbols
self.start_token = symbols['BOS']
self.end_token = symbols['EOS']
self.split_token = symbols['EOQ']
self.global_scorer = global_scorer
self.beam_size = args.beam_size
self.min_length = args.min_length
self.max_length = args.max_length
self.dump_beam = dump_beam
# for debugging
self.beam_trace = self.dump_beam != ""
self.beam_accum = None
tensorboard_log_dir = args.model_path
self.tensorboard_writer = SummaryWriter(tensorboard_log_dir, comment="Unmt")
if self.beam_trace:
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
def _build_target_tokens(self, pred):
# vocab = self.fields["tgt"].vocab
tokens = []
for tok in pred:
tok = int(tok)
tokens.append(tok)
if tokens[-1] == self.end_token:
tokens = tokens[:-1]
break
tokens = [t for t in tokens if t < len(self.vocab)]
tokens = self.vocab.DecodeIds(tokens).split(' ')
return tokens
def from_batch(self, translation_batch):
batch = translation_batch["batch"]
assert (len(translation_batch["gold_score"]) ==
len(translation_batch["predictions"]))
batch_size = batch['src_tokens'].size(0)
preds, pred_score, gold_score, tgt_str, src = translation_batch["predictions"],translation_batch["scores"],translation_batch["gold_score"],batch['target'], batch['src_tokens']
if self.args.split_qm:
local_idx2token = batch['local_idx2token']
local_idx2tokenid = batch['local_idx2tokenid']
translations = []
for b in range(batch_size):
if len(preds[b])>0:
pred_sents=''
containn=[]
# id2token=False
for n in preds[b][0]:
n = int(n)
if self.args.split_qm and n in local_idx2token[b]:
if n not in containn:
# id2token=True
if self.args.dataset == 'geo':
pred_sents+=local_idx2token[b][n]
else:
# pred_sents += " "+local_idx2token[b][n]
pred_sents += " " + " ".join(self.vocab.convert_ids_to_tokens(local_idx2tokenid[b][n]))
containn.append(n)
else:
# if n == self.split_token:
# id2token=False
# if not id2token:
if self.args.dataset == 'geo':
pred_sents+=self.vocab.convert_ids_to_tokens(n)
else:
pred_sents += " "+self.vocab.convert_ids_to_tokens(n)
# pred_sents = self.vocab.convert_ids_to_tokens([int(n) for n in preds[b][0]])
# pred_sents = ''.join(pred_sents).replace(' ##','')
pred_sents = pred_sents.replace(' ##', '')
else:
pred_sents = ''
# print(pred_sents)
# print(preds[b])
if self.args.dataset == 'geo':
gold_sent = ''.join(tgt_str[b]).replace('[unused1]','').replace('[unused3]','').replace('[unused2]','')
else:
pred_sents=pred_sents.split('[unused0]')[0]
gold_sent = ' '.join(tgt_str[b]).replace('[unused1]', '').replace(' [unused3] ', '<q>').replace('[unused2]','').replace(' ##', '')
gold_sent = re.sub(r' +', ' ', gold_sent)
# raw_src =''.join([ ''.join([ self.vocab.ids_to_tokens[x] for x in t ]) for t in src[b][1:]]).replace('[PAD]','')
# print(gold_sent)
raw_src = [self.vocab.ids_to_tokens[int(t)] for t in src[b]][:500]
if self.args.dataset == 'geo':
raw_src = ''.join(raw_src).replace('[PAD]','')
else:
raw_src = ' '.join(raw_src).replace('[PAD]', '').replace(' ##', '')
translation = (pred_sents, gold_sent, raw_src)
# translation = (pred_sents[0], gold_sent)
translations.append(translation)
return translations
def translate(self,
data_iter, step,
attn_debug=False):
self.model.eval()
gold_path = self.args.result_path + '.%d.gold' % step
can_path = self.args.result_path + '.%d.candidate' % step
self.gold_out_file = codecs.open(gold_path, 'w', 'utf-8')
self.can_out_file = codecs.open(can_path, 'w', 'utf-8')
# raw_gold_path = self.args.result_path + '.%d.raw_gold' % step
# raw_can_path = self.args.result_path + '.%d.raw_candidate' % step
self.gold_out_file = codecs.open(gold_path, 'w', 'utf-8')
self.can_out_file = codecs.open(can_path, 'w', 'utf-8')
raw_src_path = self.args.result_path + '.%d.raw_src' % step
self.src_out_file = codecs.open(raw_src_path, 'w', 'utf-8')
attn_path = self.args.result_path + '.%d.attn.csv' % step
self.attn_out_file = codecs.open(attn_path, 'w', 'utf-8')
ct = 0
with torch.no_grad():
for batch in data_iter:
if(self.args.recall_eval):
gold_tgt_len = batch.tgt.size(1)
self.min_length = gold_tgt_len + 20
self.max_length = gold_tgt_len + 60
batch_data=self.translate_batch(batch)
translations = self.from_batch(batch_data)
for trans in translations:
pred, gold, src = trans
pred_str = pred.replace('[unused1]', '').replace('[unused4]', '').replace('[PAD]',
'').replace(
'[unused2]', '').replace(' [unused3] ', '<q>').replace('[unused3]','').strip()
pred_str = re.sub(r' +', ' ',pred_str)
gold_str = gold.strip()
if (self.args.recall_eval):
_pred_str = ''
gap = 1e3
for sent in pred_str.split('<q>'):
can_pred_str = _pred_str + '<q>' + sent.strip()
can_gap = math.fabs(len(_pred_str.split()) - len(gold_str.split()))
# if(can_gap>=gap):
if (len(can_pred_str.split()) >= len(gold_str.split()) + 10):
pred_str = _pred_str
break
else:
gap = can_gap
_pred_str = can_pred_str
self.can_out_file.write(pred_str + '\n')
self.gold_out_file.write(gold_str + '\n')
self.src_out_file.write(src.strip() + '\n')
ct += 1
self.can_out_file.flush()
self.gold_out_file.flush()
self.src_out_file.flush()
self.can_out_file.close()
self.gold_out_file.close()
self.src_out_file.close()
if (step != -1 and self.args.report_rouge):
rouges = self._report_rouge(gold_path, can_path)
self.logger.info('Rouges at step %d \n%s' % (step, rouge_results_to_str(rouges,self.args.dataset)))
if self.args.dataset=="geo":
if self.tensorboard_writer is not None:
self.tensorboard_writer.add_scalar('test/rouge1-F', rouges[("rouge-1", 'f')], step)
self.tensorboard_writer.add_scalar('test/rouge2-F', rouges[("rouge-2", 'f')], step)
self.tensorboard_writer.add_scalar('test/rougeL-F', rouges[("rouge-l", 'f')], step)
return rouges[("rouge-1", 'f')]+rouges[("rouge-2", 'f')]+rouges[("rouge-l", 'f')]
else:
if self.tensorboard_writer is not None:
self.tensorboard_writer.add_scalar('test/rouge1-F', rouges['rouge_1_f_score'], step)
self.tensorboard_writer.add_scalar('test/rouge2-F', rouges['rouge_2_f_score'], step)
self.tensorboard_writer.add_scalar('test/rougeL-F', rouges['rouge_l_f_score'], step)
return rouges['rouge_1_f_score'] + rouges['rouge_2_f_score'] + rouges['rouge_l_f_score']
else:
return 0
def _report_rouge(self, gold_path, can_path):
self.logger.info("Calculating Rouge")
if self.args.dataset == 'geo':
results_dict=getScore(can_path,gold_path,'zh')
else:
# results_dict = getScore(can_path, gold_path, 'en')
results_dict = test_rouge(self.args.temp_eval_dir, can_path, gold_path)
return results_dict
def translate_batch(self, batch, fast=False):
"""
Translate a batch of sentences.
Mostly a wrapper around :obj:`Beam`.
Args:
batch (:obj:`Batch`): a batch from a dataset object
data (:obj:`Dataset`): the dataset object
fast (bool): enables fast beam search (may not support all features)
Todo:
Shouldn't need the original dataset.
"""
with torch.no_grad():
return self._fast_translate_batch(
batch,
self.max_length,
min_length=self.min_length)
def _fast_translate_batch(self,
batch,
max_length,
min_length=0):
# TODO: faster code path for beam_size == 1.
# TODO: support these blacklisted features.
assert not self.dump_beam
beam_size = self.beam_size
batch_size = batch['src_tokens'].size(0)
if self.args.split_qm:
if self.args.train_copy:
word_vec, words, mask_word = self.model.wordCopySumm.encode_word(batch)
sent_repr, mask_sent = self.model.encode_step_split(batch)
else:
word_vec, words, sent_repr, mask_word, mask_sent = self.model.encode_step_split(batch)
if self.args.train_copy:
dec_states = self.model.wordCopySumm.decoder_src.init_decoder_state(words, word_vec, with_cache=True)
dec_states_sent = self.model.decoder_sent.init_decoder_state(~mask_sent, sent_repr, with_cache=True)
dec_states_sent.map_batch_fn(
lambda state, dim: tile(state, beam_size, dim=dim))
mask_sent = mask_sent.unsqueeze(1)
mask_word = mask_word.unsqueeze(1)
elif self.args.split_gen:
dec_states = self.model.decoder_src.init_decoder_state(words, word_vec, with_cache=True)
dec_states_sent = self.model.decoder_sent.init_decoder_state(~mask_sent, sent_repr, with_cache=True)
dec_states_sent.map_batch_fn(
lambda state, dim: tile(state, beam_size, dim=dim))
mask_sent=mask_sent.unsqueeze(1)
mask_word = mask_word.unsqueeze(1)
else:
dec_states = self.model.decoder.init_decoder_state(words, word_vec, with_cache=True)
if self.args.copy_decoder:
local_idx2tokenid = batch['local_idx2tokenid']
local_idx2token = batch['local_idx2token']
else:
if self.args.split_gen:
top_vec, src, mask_q = self.model.encode_word(batch)
if self.args.multidoc:
dec_states = self.model.decoder.init_decoder_state(src, top_vec, with_cache=True)
else:
dec_states = self.model.decoder_src.init_decoder_state(src, top_vec, with_cache=True)
else:
sent_repr, sents = self.model.encode_step(batch)
dec_states = self.model.decoder.init_decoder_state(sents, sent_repr, with_cache=True)
device = batch['src_tokens'].device
# Tile states and memory beam_size times.
dec_states.map_batch_fn(
lambda state, dim: tile(state, beam_size, dim=dim))
if self.args.split_qm:
src_features = tile(word_vec,beam_size,dim=0)
src = tile(words, beam_size, dim=0)
mask_word = tile(mask_word,beam_size,dim=0)
# if sent_repr!=None:
sent_repr = tile(sent_repr,beam_size,dim=0)
mask_sent = tile(mask_sent,beam_size,dim=0)
if self.args.copy_decoder:
copy_seq = tile(batch['copy_seq'],beam_size,dim=0)
local_idx2tokenid_new =[]
for x in local_idx2tokenid:
for i in range(beam_size):
local_idx2tokenid_new.append(x)
local_idx2token_new = []
for x in local_idx2token:
for i in range(beam_size):
local_idx2token_new.append(x)
else:
if self.args.split_gen:
src_features = tile(top_vec, beam_size, dim=0)
src = tile(src, beam_size, dim=0)
mask_word = tile(mask_q, beam_size, dim=0)
else:
src_features = tile(sent_repr, beam_size, dim=0)
src = tile(sents, beam_size, dim=0)
batch_offset = torch.arange(
batch_size, dtype=torch.long, device=device)
beam_offset = torch.arange(
0,
batch_size * beam_size,
step=beam_size,
dtype=torch.long,
device=device)
alive_seq = torch.full(
[batch_size * beam_size, 1],
self.start_token,
dtype=torch.long,
device=device)
# print(alive_seq)
# Give full probability to the first beam on the first step.
topk_log_probs = (
torch.tensor([0.0] + [float("-inf")] * (beam_size - 1),
device=device).repeat(batch_size))
# Structure that holds finished hypotheses.
hypotheses = [[] for _ in range(batch_size)] # noqa: F812
results = {}
results["predictions"] = [[] for _ in range(batch_size)] # noqa: F812
results["scores"] = [[] for _ in range(batch_size)] # noqa: F812
results["gold_score"] = [0] * batch_size
results["batch"] = batch
steps = torch.zeros_like(alive_seq).squeeze(-1)
copy_step = [0 for _ in range(batch_size*beam_size)]
pre_prob = None
for step in range(max_length):
# print(alive_seq)
# print(copy_step)
decoder_input = alive_seq[:, -1].view(1, -1)
# Decoder forward.
decoder_input = decoder_input.transpose(0,1)
if self.args.split_qm:
if self.args.split_gen or self.args.train_copy:
if self.args.copy_decoder:
probs, dec_out, dec_states, dec_states_sent, steps = self.model.predict_split_qm(decoder_input, src_features,
dec_states,dec_states_sent,
sent_repr, mask_sent, mask_word,
steps, copy_seq,
local_idx2tokenid_new, src=src)
steps += 1
pre_prob = probs
else:
probs, dec_out, dec_states, _ = self.model.predict_split_qm(decoder_input, src_features, dec_states,
sent_repr, mask_sent, mask_word, step,
src=src)
else:
if self.args.copy_decoder:
probs, dec_out, dec_states,steps = self.model.predict_split_qm(decoder_input, src_features, dec_states,
sent_repr, mask_sent, mask_word, steps,copy_seq,local_idx2tokenid_new,src=src)
steps+=1
pre_prob = probs
else:
probs, dec_out, dec_states,_ =self.model.predict_split_qm(decoder_input, src_features, dec_states, sent_repr, mask_sent, mask_word, step,src=src)
else:
if self.args.split_gen:# and self.args.copy_decoder:
probs, dec_out, dec_states = self.model.predict(decoder_input, src_features, dec_states, step,mask_word,src)
else:
probs, dec_out, dec_states= self.model.predict(decoder_input, src_features, dec_states, step)
# print(probs.topk(probs.size(0), dim=-1))
# print(probs.size())
log_probs = torch.log(probs)
# if (step > 0):
# for i in range(alive_seq.size(0)):
# if copy_step[i] == 0:
# words = [int(w) for w in alive_seq[i]]
# ext_word = set([word for word in words if word >= self.model.vocab_size])
# for w in ext_word:
# log_probs[i][w] = -1e20
vocab_size = log_probs.size(-1)
if step < min_length:
log_probs[:, self.end_token] = -1e20
# Multiply probs by the beam probability.
log_probs += topk_log_probs.view(-1).unsqueeze(1)
alpha = self.global_scorer.alpha
length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha
# Flatten probs into a list of possibilities.
curr_scores = log_probs / length_penalty
if(self.args.block_trigram):
cur_len = alive_seq.size(1)
# for i in range(alive_seq.size(0)):
# print([self.vocab.convert_ids_to_tokens(int(word)) if word<self.model.vocab_size else local_idx2token_new[i][int(word)] for word in alive_seq[i] ])
if (cur_len > 1):
for i in range(alive_seq.size(0)):
fail = False
words = [int(w) for w in alive_seq[i]]
trigrams = [words[i] for i in range(1, len(words) - 2) if words[i]>=self.model.vocab_size]
trigram = words[-1]
if trigram in trigrams:
fail = True
if fail:
curr_scores[i] = -10e20
if(cur_len>3):
for i in range(alive_seq.size(0)):
fail = False
words = [int(w) for w in alive_seq[i] if w < self.model.vocab_size]
# words = [self.vocab.ids_to_tokens[w] for w in words]
# words = ' '.join(words).replace(' ##','').split()
if(len(words)<=3):
continue
trigrams = [(words[i-1],words[i],words[i+1]) for i in range(1,len(words)-1)]
trigram = tuple(trigrams[-1])
if trigram in trigrams[:-1]:
fail = True
if fail:
curr_scores[i] = -10e20
curr_scores = curr_scores.reshape(-1, beam_size * vocab_size)
topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1)
# Recover log probs.
topk_log_probs = topk_scores * length_penalty
# Resolve beam origin and true word ids.
topk_beam_index = topk_ids.div(vocab_size)
topk_ids = topk_ids.fmod(vocab_size)
# print(topk_beam_index)
# print(beam_offset)
# Map beam_index to batch_index in the flat representation.
batch_index = (
topk_beam_index
+ beam_offset[:topk_beam_index.size(0)].unsqueeze(1))
# print(batch_index)
select_indices = batch_index.view(-1)
# Append last prediction.
alive_seq = torch.cat(
[alive_seq.index_select(0, select_indices),
topk_ids.view(-1, 1)], -1)
is_finished = topk_ids.eq(self.end_token)
if step + 1 == max_length:
is_finished.fill_(1)
# End condition is top beam is finished.
end_condition = is_finished[:, 0].eq(1)
# Save finished hypotheses.
if is_finished.any():
predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1))
for i in range(is_finished.size(0)):
b = batch_offset[i]
if end_condition[i]:
is_finished[i].fill_(1)
finished_hyp = is_finished[i].nonzero().view(-1)
# Store finished hypotheses for this batch.
for j in finished_hyp:
hypotheses[b].append((
topk_scores[i, j],
predictions[i, j, 1:]))
# If the batch reached the end, save the n_best hypotheses.
if end_condition[i]:
best_hyp = sorted(
hypotheses[b], key=lambda x: x[0], reverse=True)
score, pred = best_hyp[0]
results["scores"][b].append(score)
results["predictions"][b].append(pred)
non_finished = end_condition.eq(0).nonzero().view(-1)
# If all sentences are translated, no need to go further.
if len(non_finished) == 0:
break
# Remove finished batches for the next step.
topk_log_probs = topk_log_probs.index_select(0, non_finished)
batch_index = batch_index.index_select(0, non_finished)
batch_offset = batch_offset.index_select(0, non_finished)
alive_seq = predictions.index_select(0, non_finished) \
.view(-1, alive_seq.size(-1))
# Reorder states.
select_indices = batch_index.view(-1)
# print(select_indices)
src_features = src_features.index_select(0, select_indices)
src = src.index_select(0,select_indices)
if self.args.split_qm:
mask_word = mask_word.index_select(0,select_indices)
# if sent_repr != None:
sent_repr = sent_repr.index_select(0, select_indices)
mask_sent = mask_sent.index_select(0,select_indices)
steps = steps.index_select(0, select_indices)
if self.args.copy_decoder:
pre_prob = pre_prob.index_select(0,select_indices)
copy_seq = copy_seq.index_select(0,select_indices)
select_indices_lists = select_indices.cpu().numpy().tolist()
temp = local_idx2tokenid_new
local_idx2tokenid_new = []
for i in select_indices_lists:
local_idx2tokenid_new.append(temp[i])
temp = local_idx2token_new
local_idx2token_new = []
for i in select_indices_lists:
local_idx2token_new.append(temp[i])
temp = copy_step
copy_step = []
for i in select_indices_lists:
copy_step.append(temp[i])
else:
if self.args.split_gen:
# src_features = src_features.index_select(0, select_indices)
mask_word = mask_word.index_select(0, select_indices)
# src = src.index_select(0, select_indices)
dec_states.map_batch_fn(
lambda state, dim: state.index_select(dim, select_indices))
if self.args.split_gen and self.args.split_qm:
dec_states_sent.map_batch_fn(
lambda state, dim: state.index_select(dim, select_indices))
# if steps.min(-1)[0].cpu().item()>max_length:
# break
return results
class Translation(object):
"""
Container for a translated sentence.
Attributes:
src (`LongTensor`): src word ids
src_raw ([str]): raw src words
pred_sents ([[str]]): words from the n-best translations
pred_scores ([[float]]): log-probs of n-best translations
attns ([`FloatTensor`]) : attention dist for each translation
gold_sent ([str]): words from gold translation
gold_score ([float]): log-prob of gold translation
"""
def __init__(self, fname, src, src_raw, pred_sents,
attn, pred_scores, tgt_sent, gold_score):
self.fname = fname
self.src = src
self.src_raw = src_raw
self.pred_sents = pred_sents
self.attns = attn
self.pred_scores = pred_scores
self.gold_sent = tgt_sent
self.gold_score = gold_score
def log(self, sent_number):
"""
Log translation.
"""
output = '\nSENT {}: {}\n'.format(sent_number, self.src_raw)
best_pred = self.pred_sents[0]
best_score = self.pred_scores[0]
pred_sent = ' '.join(best_pred)
output += 'PRED {}: {}\n'.format(sent_number, pred_sent)
output += "PRED SCORE: {:.4f}\n".format(best_score)
if self.gold_sent is not None:
tgt_sent = ' '.join(self.gold_sent)
output += 'GOLD {}: {}\n'.format(sent_number, tgt_sent)
output += ("GOLD SCORE: {:.4f}\n".format(self.gold_score))
if len(self.pred_sents) > 1:
output += '\nBEST HYP:\n'
for score, sent in zip(self.pred_scores, self.pred_sents):
output += "[{:.4f}] {}\n".format(score, sent)
return output
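# --- Illustrative aside (not called by the translator above) ---
# A hedged, self-contained illustration of the 'wu' length penalty applied in
# _fast_translate_batch(): beam scores are divided by ((5 + length) / 6) ** alpha, so
# longer hypotheses are not unfairly penalized just for accumulating more negative
# log-probability terms. The scores below are made up purely to show the effect.
def wu_length_penalty(step, alpha):
    # step is 0-based, so (step + 1) is the current hypothesis length
    return ((5.0 + (step + 1)) / 6.0) ** alpha

if __name__ == "__main__":
    alpha = 0.9
    short_score, long_score = -4.0, -6.0  # summed log-probs of a 5-token vs 12-token beam
    print(short_score / wu_length_penalty(4, alpha))   # ~ -2.53
    print(long_score / wu_length_penalty(11, alpha))   # ~ -2.35, the longer beam now ranks higher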
| 45.89141
| 184
| 0.525693
|
6119315885d890395f812e01b4258369ec3f2135
| 3,190
|
py
|
Python
|
src/commcare_cloud/commands/inventory_lookup/getinventory.py
|
AliRizvi1/commcare-cloud
|
312f6c2ea4e97bdda1ae49aec6d114edf0dedb43
|
[
"BSD-3-Clause"
] | null | null | null |
src/commcare_cloud/commands/inventory_lookup/getinventory.py
|
AliRizvi1/commcare-cloud
|
312f6c2ea4e97bdda1ae49aec6d114edf0dedb43
|
[
"BSD-3-Clause"
] | null | null | null |
src/commcare_cloud/commands/inventory_lookup/getinventory.py
|
AliRizvi1/commcare-cloud
|
312f6c2ea4e97bdda1ae49aec6d114edf0dedb43
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Utilities to get server hostname or IP address from an inventory file and group.
"""
from __future__ import absolute_import, print_function
import re
import attr
from ansible.utils.display import Display
from commcare_cloud.commands.terraform.aws import get_default_username
from commcare_cloud.environment.main import get_environment
display = Display()
class HostMatchException(Exception):
pass
@attr.s
class HostPattern(object):
user = attr.ib()
group = attr.ib()
index = attr.ib()
def get_instance_group(environment, group):
env = get_environment(environment)
return env.sshable_hostnames_by_group[group]
def get_monolith_address(environment):
env = get_environment(environment)
hosts = env.inventory_manager.get_hosts()
if len(hosts) != 1:
raise HostMatchException("There are {} servers in the environment. Please include the 'server'"
"argument to select one.".format(len(hosts)))
else:
return get_server_address(environment, 'all')
def split_host_group(group):
ansible_style_pattern = re.match(r'^(?P<user>(.*?)@)?(?P<group>.*?)(\[(?P<index>\d+)\])?$', group)
if ansible_style_pattern:
user = ansible_style_pattern.group('user')
group = ansible_style_pattern.group('group')
index = ansible_style_pattern.group('index')
return HostPattern(user, group, int(index) if index else None)
return HostPattern(None, group, None)
def get_server_address(environment, group):
host_group = split_host_group(group)
username, group, index = host_group.user, host_group.group, host_group.index
if ':' in group:
display.warning("Use '[x]' to select hosts instead of ':x' which has been deprecated.")
group, index = group.rsplit(':', 1)
try:
index = int(index)
except (TypeError, ValueError):
raise HostMatchException("Non-numeric group index: {}".format(index))
if not username:
default_username = get_default_username()
if default_username.is_guess:
username = ""
else:
username = "{}@".format(default_username)
if re.match(r'(\d+\.?){4}', group):
# short circuit for IP addresses
return username + group
try:
servers = get_instance_group(environment, group)
except IOError as err:
raise HostMatchException(err)
except KeyError:
raise HostMatchException("Unknown server name/group: {}\n".format(group))
if index is not None and index > len(servers) - 1:
raise HostMatchException(
"Invalid group index: {index}\n"
"Please specify a number between 0 and {max} inclusive\n"
.format(index=index, max=len(servers) - 1)
)
if len(servers) > 1:
if index is None:
raise HostMatchException(
"There are {num} servers in the '{group}' group\n"
"Please specify the index of the server. Example: {group}:0\n"
.format(num=len(servers), group=group)
)
server = servers[index]
else:
server = servers[index or 0]
return username + server
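# --- Illustrative aside (not used by the lookup code above) ---
# A hedged demonstration of the host pattern grammar parsed by split_host_group(): an
# optional "user@" prefix, a group name, and an optional "[index]" suffix. The regex
# mirrors the one used above; the sample inputs are made up.
if __name__ == "__main__":
    import re as _re
    pattern = _re.compile(r'^(?P<user>(.*?)@)?(?P<group>.*?)(\[(?P<index>\d+)\])?$')
    for sample in ("webworkers[0]", "ansible@postgresql", "monolith"):
        m = pattern.match(sample)
        print(sample, "->", m.group('user'), m.group('group'), m.group('index'))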
| 31.584158
| 103
| 0.651411
|
ca5bbba5eae6295822b9959db4c70f1f2f36074d
| 571
|
py
|
Python
|
scripts/redballprogram.py
|
TUPilotsClub/TUTello
|
965b236866c34d2444afd4813418131438978e13
|
[
"MIT"
] | null | null | null |
scripts/redballprogram.py
|
TUPilotsClub/TUTello
|
965b236866c34d2444afd4813418131438978e13
|
[
"MIT"
] | null | null | null |
scripts/redballprogram.py
|
TUPilotsClub/TUTello
|
965b236866c34d2444afd4813418131438978e13
|
[
"MIT"
] | null | null | null |
from drone.tello import Tello
from gui.simplegui import SimpleGUI
from video.redballvideo import RedBallVideo
from video.facetrackvideo import FaceTrackVideo
from controller.trackcontroller import TrackController
from controller.tracktelemetry import TrackTelemetry
import sys
if sys.argv[1] != "webcam":
tello = Tello()
tello.connect()
else:
tello = None
#video = FaceTrackVideo(None)
telemetry = TrackTelemetry(tello)
video = RedBallVideo(tello, telemetry)
gui = SimpleGUI(video, telemetry)
controller = TrackController(tello, gui, telemetry)
gui.run()
| 30.052632
| 54
| 0.791594
|
49395ed517767a5408bee41e8dec2f02a247579b
| 4,185
|
py
|
Python
|
main/main.py
|
georgekis/gae-init
|
7e1eb0160aa8b971b32e902beceb08fc88aec028
|
[
"MIT"
] | 1
|
2015-09-12T10:12:16.000Z
|
2015-09-12T10:12:16.000Z
|
main/main.py
|
georgekis/gae-init
|
7e1eb0160aa8b971b32e902beceb08fc88aec028
|
[
"MIT"
] | null | null | null |
main/main.py
|
georgekis/gae-init
|
7e1eb0160aa8b971b32e902beceb08fc88aec028
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import logging
from flask.ext import wtf
import flask
import wtforms
import config
import util
app = flask.Flask(__name__)
app.config.from_object(config)
app.jinja_env.line_statement_prefix = '#'
app.jinja_env.line_comment_prefix = '##'
app.jinja_env.globals.update(
check_form_fields=util.check_form_fields,
is_iterable=util.is_iterable,
slugify=util.slugify,
update_query_argument=util.update_query_argument,
)
import user
import admin
import auth
import model
import profile
import task
import test
if config.DEVELOPMENT:
from werkzeug import debug
app.wsgi_app = debug.DebuggedApplication(app.wsgi_app, evalex=True)
app.testing = True
###############################################################################
# Main page
###############################################################################
@app.route('/')
def welcome():
return flask.render_template('welcome.html', html_class='welcome')
###############################################################################
# Sitemap stuff
###############################################################################
@app.route('/sitemap.xml')
def sitemap():
response = flask.make_response(flask.render_template(
'sitemap.xml',
lastmod=config.CURRENT_VERSION_DATE.strftime('%Y-%m-%d'),
))
response.headers['Content-Type'] = 'application/xml'
return response
###############################################################################
# Feedback
###############################################################################
class FeedbackForm(wtf.Form):
message = wtforms.TextAreaField(
'Message',
[wtforms.validators.required()], filters=[util.strip_filter],
)
email = wtforms.StringField(
'Your email (optional)',
[wtforms.validators.optional(), wtforms.validators.email()],
filters=[util.email_filter],
)
recaptcha = wtf.RecaptchaField('Are you human?')
@app.route('/feedback/', methods=['GET', 'POST'])
def feedback():
if not config.CONFIG_DB.feedback_email:
return flask.abort(418)
form = FeedbackForm(obj=auth.current_user_db())
if not config.CONFIG_DB.has_anonymous_recaptcha or auth.is_logged_in():
del form.recaptcha
if form.validate_on_submit():
body = '%s\n\n%s' % (form.message.data, form.email.data)
kwargs = {'reply_to': form.email.data} if form.email.data else {}
task.send_mail_notification('%s...' % body[:48].strip(), body, **kwargs)
flask.flash('Thank you for your feedback!', category='success')
return flask.redirect(flask.url_for('welcome'))
return flask.render_template(
'feedback.html',
title='Feedback',
html_class='feedback',
form=form,
)
###############################################################################
# Warmup request
###############################################################################
@app.route('/_ah/warmup')
def warmup():
# TODO: put your warmup code here
return 'success'
###############################################################################
# Error Handling
###############################################################################
@app.errorhandler(400) # Bad Request
@app.errorhandler(401) # Unauthorized
@app.errorhandler(403) # Forbidden
@app.errorhandler(404) # Not Found
@app.errorhandler(405) # Method Not Allowed
@app.errorhandler(410) # Gone
@app.errorhandler(418) # I'm a Teapot
@app.errorhandler(500) # Internal Server Error
def error_handler(e):
logging.exception(e)
try:
e.code
except AttributeError:
e.code = 500
e.name = 'Internal Server Error'
if flask.request.path.startswith('/_s/'):
return util.jsonpify({
'status': 'error',
'error_code': e.code,
'error_name': util.slugify(e.name),
'error_message': e.name,
'error_class': e.__class__.__name__,
}), e.code
return flask.render_template(
'error.html',
title='Error %d (%s)!!1' % (e.code, e.name),
html_class='error-page',
error=e,
), e.code
if config.PRODUCTION:
@app.errorhandler(Exception)
def production_error_handler(e):
return error_handler(e)
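# --- Illustrative aside (not wired into the app) ---
# The error handler above returns a JSON body for service endpoints under /_s/ and an
# HTML page otherwise. This is roughly the JSON shape a client should expect for, say,
# a 404; the values and the example_error_payload helper are examples only.
def example_error_payload(code, name):
    return {
        'status': 'error',
        'error_code': code,
        'error_name': name.lower().replace(' ', '-'),
        'error_message': name,
        'error_class': 'NotFound',
    }

if __name__ == "__main__":
    print(example_error_payload(404, 'Not Found'))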
| 28.664384
| 79
| 0.560812
|
b1d4a7e94d8076e1a97811df11c052361ad5385d
| 21,827
|
py
|
Python
|
build/windows/WindowsBuilder.py
|
szywind/DeepFaceLive
|
60d867843c323a810352b1a8ac209912dec0acf2
|
[
"MIT"
] | 3
|
2021-12-08T08:59:50.000Z
|
2022-02-08T02:54:27.000Z
|
build/windows/WindowsBuilder.py
|
SandUhrGucker/DeepFaceLive
|
a897cbb06ee3511c63f10d9fbf2ccb66c3ee9659
|
[
"MIT"
] | 1
|
2022-02-08T01:29:03.000Z
|
2022-02-08T01:29:03.000Z
|
build/windows/WindowsBuilder.py
|
SandUhrGucker/DeepFaceLive
|
a897cbb06ee3511c63f10d9fbf2ccb66c3ee9659
|
[
"MIT"
] | 1
|
2021-12-14T09:18:15.000Z
|
2021-12-14T09:18:15.000Z
|
import argparse
import os
import shutil
import subprocess
import time
import urllib.request
import zipfile
from datetime import datetime
from pathlib import Path
from typing import List
class WindowsFolderBuilder:
"""
Builds stand-alone portable all-in-one python folder for Windows with the project from scratch.
"""
# Constants
URL_PIP = r'https://bootstrap.pypa.io/get-pip.py'
URL_VSCODE = r'https://code.visualstudio.com/sha/download?build=stable&os=win32-x64-archive'
URL_FFMPEG = r'https://github.com/GyanD/codexffmpeg/releases/download/4.4/ffmpeg-4.4-full_build.zip'
URL_7ZIP = r'https://github.com/iperov/DeepFaceLive/releases/download/7za/7za.zip'
URL_MSVC = r'https://github.com/iperov/DeepFaceLive/releases/download/msvc/msvc.zip'
URLS_PYTHON = {'3.7.9' : r'https://www.python.org/ftp/python/3.7.9/python-3.7.9-embed-amd64.zip',
'3.8.10' : r'https://www.python.org/ftp/python/3.8.10/python-3.8.10-embed-amd64.zip',
}
DIRNAME_INTERNAL = '_internal'
DIRNAME_INTERNAL_CUDA = 'CUDA'
DIRNAME_INTERNAL_PYTHON = 'python'
DIRNAME_INTERNAL_FFMPEG = 'ffmpeg'
DIRNAME_LOCALENV = '_z'
DIRNAME_TEMP = 't'
DIRNAME_USERPROFILE = 'u'
DIRNAME_APPDATA = 'AppData'
DIRNAME_LOCAL = 'Local'
DIRNAME_ROAMING = 'Roaming'
DIRNAME_DESKTOP = 'Desktop'
DIRNAME_INTERNAL_VSCODE = 'VSCode'
def __init__(self, release_path : Path,
cache_path : Path,
python_ver : str,
clear_release_path : bool = True,
):
super().__init__()
self.release_path = release_path
self.python_ver = python_ver
self.cache_path = cache_path
self.download_cache_path = cache_path / '_dl_cache'
self.pip_cache_path = cache_path / '_pip_cache'
if clear_release_path:
if release_path.exists():
print('Removing existing directory.')
shutil.rmtree(release_path)
while release_path.exists():
time.sleep(0.1)
release_path.mkdir(parents=True)
self._validate_env()
self._install_internal()
self._install_python()
def copyfiletree(self, src, dst):
shutil.copytree(src, dst)
def copyfile(self, src, dst):
shutil.copyfile(src, dst)
def download_file(self, url, savepath : Path, progress_bar=True, use_cached=True):
"""
Download the file or use cached and save to savepath
"""
urlpath = Path(url)
if progress_bar:
print(f'Downloading {url}')
f = None
while True:
try:
url_request = urllib.request.urlopen(url)
url_size = int( url_request.getheader('content-length') )
if use_cached:
cached_filepath = self.download_cache_path / urlpath.name
if cached_filepath.exists():
if url_size == cached_filepath.stat().st_size:
print(f'Using cached {cached_filepath}')
break
else:
print('Cached file size mismatch. Downloading from url.')
else:
cached_filepath = savepath
cached_filepath.parent.mkdir(parents=True, exist_ok=True)
file_size_dl = 0
f = open(cached_filepath, 'wb')
while True:
buffer = url_request.read(8192)
if not buffer:
break
f.write(buffer)
file_size_dl += len(buffer)
if progress_bar:
print(f'Downloading {file_size_dl} / {url_size}', end='\r')
except:
print(f'Unable to download {url}')
raise
break
if f is not None:
f.close()
if use_cached:
shutil.copy2(cached_filepath, savepath)
def rmdir(self, path):
os.system('del /F /S /Q "{}" > nul'.format(str(path)))
os.system('rmdir /S /Q "{}"'.format(str(path)))
def rmdir_in_all_subdirs(self, path, subdirname):
for root, dirs, files in os.walk( str(path), topdown=False):
if subdirname in dirs:
self.rmdir( Path(root) / subdirname )
def get_release_path(self): return self.release_path
def get_internal_path(self): return self.internal_path
def _validate_env(self):
env = os.environ.copy()
self.internal_path = self.release_path / self.DIRNAME_INTERNAL
self.internal_path.mkdir(exist_ok=True, parents=True)
self.local_env_path = self.internal_path / self.DIRNAME_LOCALENV
self.local_env_path.mkdir(exist_ok=True, parents=True)
self.temp_path = self.local_env_path / self.DIRNAME_TEMP
self.temp_path.mkdir(exist_ok=True, parents=True)
self.userprofile_path = self.local_env_path / self.DIRNAME_USERPROFILE
self.userprofile_path.mkdir(exist_ok=True, parents=True)
self.desktop_path = self.userprofile_path / self.DIRNAME_DESKTOP
self.desktop_path.mkdir(exist_ok=True, parents=True)
self.localappdata_path = self.userprofile_path / self.DIRNAME_APPDATA / self.DIRNAME_LOCAL
self.localappdata_path.mkdir(exist_ok=True, parents=True)
self.appdata_path = self.userprofile_path / self.DIRNAME_APPDATA / self.DIRNAME_ROAMING
self.appdata_path.mkdir(exist_ok=True, parents=True)
self.python_path = self.internal_path / self.DIRNAME_INTERNAL_PYTHON
self.python_path.mkdir(exist_ok=True, parents=True)
self.python_site_packages_path = self.python_path / 'Lib' / 'site-packages'
self.python_site_packages_path.mkdir(exist_ok=True, parents=True)
self.cuda_path = self.internal_path / self.DIRNAME_INTERNAL_CUDA
self.cuda_path.mkdir(exist_ok=True, parents=True)
self.cuda_bin_path = self.cuda_path / 'bin'
self.cuda_bin_path.mkdir(exist_ok=True, parents=True)
self.vscode_path = self.internal_path / self.DIRNAME_INTERNAL_VSCODE
self.ffmpeg_path = self.internal_path / self.DIRNAME_INTERNAL_FFMPEG
self._7zip_path = self.temp_path / '7zip'
env['INTERNAL'] = str(self.internal_path)
env['LOCALENV'] = str(self.local_env_path)
env['TMP'] = \
env['TEMP'] = str(self.temp_path)
env['HOME'] = \
env['HOMEPATH'] = \
env['USERPROFILE'] = str(self.userprofile_path)
env['DESKTOP'] = str(self.desktop_path)
env['LOCALAPPDATA'] = str(self.localappdata_path)
env['APPDATA'] = str(self.appdata_path)
env['PYTHONHOME'] = ''
env['PYTHONPATH'] = ''
env['PYTHON_PATH'] = str(self.python_path)
env['PYTHONEXECUTABLE'] = \
env['PYTHON_EXECUTABLE'] = \
env['PYTHON_BIN_PATH'] = str(self.python_path / 'python.exe')
env['PYTHONWEXECUTABLE'] = \
env['PYTHON_WEXECUTABLE'] = str(self.python_path / 'pythonw.exe')
env['PYTHON_LIB_PATH'] = str(self.python_path / 'Lib' / 'site-packages')
env['CUDA_PATH'] = str(self.cuda_path)
env['PATH'] = f"{str(self.cuda_path)};{str(self.python_path)};{str(self.python_path / 'Scripts')};{env['PATH']}"
if self.pip_cache_path is not None:
env['PIP_CACHE_DIR'] = str(self.pip_cache_path)
self.env = env
def _install_internal(self):
(self.internal_path / 'setenv.bat').write_text(
fr"""@echo off
SET INTERNAL=%~dp0
SET INTERNAL=%INTERNAL:~0,-1%
SET LOCALENV=%INTERNAL%\{self.DIRNAME_LOCALENV}
SET TMP=%LOCALENV%\{self.DIRNAME_TEMP}
SET TEMP=%TMP%
SET HOME=%LOCALENV%\{self.DIRNAME_USERPROFILE}
SET HOMEPATH=%HOME%
SET USERPROFILE=%HOME%
SET DESKTOP=%HOME%\{self.DIRNAME_DESKTOP}
SET LOCALAPPDATA=%USERPROFILE%\{self.DIRNAME_APPDATA}\{self.DIRNAME_LOCAL}
SET APPDATA=%USERPROFILE%\{self.DIRNAME_APPDATA}\{self.DIRNAME_ROAMING}
SET PYTHONHOME=
SET PYTHONPATH=
SET PYTHON_PATH=%INTERNAL%\python
SET PYTHONEXECUTABLE=%PYTHON_PATH%\python.exe
SET PYTHON_EXECUTABLE=%PYTHONEXECUTABLE%
SET PYTHONWEXECUTABLE=%PYTHON_PATH%\pythonw.exe
SET PYTHONW_EXECUTABLE=%PYTHONWEXECUTABLE%
SET PYTHON_BIN_PATH=%PYTHONEXECUTABLE%
SET PYTHON_LIB_PATH=%PYTHON_PATH%\Lib\site-packages
SET CUDA_PATH=%INTERNAL%\CUDA
SET CUDA_BIN_PATH=%CUDA_PATH%\bin
SET QT_QPA_PLATFORM_PLUGIN_PATH=%PYTHON_LIB_PATH%\PyQT6\Qt6\Plugins\platforms
SET PATH=%INTERNAL%\ffmpeg;%PYTHON_PATH%;%CUDA_BIN_PATH%;%PYTHON_PATH%\Scripts;%PATH%
""")
self.clearenv_bat_path = self.internal_path / 'clearenv.bat'
self.clearenv_bat_path.write_text(
fr"""@echo off
cd /D %~dp0
call setenv.bat
rmdir %LOCALENV% /s /q
mkdir %LOCALENV%
mkdir %TEMP%
mkdir %USERPROFILE%
mkdir %DESKTOP%
mkdir %LOCALAPPDATA%
mkdir %APPDATA%
""")
(self.internal_path / 'python_console.bat').write_text(
fr"""
@echo off
cd /D %~dp0
call setenv.bat
cd python
cmd
""")
def _install_python(self):
python_url = self.URLS_PYTHON.get(self.python_ver, None)
if python_url is None:
raise Exception(f'No python URL defined for {self.python_ver}')
print (f"Installing python {self.python_ver} to {self.python_path}\n")
python_dl_path = self.python_path / f'python-{self.python_ver}.zip'
if not python_dl_path.exists():
self.download_file(python_url, python_dl_path)
with zipfile.ZipFile(python_dl_path, 'r') as zip_ref:
zip_ref.extractall(self.python_path)
python_dl_path.unlink()
# Remove _pth file
for pth_file in self.python_path.glob("*._pth"):
pth_file.unlink()
print('Installing MS VC dlls.')
self.download_and_unzip(self.URL_MSVC, self.python_path)
print ("Installing pip.\n")
python_pip_path = self.python_path / 'get-pip.py'
self.download_file(self.URL_PIP, python_pip_path)
subprocess.Popen(args='python.exe get-pip.py', cwd=str(self.python_path), shell=True, env=self.env).wait()
python_pip_path.unlink()
def _get_7zip_bin_path(self):
if not self._7zip_path.exists():
self.download_and_unzip(self.URL_7ZIP, self._7zip_path)
return self._7zip_path / '7za.exe'
def cleanup(self):
print ('Cleanup.\n')
subprocess.Popen(args=str(self.clearenv_bat_path), shell=True).wait()
self.rmdir_in_all_subdirs (self.release_path, '__pycache__')
def pack_sfx_release(self, archive_name):
archiver_path = self._get_7zip_bin_path()
archive_path = self.release_path.parent / (archive_name+'.exe')
subprocess.Popen(args='"%s" a -t7z -sfx7z.sfx -m0=LZMA2 -mx9 -mtm=off -mmt=8 "%s" "%s"' % ( \
str(archiver_path),
str(archive_path),
str(self.release_path) ),
shell=True).wait()
def download_and_unzip(self, url, unzip_dirpath, only_files_list : List =None):
"""
Download and unzip entire content to unzip_dirpath
only_files_list(None) if specified
only first match of these files
will be extracted to unzip_dirpath without folder structure
"""
unzip_dirpath.mkdir(parents=True, exist_ok=True)
tmp_zippath = unzip_dirpath / '__dl.zip'
self.download_file(url, tmp_zippath)
with zipfile.ZipFile(tmp_zippath, 'r') as zip_ref:
for entry in zip_ref.filelist:
if only_files_list is not None:
if not entry.is_dir():
entry_filepath = Path( entry.filename )
if entry_filepath.name in only_files_list:
only_files_list.remove(entry_filepath.name)
(unzip_dirpath / entry_filepath.name).write_bytes ( zip_ref.read(entry) )
else:
entry_outpath = unzip_dirpath / Path(entry.filename)
if entry.is_dir():
entry_outpath.mkdir(parents=True, exist_ok=True)
else:
entry_outpath.write_bytes ( zip_ref.read(entry) )
tmp_zippath.unlink()
def install_pip_package(self, pkg_name):
subprocess.Popen(args=f'python.exe -m pip install {pkg_name}', cwd=str(self.python_path), shell=True, env=self.env).wait()
def run_python(self, argsline, cwd=None):
if cwd is None:
cwd = self.python_path
subprocess.Popen(args=f'python.exe {argsline}', cwd=str(cwd), shell=True, env=self.env).wait()
def install_ffmpeg_binaries(self):
print('Installing ffmpeg binaries.')
self.ffmpeg_path.mkdir(exist_ok=True, parents=True)
self.download_and_unzip(self.URL_FFMPEG, self.ffmpeg_path, only_files_list=['ffmpeg.exe', 'ffprobe.exe'] )
def install_vscode(self, project_internal_dir : str = None):
"""
Installs vscode
"""
print('Installing VSCode.\n')
self.vscode_path.mkdir(exist_ok=True, parents=True)
vscode_zip_path = self.vscode_path / 'VSCode.zip'
self.download_file(self.URL_VSCODE, vscode_zip_path, use_cached=False)
with zipfile.ZipFile(vscode_zip_path, 'r') as zip_ref:
zip_ref.extractall(self.vscode_path)
vscode_zip_path.unlink()
# Create bat
(self.internal_path / 'vscode.bat').write_text(
fr"""@echo off
cd /D %~dp0
call setenv.bat
start "" /D "%~dp0" "%INTERNAL%\{self.DIRNAME_INTERNAL_VSCODE}\Code.exe" --disable-workspace-trust "project.code-workspace"
""")
# Enable portable mode in VSCode
(self.vscode_path / 'data').mkdir(exist_ok=True)
# Create vscode project
if project_internal_dir is None:
project_internal_dir = '.'
(self.internal_path / 'project.code-workspace').write_text (
fr'''{{
"folders": [
{{
"path": "{project_internal_dir}"
}}
],
"settings": {{
"workbench.colorTheme": "Visual Studio Light",
"diffEditor.ignoreTrimWhitespace": true,
"workbench.sideBar.location": "right",
"breadcrumbs.enabled": false,
"editor.renderWhitespace": "none",
"editor.minimap.enabled": false,
"workbench.activityBar.visible": true,
"window.menuBarVisibility": "default",
"editor.fastScrollSensitivity": 10,
"editor.mouseWheelScrollSensitivity": 2,
"window.zoomLevel": 0,
"extensions.ignoreRecommendations": true,
"debug.showBreakpointsInOverviewRuler": true,
"python.linting.pylintEnabled": false,
"python.linting.enabled": false,
"python.linting.pylamaEnabled": false,
"python.linting.pydocstyleEnabled": false,
"python.defaultInterpreterPath": "${{env:PYTHON_EXECUTABLE}}",
"telemetry.enableTelemetry": false,
"workbench.editor.tabCloseButton": "off",
"workbench.editor.tabSizing": "shrink",
"workbench.editor.highlightModifiedTabs": true,
"workbench.enableExperiments": false,
"editor.mouseWheelScrollSensitivity": 3,
"editor.folding": false,
"editor.glyphMargin": false,
"files.exclude": {{
"**/__pycache__": true,
"**/.github": true,
"**/.vscode": true,
"**/*.dat": true,
"**/*.h5": true,
"**/*.npy": true
}},
"editor.quickSuggestions": {{"other": false,"comments": false,"strings": false}},
"editor.trimAutoWhitespace": false,
"python.linting.pylintArgs": ["--disable=import-error"],
"python.linting.enabled": false,
"editor.lightbulb.enabled": false,
"python.languageServer": "Pylance"
}}
}}
''')
subprocess.Popen(args=r'bin\code.cmd --disable-workspace-trust --install-extension ms-python.python', cwd=self.vscode_path, shell=True, env=self.env).wait()
subprocess.Popen(args=r'bin\code.cmd --disable-workspace-trust --install-extension ms-python.vscode-pylance', cwd=self.vscode_path, shell=True, env=self.env).wait()
subprocess.Popen(args=r'bin\code.cmd --disable-workspace-trust --install-extension searking.preview-vscode', cwd=self.vscode_path, shell=True, env=self.env).wait()
def create_run_python_script(self, script_name : str, internal_relative_path : str, args_str : str):
(self.release_path / script_name).write_text(
fr"""@echo off
cd /D %~dp0
call {self.DIRNAME_INTERNAL}\setenv.bat
%PYTHONEXECUTABLE% {self.DIRNAME_INTERNAL}\{internal_relative_path} {args_str}
pause
""")
def create_internal_run_python_script(self, script_name : str, internal_relative_path : str, args_str : str):
(self.internal_path / script_name).write_text(
fr"""@echo off
cd /D %~dp0
call setenv.bat
%PYTHONEXECUTABLE% {internal_relative_path} {args_str}
pause
""")
def build_deepfacelive_windows(release_dir, cache_dir, python_ver='3.7.9', backend='cuda'):
builder = WindowsFolderBuilder(release_path=Path(release_dir),
cache_path=Path(cache_dir),
python_ver=python_ver,
clear_release_path=True)
# PIP INSTALLATIONS
builder.install_pip_package('numpy==1.21.2')
builder.install_pip_package('numexpr')
builder.install_pip_package('opencv-python==4.5.3.56')
builder.install_pip_package('opencv-contrib-python==4.5.3.56')
builder.install_pip_package('pyqt6==6.1.1')
builder.install_pip_package('onnx==1.10.1')
if backend == 'cuda':
builder.install_pip_package('torch==1.8.1+cu111 torchvision==0.9.1+cu111 -f https://download.pytorch.org/whl/torch_stable.html')
builder.install_pip_package('onnxruntime-gpu==1.9.0')
elif backend == 'directml':
if python_ver[:3] == '3.7':
builder.install_pip_package('https://github.com/iperov/DeepFaceLive/releases/download/ort-dml/onnxruntime_directml-1.8.2-cp37-cp37m-win_amd64.whl')
else:
raise Exception(f'no onnxruntime_directml wheel for python {python_ver}')
builder.install_ffmpeg_binaries()
#
if backend == 'cuda':
print('Moving CUDA dlls from Torch to shared directory')
cuda_bin_path = builder.cuda_bin_path
torch_lib_path = builder.python_site_packages_path / 'torch' / 'lib'
for cu_file in torch_lib_path.glob("**/cu*64*.dll"):
target = cuda_bin_path / cu_file.name
print (f'Moving {target}')
shutil.move (str(cu_file), str(target) )
for file in torch_lib_path.glob("**/nvrtc*.dll"):
target = cuda_bin_path / file.name
print (f'Moving {target}')
shutil.move (str(file), str(target) )
deepfacelive_path = builder.get_internal_path() / 'DeepFaceLive'
print('Copying DeepFaceLive repository.')
builder.copyfiletree(Path(__file__).parent.parent.parent, deepfacelive_path)
builder.rmdir_in_all_subdirs(deepfacelive_path, '.git')
builder.install_vscode(project_internal_dir='DeepFaceLive')
release_path = builder.get_release_path()
userdata_path = release_path / 'userdata'
userdata_path.mkdir(parents=True, exist_ok=True)
dfm_models_path = userdata_path / 'dfm_models'
dfm_models_path.mkdir(parents=True, exist_ok=True)
print('Copying samples.')
shutil.copytree( str(Path(__file__).parent.parent / 'samples'), str(userdata_path / 'samples') )
if backend == 'cuda':
builder.create_run_python_script('DeepFaceLive.bat', 'DeepFaceLive\\main.py', 'run DeepFaceLive --userdata-dir=%~dp0userdata')
elif backend == 'directml':
builder.create_run_python_script('DeepFaceLive.bat', 'DeepFaceLive\\main.py', 'run DeepFaceLive --userdata-dir=%~dp0userdata --no-cuda')
builder.create_internal_run_python_script('build DeepFaceLive NVIDIA.bat', 'DeepFaceLive\\build\\windows\\WindowsBuilder.py', '--build-type dfl-windows --release-dir Builds\\DeepFaceLive_NVIDIA --cache-dir _cache --backend cuda')
builder.create_internal_run_python_script('build DeepFaceLive DirectX12.bat', 'DeepFaceLive\\build\\windows\\WindowsBuilder.py', '--build-type dfl-windows --release-dir Builds\\DeepFaceLive_DirectX12 --cache-dir _cache --backend directml')
builder.run_python('main.py dev merge_large_files --delete-parts', cwd=deepfacelive_path)
builder.cleanup()
if backend == 'cuda':
build_name = f'DeepFaceLive_NVIDIA_build_{datetime.now().strftime("%m_%d_%Y")}'
elif backend == 'directml':
build_name = f'DeepFaceLive_DirectX12_build_{datetime.now().strftime("%m_%d_%Y")}'
builder.pack_sfx_release(build_name)
class fixPathAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, os.path.abspath(os.path.expanduser(values)))
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('--build-type', required=True, choices=['dfl-windows'])
p.add_argument('--release-dir', action=fixPathAction, default=None)
p.add_argument('--cache-dir', action=fixPathAction, default=None)
p.add_argument('--python-ver', default="3.7.9")
p.add_argument('--backend', choices=['cuda', 'directml'], default='cuda')
args = p.parse_args()
if args.build_type == 'dfl-windows':
build_deepfacelive_windows(release_dir=args.release_dir,
cache_dir=args.cache_dir,
python_ver=args.python_ver,
backend=args.backend)
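# Illustrative usage sketch (not part of the original script). Based on the argparse
# options above and the generated .bat files, a build is launched roughly like:
#
#   python WindowsBuilder.py --build-type dfl-windows --release-dir Builds\DeepFaceLive_NVIDIA --cache-dir _cache --backend cuda
#
# which installs a portable Python plus the pinned pip packages for the chosen backend,
# copies the repository, and finally packs the release folder into a 7z SFX archive.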
| 38.427817
| 242
| 0.644294
|
ef075eb3181552765348093b97f1e2c8d094f5f2
| 11,279
|
py
|
Python
|
source code/procon.py
|
kayamalab/Scratch2.0-Chaser-base
|
c2074d9a17cdfaddbbfce6b0ba4b66d66b9126f7
|
[
"MIT"
] | 1
|
2019-07-30T08:22:07.000Z
|
2019-07-30T08:22:07.000Z
|
source code/procon.py
|
kayamalab/Scratch2.0-Chaser-base
|
c2074d9a17cdfaddbbfce6b0ba4b66d66b9126f7
|
[
"MIT"
] | null | null | null |
source code/procon.py
|
kayamalab/Scratch2.0-Chaser-base
|
c2074d9a17cdfaddbbfce6b0ba4b66d66b9126f7
|
[
"MIT"
] | 1
|
2020-03-02T03:49:19.000Z
|
2020-03-02T03:49:19.000Z
|
from aiohttp import web
import pyclient as pyclient
import time
import os
import subprocess
class Scratch_ex:
### Initial setup
def __init__(self):
self.matrices=0
self.gamestatus=0
self.getreadyok=0
self.port=2009
self.ip="127.0.0.1"
self.teamname="team"
self.session=pyclient.client()
self.busy=0
self.skipmessagecount=0
### Functions
def direction_convert_jp(self,direction):
if direction=="up":
return "上"
elif direction=="right":
return "右"
elif direction=="left":
return "左"
elif direction=="down":
return "下"
def act(self,mode,direction):
if self.gamestatus!=1:
print("サーバに接続されていません。")
elif self.getreadyok==0:
print("準備がされていません スキップします")
else:
try:
if mode =="walk":
self.matrices=self.session.walk(direction)
elif mode=="look":
self.matrices=self.session.look(direction)
elif mode=="search":
self.matrices=self.session.search(direction)
elif mode=="put":
self.matrices=self.session.put(direction)
except :
print("コネクションが切断されました。")
print("ゲームの状態はリセットするまで2になります")
self.session=pyclient.client()
self.gamestatus=2
return
if self.matrices==0:
print("コネクションを切断します。")
print("ゲームの状態はリセットするまで2になります")
self.session=pyclient.client()
self.gamestatus=2
return
if mode =="walk":
print(self.direction_convert_jp(direction),"に移動")
elif mode=="look":
print(self.direction_convert_jp(direction),"の周りを見る")
elif mode=="search":
print(self.direction_convert_jp(direction),"の遠くを見る")
elif mode=="put":
print(self.direction_convert_jp(direction),"にブロックを設置")
self.getreadyok=0
print()
return
### Variable / connection settings
async def handle_serverip(self, request):
self.busy = request.match_info['busyid']
self.ip = request.match_info['ip']
print("次のIPアドレスが設定されました",self.ip)
self.busy=0
return web.Response(text="OK")
async def handle_serverport(self, request):
self.busy = request.match_info['busyid']
self.port = request.match_info['port']
print("次のportが設定されました",self.port)
self.busy=0
return web.Response(text="OK")
async def handle_teamname(self, request):
self.busy = request.match_info['busyid']
self.teamname = request.match_info['team']
print("次のチーム名が設定されました",self.teamname)
self.busy=0
return web.Response(text="OK")
async def handle_server(self, request):
self.busy = request.match_info['busyid']
if self.gamestatus==1:
print("接続済みです。")
else:
print("IPアドレス:",self.ip," port:",self.port,"に接続します。")
print("チーム名は",self.teamname,"です。")
try:
self.session.connection_to_server(self.ip,int(self.port),self.teamname)
self.gamestatus=1
except :
print("サーバー接続に失敗しました。")
self.gamestatus=0
if self.gamestatus==1:
print("サーバーに接続しました。")
self.busy=0
return web.Response(text="OK")
async def handle_reset(self, request):
self.busy = request.match_info['busyid']
os.system('cls')
self.matrices=0
self.gamestatus=0
self.getreadyok=0
self.port=2009
self.ip="127.0.0.1"
self.teamname="team"
self.session=pyclient.client()
self.busy=0
self.skipmessagecount=0
print("リセットが完了しました。")
return web.Response(text="OK")
### Scratch action handlers
async def handle_poll(self,request):
text = "matrices " + str(self.matrices) + "\n"
text =text+"gamestatus " + str(self.gamestatus) + "\n"
if type(self.matrices) is list:
for i in range(9):
text =text+"block"+str(i+1)+" "+str(self.matrices[i])+ "\n"
else:
for i in range(9):
text =text+"block"+str(i+1)+" "+str(0)+ "\n"
if self.busy!=0:
text = text+"_busy "+str(self.busy)
return web.Response(text=text)
async def handle_getready(self, request):
self.busy = request.match_info['busyid']
if self.gamestatus!=1:
print("サーバに接続されていません。")
elif self.getreadyok==1:
print("すでにこちらのターンです")
else :
print("準備を開始")
try:
self.matrices=self.session.get_ready()
except :
print("コネクションが切断されました。")
print("ゲームの状態はリセットするまで2になります")
self.session=pyclient.client()
self.gamestatus=2
if self.matrices==0:
print("コネクションを切断します。")
print("ゲームの状態はリセットするまで2になります")
self.gamestatus=2
self.session=pyclient.client()
self.busy=0
return web.Response(text="OK")
print("準備が完了しました。こちらのターンです。")
self.getreadyok=1
self.busy=0
return web.Response(text="OK")
async def handle_walk(self, request):
self.busy = request.match_info['busyid']
self.act("walk",request.match_info['direction'])
self.busy=0
return web.Response(text="OK")
async def handle_look(self, request):
self.busy = request.match_info['busyid']
self.act("look",request.match_info['direction'])
self.busy=0
return web.Response(text="OK")
async def handle_search(self, request):
self.busy = request.match_info['busyid']
self.act("search",request.match_info['direction'])
self.busy=0
return web.Response(text="OK")
async def handle_put(self, request):
self.busy = request.match_info['busyid']
self.act("put",request.match_info['direction'])
self.busy=0
return web.Response(text="OK")
async def handle_start_server(self,request):
self.busy = request.match_info['busyid']
try:
subprocess.Popen(r'.\Procon-Server\Procon-Server.exe')
except:
print("プロコンサーバーを見つけられませんでした。")
print(".\Procon-Server\Procon-Server.exe not find")
self.busy=0
return web.Response(text="OK")
####### help
async def handle_help(self, request):
self.busy = request.match_info['busyid']
helpmode=1
while helpmode!=0:
os.system('cls')
print("#"*30)
print("次から見たいhelpを数字で指定してください")
print()
print("get help Agent block =2")
print("set server ip =3")
print("set server port =4")
print("set team name =5")
print("connect to server =6")
print("connection reset =7")
print("getready =8")
print("walk to 向き =9")
print("look to 向き =10")
print("search to 向き =11")
print("put to 向き =12")
print("get game status =13")
print("get matrices =14")
print("1~9block data =15")
print("ヘルプを終了します =0")
temp=input("=")
if temp=="":
temp="hoge"
try:
helpmode=int(temp)
except :
print("整数を入力してください 全角ではありませんか?")
helpmode=1
if(helpmode==2):
print("helpモードを起動します")
if(helpmode==3):
print("プロコンサーバーのIPアドレスを指定します。ローカルでは[127.0.0.1]です。対戦を行う場合は変更が必要になります。")
if(helpmode==4):
print("プロコンサーバーのポート番号を指定します。coolは2009 hotは2010 が初期値です。")
if(helpmode==5):
print("プロコンサーバーに通知するチーム名を指定します。お好きにどうぞ、と言いたいところですが、日本語はうまく表示できないようです。")
if(helpmode==6):
print("設定されたIPとポートを使用し、プロコンサーバーに接続を行います。初期値が内部で設定されているため、IPとポートを設定しなくても、プロコンサーバーに接続できることがあります。")
if(helpmode==7):
print("プロコンサーバーとの接続を終了し、リセットを行います。基本的にはゲーム終了後に、この実行ファイルを再度起動する手間を省くためのものです。")
if(helpmode==8):
print("プロコンサーバーに対して、「getready」を送信します。プロコンサーバーに接続されていなければ処理はスキップされます。ターン中に実行した場合、処理をスキップする保護機能があります。")
if(helpmode==9):
print("プロコンサーバーに対して、「walk」を送信します。プロコンサーバーに接続されていなければ処理はスキップされます。ターン外に実行した場合、処理をスキップする保護機能があります。")
if(helpmode==10):
print("プロコンサーバーに対して、「look」を送信します。プロコンサーバーに接続されていなければ処理はスキップされます。ターン外に実行した場合、処理をスキップする保護機能があります。")
if(helpmode==11):
print("プロコンサーバーに対して、「search」を送信します。プロコンサーバーに接続されていなければ処理はスキップされます。ターン外に実行した場合、処理をスキップする保護機能があります。")
if(helpmode==12):
print("プロコンサーバーに対して、「put」を送信します。プロコンサーバーに接続されていなければ処理はスキップされます。ターン外に実行した場合、処理をスキップする保護機能があります。")
if(helpmode==13):
print("現在のゲームの状態が示されます。ゲーム開始前=0 ゲーム中=1 ゲーム終了=2 となります")
if(helpmode==14):
print("各行動によって得られた周辺情報が示されます。配列の形式が好きな方はこちらを利用してください。[,]も一文字となりますので、1マス目は2、5マス目は10 といったように、マス目の2倍を指定する必要があります。")
if(helpmode==15):
print("各行動によって得られた周辺情報が示されます。それぞれがマス目の状態なのでそのまま使うことができます。")
input("続けるにはなにかキーを入力してください。")
print("helpを終了します。")
self.busy=0
return web.Response(text="OK")
def main(self):
""" Main routine """
app = web.Application()
app.router.add_get('/poll', self.handle_poll)
app.router.add_get('/start_server/{busyid}', self.handle_start_server)
app.router.add_get('/server/{busyid}', self.handle_server)
app.router.add_get('/help/{busyid}', self.handle_help)
app.router.add_get('/reset/{busyid}', self.handle_reset)
app.router.add_get('/serverip/{busyid}/{ip}', self.handle_serverip)
app.router.add_get('/serverport/{busyid}/{port}', self.handle_serverport)
app.router.add_get('/teamname/{busyid}/{team}', self.handle_teamname)
app.router.add_get('/getready/{busyid}', self.handle_getready)
app.router.add_get('/walk/{busyid}/{direction}', self.handle_walk)
app.router.add_get('/look/{busyid}/{direction}', self.handle_look)
app.router.add_get('/search/{busyid}/{direction}', self.handle_search)
app.router.add_get('/put/{busyid}/{direction}', self.handle_put)
web.run_app(app, host='127.0.0.1', port=12345)
if __name__ == '__main__':
s2extest = Scratch_ex()
s2extest.main()
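# Illustrative note (not part of the original file): main() serves a Scratch 2.0 HTTP
# extension on 127.0.0.1:12345, so the Scratch blocks drive it through URLs such as
#   http://127.0.0.1:12345/poll
#   http://127.0.0.1:12345/serverip/<busyid>/<ip>
#   http://127.0.0.1:12345/walk/<busyid>/up
# where <busyid> is the id Scratch passes so handle_poll can report "_busy" commands.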
| 38.893103
| 129
| 0.541715
|
b9858130a8563172ee555916c580b06c99c4ccd8
| 6,400
|
py
|
Python
|
lisp_shell/kernel.py
|
lisp/lisp_kernel
|
203d77e497111ada93b185ec4c92f38b10c73f13
|
[
"BSD-3-Clause"
] | null | null | null |
lisp_shell/kernel.py
|
lisp/lisp_kernel
|
203d77e497111ada93b185ec4c92f38b10c73f13
|
[
"BSD-3-Clause"
] | null | null | null |
lisp_shell/kernel.py
|
lisp/lisp_kernel
|
203d77e497111ada93b185ec4c92f38b10c73f13
|
[
"BSD-3-Clause"
] | null | null | null |
from ipykernel.kernelbase import Kernel
from pexpect import replwrap, EOF
import pexpect
from subprocess import check_output
import os.path
import re
import signal
import syslog
import pprint
__version__ = '0.0.1'
version_pat = re.compile(r'version (\d+(\.\d+)+)')
from .images import (
extract_image_filenames, display_data_for_image, image_setup_cmd
)
# nyi : IREPLWrapper eliminated
class LispKernel(Kernel):
implementation = 'lisp_kernel'
implementation_version = __version__
@property
def language_version(self):
m = version_pat.search(self.banner)
return m.group(1)
_banner = None
@property
def banner(self):
if self._banner is None:
self._banner = check_output(['lisp', '--version']).decode('utf-8')
return self._banner
language_info = {'name': 'bash',
'codemirror_mode': 'text/x-common-lisp',
'mimetype': 'application/sparql-query',
'file_extension': '.rq'}
def __init__(self, **kwargs):
Kernel.__init__(self, **kwargs)
self._start_lisp()
def _start_lisp(self):
# Signal handlers are inherited by forked processes, and we can't easily
# reset it from the subprocess. Since kernelapp ignores SIGINT except in
# message handlers, we need to temporarily reset the SIGINT handler here
# so that the child and its children are interruptible.
#syslog.openlog('kernel')
sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
# Note: the next few lines mirror functionality in the
# bash() function of pexpect/replwrap.py. Look at the
# source code there for comments and context for
# understanding the code here.
self.child = pexpect.spawn('lisp', ['--eval', '(spocq.si::main-repl)', '--lispinit', '/opt/spocq/init.sxp'], echo=False,
encoding='utf-8', codec_errors='replace')
self.lispwrapper = replwrap.REPLWrapper(self.child, u'\\*', None, '', '')
self.child.expect_exact('* ', -1)
finally:
signal.signal(signal.SIGINT, sig)
#@gen.coroutine
def execute_request(self, stream, ident, parent):
syslog.syslog('execute-request: ' + pprint.pformat(parent))
super(LispKernel, self).execute_request(stream, ident, parent)
def process_output(self, output):
#syslog.syslog(output)
if not self.silent:
# Send standard output
stream_content = {'name': 'stdout', 'text': output}
self.send_response(self.iopub_socket, 'stream', stream_content)
def run_command(self, code):
# Split up multiline commands and feed them in bit-by-bit
# in order to avoid buffer size limit
res = []
cmdlines = code.splitlines()
if not cmdlines:
raise ValueError("No command was given")
#syslog.syslog('run_command(' + code + ')')
for line in cmdlines:
#syslog.syslog('run_command.line(' + line + ')')
self.child.sendline(line)
#syslog.syslog('run_command.pad')
self.child.sendline('')
mode = self.child.expect(['\\* ', '\\d*] '], -1)
self.process_output(self.child.before)
if mode == 1 :
#syslog.syslog('run_command: abort')
self.child.sendline(':abort')
mode = self.child.expect('\\* ', -1)
self.process_output(self.child.before)
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
self.silent = silent
if not code.strip():
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
#syslog.syslog('do_execute(' + code + ')')
interrupted = False
try:
# Note: timeout=None tells IREPLWrapper to do incremental
# output. Also note that the return value from
# run_command is not needed, because the output was
# already sent by IREPLWrapper.
# self.lispwrapper.run_command(code, timeout=None)
self.run_command(code)
except KeyboardInterrupt:
self.lispwrapper.child.sendintr()
interrupted = True
self.lispwrapper._expect_prompt()
output = self.lispwrapper.child.before
self.process_output(output)
except EOF:
output = self.lispwrapper.child.before + 'Restarting Lisp'
self._start_lisp()
self.process_output(output)
if interrupted:
return {'status': 'abort', 'execution_count': self.execution_count}
#syslog.syslog('return ok')
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
def do_complete(self, code, cursor_pos):
#syslog.syslog('do_complete(' + code + ')')
code = code[:cursor_pos]
default = {'matches': [], 'cursor_start': 0,
'cursor_end': cursor_pos, 'metadata': dict(),
'status': 'ok'}
if not code or code[-1] == ' ':
return default
tokens = code.replace(';', ' ').split()
if not tokens:
return default
matches = []
token = tokens[-1]
start = cursor_pos - len(token)
if token[0] == '$':
# complete variables
cmd = 'compgen -A arrayvar -A export -A variable %s' % token[1:] # strip leading $
output = self.lispwrapper.run_command(cmd).rstrip()
completions = set(output.split())
# append matches including leading $
matches.extend(['$'+c for c in completions])
else:
# complete functions and builtins
cmd = 'compgen -cdfa %s' % token
output = self.lispwrapper.run_command(cmd).rstrip()
matches.extend(output.split())
if not matches:
return default
matches = [m for m in matches if m.startswith(token)]
return {'matches': sorted(matches), 'cursor_start': start,
'cursor_end': cursor_pos, 'metadata': dict(),
'status': 'ok'}
| 36.571429
| 132
| 0.583125
|
86f1ddc9f31ed3deed44c4a255ea5b894d68dfda
| 1,725
|
py
|
Python
|
pythonNotes/samplePgms/db/6_mysql-fetchall.py
|
itsraghz/TechNotes
|
1fa87ec2231fa06f8c780a5399d16a0f5c769c94
|
[
"CC0-1.0"
] | null | null | null |
pythonNotes/samplePgms/db/6_mysql-fetchall.py
|
itsraghz/TechNotes
|
1fa87ec2231fa06f8c780a5399d16a0f5c769c94
|
[
"CC0-1.0"
] | 1
|
2020-02-23T16:48:06.000Z
|
2020-02-23T16:48:06.000Z
|
pythonNotes/samplePgms/db/6_mysql-fetchall.py
|
itsraghz/TechNotes
|
1fa87ec2231fa06f8c780a5399d16a0f5c769c94
|
[
"CC0-1.0"
] | 1
|
2020-05-09T17:41:46.000Z
|
2020-05-09T17:41:46.000Z
|
# Ref URL : https://www.tutorialspoint.com/python/python_database_access.htm
# Ref URL : https://stackoverflow.com/questions/3595363/properties-file-in-python-similar-to-java-properties
# Direct URL : https://stackoverflow.com/a/8220790/1001242
# Ref URL: https://docs.python.org/3/library/configparser.html
# Ref URL : https://stackoverflow.com/a/26221097/1001242 (for properties file)
import MySQLdb
import configparser
import sys
print(f'MySQL client is imported successfully!')
config = configparser.RawConfigParser()
config.read('db.properties')
dbName = config.get('Database', 'dbName')
host = config.get('Database', 'host')
userName = config.get('Authentication', 'userName')
password = config.get('Authentication', 'password')
# Open database connection
db = MySQLdb.connect(host, userName, password, dbName)
print(f'Connected to MySQL DB - successfully!')
# Prepare a cursor object using the cursor() method
cursor = db.cursor()
sql = "SELECT * FROM EMPLOYEE"
print(f'Query to execute : {sql}')
try:
# Execute the sql query
cursor.execute(sql)
# Fetch all the rows in a list of lists.
results = cursor.fetchall()
for row in results:
id = row[0]
emp_name = row[1]
user_id = row[2]
password = row[3]
# print(f'Employee Name : %s, User Id : %s, Password : %s' %
# (emp_name, user_id, password) % '\n')
print(
f'Employee Name : {emp_name}, User Id : {user_id}, password : {password}')
except:
# print("Error occurred. Unable to fetch data")
print(f'Exception occurred. Unable to fetch data!', sys.exc_info()[0])
raise
# close database connection
db.close()
print(f'Database connection closed successfully')
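# Illustrative sketch of the db.properties file read above (layout inferred from the
# config.get() calls; the values are placeholders):
#
#   [Database]
#   dbName = testdb
#   host = localhost
#
#   [Authentication]
#   userName = dbuser
#   password = secret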
| 30.803571
| 108
| 0.690435
|
6f0a23c408381c8a4cc5b04dfcd02431bc2619d1
| 7,375
|
py
|
Python
|
gunpowder/provider_spec.py
|
riels89/gunpowder
|
e523b49ca846a9fd46ab6fc0dd1040cc4a4d53b4
|
[
"MIT"
] | null | null | null |
gunpowder/provider_spec.py
|
riels89/gunpowder
|
e523b49ca846a9fd46ab6fc0dd1040cc4a4d53b4
|
[
"MIT"
] | null | null | null |
gunpowder/provider_spec.py
|
riels89/gunpowder
|
e523b49ca846a9fd46ab6fc0dd1040cc4a4d53b4
|
[
"MIT"
] | null | null | null |
import math
from gunpowder.coordinate import Coordinate
from gunpowder.array import ArrayKey
from gunpowder.array_spec import ArraySpec
from gunpowder.graph import GraphKey
from gunpowder.graph_spec import GraphSpec
from gunpowder.roi import Roi
from .freezable import Freezable
import logging
import warnings
logger = logging.getLogger(__file__)
class ProviderSpec(Freezable):
'''A collection of (possibly partial) :class:`ArraySpecs<ArraySpec>` and
:class:`GraphSpecs<GraphSpec>` describing a
:class:`BatchProvider's<BatchProvider>` offered arrays and graphs.
This collection mimics a dictionary. Specs can be added with::
provider_spec = ProviderSpec()
provider_spec[array_key] = ArraySpec(...)
provider_spec[graph_key] = GraphSpec(...)
Here, ``array_key`` and ``graph_key`` are :class:`ArrayKey` and
:class:`GraphKey`. The specs can be queried with::
array_spec = provider_spec[array_key]
graph_spec = provider_spec[graph_key]
Furthermore, pairs of keys/values can be iterated over using
``provider_spec.items()``.
To access only array or graph specs, use the dictionaries
``provider_spec.array_specs`` or ``provider_spec.graph_specs``,
respectively.
Args:
array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`):
Initial array specs.
graph_specs (``dict``, :class:`GraphKey` -> :class:`GraphSpec`):
Initial graph specs.
Attributes:
array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`):
Contains all array specs contained in this provider spec.
graph_specs (``dict``, :class:`GraphKey` -> :class:`GraphSpec`):
Contains all graph specs contained in this provider spec.
'''
def __init__(self, array_specs=None, graph_specs=None, points_specs=None):
self.array_specs = {}
self.graph_specs = {}
self.freeze()
# use __setitem__ instead of copying the dicts, this ensures type tests
# are run
if array_specs is not None:
for key, spec in array_specs.items():
self[key] = spec
if graph_specs is not None:
for key, spec in graph_specs.items():
self[key] = spec
if points_specs is not None:
for key, spec in points_specs.items():
self[key] = spec
@property
def points_specs(self):
# Alias to graphs
warnings.warn(
"points_specs are depricated. Please use graph_specs", DeprecationWarning
)
return self.graph_specs
def __setitem__(self, key, spec):
assert isinstance(key, ArrayKey) or isinstance(key, GraphKey), \
f"Only ArrayKey or GraphKey (not {type(key).__name__}) are " \
"allowed as keys for ProviderSpec"
if isinstance(key, ArrayKey):
if isinstance(spec, Roi):
spec = ArraySpec(roi=spec)
assert isinstance(spec, ArraySpec), \
f"Only ArraySpec (not {type(spec).__name__}) can be set for " \
"ArrayKey"
self.array_specs[key] = spec.copy()
else:
if isinstance(spec, Roi):
spec = GraphSpec(roi=spec)
assert isinstance(spec, GraphSpec), \
f"Only GraphSpec (not {type(spec).__name__}) can be set for " \
"GraphKey"
self.graph_specs[key] = spec.copy()
def __getitem__(self, key):
if isinstance(key, ArrayKey):
return self.array_specs[key]
elif isinstance(key, GraphKey):
return self.graph_specs[key]
else:
raise RuntimeError(
"Only ArrayKey or GraphKey can be used as keys in a "
"%s."%type(self).__name__)
def __len__(self):
return len(self.array_specs) + len(self.graph_specs)
def __contains__(self, key):
if isinstance(key, ArrayKey):
return key in self.array_specs
elif isinstance(key, GraphKey):
return key in self.graph_specs
else:
raise RuntimeError(
"Only ArrayKey or GraphKey can be used as keys in a "
"%s."%type(self).__name__)
def __delitem__(self, key):
if isinstance(key, ArrayKey):
del self.array_specs[key]
elif isinstance(key, GraphKey):
del self.graph_specs[key]
else:
raise RuntimeError(
"Only ArrayKey or GraphKey can be used as keys in a "
"%s."%type(self).__name__)
def items(self):
'''Provides a generator iterating over key/value pairs.'''
for (k, v) in self.array_specs.items():
yield k, v
for (k, v) in self.graph_specs.items():
yield k, v
def get_total_roi(self):
'''Get the union of all the ROIs.'''
total_roi = None
for specs_type in [self.array_specs, self.graph_specs]:
for (_, spec) in specs_type.items():
if total_roi is None:
total_roi = spec.roi
elif spec.roi is not None:
total_roi = total_roi.union(spec.roi)
return total_roi
def get_common_roi(self):
'''Get the intersection of all the requested ROIs.'''
common_roi = None
for specs_type in [self.array_specs, self.graph_specs]:
for (_, spec) in specs_type.items():
if common_roi is None:
common_roi = spec.roi
else:
common_roi = common_roi.intersect(spec.roi)
return common_roi
def get_lcm_voxel_size(self, array_keys=None):
'''Get the least common multiple of the voxel sizes in this spec.
Args:
array_keys (list of :class:`ArrayKey`, optional): If given,
consider only the given array types.
'''
if array_keys is None:
array_keys = self.array_specs.keys()
if not array_keys:
raise RuntimeError("Can not compute lcm voxel size -- there are "
"no array specs in this provider spec.")
else:
if not array_keys:
raise RuntimeError("Can not compute lcm voxel size -- list of "
"given array specs is empty.")
lcm_voxel_size = None
for key in array_keys:
voxel_size = self.array_specs[key].voxel_size
if voxel_size is None:
continue
if lcm_voxel_size is None:
lcm_voxel_size = voxel_size
else:
lcm_voxel_size = Coordinate(
(a * b // math.gcd(a, b)
for a, b in zip(lcm_voxel_size, voxel_size)))
return lcm_voxel_size
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
def __ne__(self, other):
if isinstance(other, self.__class__):
return not self.__eq__(other)
return NotImplemented
def __repr__(self):
r = "\n"
for (key, spec) in self.items():
r += "\t%s: %s\n"%(key, spec)
return r
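# Minimal usage sketch based on the class docstring above (illustrative only; the key
# and ROI values are placeholders following the gunpowder API assumed here):
#
#   raw = ArrayKey('RAW')
#   spec = ProviderSpec()
#   spec[raw] = ArraySpec(roi=Roi((0, 0, 0), (100, 100, 100)))
#   total_roi = spec.get_total_roi()          # union of all ROIs
#   voxel_size = spec.get_lcm_voxel_size()    # lcm over the array specs' voxel sizes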
| 30.475207
| 85
| 0.582915
|
1af6780a23f278a8683cfd6e26da2d59f0abd4d0
| 789
|
py
|
Python
|
remote_data/update.py
|
Erhannis/hackerskeyboard
|
beb8c6b3b34d1b317662aa4b4050943909cb0855
|
[
"Apache-2.0"
] | null | null | null |
remote_data/update.py
|
Erhannis/hackerskeyboard
|
beb8c6b3b34d1b317662aa4b4050943909cb0855
|
[
"Apache-2.0"
] | null | null | null |
remote_data/update.py
|
Erhannis/hackerskeyboard
|
beb8c6b3b34d1b317662aa4b4050943909cb0855
|
[
"Apache-2.0"
] | null | null | null |
# Prepare the installable schemata list
# Read each URL and download the IMDF file, and add to the downloadable.json file
# Set homeUrl to the directory that contains the IMDF file.
import requests
import json
listFile = "imdf_list.in"
jsonFile = "downloadable.json"
def appendEntry(jsonOut, url):
# read url
# change homeUrl
# serialize as json
print(url)
r = requests.get(url)
imdf = json.loads(r.content)
pos = url.rfind("/")
imdf["homeUrl"] = url[:pos+1]
jsonOut.append(imdf)
jsonList = []
with open(listFile, "r") as file:
for line in file:
line = line.strip()
appendEntry(jsonList, line)
jsonOut = open(jsonFile, "w")
jsonOut.write(json.dumps(jsonList, indent=4, separators=(',', ': ')))
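# Sketch of the expected imdf_list.in input (assumption based on the loop above: one
# IMDF URL per line; the URLs are placeholders):
#
#   https://example.com/schemata/foo/foo.imdf.json
#   https://example.com/schemata/bar/bar.imdf.json
#
# For each URL the script downloads the JSON, rewrites "homeUrl" to the containing
# directory (everything up to the last '/'), and appends the entry to downloadable.json.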
| 24.65625
| 82
| 0.642586
|
96d93408bc24f38ce8c6e13ac1426f6c2465b173
| 50
|
py
|
Python
|
virtual/lib/python3.8/site-packages/bootstrap4/models.py
|
ShirQUillE-SandE/the-neighborhood-101
|
fda09cb0481d1cd902f5e13b7ed61ed96772121d
|
[
"MIT"
] | 1,810
|
2015-01-01T02:04:55.000Z
|
2020-06-15T01:01:06.000Z
|
virtual/lib/python3.6/site-packages/bootstrap4/models.py
|
kahenya-anita/Insta-Clone
|
4894e959c17170505e73aee6dc497aeb29d55a71
|
[
"MIT"
] | 298
|
2017-05-07T15:20:09.000Z
|
2022-03-28T09:01:42.000Z
|
virtual/lib/python3.6/site-packages/bootstrap4/models.py
|
kahenya-anita/Insta-Clone
|
4894e959c17170505e73aee6dc497aeb29d55a71
|
[
"MIT"
] | 752
|
2015-01-05T01:27:20.000Z
|
2020-05-25T02:48:35.000Z
|
# Empty models.py, required file for Django tests
| 25
| 49
| 0.78
|
ea300ccc4ba3361e4c0fe9fcdef1a48430ddf4f7
| 1,820
|
py
|
Python
|
qa/rpc-tests/disablewallet.py
|
bitmammoth/kekcoin-segwit
|
ac5ed108fbe3221a68826108bdc5bef725f98849
|
[
"MIT"
] | 7
|
2018-01-29T18:47:19.000Z
|
2019-06-24T13:48:10.000Z
|
qa/rpc-tests/disablewallet.py
|
bitmammoth/kekcoin-segwit
|
ac5ed108fbe3221a68826108bdc5bef725f98849
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/disablewallet.py
|
bitmammoth/kekcoin-segwit
|
ac5ed108fbe3221a68826108bdc5bef725f98849
|
[
"MIT"
] | 17
|
2017-11-20T21:20:03.000Z
|
2021-01-31T12:43:54.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Exercise API with -disablewallet.
#
from test_framework.test_framework import KekCoinTestFramework
from test_framework.util import *
class DisableWalletTest (KekCoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-disablewallet']])
self.is_network_split = False
self.sync_all()
def run_test (self):
# Check regression: https://github.com/kekcoin/kekcoin/issues/6963#issuecomment-154548880
x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
assert(x['isvalid'] == False)
x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
assert(x['isvalid'] == True)
# Checking mining to an address without a wallet
try:
self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
except JSONRPCException as e:
assert("Invalid address" not in e.error['message'])
assert("ProcessNewBlock, block not accepted" not in e.error['message'])
assert("Couldn't create new block" not in e.error['message'])
try:
self.nodes[0].generatetoaddress(1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
raise AssertionError("Must not mine to invalid address!")
except JSONRPCException as e:
assert("Invalid address" in e.error['message'])
if __name__ == '__main__':
DisableWalletTest ().main ()
| 37.142857
| 97
| 0.682967
|
6b0bee98a361b1d01fa29e833cb29f653168c4a4
| 510
|
py
|
Python
|
mt/migrations/0012_auto_20190606_1531.py
|
JahanviNShah/inmt
|
730faf6dcd24536bbffd0ec463c61d3b0a7819cd
|
[
"MIT"
] | 39
|
2020-04-14T09:41:41.000Z
|
2022-02-21T14:32:06.000Z
|
mt/migrations/0012_auto_20190606_1531.py
|
JahanviNShah/inmt
|
730faf6dcd24536bbffd0ec463c61d3b0a7819cd
|
[
"MIT"
] | 17
|
2020-05-11T23:44:57.000Z
|
2021-02-17T10:53:15.000Z
|
mt/migrations/0012_auto_20190606_1531.py
|
JahanviNShah/inmt
|
730faf6dcd24536bbffd0ec463c61d3b0a7819cd
|
[
"MIT"
] | 20
|
2020-04-14T11:22:43.000Z
|
2021-11-25T03:57:21.000Z
|
# Generated by Django 2.2 on 2019-06-06 10:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mt', '0011_corpus_helpprovision'),
]
operations = [
migrations.AlterField(
model_name='dockeystroke',
name='translatedSet',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dockeystroke', to='mt.translatedSet'),
),
]
| 25.5
| 133
| 0.658824
|
4e6c6014734e188c165a9aa73d4bd9f1bbdd372f
| 662
|
py
|
Python
|
hwilib/device_ids.py
|
gabridome/HWI
|
21e4122636497e46a8d5d16c3ee01b25a5238aa8
|
[
"MIT"
] | null | null | null |
hwilib/device_ids.py
|
gabridome/HWI
|
21e4122636497e46a8d5d16c3ee01b25a5238aa8
|
[
"MIT"
] | null | null | null |
hwilib/device_ids.py
|
gabridome/HWI
|
21e4122636497e46a8d5d16c3ee01b25a5238aa8
|
[
"MIT"
] | null | null | null |
# Contains the arrays of device IDs. This is to avoid TREZOR and KeepKey
# library incompatibility.
ledger_device_ids = [
(0x2581, 0x2b7c),
(0x2581, 0x3b7c),
(0x2581, 0x4b7c),
(0x2c97, 0x0001),
(0x2581, 0x1807)]
digitalbitbox_device_ids = [
(0x03eb, 0x2402)
]
trezor_device_ids = [
(0x534c, 0x0001), # TREZOR
(0x1209, 0x53c0), # TREZORv2 Bootloader
(0x1209, 0x53c1), # TREZORv2
]
keepkey_device_ids = [
(0x2B24, 0x0001), # KeepKey
]
coldcard_device_ids = [
(0xd13e, 0xcc10),
]
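# Illustrative check (not part of the original module): a detected device's
# (vendor_id, product_id) pair can be matched against these lists with a plain
# membership test, e.g.
#
#   if (vendor_id, product_id) in trezor_device_ids:
#       ...  # treat the device as a TREZOR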
| 23.642857
| 72
| 0.521148
|
ad4869f9eaedafea590b27ab94dc75072c6f41b8
| 14,456
|
py
|
Python
|
kubernetes/client/models/v1_stateful_set_status.py
|
philipp-sontag-by/python
|
51c481692ab0d9c71b9dd96342bfa93b721b029d
|
[
"Apache-2.0"
] | 1
|
2022-02-22T23:10:55.000Z
|
2022-02-22T23:10:55.000Z
|
kubernetes/client/models/v1_stateful_set_status.py
|
philipp-sontag-by/python
|
51c481692ab0d9c71b9dd96342bfa93b721b029d
|
[
"Apache-2.0"
] | 6
|
2021-09-13T19:03:02.000Z
|
2022-03-16T18:56:42.000Z
|
kubernetes/client/models/v1_stateful_set_status.py
|
philipp-sontag-by/python
|
51c481692ab0d9c71b9dd96342bfa93b721b029d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1StatefulSetStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'available_replicas': 'int',
'collision_count': 'int',
'conditions': 'list[V1StatefulSetCondition]',
'current_replicas': 'int',
'current_revision': 'str',
'observed_generation': 'int',
'ready_replicas': 'int',
'replicas': 'int',
'update_revision': 'str',
'updated_replicas': 'int'
}
attribute_map = {
'available_replicas': 'availableReplicas',
'collision_count': 'collisionCount',
'conditions': 'conditions',
'current_replicas': 'currentReplicas',
'current_revision': 'currentRevision',
'observed_generation': 'observedGeneration',
'ready_replicas': 'readyReplicas',
'replicas': 'replicas',
'update_revision': 'updateRevision',
'updated_replicas': 'updatedReplicas'
}
def __init__(self, available_replicas=None, collision_count=None, conditions=None, current_replicas=None, current_revision=None, observed_generation=None, ready_replicas=None, replicas=None, update_revision=None, updated_replicas=None, local_vars_configuration=None): # noqa: E501
"""V1StatefulSetStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._available_replicas = None
self._collision_count = None
self._conditions = None
self._current_replicas = None
self._current_revision = None
self._observed_generation = None
self._ready_replicas = None
self._replicas = None
self._update_revision = None
self._updated_replicas = None
self.discriminator = None
self.available_replicas = available_replicas
if collision_count is not None:
self.collision_count = collision_count
if conditions is not None:
self.conditions = conditions
if current_replicas is not None:
self.current_replicas = current_replicas
if current_revision is not None:
self.current_revision = current_revision
if observed_generation is not None:
self.observed_generation = observed_generation
if ready_replicas is not None:
self.ready_replicas = ready_replicas
self.replicas = replicas
if update_revision is not None:
self.update_revision = update_revision
if updated_replicas is not None:
self.updated_replicas = updated_replicas
@property
def available_replicas(self):
"""Gets the available_replicas of this V1StatefulSetStatus. # noqa: E501
Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. # noqa: E501
:return: The available_replicas of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._available_replicas
@available_replicas.setter
def available_replicas(self, available_replicas):
"""Sets the available_replicas of this V1StatefulSetStatus.
Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. # noqa: E501
:param available_replicas: The available_replicas of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and available_replicas is None: # noqa: E501
raise ValueError("Invalid value for `available_replicas`, must not be `None`") # noqa: E501
self._available_replicas = available_replicas
@property
def collision_count(self):
"""Gets the collision_count of this V1StatefulSetStatus. # noqa: E501
collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. # noqa: E501
:return: The collision_count of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._collision_count
@collision_count.setter
def collision_count(self, collision_count):
"""Sets the collision_count of this V1StatefulSetStatus.
collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. # noqa: E501
:param collision_count: The collision_count of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._collision_count = collision_count
@property
def conditions(self):
"""Gets the conditions of this V1StatefulSetStatus. # noqa: E501
Represents the latest available observations of a statefulset's current state. # noqa: E501
:return: The conditions of this V1StatefulSetStatus. # noqa: E501
:rtype: list[V1StatefulSetCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1StatefulSetStatus.
Represents the latest available observations of a statefulset's current state. # noqa: E501
:param conditions: The conditions of this V1StatefulSetStatus. # noqa: E501
:type: list[V1StatefulSetCondition]
"""
self._conditions = conditions
@property
def current_replicas(self):
"""Gets the current_replicas of this V1StatefulSetStatus. # noqa: E501
currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision. # noqa: E501
:return: The current_replicas of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._current_replicas
@current_replicas.setter
def current_replicas(self, current_replicas):
"""Sets the current_replicas of this V1StatefulSetStatus.
currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision. # noqa: E501
:param current_replicas: The current_replicas of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._current_replicas = current_replicas
@property
def current_revision(self):
"""Gets the current_revision of this V1StatefulSetStatus. # noqa: E501
currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas). # noqa: E501
:return: The current_revision of this V1StatefulSetStatus. # noqa: E501
:rtype: str
"""
return self._current_revision
@current_revision.setter
def current_revision(self, current_revision):
"""Sets the current_revision of this V1StatefulSetStatus.
currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas). # noqa: E501
:param current_revision: The current_revision of this V1StatefulSetStatus. # noqa: E501
:type: str
"""
self._current_revision = current_revision
@property
def observed_generation(self):
"""Gets the observed_generation of this V1StatefulSetStatus. # noqa: E501
observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server. # noqa: E501
:return: The observed_generation of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""Sets the observed_generation of this V1StatefulSetStatus.
observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server. # noqa: E501
:param observed_generation: The observed_generation of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._observed_generation = observed_generation
@property
def ready_replicas(self):
"""Gets the ready_replicas of this V1StatefulSetStatus. # noqa: E501
readyReplicas is the number of pods created for this StatefulSet with a Ready Condition. # noqa: E501
:return: The ready_replicas of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._ready_replicas
@ready_replicas.setter
def ready_replicas(self, ready_replicas):
"""Sets the ready_replicas of this V1StatefulSetStatus.
readyReplicas is the number of pods created for this StatefulSet with a Ready Condition. # noqa: E501
:param ready_replicas: The ready_replicas of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._ready_replicas = ready_replicas
@property
def replicas(self):
"""Gets the replicas of this V1StatefulSetStatus. # noqa: E501
replicas is the number of Pods created by the StatefulSet controller. # noqa: E501
:return: The replicas of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1StatefulSetStatus.
replicas is the number of Pods created by the StatefulSet controller. # noqa: E501
:param replicas: The replicas of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and replicas is None: # noqa: E501
raise ValueError("Invalid value for `replicas`, must not be `None`") # noqa: E501
self._replicas = replicas
@property
def update_revision(self):
"""Gets the update_revision of this V1StatefulSetStatus. # noqa: E501
updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas) # noqa: E501
:return: The update_revision of this V1StatefulSetStatus. # noqa: E501
:rtype: str
"""
return self._update_revision
@update_revision.setter
def update_revision(self, update_revision):
"""Sets the update_revision of this V1StatefulSetStatus.
updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas) # noqa: E501
:param update_revision: The update_revision of this V1StatefulSetStatus. # noqa: E501
:type: str
"""
self._update_revision = update_revision
@property
def updated_replicas(self):
"""Gets the updated_replicas of this V1StatefulSetStatus. # noqa: E501
updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision. # noqa: E501
:return: The updated_replicas of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._updated_replicas
@updated_replicas.setter
def updated_replicas(self, updated_replicas):
"""Sets the updated_replicas of this V1StatefulSetStatus.
updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision. # noqa: E501
:param updated_replicas: The updated_replicas of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._updated_replicas = updated_replicas
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1StatefulSetStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1StatefulSetStatus):
return True
return self.to_dict() != other.to_dict()
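# Minimal usage sketch (illustrative; the field values are placeholders). `replicas`
# and `available_replicas` are validated as non-None when client-side validation is
# enabled, and to_dict() serializes the model back to plain Python types:
#
#   status = V1StatefulSetStatus(replicas=3, available_replicas=3, ready_replicas=3)
#   print(status.to_dict())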
| 38.344828
| 285
| 0.672454
|
a1739e0998979b4cc02ef9839e5ef1ae64eb5f7c
| 12,164
|
py
|
Python
|
steady_state_analyzer.py
|
javierron/ChaosETH
|
02d145d2452fb4989c336a531e084aa501320ab7
|
[
"MIT"
] | null | null | null |
steady_state_analyzer.py
|
javierron/ChaosETH
|
02d145d2452fb4989c336a531e084aa501320ab7
|
[
"MIT"
] | null | null | null |
steady_state_analyzer.py
|
javierron/ChaosETH
|
02d145d2452fb4989c336a531e084aa501320ab7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Filename: steady_state_analyzer.py
import csv, requests, sys, argparse, time, calendar, json, numpy
import iso8601
from datetime import datetime
from prettytable import PrettyTable
import logging
def get_args():
parser = argparse.ArgumentParser(
description="Infer an Ethereum client's steady state and error models.")
parser.add_argument("--host", required = True,
help = "URL to the prometheus database, e.g. http://prometheus:9090")
parser.add_argument("--start", required = True,
help = "starting timepoint in rfc3339 or unix_timestamp")
parser.add_argument("--end", required = True,
help = "starting timepoint in rfc3339 or unix_timestamp")
parser.add_argument("-s", "--step", default = "15s",
help = "query step in seconds, default: 15s")
parser.add_argument("--output_query", default = "query_results.json",
help = "a json file name that saves query results, default: query_results.json")
parser.add_argument("--output_models", default = "error_models.json",
help = "a json file name that saves query results, default: error_models.json")
parser.add_argument("--from_json",
help = "generate steady state and error models from a json file that contains the metrics")
args = parser.parse_args()
return args
def dump_query_results(filename, error_list):
with open(filename, "wt") as output:
json.dump(error_list, output, indent = 2)
def read_query_results(filename):
with open(filename, "rt") as json_file:
data = json.load(json_file)
return data
def calculate_stats(values):
values = numpy.array(values).astype(float)
min_value = numpy.percentile(values, 5, axis=0)[1] # in the values array, index 0: timestamp, index 1: failure rate
mean_value = numpy.mean(values, axis=0)[1]
max_value = numpy.percentile(values, 95, axis=0)[1]
variance = numpy.var(values, axis=0)[1]
return min_value, mean_value, max_value, variance
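# Illustrative example for calculate_stats (values are placeholders): each entry is a
# [timestamp, value] pair as returned by Prometheus, so the statistics are taken over
# column index 1.
#
#   calculate_stats([[1609459200, "0.10"], [1609459215, "0.20"], [1609459230, "0.30"]])
#   # -> (5th percentile, mean, 95th percentile, variance) of the three rates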
def query_total_invocations(prometheus_url, syscall_name, error_code, start_time, end_time, step):
range_query_api = "/api/v1/query_range"
if error_code == "":
query_string = 'sum(failed_syscalls_total{syscall_name="%s"})'%(syscall_name)
else:
query_string = 'failed_syscalls_total{syscall_name="%s", error_code="%s"}'%(syscall_name, error_code)
response = requests.post(prometheus_url + range_query_api, data={'query': query_string, 'start': start_time, 'end': end_time, 'step': step})
status = response.json()["status"]
if status == "error":
logging.error(response.json())
total = -1
else:
if len(response.json()['data']['result']) == 0:
total = 0
else:
results = response.json()['data']['result'][0]
# https://stackoverflow.com/questions/1941927/convert-an-rfc-3339-time-to-a-standard-python-timestamp
start_datetime = iso8601.parse_date(start_time)
start_timestamp = calendar.timegm(start_datetime.utctimetuple())
if results["values"][0][0] > start_timestamp:
# the first failure happened after start_time
total = int(results["values"][-1][1])
else:
total = int(results["values"][-1][1]) - int(results["values"][0][1])
return total
def query_syscall_errors(prometheus_url, start_time, end_time, step):
range_query_api = "/api/v1/query_range"
error_list = list()
syscall_type = list()
query_string = 'syscalls_failure_rate'
response = requests.post(prometheus_url + range_query_api, data={'query': query_string, 'start': start_time, 'end': end_time, 'step': step})
status = response.json()["status"]
if status == "error":
logging.error(response.json())
sys.exit(2)
results = response.json()['data']['result']
for entry in results:
if entry["metric"]["error_code"].startswith("-"): continue
if len(entry["values"]) == 1:
samples = [{"timestamp": entry["values"][0][0], "failure_rate": float(entry["values"][0][1])}] # only one case
else:
samples = [
{"timestamp": entry["values"][0][0], "failure_rate": float(entry["values"][0][1])}, # first case
{"timestamp": entry["values"][-1][0], "failure_rate": float(entry["values"][-1][1])} # last case
]
min_value, mean_value, max_value, variance = calculate_stats(entry["values"])
error_list.append({
"syscall_name": entry["metric"]["syscall_name"],
"error_code": entry["metric"]["error_code"],
"samples_in_total": len(entry["values"]),
"invocations_in_total": query_total_invocations(prometheus_url, entry["metric"]["syscall_name"], entry["metric"]["error_code"], start_time, end_time, step),
"rate_min": min_value,
"rate_mean": mean_value,
"rate_max": max_value,
"variance": variance,
"samples": samples,
"data_points": entry["values"]
})
if entry["metric"]["syscall_name"] not in syscall_type:
error_list.append({
"syscall_name": entry["metric"]["syscall_name"],
"error_code": "SUCCESS",
"invocations_in_total": query_total_invocations(prometheus_url, entry["metric"]["syscall_name"], "SUCCESS", start_time, end_time, step),
})
syscall_type.append(entry["metric"]["syscall_name"])
return error_list
def query_metrics(prometheus_url, start_time, end_time, step):
range_query_api = "/api/v1/query_range"
# metric_name: query_string
metrics = {
"dir_read_c": "dir_reads_total",
"dir_write_c": "dir_writes_total",
"dir_reads": "dir_reads_kb*1024",
"dir_writes": "dir_writes_kb*1024",
"tcp_conn": "sum(tcp_connections_total)",
"tcp_sends": "sum(tcp_sends_kb*1024)",
"tcp_recvs": "sum(tcp_recvs_kb*1024)"
}
query_results = list()
for metric_name, query_string in metrics.items():
response = requests.post(prometheus_url + range_query_api, data={'query': query_string, 'start': start_time, 'end': end_time, 'step': step})
status = response.json()["status"]
if status == "error":
logging.error(response.json())
continue
results = response.json()['data']['result'][0]
min_value, mean_value, max_value, variance = calculate_stats(results["values"])
query_results.append({
"metric_name": metric_name,
"stat": {
"min": min_value,
"mean": mean_value,
"max": max_value,
"variance": variance
},
"data_points": results["values"]
})
return query_results
def infer_steady_state(host, start_time, end_time, step):
error_list = query_syscall_errors(host, start_time, end_time, step)
other_metrics = query_metrics(host, start_time, end_time, step)
return {"syscall_errors": error_list, "other_metrics": other_metrics}
def pretty_print_metrics(metrics):
stat_table = PrettyTable()
stat_table.field_names = ["Metric Name", "Min", "Mean", "Max", "Variance"]
for metric in metrics:
stat_table.add_row([metric["metric_name"], metric["stat"]["min"], metric["stat"]["mean"], metric["stat"]["max"], metric["stat"]["variance"]])
stat_table.sortby = "Metric Name"
print(stat_table)
def pretty_print_syscall_errors(error_list):
stat_table = PrettyTable()
stat_table.field_names = ["Syscall Name", "Error Code", "Samples in Total", "Invocations in Total", "Failure Rate", "Variance"]
tmp_success_count = dict()
for detail in error_list:
if detail["error_code"].startswith("-"):
error_code = int(detail["error_code"])
if error_code <= -1E10:
if detail["syscall_name"] not in tmp_success_count: tmp_success_count[detail["syscall_name"]] = 0
tmp_success_count[detail["syscall_name"]] = tmp_success_count[detail["syscall_name"]] + detail["invocations_in_total"]
for detail in error_list:
if detail["error_code"].startswith("-"): continue
if detail["error_code"] == "SUCCESS":
stat_table.add_row([detail["syscall_name"], detail["error_code"], "-", detail["invocations_in_total"] + tmp_success_count[detail["syscall_name"]] if detail["syscall_name"] in tmp_success_count else detail["invocations_in_total"], "-", "-"])
else:
stat_table.add_row([detail["syscall_name"], detail["error_code"], detail["samples_in_total"], detail["invocations_in_total"], "%f, %f, %f"%(detail["rate_min"], detail["rate_mean"], detail["rate_max"]), detail["variance"]])
stat_table.sortby = "Syscall Name"
print(stat_table)
def generate_experiment(syscall_name, error_code, failure_rate, ori_min_rate, ori_mean_rate, ori_max_rate, duration):
result = {
"syscall_name": syscall_name,
"error_code": "-%s"%error_code,
"failure_rate": failure_rate,
"original_min_rate": ori_min_rate,
"original_mean_rate": ori_mean_rate,
"original_max_rate": ori_max_rate,
"experiment_duration": duration
}
return result
def generate_experiment_config(args, error_list):
start = args.start
end = args.end
output_file = args.output_models
config = {
"experiment_name": "ChaosETH Experiment Error Models",
"experiment_description": "Automatically generated based on monitoring data from %s to %s"%(start, end),
"experiments": []
}
factor = 1.2
duration = 300
for detail in error_list:
if "unknown" in detail["syscall_name"]: continue
if detail["error_code"] == "SUCCESS": continue
if detail["error_code"].startswith("-"): continue
if detail["rate_max"] < 0.05:
# the original failure rate is very low, thus we use fixed rate instead
config["experiments"].append(generate_experiment(detail["syscall_name"], detail["error_code"], 0.05, detail["rate_min"], detail["rate_mean"], detail["rate_max"], duration))
elif detail["rate_max"] / detail["rate_min"] > 10:
# the original failure rate fluctuated wildly, we keep using the max failure rate
config["experiments"].append(generate_experiment(detail["syscall_name"], detail["error_code"], detail["rate_max"], detail["rate_min"], detail["rate_mean"], detail["rate_max"], duration))
else:
# if the original failure rate is relatively high, and it does not fluctuate a lot
# we amplify it by multiplying the factor
amplified = detail["rate_max"] * factor
if amplified > 1: amplified = 1
config["experiments"].append(generate_experiment(detail["syscall_name"], detail["error_code"], amplified, detail["rate_min"], detail["rate_mean"], detail["rate_max"], duration))
with open(output_file, "wt") as output:
json.dump(config, output, indent = 2)
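# Illustrative effect of the rules above (numbers are hypothetical): a syscall with a
# stable rate_max of 0.5 gets failure_rate 0.5 * 1.2 = 0.6 in the generated config,
# while any syscall whose rate_max is below 0.05 is bumped to a fixed 0.05 instead.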
def main(args):
    if args.from_json is not None:
error_list = read_query_results(args.from_json)
pretty_print_syscall_errors(error_list)
generate_experiment_config(args, error_list)
else:
steady_state = infer_steady_state(args.host, args.start, args.end, args.step)
pretty_print_syscall_errors(steady_state["syscall_errors"])
generate_experiment_config(args, steady_state["syscall_errors"])
pretty_print_metrics(steady_state["other_metrics"])
dump_query_results(args.output_query, steady_state)
if __name__ == "__main__":
logger_format = '%(asctime)-15s %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.INFO, format=logger_format)
args = get_args()
main(args)
| 47.330739
| 253
| 0.636139
|
68692c83b1eb213d52d51f8dfd6bdda12f8a6400
| 1,429
|
py
|
Python
|
PYTHON/pythonDesafios/desafio095.py
|
Santos1000/Curso-Python
|
549223a1633f6f619c87554dd8078cf7841bb1df
|
[
"MIT"
] | null | null | null |
PYTHON/pythonDesafios/desafio095.py
|
Santos1000/Curso-Python
|
549223a1633f6f619c87554dd8078cf7841bb1df
|
[
"MIT"
] | null | null | null |
PYTHON/pythonDesafios/desafio095.py
|
Santos1000/Curso-Python
|
549223a1633f6f619c87554dd8078cf7841bb1df
|
[
"MIT"
] | null | null | null |
gols = list()  # data input, formatting and selection of list objects
galera = list()
dados = dict()
while True:
dados.clear()
dados['nome'] = str(input('Nome do jogador(a): '))
dados['partidas'] = int(input(f'Quantas partidas {dados["nome"]} fez: '))
gols.clear()
for n in range(1, dados['partidas'] + 1):
gols.append(int(input(f'Quantos gols na partida {n}: ')))
dados['Gols'] = gols[:]
dados['total'] = sum( gols )
galera.append(dados.copy())
while True:
resp = str(input('Quer continuar: [s;n]')).strip().upper()
if resp in 'SN':
break
print('ERRO! Responda apenas S ou N.')
if resp in 'N':
break
print('==='*20)
print('Cod: ', end='')
for i in dados.keys():
print(f'{i:<15}', end='')
print()
print('==='*20)
for k, v in enumerate(galera):
print(f'{k:>3} ', end='')
for d in v.values():
print(f'{str(d):<15}', end='')
print()
print()
print('==='*20)
while True:
    busca = int(input('Mostrar dados de qual jogador(a): (999 para parar) '))
if busca == 999:
break
if busca >= len(galera):
print(f'Erro ! Não existe jogador(a) com código {busca}.')
else:
print(f' --- LEVANTAMENTO DO JOGADOR {galera[busca]["nome"]}:')
for i, g in enumerate(galera[busca]['Gols']):
print(f' No jogo {i} fez {g} gols')
print('--'*40)
print('<< VOLTE SEMPRE >>')
| 31.065217
| 77
| 0.550735
|
06e5789eb3ea558e6f5f48b67150ca206be1b83a
| 276
|
py
|
Python
|
application/errors.py
|
ChrisRx/flask-base
|
cb6a48aa4ddd1b89d2bcc1c34d327a94ce4bad6d
|
[
"MIT"
] | 1
|
2015-04-22T00:10:51.000Z
|
2015-04-22T00:10:51.000Z
|
application/errors.py
|
ChrisRx/flask-base
|
cb6a48aa4ddd1b89d2bcc1c34d327a94ce4bad6d
|
[
"MIT"
] | null | null | null |
application/errors.py
|
ChrisRx/flask-base
|
cb6a48aa4ddd1b89d2bcc1c34d327a94ce4bad6d
|
[
"MIT"
] | null | null | null |
class GeneralError(Exception): pass
class ProcessingError(GeneralError): pass
class DatabaseError(GeneralError): pass
class UploadError(GeneralError): pass
class FatalError(Exception): pass
class StartupError(FatalError): pass
class CredentialsNotFound(FatalError): pass
| 39.428571
| 43
| 0.82971
|
90aea98258726efb55e128b22611229c5aa5cbd4
| 211
|
py
|
Python
|
location.py
|
AnelMusic/fastapi_communication_example
|
4f9012fcbf7cf32847d1c771a835723ad80ebe9f
|
[
"MIT"
] | null | null | null |
location.py
|
AnelMusic/fastapi_communication_example
|
4f9012fcbf7cf32847d1c771a835723ad80ebe9f
|
[
"MIT"
] | null | null | null |
location.py
|
AnelMusic/fastapi_communication_example
|
4f9012fcbf7cf32847d1c771a835723ad80ebe9f
|
[
"MIT"
] | 1
|
2022-02-10T14:10:52.000Z
|
2022-02-10T14:10:52.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 31 22:01:47 2021
@author: anelmusic
"""
from pydantic import BaseModel
class Location(BaseModel):
continent: str
capital: str
| 15.071429
| 35
| 0.658768
|
d722d33b4b7161fd58cb7b5a1a21ff9ba75b7f65
| 7,465
|
py
|
Python
|
src/dispatch/database.py
|
Liamvdv/dispatch
|
6c1c9cf46bdba468fa48fb66ffffd6b7356e8de5
|
[
"Apache-2.0"
] | 1
|
2020-11-19T08:37:05.000Z
|
2020-11-19T08:37:05.000Z
|
src/dispatch/database.py
|
segmond/dispatch
|
ab69505334d4689537c7ea0c4d66f6438a6c33b5
|
[
"Apache-2.0"
] | null | null | null |
src/dispatch/database.py
|
segmond/dispatch
|
ab69505334d4689537c7ea0c4d66f6438a6c33b5
|
[
"Apache-2.0"
] | null | null | null |
import re
import logging
import json
from typing import Any, List
from itertools import groupby
import functools
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.orm import Query, sessionmaker
from sqlalchemy_filters import apply_pagination, apply_sort, apply_filters
from sqlalchemy_searchable import make_searchable
from sqlalchemy_searchable import search as search_db
from starlette.requests import Request
from dispatch.common.utils.composite_search import CompositeSearch
from dispatch.enums import Visibility, UserRoles
from .config import SQLALCHEMY_DATABASE_URI
log = logging.getLogger(__file__)
engine = create_engine(str(SQLALCHEMY_DATABASE_URI))
SessionLocal = sessionmaker(bind=engine)
def resolve_table_name(name):
"""Resolves table names to their mapped names."""
names = re.split("(?=[A-Z])", name) # noqa
return "_".join([x.lower() for x in names if x])
raise_attribute_error = object()
def resolve_attr(obj, attr, default=None):
"""Attempts to access attr via dotted notation, returns none if attr does not exist."""
try:
return functools.reduce(getattr, attr.split("."), obj)
except AttributeError:
return default
class CustomBase:
@declared_attr
def __tablename__(self):
return resolve_table_name(self.__name__)
Base = declarative_base(cls=CustomBase)
make_searchable(Base.metadata)
def get_db(request: Request):
return request.state.db
def get_model_name_by_tablename(table_fullname: str) -> str:
"""Returns the model name of a given table."""
return get_class_by_tablename(table_fullname=table_fullname).__name__
def get_class_by_tablename(table_fullname: str) -> Any:
"""Return class reference mapped to table."""
mapped_name = resolve_table_name(table_fullname)
for c in Base._decl_class_registry.values():
if hasattr(c, "__table__"):
if c.__table__.fullname.lower() == mapped_name.lower():
return c
raise Exception(f"Incorrect tablename '{mapped_name}'. Check the name of your model.")
def paginate(query: Query, page: int, items_per_page: int):
# Never pass a negative OFFSET value to SQL.
offset_adj = 0 if page <= 0 else page - 1
items = query.limit(items_per_page).offset(offset_adj * items_per_page).all()
total = query.order_by(None).count()
return items, total
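# e.g. paginate(query, page=2, items_per_page=5) issues LIMIT 5 OFFSET 5;
# page values of 0 or below are clamped to the first page (OFFSET 0).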
def composite_search(*, db_session, query_str: str, models: List[Base]):
"""Perform a multi-table search based on the supplied query."""
s = CompositeSearch(db_session, models)
q = s.build_query(query_str, sort=True)
return s.search(query=q)
def search(*, db_session, query_str: str, model: str):
"""Perform a search based on the query."""
q = db_session.query(get_class_by_tablename(model))
return search_db(q, query_str, sort=True)
def create_filter_spec(model, fields, ops, values, user_role):
"""Creates a filter spec."""
filters = []
if fields and ops and values:
for field, op, value in zip(fields, ops, values):
if "." in field:
complex_model, complex_field = field.split(".")
filters.append(
{
"model": get_model_name_by_tablename(complex_model),
"field": complex_field,
"op": op,
"value": value,
}
)
else:
filters.append({"model": model, "field": field, "op": op, "value": value})
filter_spec = []
    # group filters by model: filters for the same model are OR'ed together unless a range op forces AND
data = sorted(filters, key=lambda x: x["model"])
for k, g in groupby(data, key=lambda x: x["model"]):
# force 'and' for operations other than equality
filters = list(g)
force_and = False
for f in filters:
if ">" in f["op"] or "<" in f["op"]:
force_and = True
if force_and:
filter_spec.append({"and": filters})
else:
filter_spec.append({"or": filters})
    # for non-admin users, add a filter that hides restricted incidents
if user_role != UserRoles.admin:
# add support for filtering restricted incidents
if model.lower() == "incident":
filter_spec.append(
{
"model": model,
"field": "visibility",
"op": "!=",
"value": Visibility.restricted,
}
)
if filter_spec:
filter_spec = {"and": filter_spec}
log.debug(f"Filter Spec: {json.dumps(filter_spec, indent=2)}")
return filter_spec
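# Illustrative result (arguments are hypothetical): fields=["title"], ops=["=="],
# values=["foo"] for model "Incident" and a non-admin user produces
#   {"and": [{"or": [{"model": "Incident", "field": "title", "op": "==", "value": "foo"}]},
#            {"model": "Incident", "field": "visibility", "op": "!=", "value": Visibility.restricted}]}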
def create_sort_spec(model, sort_by, descending):
"""Creates sort_spec."""
sort_spec = []
if sort_by and descending:
for field, direction in zip(sort_by, descending):
direction = "desc" if direction else "asc"
# we have a complex field, we may need to join
if "." in field:
complex_model, complex_field = field.split(".")[-2:]
sort_spec.append(
{
"model": get_model_name_by_tablename(complex_model),
"field": complex_field,
"direction": direction,
}
)
else:
sort_spec.append({"model": model, "field": field, "direction": direction})
log.debug(f"Sort Spec: {json.dumps(sort_spec, indent=2)}")
return sort_spec
def get_all(*, db_session, model):
"""Fetches a query object based on the model class name."""
return db_session.query(get_class_by_tablename(model))
def join_required_attrs(query, model, join_attrs, fields, sort_by):
"""Determines which attrs (if any) require a join."""
all_fields = list(set(fields + sort_by))
if not join_attrs:
return query
for field, attr in join_attrs:
# sometimes fields have attributes e.g. "incident_type.id"
for f in all_fields:
if field in f:
query = query.join(getattr(model, attr))
return query
def search_filter_sort_paginate(
db_session,
model,
query_str: str = None,
page: int = 1,
items_per_page: int = 5,
sort_by: List[str] = None,
descending: List[bool] = None,
fields: List[str] = None,
ops: List[str] = None,
values: List[str] = None,
join_attrs: List[str] = None,
user_role: UserRoles = UserRoles.user,
):
"""Common functionality for searching, filtering and sorting"""
model_cls = get_class_by_tablename(model)
if query_str:
query = search(db_session=db_session, query_str=query_str, model=model)
else:
query = db_session.query(model_cls)
query = join_required_attrs(query, model_cls, join_attrs, fields, sort_by)
filter_spec = create_filter_spec(model, fields, ops, values, user_role)
query = apply_filters(query, filter_spec)
sort_spec = create_sort_spec(model, sort_by, descending)
query = apply_sort(query, sort_spec)
if items_per_page == -1:
items_per_page = None
query, pagination = apply_pagination(query, page_number=page, page_size=items_per_page)
return {
"items": query.all(),
"itemsPerPage": pagination.page_size,
"page": pagination.page_number,
"total": pagination.total_results,
}
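# Illustrative call (argument values are hypothetical):
#   search_filter_sort_paginate(db_session, "incident", query_str="outage",
#                               sort_by=["created_at"], descending=[True])
# returns a dict with keys "items", "itemsPerPage", "page" and "total".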
| 31.901709
| 91
| 0.636035
|
65724bacbace96ebf07488e3e137ade94e9bf7e2
| 797
|
py
|
Python
|
PyInstaller/hooks/hook-scipy.spatial.transform.rotation.py
|
BearerPipelineTest/pyinstaller
|
0de9d6cf1701689c53161610acdab143a76d40b5
|
[
"Apache-2.0"
] | null | null | null |
PyInstaller/hooks/hook-scipy.spatial.transform.rotation.py
|
BearerPipelineTest/pyinstaller
|
0de9d6cf1701689c53161610acdab143a76d40b5
|
[
"Apache-2.0"
] | null | null | null |
PyInstaller/hooks/hook-scipy.spatial.transform.rotation.py
|
BearerPipelineTest/pyinstaller
|
0de9d6cf1701689c53161610acdab143a76d40b5
|
[
"Apache-2.0"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2021-2022, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import is_module_satisfies
# As of scipy 1.6.0, scipy.spatial.transform.rotation is cython-compiled, so we fail to automatically pick up its
# imports.
if is_module_satisfies("scipy >= 1.6.0"):
hiddenimports = ['scipy.spatial.transform._rotation_groups']
| 44.277778
| 113
| 0.621079
|
289c17ed0b97c39fe81a231569c10db06fd1c4ef
| 1,357
|
py
|
Python
|
scripts/split.py
|
mdraw/newsgeneratorbot
|
7cceab9fe59ee2dec525a4ff6820a313329c6f69
|
[
"MIT"
] | null | null | null |
scripts/split.py
|
mdraw/newsgeneratorbot
|
7cceab9fe59ee2dec525a4ff6820a313329c6f69
|
[
"MIT"
] | null | null | null |
scripts/split.py
|
mdraw/newsgeneratorbot
|
7cceab9fe59ee2dec525a4ff6820a313329c6f69
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import os
import random
parser = argparse.ArgumentParser()
parser.add_argument('input', help='File name of the text to split.')
parser.add_argument(
'--ratio',
help='Ratio of training/validation lines',
type=float,
default=0.9)
parser.add_argument('--quiet', action='store_true')
args = parser.parse_args()
def main():
split_ratio = args.ratio
inpath = os.path.abspath(os.path.expanduser(args.input))
trainpath = os.path.splitext(inpath)[0] + '_train.txt'
validpath = os.path.splitext(inpath)[0] + '_valid.txt'
with open(inpath) as infile:
lines = infile.readlines()
train_lines = []
valid_lines = []
for line in lines:
if random.random() < split_ratio:
train_lines.append(line)
else:
valid_lines.append(line)
with open(trainpath, 'w') as f:
for line in train_lines:
f.write(line)
with open(validpath, 'w') as f:
for line in valid_lines:
f.write(line)
if not args.quiet:
effective_ratio = len(train_lines) / len(lines)
print(f'Effective ratio: {effective_ratio:.3f}')
print(f'Lines: train {len(train_lines)}, valid {len(valid_lines)}')
print(f'Stored splits in\n {trainpath}\n {validpath}')
if __name__ == '__main__':
main()
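# Illustrative usage (the file name is a placeholder):
#   ./split.py corpus.txt --ratio 0.8
# writes corpus_train.txt and corpus_valid.txt next to the input file.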
| 27.14
| 75
| 0.635225
|
f94e25b2b28cb198766a1b22768f98baa37e5fbf
| 26,130
|
py
|
Python
|
corus/sources/meta.py
|
natasha/corus
|
1bf01284628585b56723b4d024367f88886fb3e5
|
[
"MIT"
] | 205
|
2019-05-01T07:38:01.000Z
|
2022-03-30T04:02:54.000Z
|
corus/sources/meta.py
|
trnkv/corus
|
1bf01284628585b56723b4d024367f88886fb3e5
|
[
"MIT"
] | 78
|
2019-04-29T06:53:53.000Z
|
2021-09-20T14:51:25.000Z
|
corus/sources/meta.py
|
trnkv/corus
|
1bf01284628585b56723b4d024367f88886fb3e5
|
[
"MIT"
] | 18
|
2019-06-19T09:56:10.000Z
|
2022-01-30T14:55:14.000Z
|
from corus.record import Record
from . import (
load_mokoron,
load_wiki,
load_simlex,
load_omnia,
load_gramru,
load_corpora,
load_ruadrect,
load_factru,
load_gareev,
load_lenta,
load_lenta2,
load_librusec,
load_ne5,
load_wikiner,
load_bsnlp,
load_persons,
load_rudrec,
load_taiga_arzamas,
load_taiga_fontanka,
load_taiga_interfax,
load_taiga_kp,
load_taiga_lenta,
load_taiga_nplus1,
load_taiga_magazines,
load_taiga_subtitles,
load_taiga_social,
load_taiga_proza,
load_taiga_stihi,
load_buriy_news,
load_buriy_webhose,
load_ods_interfax,
load_ods_gazeta,
load_ods_izvestia,
load_ods_meduza,
load_ods_ria,
load_ods_rt,
load_ods_tass,
load_ria_raw,
load_ria,
load_ud_gsd,
load_ud_taiga,
load_ud_pud,
load_ud_syntag,
load_morphoru_gicrya,
load_morphoru_rnc,
load_morphoru_corpora,
load_russe_hj,
load_russe_rt,
load_russe_ae,
load_toloka_lrwc,
)
class Meta(Record):
__attributes__ = ['title', 'url',
'description', 'stats', 'instruction',
'tags', 'functions']
def __init__(self, title, url=None,
description=None, stats=None, instruction=(),
tags=(), functions=()):
self.title = title
self.url = url
self.description = description
self.stats = stats
self.instruction = instruction
self.tags = tags
self.functions = functions
class Group(Record):
__attributes__ = ['title', 'url', 'description', 'instruction', 'metas']
def __init__(self, title, url=None, description=None, instruction=(), metas=()):
self.title = title
self.url = url
self.description = description
self.instruction = instruction
self.metas = metas
def is_group(item):
return isinstance(item, Group)
class Stats(Record):
__attributes__ = ['bytes', 'count']
def __init__(self, bytes=None, count=None):
self.bytes = bytes
self.count = count
NER = 'ner'
NEWS = 'news'
FICTION = 'fiction'
SOCIAL = 'social'
MORPH = 'morph'
SYNTAX = 'syntax'
EMB = 'emb'
SIM = 'sim'
SENTIMENT = 'sentiment'
WEB = 'web'
METAS = [
Group(
title='Lenta.ru',
url='https://github.com/yutkin/Lenta.Ru-News-Dataset',
metas=[
Meta(
title='Lenta.ru v1.0',
stats=Stats(
bytes=1785632079,
count=739351
),
instruction=[
'wget https://github.com/yutkin/Lenta.Ru-News-Dataset/releases/download/v1.0/lenta-ru-news.csv.gz'
],
tags=[NEWS],
functions=[load_lenta]
),
Meta(
title='Lenta.ru v1.1+',
stats=Stats(
bytes=2084746431,
count=800975
),
instruction=[
'wget https://github.com/yutkin/Lenta.Ru-News-Dataset/releases/download/v1.1/lenta-ru-news.csv.bz2'
],
tags=[NEWS],
functions=[load_lenta2]
),
]
),
Meta(
title='Lib.rus.ec',
url='https://russe.nlpub.org/downloads/',
description='Dump of lib.rus.ec prepared for RUSSE workshop',
stats=Stats(
count=301871,
bytes=155611193945
),
instruction=[
'wget http://panchenko.me/data/russe/librusec_fb2.plain.gz'
],
tags=[FICTION],
functions=[load_librusec]
),
Meta(
title='Rossiya Segodnya',
url='https://github.com/RossiyaSegodnya/ria_news_dataset',
stats=Stats(
count=1003869,
bytes=3974121040
),
instruction=[
'wget https://github.com/RossiyaSegodnya/ria_news_dataset/raw/master/ria.json.gz'
],
tags=[NEWS],
functions=[load_ria_raw, load_ria]
),
Meta(
title='Mokoron Russian Twitter Corpus',
url='http://study.mokoron.com/',
description='Russian Twitter sentiment markup',
instruction=[
'Manually download https://www.dropbox.com/s/9egqjszeicki4ho/db.sql'
],
stats=Stats(
count=17633417,
bytes=1998559570
),
tags=[SOCIAL, SENTIMENT],
functions=[load_mokoron],
),
Meta(
title='Wikipedia',
url='https://dumps.wikimedia.org/',
description='Russian Wiki dump',
instruction=[
'wget https://dumps.wikimedia.org/ruwiki/latest/ruwiki-latest-pages-articles.xml.bz2'
],
stats=Stats(
count=1541401,
bytes=13895798340
),
functions=[load_wiki],
),
Meta(
title='GramEval2020',
url='https://github.com/dialogue-evaluation/GramEval2020',
instruction=[
'wget https://github.com/dialogue-evaluation/GramEval2020/archive/master.zip',
'unzip master.zip',
'mv GramEval2020-master/dataTrain train',
'mv GramEval2020-master/dataOpenTest dev',
'rm -r master.zip GramEval2020-master',
'wget https://github.com/AlexeySorokin/GramEval2020/raw/master/data/GramEval_private_test.conllu'
],
stats=Stats(
count=162372,
bytes=31503713
),
functions=[load_gramru],
),
Meta(
title='OpenCorpora',
url='http://opencorpora.org/',
instruction=[
'wget http://opencorpora.org/files/export/annot/annot.opcorpora.xml.zip'
],
stats=Stats(
count=4030,
bytes=21194932
),
tags=[MORPH],
functions=[load_corpora],
),
Meta(
title='RusVectores SimLex-965',
instruction=[
'wget https://rusvectores.org/static/testsets/ru_simlex965_tagged.tsv',
'wget https://rusvectores.org/static/testsets/ru_simlex965.tsv'
],
tags=[EMB, SIM],
functions=[load_simlex],
),
Meta(
title='Omnia Russica',
url='https://omnia-russica.github.io/',
description='Taiga + Wiki + Araneum. Read "Even larger Russian corpus" https://events.spbu.ru/eventsContent/events/2019/corpora/corp_sborn.pdf',
instruction=[
'Manually download http://bit.ly/2ZT4BY9'
],
stats=Stats(
bytes=525728427750
),
tags=[MORPH, WEB, FICTION],
functions=[load_omnia]
),
###########
#
# NER
#
############
Meta(
title='factRuEval-2016',
url='https://github.com/dialogue-evaluation/factRuEval-2016/',
description='Manual PER, LOC, ORG markup prepared for 2016 Dialog competition',
stats=Stats(
count=254,
bytes=992532
),
instruction=[
'wget https://github.com/dialogue-evaluation/factRuEval-2016/archive/master.zip',
'unzip master.zip',
'rm master.zip'
],
tags=[NER, NEWS],
functions=[load_factru]
),
Meta(
title='Gareev',
url='https://www.researchgate.net/publication/262203599_Introducing_Baselines_for_Russian_Named_Entity_Recognition',
description='Manual PER, ORG markup (no LOC)',
stats=Stats(
count=97,
bytes=465938
),
instruction=[
'Email Rinat Gareev (gareev-rm@yandex.ru) ask for dataset',
'tar -xvf rus-ner-news-corpus.iob.tar.gz',
'rm rus-ner-news-corpus.iob.tar.gz'
],
tags=[NER, NEWS],
functions=[load_gareev]
),
Meta(
title='Collection5',
url='http://www.labinform.ru/pub/named_entities/',
description='News articles with manual PER, LOC, ORG markup',
stats=Stats(
count=1000,
bytes=3105146
),
instruction=[
'wget http://www.labinform.ru/pub/named_entities/collection5.zip',
'unzip collection5.zip',
'rm collection5.zip'
],
tags=[NER, NEWS],
functions=[load_ne5]
),
Meta(
title='WiNER',
url='https://www.aclweb.org/anthology/I17-1042',
description='Sentences from Wiki auto annotated with PER, LOC, ORG tags',
stats=Stats(
count=203287,
bytes=37907651
),
instruction=[
'wget https://github.com/dice-group/FOX/raw/master/input/Wikiner/aij-wikiner-ru-wp3.bz2'
],
tags=[NER],
functions=[load_wikiner]
),
Meta(
title='BSNLP-2019',
url='http://bsnlp.cs.helsinki.fi/shared_task.html',
description='Markup prepared for 2019 BSNLP Shared Task',
stats=Stats(
count=464,
bytes=1211300
),
instruction=[
'wget http://bsnlp.cs.helsinki.fi/TRAININGDATA_BSNLP_2019_shared_task.zip',
'wget http://bsnlp.cs.helsinki.fi/TESTDATA_BSNLP_2019_shared_task.zip',
'unzip TRAININGDATA_BSNLP_2019_shared_task.zip',
'unzip TESTDATA_BSNLP_2019_shared_task.zip -d test_pl_cs_ru_bg',
'rm TRAININGDATA_BSNLP_2019_shared_task.zip TESTDATA_BSNLP_2019_shared_task.zip'
],
tags=[NER],
functions=[load_bsnlp]
),
Meta(
title='Persons-1000',
url='http://ai-center.botik.ru/Airec/index.php/ru/collections/28-persons-1000',
description='Same as Collection5, only PER markup + normalized names',
stats=Stats(
count=1000,
bytes=3105146
),
instruction=[
'wget http://ai-center.botik.ru/Airec/ai-resources/Persons-1000.zip'
],
tags=[NER, NEWS],
functions=[load_persons]
),
Meta(
title='The Russian Drug Reaction Corpus (RuDReC)',
url='https://github.com/cimm-kzn/RuDReC',
description=(
'RuDReC is a new partially annotated corpus of consumer reviews in Russian about pharmaceutical '
'products for the detection of health-related named entities and the effectiveness of pharmaceutical products. '
'Here you can download and work with the annotated part, to get the raw part (1.4M reviews) '
'please refer to https://github.com/cimm-kzn/RuDReC.'
),
stats=Stats(
count=4809,
bytes=1773
),
instruction=[
'wget https://github.com/cimm-kzn/RuDReC/raw/master/data/rudrec_annotated.json'
],
tags=[NER],
functions=[load_rudrec]
),
##########
#
# TAIGA
#
###########
Group(
title='Taiga',
url='https://tatianashavrina.github.io/taiga_site/',
        description='Large collection of Russian texts from various sources: news sites, magazines, literature, social networks',
instruction=[
'wget https://linghub.ru/static/Taiga/retagged_taiga.tar.gz',
'tar -xzvf retagged_taiga.tar.gz'
],
metas=[
Meta(
title='Arzamas',
stats=Stats(
count=311,
bytes=4721604
),
tags=[NEWS],
functions=[load_taiga_arzamas],
),
Meta(
title='Fontanka',
stats=Stats(
count=342683,
bytes=824419630
),
tags=[NEWS],
functions=[load_taiga_fontanka],
),
Meta(
title='Interfax',
stats=Stats(
count=46429,
bytes=81320006
),
tags=[NEWS],
functions=[load_taiga_interfax],
),
Meta(
title='KP',
stats=Stats(
count=45503,
bytes=64789612
),
tags=[NEWS],
functions=[load_taiga_kp],
),
Meta(
title='Lenta',
stats=Stats(
count=36446,
bytes=99772679
),
tags=[NEWS],
functions=[load_taiga_lenta],
),
Meta(
title='Taiga/N+1',
stats=Stats(
count=7696,
bytes=26167631
),
tags=[NEWS],
functions=[load_taiga_nplus1],
),
Meta(
title='Magazines',
stats=Stats(
count=39890,
bytes=2352629006
),
functions=[load_taiga_magazines]
),
Meta(
title='Subtitles',
stats=Stats(
count=19011,
bytes=953237022
),
functions=[load_taiga_subtitles]
),
Meta(
title='Social',
stats=Stats(
count=1876442,
bytes=679670941
),
tags=[SOCIAL],
functions=[load_taiga_social]
),
Meta(
title='Proza',
stats=Stats(
count=1732434,
bytes=41067043857
),
tags=[FICTION],
functions=[load_taiga_proza]
),
Meta(
title='Stihi',
stats=Stats(
count=9157686,
bytes=13745805334
),
functions=[load_taiga_stihi]
),
]
),
#############
#
# BURIY
#
##########
Group(
title='Russian NLP Datasets',
url='https://github.com/buriy/russian-nlp-datasets/releases',
description='Several Russian news datasets from webhose.io, lenta.ru and other news sites.',
metas=[
Meta(
title='News',
description='Dump of top 40 news + 20 fashion news sites.',
instruction=[
'wget https://github.com/buriy/russian-nlp-datasets/releases/download/r4/news-articles-2014.tar.bz2',
'wget https://github.com/buriy/russian-nlp-datasets/releases/download/r4/news-articles-2015-part1.tar.bz2',
'wget https://github.com/buriy/russian-nlp-datasets/releases/download/r4/news-articles-2015-part2.tar.bz2'
],
stats=Stats(
count=2154801,
bytes=7340672169
),
tags=[NEWS],
functions=[load_buriy_news],
),
Meta(
title='Webhose',
description='Dump from webhose.io, 300 sources for one month.',
instruction=[
'wget https://github.com/buriy/russian-nlp-datasets/releases/download/r4/webhose-2016.tar.bz2'
],
stats=Stats(
count=285965,
bytes=901066314
),
tags=[NEWS],
functions=[load_buriy_webhose],
),
]
),
#############
#
# ODS
#
#########
Group(
title='ODS #proj_news_viz',
url='https://github.com/ods-ai-ml4sg/proj_news_viz/releases/tag/data',
description='Several news sites scraped by members of #proj_news_viz ODS project.',
metas=[
Meta(
title='Interfax',
instruction=[
'wget https://github.com/ods-ai-ml4sg/proj_news_viz/releases/download/data/interfax.csv.gz',
],
stats=Stats(
count=543961,
bytes=1314462876,
),
tags=[NEWS],
functions=[load_ods_interfax],
),
Meta(
title='Gazeta',
instruction=[
'wget https://github.com/ods-ai-ml4sg/proj_news_viz/releases/download/data/gazeta.csv.gz',
],
stats=Stats(
count=865847,
bytes=1752712320
),
tags=[NEWS],
functions=[load_ods_gazeta],
),
Meta(
title='Izvestia',
instruction=[
'wget https://github.com/ods-ai-ml4sg/proj_news_viz/releases/download/data/iz.csv.gz',
],
stats=Stats(
count=86601,
bytes=322117124
),
tags=[NEWS],
functions=[load_ods_izvestia],
),
Meta(
title='Meduza',
instruction=[
'wget https://github.com/ods-ai-ml4sg/proj_news_viz/releases/download/data/meduza.csv.gz',
],
stats=Stats(
count=71806,
bytes=283233963
),
tags=[NEWS],
functions=[load_ods_meduza],
),
Meta(
title='RIA',
instruction=[
'wget https://github.com/ods-ai-ml4sg/proj_news_viz/releases/download/data/ria.csv.gz',
],
stats=Stats(
count=101543,
bytes=245236791
),
tags=[NEWS],
functions=[load_ods_ria],
),
Meta(
title='Russia Today',
instruction=[
'wget https://github.com/ods-ai-ml4sg/proj_news_viz/releases/download/data/rt.csv.gz',
],
stats=Stats(
count=106644,
bytes=196212474
),
tags=[NEWS],
functions=[load_ods_rt],
),
Meta(
title='TASS',
instruction=[
'wget https://github.com/ods-ai-ml4sg/proj_news_viz/releases/download/data/tass-001.csv.gz',
],
stats=Stats(
count=1135635,
bytes=3515136716
),
tags=[NEWS],
functions=[load_ods_tass],
),
]
),
#############
#
# UD
#
#########
Group(
title='Universal Dependencies',
url='https://universaldependencies.org/',
metas=[
Meta(
title='GSD',
instruction=[
'wget https://github.com/UniversalDependencies/UD_Russian-GSD/raw/master/ru_gsd-ud-dev.conllu',
'wget https://github.com/UniversalDependencies/UD_Russian-GSD/raw/master/ru_gsd-ud-test.conllu',
'wget https://github.com/UniversalDependencies/UD_Russian-GSD/raw/master/ru_gsd-ud-train.conllu'
],
stats=Stats(
count=5030,
bytes=1059114
),
tags=[MORPH, SYNTAX],
functions=[load_ud_gsd],
),
Meta(
title='Taiga',
instruction=[
'wget https://github.com/UniversalDependencies/UD_Russian-Taiga/raw/master/ru_taiga-ud-dev.conllu',
'wget https://github.com/UniversalDependencies/UD_Russian-Taiga/raw/master/ru_taiga-ud-test.conllu',
'wget https://github.com/UniversalDependencies/UD_Russian-Taiga/raw/master/ru_taiga-ud-train.conllu'
],
stats=Stats(
count=3264,
bytes=362293
),
tags=[MORPH, SYNTAX],
functions=[load_ud_taiga],
),
Meta(
title='PUD',
instruction=[
'wget https://github.com/UniversalDependencies/UD_Russian-PUD/raw/master/ru_pud-ud-test.conllu',
],
stats=Stats(
count=1000,
bytes=212766
),
tags=[MORPH, SYNTAX],
functions=[load_ud_pud],
),
Meta(
title='SynTagRus',
instruction=[
'wget https://github.com/UniversalDependencies/UD_Russian-SynTagRus/raw/master/ru_syntagrus-ud-dev.conllu',
'wget https://github.com/UniversalDependencies/UD_Russian-SynTagRus/raw/master/ru_syntagrus-ud-test.conllu',
'wget https://github.com/UniversalDependencies/UD_Russian-SynTagRus/raw/master/ru_syntagrus-ud-train.conllu',
],
stats=Stats(
count=61889,
bytes=11877258
),
tags=[MORPH, SYNTAX],
functions=[load_ud_syntag],
),
]
),
#############
#
# MORPHORUEVAL
#
#########
Group(
title='morphoRuEval-2017',
url='https://github.com/dialogue-evaluation/morphoRuEval-2017',
metas=[
Meta(
title='General Internet-Corpus',
instruction=[
'wget https://github.com/dialogue-evaluation/morphoRuEval-2017/raw/master/GIKRYA_texts_new.zip',
'unzip GIKRYA_texts_new.zip',
'rm GIKRYA_texts_new.zip'
],
stats=Stats(
count=83148,
bytes=11091464
),
tags=[MORPH],
functions=[load_morphoru_gicrya],
),
Meta(
title='Russian National Corpus',
instruction=[
'wget https://github.com/dialogue-evaluation/morphoRuEval-2017/raw/master/RNC_texts.rar',
'unrar x RNC_texts.rar',
'rm RNC_texts.rar'
],
stats=Stats(
count=98892,
bytes=13330673
),
tags=[MORPH],
functions=[load_morphoru_rnc],
),
Meta(
title='OpenCorpora',
instruction=[
'wget https://github.com/dialogue-evaluation/morphoRuEval-2017/raw/master/OpenCorpora_Texts.rar',
'unrar x OpenCorpora_Texts.rar',
'rm OpenCorpora_Texts.rar'
],
stats=Stats(
count=38510,
bytes=5028255
),
tags=[MORPH],
functions=[load_morphoru_corpora],
),
]
),
#############
#
# RUSSE SEM
#
#########
Group(
title='RUSSE Russian Semantic Relatedness',
url='https://russe.nlpub.org/downloads/',
metas=[
Meta(
title='HJ: Human Judgements of Word Pairs',
instruction=[
'wget https://github.com/nlpub/russe-evaluation/raw/master/russe/evaluation/hj.csv'
],
tags=[EMB, SIM],
functions=[load_russe_hj],
),
Meta(
title='RT: Synonyms and Hypernyms from the Thesaurus RuThes',
instruction=[
'wget https://raw.githubusercontent.com/nlpub/russe-evaluation/master/russe/evaluation/rt.csv'
],
tags=[EMB, SIM],
functions=[load_russe_rt],
),
Meta(
title='AE: Cognitive Associations from the Sociation.org Experiment',
instruction=[
'wget https://github.com/nlpub/russe-evaluation/raw/master/russe/evaluation/ae-train.csv',
'wget https://github.com/nlpub/russe-evaluation/raw/master/russe/evaluation/ae-test.csv',
'wget https://raw.githubusercontent.com/nlpub/russe-evaluation/master/russe/evaluation/ae2.csv'
],
tags=[EMB, SIM],
functions=[load_russe_ae],
),
]
),
#############
#
# TOLOKA
#
#########
Group(
title='Toloka Datasets',
url='https://toloka.yandex.ru/datasets/',
metas=[
Meta(
title='Lexical Relations from the Wisdom of the Crowd (LRWC)',
instruction=[
'wget https://tlk.s3.yandex.net/dataset/LRWC.zip',
'unzip LRWC.zip',
'rm LRWC.zip'
],
tags=[EMB, SIM],
functions=[load_toloka_lrwc],
),
Meta(
title='The Russian Adverse Drug Reaction Corpus of Tweets (RuADReCT)',
url='https://github.com/cimm-kzn/RuDReC',
description='This corpus was developed for the Social Media Mining for Health Applications (#SMM4H) '
'Shared Task 2020',
instruction=[
'wget https://github.com/cimm-kzn/RuDReC/raw/master/data/RuADReCT.zip',
'unzip RuADReCT.zip',
'rm RuADReCT.zip'
],
stats=Stats(
count=9515,
bytes=2190063
),
tags=[SOCIAL],
functions=[load_ruadrect],
),
]
),
]
| 30.850059
| 152
| 0.487218
|
69b0c71d3bb8f4cf67474c55884300e34280ea53
| 1,710
|
py
|
Python
|
stubs.min/System/Windows/__init___parts/DynamicResourceExtensionConverter.py
|
ricardyn/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | 1
|
2021-02-02T13:39:16.000Z
|
2021-02-02T13:39:16.000Z
|
stubs.min/System/Windows/__init___parts/DynamicResourceExtensionConverter.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
stubs.min/System/Windows/__init___parts/DynamicResourceExtensionConverter.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
class DynamicResourceExtensionConverter(TypeConverter):
"""
Converts from parsed XAML to System.Windows.DynamicResourceExtension and supports dynamic resource references made from XAML.
DynamicResourceExtensionConverter()
"""
def CanConvertTo(self,*__args):
"""
CanConvertTo(self: DynamicResourceExtensionConverter,context: ITypeDescriptorContext,destinationType: Type) -> bool
Returns a value indicating whether this converter can convert an object to the
given destination type using the context.
context: Context in which the provided type should be evaluated.
destinationType: The type of the destination/output of conversion.
Returns: true if destinationType is type of
System.ComponentModel.Design.Serialization.InstanceDescriptor; otherwise,
false.
"""
pass
def ConvertTo(self,*__args):
"""
ConvertTo(self: DynamicResourceExtensionConverter,context: ITypeDescriptorContext,culture: CultureInfo,value: object,destinationType: Type) -> object
Converts the specified object to another type.
context: An System.ComponentModel.ITypeDescriptorContext object that provides a format
context.
culture: A System.Globalization.CultureInfo object that specifies the culture to
represent the number.
value: Value to be converted. This is expected to be type
System.Windows.DynamicResourceExtension.
destinationType: Type that should be converted to.
Returns: The returned converted object. Cast this to the requested type. Ordinarily this
should be cast to
System.ComponentModel.Design.Serialization.InstanceDescriptor.
"""
pass
| 39.767442
| 153
| 0.74386
|
0e5bf6a2eec1f5a78a6dd1060921eb046ccf37ea
| 3,076
|
py
|
Python
|
main.py
|
SynimSelimi/HorspoolAlgorithm
|
c20fda244529800b90338f4283cb1a66ca2b1261
|
[
"MIT"
] | null | null | null |
main.py
|
SynimSelimi/HorspoolAlgorithm
|
c20fda244529800b90338f4283cb1a66ca2b1261
|
[
"MIT"
] | null | null | null |
main.py
|
SynimSelimi/HorspoolAlgorithm
|
c20fda244529800b90338f4283cb1a66ca2b1261
|
[
"MIT"
] | null | null | null |
samples = [{
"text": "Nëse kërkesat e absurdit nuk respektohen në vepër, nëse ajo nuk ilustron divorcin dhe revoltën, nëse ajo u bën lëshime iluzioneve dhe ngjall shpresë, ajo nuk është më e padobi. Nuk mund të shkëputem më prej saj.",
"pattern": "lëshime iluzioneve",
"asserts": True
},
{
"text": "Tani që kishte lënë prapa ditët e mërzitshme e të ftohta në mal dhe kishte veshur uniformën e re, po e kapte përsëri ndjenja e madhështisë. Fshatari kishte fytyrë të hequr dhe sy të përhimë.",
"pattern": "përhimë?",
"asserts": False
},
{
"text": "Çdo njeri i dashuruar do t'i thotë të dashurës së tij: të dua, xhan, të dua dhe do t'i thotë se e dashuron në gjuhën e tij kombëtare, por do ta realizojë dashurinë me të brenda kushtesh të posaçme, specifike për mjedisin dhe popullin e tij.",
"pattern": "kombëtare",
"asserts": True
},
{
"text": "Ata s'paskëshin qenë rapsodë, por magjistarë. Ujana e keqe rridhte ndërkaq mospërfillëse.",
"pattern": "muzikë",
"asserts": False
},
{
"text": "Dhe këta heshtakë zakonisht heshtin vetëm publikisht, por veprojnë nën tokë: i sufrojnë të tjerët, u mbajnë ligjërata të fshehta, ua hartojnë strategjinë dhe, mandej, kapardisen për ngadhnjimin e një të bëre të caktuar.",
"pattern": "veprojnë",
"asserts": True
}]
def shift_table(pattern):
sh_table = dict()
pattern_len = len(pattern) - 1
for i in range(0, pattern_len):
sh_table[pattern[i]] = pattern_len - i
return sh_table
def horspool_search(text, pattern):
sh_table = shift_table(pattern)
ptrn_len = len(pattern)
i = len(pattern) - 1
while i <= len(text) - 1:
matches = 0
def is_match():
return pattern[ptrn_len - 1 - matches] == text[i - matches]
while matches < ptrn_len and is_match():
matches += 1
if matches == ptrn_len:
return i - matches + 1
else:
offset = sh_table.get(text[i]) or ptrn_len
i += offset
return -1
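# e.g. horspool_search("JIM SAW ME IN A BARBERSHOP", "BARBER") returns 16, the index
# where the pattern starts in the text; -1 means no match was found.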
def is_found(position):
    return position != -1
def round_zero(nm):
return 0 if nm < 0 else nm
def round_top(nm, top):
return top if nm > top else nm
def print_result(position, text, pattern):
print((f'Found "{pattern}" at {position}' if is_found(position) else "Not Found\n"))
if position != -1:
ptrn_len = len(pattern)
begin = round_zero(position - ptrn_len - 10)
end = round_top(position + ptrn_len + 10, len(text))
print(f'... {text[begin:end]} ...\n')
def demo1():
print("[+] Horspool Algorithm Demo 1")
for sample in samples:
position = horspool_search(sample['text'], sample['pattern'])
print(f'Working properly? - {is_found(position) == sample["asserts"]}')
print_result(position, sample['text'], sample['pattern'])
def demo2():
print("[+] Horspool Algorithm Demo 2")
text = input("Enter message: ").strip()
pattern = input("Enter pattern: ").strip()
print(f'\n[+] Searching for "{pattern}" in message...')
position = horspool_search(text, pattern)
print_result(position, text, pattern)
def main():
demo1()
demo2()
if __name__ == "__main__":
main()
| 32.378947
| 253
| 0.676853
|
a60fe1b9b27ad6baf98a5edb4b1f6cbfa28fede0
| 2,415
|
py
|
Python
|
src/pyAnalytics/gui_raman_maps/raman_pca_window.py
|
AlexanderSouthan/pyAnalytics
|
18038b2cda75a99280d3cdd68d61e601eefa0fe0
|
[
"MIT"
] | null | null | null |
src/pyAnalytics/gui_raman_maps/raman_pca_window.py
|
AlexanderSouthan/pyAnalytics
|
18038b2cda75a99280d3cdd68d61e601eefa0fe0
|
[
"MIT"
] | null | null | null |
src/pyAnalytics/gui_raman_maps/raman_pca_window.py
|
AlexanderSouthan/pyAnalytics
|
18038b2cda75a99280d3cdd68d61e601eefa0fe0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import (QMainWindow, QComboBox, QWidget, QGridLayout,
QDesktopWidget, QLabel, QVBoxLayout,
QPushButton, QHBoxLayout)
class raman_pca_window(QMainWindow):
def __init__(self, raman_data):
self.update_data(raman_data)
super().__init__()
self.init_window()
self.define_widgets()
self.position_widgets()
self.connect_event_handlers()
def init_window(self):
        self.setGeometry(500, 500, 600, 360)  # x pos, y pos, width, height
self.center() #center function is defined below
self.setWindowTitle('Principal component analysis options')
self.container0 = QWidget(self)
self.setCentralWidget(self.container0)
self.grid_container = QGridLayout()
self.container0.setLayout(self.grid_container)
def define_widgets(self):
self.pca_label = QLabel('<b>Principal component analysis</b>')
self.pca_components_label = QLabel('Number of principal components')
self.pca_components_combo = QComboBox()
self.pca_components_combo.addItems([str(ii) for ii in range(1, 13)])
self.perform_pca_button = QPushButton('Perform PCA')
def position_widgets(self):
self.pca_components_layout = QHBoxLayout()
self.pca_components_layout.addWidget(self.pca_components_label)
self.pca_components_layout.addWidget(self.pca_components_combo)
self.imaging_layout = QVBoxLayout()
self.imaging_layout.addWidget(self.pca_label)
self.imaging_layout.addLayout(self.pca_components_layout)
self.imaging_layout.addWidget(self.perform_pca_button)
self.imaging_layout.addStretch(1)
self.grid_container.addLayout(self.imaging_layout, *(0, 0), 1, 1)
def connect_event_handlers(self):
self.perform_pca_button.clicked.connect(self.perform_pca)
def update_data(self, raman_data):
self.raman_data = raman_data
def center(self):#centers object on screen
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def perform_pca(self):
pca_components = int(self.pca_components_combo.currentText())
self.raman_data.principal_component_analysis(pca_components)
print('PCA finished!')
| 37.153846
| 76
| 0.684886
|
ab7fbb0bb53a8246dc943735adfc8f5a753e4255
| 745
|
py
|
Python
|
databases/schemas.py
|
MichaelLan/PracticeFastAPI
|
aa7fe2167e73b4ab7c2f288db1ae93fd696083f8
|
[
"MIT"
] | null | null | null |
databases/schemas.py
|
MichaelLan/PracticeFastAPI
|
aa7fe2167e73b4ab7c2f288db1ae93fd696083f8
|
[
"MIT"
] | null | null | null |
databases/schemas.py
|
MichaelLan/PracticeFastAPI
|
aa7fe2167e73b4ab7c2f288db1ae93fd696083f8
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel, Field
class UserBase(BaseModel):
username: str = Field(..., title='username')
name: str = Field(..., title='Nombre')
email: str = Field(..., title='Correo electrónico', regex='^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$')
group_id: int = Field(..., title='ID del grupo al que pertenece')
class GroupBase(BaseModel):
name_group: str = Field(..., title='Nombre del grupo')
city: str
link: str = None
class UserCreate(UserBase):
pass
class GroupCreate(GroupBase):
pass
class Group(GroupBase):
    # For reading (response model)
id: int
class Config:
orm_mode = True
class User(UserBase):
id: int
class Config:
orm_mode = True
| 19.102564
| 123
| 0.601342
|
92fd25fae7251901496169053d15fb8bc6d30a24
| 399
|
py
|
Python
|
eodashboard/eodashboard/wsgi.py
|
ArkaprabhaChakraborty/EarthObservationDashboard
|
9b1e6d95b24edcf99533ba279b0d95b776be185c
|
[
"MIT"
] | null | null | null |
eodashboard/eodashboard/wsgi.py
|
ArkaprabhaChakraborty/EarthObservationDashboard
|
9b1e6d95b24edcf99533ba279b0d95b776be185c
|
[
"MIT"
] | null | null | null |
eodashboard/eodashboard/wsgi.py
|
ArkaprabhaChakraborty/EarthObservationDashboard
|
9b1e6d95b24edcf99533ba279b0d95b776be185c
|
[
"MIT"
] | 1
|
2022-03-04T09:13:01.000Z
|
2022-03-04T09:13:01.000Z
|
"""
WSGI config for eodashboard project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'eodashboard.settings')
application = get_wsgi_application()
| 23.470588
| 78
| 0.789474
|
55c65ff0f9718cb25373c89631c6e56161db1730
| 10,052
|
py
|
Python
|
Lab_Dash/models.py
|
SimonSchubotz/Electronic-Laboratory-Notebook
|
a5dc3daa76b07370c1ee5b7e74fb6c780c3d3c97
|
[
"Apache-2.0"
] | null | null | null |
Lab_Dash/models.py
|
SimonSchubotz/Electronic-Laboratory-Notebook
|
a5dc3daa76b07370c1ee5b7e74fb6c780c3d3c97
|
[
"Apache-2.0"
] | null | null | null |
Lab_Dash/models.py
|
SimonSchubotz/Electronic-Laboratory-Notebook
|
a5dc3daa76b07370c1ee5b7e74fb6c780c3d3c97
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from datetime import datetime
# Create your models here.
class OCA(models.Model):
"""OCA Saves all dash properties of the OCA measurements
Parameters
----------
models : [type]
[description]
"""
Name = models.TextField(unique=True, blank=True, null=True)
CA_high_degree = models.FloatField(blank=True, null=True)
CA_low_degree = models.FloatField(blank=True, null=True)
BD_high_mm = models.FloatField(blank=True, null=True)
BD_low_mm = models.FloatField(blank=True, null=True)
Time_high_sec = models.FloatField(blank=True, null=True)
Time_low_sec = models.FloatField(blank=True, null=True)
Time_diff_pump = models.FloatField(blank=True, null=True)
Cycle_drop_1_sec = models.FloatField(blank=True, null=True)
Cycle_drop_2_sec = models.FloatField(blank=True, null=True)
Cycle_drop_3_sec = models.FloatField(blank=True, null=True)
Cycle_drop_4_sec = models.FloatField(blank=True, null=True)
Cycle_drop_5_sec = models.FloatField(blank=True, null=True)
Cycle_drop_6_sec = models.FloatField(blank=True, null=True)
def __str__(self):
return str(self.Name)
def save(self, *args, **kwargs):#saves '' as none
if not self.Name:
self.Name = None
super(OCA, self).save(*args, **kwargs)
class RSD(models.Model):
"""RSD Saves all dash properties of the RSD measurements
Parameters
----------
models : [type]
[description]
"""
Name = models.TextField(unique=True, blank=True, null=True)
CA_high_degree = models.FloatField(blank=True, null=True)
CA_low_degree = models.FloatField(blank=True, null=True)
BD_high_mm = models.FloatField(blank=True, null=True)
BD_low_mm = models.FloatField(blank=True, null=True)
Time_high_sec = models.FloatField(blank=True, null=True)
Time_low_sec = models.FloatField(blank=True, null=True)
Time_diff_pump = models.FloatField(blank=True, null=True)
def __str__(self):
return str(self.Name)
def save(self, *args, **kwargs):#saves '' as none
if not self.Name:
self.Name = None
super(RSD, self).save(*args, **kwargs)
class SEL(models.Model):
"""SEL Saves all dash properties of the SEL measurements
Parameters
----------
models : [type]
[description]
"""
Name = models.TextField(unique=True, blank=True, null=True)
    Start_datetime_elli = models.DateTimeField(default=datetime.now, null=True, blank=True)  # pass the callable so the default is evaluated at save time, not at import time
def __str__(self):
return str(self.Name)
def save(self, *args, **kwargs):#saves '' as none
if not self.Name:
self.Name = None
super(SEL, self).save(*args, **kwargs)
class ComparisonEntry(models.Model):
"""SEL Saves all dash properties of the SEL measurements
Parameters
----------
models : [type]
[description]
"""
Name = models.TextField(blank=True, null=True)
Label = models.TextField(blank=True, null=True)
ExpBaseID = models.IntegerField(blank=True, null=True)#Foreign key not possible because of circular import
X_high = models.FloatField(blank=True, null=True)
X_low = models.FloatField(blank=True, null=True)
Y_high = models.FloatField(blank=True, null=True)
Y_low = models.FloatField(blank=True, null=True)
X_shift = models.FloatField(blank=True, null=True)
Y_shift = models.FloatField(blank=True, null=True)
def __str__(self):
return str(self.Name)
def save(self, *args, **kwargs):#saves '' as none
if not self.Name:
self.Name = None
super(ComparisonEntry, self).save(*args, **kwargs)
class Comparison(models.Model):
"""SEL Saves all dash properties of the SEL measurements
Parameters
----------
models : [type]
[description]
"""
Name = models.TextField(blank=True, null=True)
Title = models.TextField(blank=True, null=True)
Entry = models.ManyToManyField(ComparisonEntry, blank=True)
X_shift = models.FloatField(blank=True, null=True)
Y_shift = models.FloatField(blank=True, null=True)
X_high = models.FloatField(blank=True, null=True)
X_low = models.FloatField(blank=True, null=True)
Y_high = models.FloatField(blank=True, null=True)
Y_low = models.FloatField(blank=True, null=True)
def __str__(self):
return str(self.Name)
def save(self, *args, **kwargs):#saves '' as none
if not self.Name:
self.Name = None
super(Comparison, self).save(*args, **kwargs)
class OszAnalysisEntry(models.Model):
"""SEL Saves all dash properties of the SEL measurements
Parameters
----------
models : [type]
[description]
"""
Name = models.TextField(blank=True, null=True)
Label = models.TextField(blank=True, null=True)
OszAnalysisID = models.IntegerField(blank=True, null=True)#Foreign key not possible because of circular import
X_high = models.FloatField(blank=True, null=True)
X_low = models.FloatField(blank=True, null=True)
Y_high = models.FloatField(blank=True, null=True)
Y_low = models.FloatField(blank=True, null=True)
X_shift = models.FloatField(blank=True, null=True)
Y_shift = models.FloatField(blank=True, null=True)
def __str__(self):
return str(self.Name)
def save(self, *args, **kwargs):#saves '' as none
if not self.Name:
self.Name = None
super(OszAnalysisEntry, self).save(*args, **kwargs)
class OszAnalysis(models.Model):
"""SEL Saves all dash properties of the SEL measurements
Parameters
----------
models : [type]
[description]
"""
Name = models.TextField(blank=True, null=True)
Title = models.TextField(blank=True, null=True)
Entry = models.ManyToManyField(OszAnalysisEntry, blank=True)
X_shift = models.FloatField(blank=True, null=True)
Y_shift = models.FloatField(blank=True, null=True)
X_high = models.FloatField(blank=True, null=True)
X_low = models.FloatField(blank=True, null=True)
Y_high = models.FloatField(blank=True, null=True)
Y_low = models.FloatField(blank=True, null=True)
def __str__(self):
return str(self.Name)
def save(self, *args, **kwargs):#saves '' as none
if not self.Name:
self.Name = None
super(OszAnalysis, self).save(*args, **kwargs)
class SFG(models.Model):
"""SEL Saves all dash properties of the SEL measurements
Parameters
----------
models : [type]
[description]
"""
Name = models.TextField(unique=True, blank=True, null=True)
def __str__(self):
return str(self.Name)
def save(self, *args, **kwargs):#saves '' as none
if not self.Name:
self.Name = None
super(SFG, self).save(*args, **kwargs)
class GRP(models.Model):
Name = models.TextField(unique=True, blank=True, null=True)
PossibleTyps = [('SFG_kin_3D', 'Sum frequency generation kinetic'), ('SFG_kin_drive', 'Sum frequency generation kinetic while changing the Position'),
('SFG_abrastern', 'Sum frequency generation at different locations'), ('SFG_cycle', 'Sum frequency generation cycle drops')]
Typ = models.TextField(choices=PossibleTyps, blank=True, null=True)
def __str__(self):
return str(self.Name)
class SFG_cycle(models.Model):
Name = models.TextField(unique=True)
Graph_distance = models.FloatField(blank=True, null=True)
Signal_high = models.FloatField(blank=True, null=True)
Signal_low = models.FloatField(blank=True, null=True)
Wavenumber_high = models.FloatField(blank=True, null=True)
Wavenumber_low = models.FloatField(blank=True, null=True)
Group = models.ForeignKey(GRP, on_delete=models.CASCADE, blank=True, null=True)
def __str__(self):
return str(self.Name)
class SFG_abrastern(models.Model):
Name = models.TextField(unique=True)
Graph_distance = models.FloatField(blank=True, null=True)
Signal_high = models.FloatField(blank=True, null=True)
Signal_low = models.FloatField(blank=True, null=True)
Wavenumber_high = models.FloatField(blank=True, null=True)
Wavenumber_low = models.FloatField(blank=True, null=True)
Group = models.ForeignKey(GRP, on_delete=models.CASCADE, blank=True, null=True)
def __str__(self):
return str(self.Name)
class SFG_kin_3D(models.Model):
Name = models.TextField(unique=True)
Time_high_sec = models.FloatField(blank=True, null=True)
Time_low_sec = models.FloatField(blank=True, null=True)
Signal_high = models.FloatField(blank=True, null=True)
Signal_low = models.FloatField(blank=True, null=True)
Wavenumber_high = models.FloatField(blank=True, null=True)
Wavenumber_low = models.FloatField(blank=True, null=True)
Group = models.ForeignKey(GRP, on_delete=models.CASCADE, blank=True, null=True)
def __str__(self):
return str(self.Name)
class SFG_kin_drive(models.Model):
Name = models.TextField(unique=True)
Time_high_sec = models.FloatField(blank=True, null=True)
Time_low_sec = models.FloatField(blank=True, null=True)
Signal_high = models.FloatField(blank=True, null=True)
Signal_low = models.FloatField(blank=True, null=True)
Wavenumber_high = models.FloatField(blank=True, null=True)
Wavenumber_low = models.FloatField(blank=True, null=True)
Group = models.ForeignKey(GRP, on_delete=models.CASCADE, blank=True, null=True)
def __str__(self):
return str(self.Name)
class KUR(models.Model):
Name = models.TextField(unique=True)
CA_high_degree = models.FloatField(blank=True, null=True)
CA_low_degree = models.FloatField(blank=True, null=True)
BD_high_mm = models.FloatField(blank=True, null=True)
BD_low_mm = models.FloatField(blank=True, null=True)
Time_high_sec = models.FloatField(blank=True, null=True)
Time_low_sec = models.FloatField(blank=True, null=True)
def __str__(self):
return str(self.Name)
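# A minimal usage sketch of the save() overrides above, assuming a configured
# Django project with these models migrated; the record values are made-up
# placeholders. It shows how blank Name fields end up stored as NULL.
def _example_blank_name_normalisation():
    """Sketch: empty Name strings are saved as None (NULL) by save()."""
    entry = OszAnalysisEntry(Name='')       # hypothetical entry
    entry.save()                            # save() turns '' into None
    assert entry.Name is None
    analysis = OszAnalysis(Name='run-01', Title='demo run')
    analysis.save()
    analysis.Entry.add(entry)               # link via the ManyToMany field
    return analysis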
| 40.532258
| 154
| 0.679467
|
4c7138b0d464134b2ae4023c02248128887296a4
| 5,365
|
py
|
Python
|
tests/components/deconz/test_lock.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
tests/components/deconz/test_lock.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
tests/components/deconz/test_lock.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""deCONZ lock platform tests."""
from unittest.mock import patch
from openpeerpower.components.lock import (
DOMAIN as LOCK_DOMAIN,
SERVICE_LOCK,
SERVICE_UNLOCK,
)
from openpeerpower.const import (
ATTR_ENTITY_ID,
STATE_LOCKED,
STATE_UNAVAILABLE,
STATE_UNLOCKED,
)
from .test_gateway import (
DECONZ_WEB_REQUEST,
mock_deconz_put_request,
setup_deconz_integration,
)
async def test_no_locks(opp, aioclient_mock):
"""Test that no lock entities are created."""
await setup_deconz_integration(opp, aioclient_mock)
assert len(opp.states.async_all()) == 0
async def test_lock_from_light(opp, aioclient_mock, mock_deconz_websocket):
"""Test that all supported lock entities based on lights are created."""
data = {
"lights": {
"1": {
"etag": "5c2ec06cde4bd654aef3a555fcd8ad12",
"hascolor": False,
"lastannounced": None,
"lastseen": "2020-08-22T15:29:03Z",
"manufacturername": "Danalock",
"modelid": "V3-BTZB",
"name": "Door lock",
"state": {"alert": "none", "on": False, "reachable": True},
"swversion": "19042019",
"type": "Door Lock",
"uniqueid": "00:00:00:00:00:00:00:00-00",
}
}
}
with patch.dict(DECONZ_WEB_REQUEST, data):
config_entry = await setup_deconz_integration(opp, aioclient_mock)
assert len(opp.states.async_all()) == 1
assert opp.states.get("lock.door_lock").state == STATE_UNLOCKED
event_changed_light = {
"t": "event",
"e": "changed",
"r": "lights",
"id": "1",
"state": {"on": True},
}
await mock_deconz_websocket(data=event_changed_light)
await opp.async_block_till_done()
assert opp.states.get("lock.door_lock").state == STATE_LOCKED
# Verify service calls
mock_deconz_put_request(aioclient_mock, config_entry.data, "/lights/1/state")
# Service lock door
await opp.services.async_call(
LOCK_DOMAIN,
SERVICE_LOCK,
{ATTR_ENTITY_ID: "lock.door_lock"},
blocking=True,
)
assert aioclient_mock.mock_calls[1][2] == {"on": True}
# Service unlock door
await opp.services.async_call(
LOCK_DOMAIN,
SERVICE_UNLOCK,
{ATTR_ENTITY_ID: "lock.door_lock"},
blocking=True,
)
assert aioclient_mock.mock_calls[2][2] == {"on": False}
await opp.config_entries.async_unload(config_entry.entry_id)
states = opp.states.async_all()
assert len(states) == 1
for state in states:
assert state.state == STATE_UNAVAILABLE
await opp.config_entries.async_remove(config_entry.entry_id)
await opp.async_block_till_done()
assert len(opp.states.async_all()) == 0
async def test_lock_from_sensor(opp, aioclient_mock, mock_deconz_websocket):
"""Test that all supported lock entities based on sensors are created."""
data = {
"sensors": {
"1": {
"config": {
"battery": 100,
"lock": False,
"on": True,
"reachable": True,
},
"ep": 11,
"etag": "a43862f76b7fa48b0fbb9107df123b0e",
"lastseen": "2021-03-06T22:25Z",
"manufacturername": "Onesti Products AS",
"modelid": "easyCodeTouch_v1",
"name": "Door lock",
"state": {
"lastupdated": "2021-03-06T21:25:45.624",
"lockstate": "unlocked",
},
"swversion": "20201211",
"type": "ZHADoorLock",
"uniqueid": "00:00:00:00:00:00:00:00-00",
}
}
}
with patch.dict(DECONZ_WEB_REQUEST, data):
config_entry = await setup_deconz_integration(opp, aioclient_mock)
assert len(opp.states.async_all()) == 2
assert opp.states.get("lock.door_lock").state == STATE_UNLOCKED
event_changed_light = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"state": {"lockstate": "locked"},
}
await mock_deconz_websocket(data=event_changed_light)
await opp.async_block_till_done()
assert opp.states.get("lock.door_lock").state == STATE_LOCKED
# Verify service calls
mock_deconz_put_request(aioclient_mock, config_entry.data, "/sensors/1/config")
# Service lock door
await opp.services.async_call(
LOCK_DOMAIN,
SERVICE_LOCK,
{ATTR_ENTITY_ID: "lock.door_lock"},
blocking=True,
)
assert aioclient_mock.mock_calls[1][2] == {"lock": True}
# Service unlock door
await opp.services.async_call(
LOCK_DOMAIN,
SERVICE_UNLOCK,
{ATTR_ENTITY_ID: "lock.door_lock"},
blocking=True,
)
assert aioclient_mock.mock_calls[2][2] == {"lock": False}
await opp.config_entries.async_unload(config_entry.entry_id)
states = opp.states.async_all()
assert len(states) == 2
for state in states:
assert state.state == STATE_UNAVAILABLE
await opp.config_entries.async_remove(config_entry.entry_id)
await opp.async_block_till_done()
assert len(opp.states.async_all()) == 0
| 29.478022
| 83
| 0.594781
|
48a26689d5cc1fd3a0d956b83720eaaf69d4966e
| 18,282
|
py
|
Python
|
scripts/tse.py
|
aassumpcao/tseresearch
|
8c46a81fddee1f2a18b35a28a32dfe0a1f294750
|
[
"MIT"
] | null | null | null |
scripts/tse.py
|
aassumpcao/tseresearch
|
8c46a81fddee1f2a18b35a28a32dfe0a1f294750
|
[
"MIT"
] | 1
|
2019-07-09T20:37:06.000Z
|
2019-07-09T20:37:06.000Z
|
scripts/tse.py
|
aassumpcao/tseresearch
|
8c46a81fddee1f2a18b35a28a32dfe0a1f294750
|
[
"MIT"
] | null | null | null |
### tse classes and methods
# developed by:
# Andre Assumpcao
# andre.assumpcao@gmail.com
# import standard libraries
import codecs
import math
import os
import re
import time
# import third-party libraries
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import (
NoSuchElementException, StaleElementReferenceException, TimeoutException
)
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
import pandas as pd
# define scraper class
class scraper:
"""
the scraper class contains methods used to download data from
tse websites. it visits two pages: (i) any candidate's profile
and (ii) the decision regarding their candidacy.
attributes:
browser: placeholder for selenium browser call
methods:
case: download case and protocol number
decision: use protocol number to download judicial decision
"""
# define static arguments for all methods in the scraper class
browser, page = [], []
main = 'http://divulgacandcontas.tse.jus.br/divulga/#/candidato'
java = 'return document.getElementsByTagName("html")[0].innerHTML'
# xpath search pattern for case method
prot = '//*[contains(@href, "nprot")][not(contains(@href, "undefined"))]'
# xpath search patterns for decision method
xpath = '//*[contains(@value, "Todos")]'
viewPath = '//*[@value="Visualizar"]'
errPath = '//*[@color="RED"]'
# init method share by all class instances
def __init__(self, browser):
"""load into class the url which will be downloaded"""
# store browser info
self.browser = browser
# case number scraper function
def case(self, year, election, unit, candidate, wait=2):
""" method to download case number by candidate information """
# turn method arguments to strings
args = locals()
argsnot = ['self', 'wait']
pageargs = [str(v) for k, v in args.items() if k not in argsnot]
# concatenate everything and form page address
self.page = '/'.join([self.main] + pageargs)
# try information in DOM
try:
# navigate to candidate page
self.browser.get(self.page)
# check if protocol number is visible in webpage
protVis = EC.presence_of_element_located((By.XPATH, self.prot))
# wait up to wait seconds for elements to be located
WebDriverWait(self.browser, wait).until(protVis)
# if protocol number has been found, download it
protElem = self.browser.find_element_by_xpath(self.prot)
# extract text from caseNum and href from protNum
protNum = protElem.get_attribute('href')
# handle exception
        except StaleElementReferenceException:
protNum = 'staleElementException'
# handle exception
        except TimeoutException:
protNum = 'timeoutException'
# handle exception
except:
protNum = 'pageCrashed'
# return case and protocol numbers as list
return protNum
# decision scraper function
def decision(self, url=None, filename='decision', wait=15):
""" method to download tse decisions by url """
# skip out if url has not been provided
if url == None: return 'URL not loaded'
# store url for scraping decisions
self.url = url
# catch error
try:
# navigate to url
self.browser.get(self.url)
# check if elements are located
dec = EC.presence_of_element_located((By.XPATH, self.viewPath))
# wait up to 3s for last element to be located
WebDriverWait(self.browser, wait).until(dec)
# when element is found, click on 'andamento', 'despacho',
# and 'view' so that the browser opens up the information we
# want
self.browser.find_element_by_xpath(self.xpath).click()
self.browser.find_element_by_xpath(self.viewPath).click()
# save inner html to object
html = self.browser.execute_script(self.java)
# define filename to save file
file = str(filename) + '.html'
# save to file
try:
codecs.open(file, 'w', 'cp1252').write(html)
except:
codecs.open(file, 'w', 'utf-8').write(html)
# print message
return 'Download successful'
        except NoSuchElementException:
e = EC.presence_of_element_located((By.XPATH, self.errPath))
WebDriverWait(self.browser, 1).until(e)
return 'Download failed: NoSuchElementException'
        except TimeoutException:
return 'Download failed: timeoutException'
        except StaleElementReferenceException:
return 'Download failed: staleElementException'
except:
return 'Download failed: reason unknown'
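# A minimal usage sketch of the scraper class above, assuming a working
# Selenium driver; the year/election/unit/candidate values are made-up
# placeholders rather than real TSE identifiers.
def _example_scraper_usage():
    """ sketch: fetch a candidate's protocol URL, then download the decision """
    driver = webdriver.Chrome()                     # any Selenium driver works
    tse = scraper(driver)
    # case() builds .../candidato/<year>/<election>/<unit>/<candidate> and
    # returns the protocol href, or an error string on failure
    prot_url = tse.case(2016, 2, 71072, 190000000000, wait=5)
    if prot_url.startswith('http'):
        tse.decision(url=prot_url, filename='decision_example')
    driver.quit()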
# define parser class
class parser:
"""
series of methods to wrangle TSE court documents
attributes:
file: path to judicial decision html file
methods:
parse_summary: parse summary table
parse_updates: parse case updates
parse_details: parse sentence details
parse_related_cases: parse references to other cases
parse_related_docs: parse references to other documents
parse_all: parse everything above
"""
# define static variables used for parsing all tables
soup, tables = [], []
# define regex compile for substituting weird characters in all tables
regex0 = re.compile(r'\n|\t')
regex1 = re.compile(r'\\n|\\t')
regex2 = re.compile('\xa0')
regex3 = re.compile(' +')
regex4 = re.compile('^PROCESSO')
regex5 = re.compile('^MUNIC[IÍ]PIO')
regex6 = re.compile('^PROTOCOLO')
regex7 = re.compile('^(requere|impugnan|recorren|litis)', re.IGNORECASE)
regex8 = re.compile('^(requeri|impugnad|recorri|candid)', re.IGNORECASE)
regex9 = re.compile('^(ju[íi]z|relator)', re.IGNORECASE)
regex10 = re.compile('^assunt', re.IGNORECASE)
regex11 = re.compile('^localiz', re.IGNORECASE)
regex12 = re.compile('^fase', re.IGNORECASE)
regex13 = re.compile('(?<=:)(.)*')
regex14 = re.compile('(Despach|Senten|(Decis.*?=Plenária))')
# init method shared by all class instances
def __init__(self, file):
""" load into class the file which will be parsed """
# try cp1252 encoding first or utf-8 if loading fails
try:
self.file = codecs.open(file, 'r', 'cp1252').read()
except:
self.file = codecs.open(file, 'r', 'utf-8').read()
# call BeautifulSoup to read string as html
self.soup = BeautifulSoup(self.file, 'lxml')
# find all tables in document
self.tables = self.soup.find_all('table')
# isolate latter tables
kwargs = {'class': 'titulo_tabela'}
self.xtable = [
td.text for t in self.tables for td in t.find_all('td', **kwargs)
]
self.xtable = {t: i + 1 for i, t in enumerate(self.xtable)}
#1 parse summary info table:
def parse_summary(self, transpose=False):
""" method to wrangle summary information """
# initial objects for parser
# isolate summary table
table = self.tables[0]
# find all rows in table and extract their text
rows = [tr.text for tr in table.find_all('tr')]
# clean up text
rows = [re.sub(self.regex0, '', row) for row in rows]
rows = [re.sub(self.regex1, '', row) for row in rows]
rows = [re.sub(self.regex2, '', row) for row in rows]
rows = [re.sub(self.regex3,' ', row) for row in rows]
# slice javascript out of list
rows = rows[:-1]
# filter down each row to text that matters
info = {
'case' : list(filter(self.regex4.search, rows)),
'town' : list(filter(self.regex5.search, rows)),
'prot' : list(filter(self.regex6.search, rows)),
'claimants': list(filter(self.regex7.search, rows)),
'defendant': list(filter(self.regex8.search, rows)),
'judge' : list(filter(self.regex9.search, rows)),
'subject' : list(filter(self.regex10.search, rows)),
'district' : list(filter(self.regex11.search, rows)),
'stage' : list(filter(self.regex12.search, rows))
}
# strip keys in dictionary values
for k, v in info.items():
info[k] = [re.search(self.regex13, i).group() for i in info[k]]
info[k] = [i.strip() for i in info[k]]
if len(info[k]) > 1:
info[k] = [' '.join(info[k])]
# replace missing values for None
summary = {k: [None] if not v else v for k, v in info.items()}
# # flatten list of values into scalars
# summary = {k: v for k, v in info.items() for v in info[k]}
# return dictionary of information
return summary
#2 parse case updates
def parse_updates(self):
""" method to wrangle case updates information """
# isolate updates table
table = self.tables[1]
# find all rows in table
cols = [td.text for row in table.find_all('tr') \
for td in row.find_all('td')]
# clean up text
cols = [re.sub(self.regex0, '', col) for col in cols]
cols = [re.sub(self.regex1, '', col) for col in cols]
cols = [re.sub(self.regex2, '', col) for col in cols]
cols = [re.sub(self.regex3,' ', col) for col in cols]
cols = [col.strip() for col in cols]
# create dictionary
update = {'zone': cols[4::3], 'date': cols[5::3], 'update': cols[6::3]}
# return dictionary of information
return update
#3 parse judicial decisions
def parse_details(self):
""" method to wrangle case decisions """
# empty placeholder for the details table
index = None
# find table to parse
for k, v in self.xtable.items():
if re.search(self.regex14, k):
index = int(v)
# end program if index is empty
if index is None: return {'shead': [None], 'sbody': [None]}
# choose updates table to parse
table = self.tables[index]
# extract rows from table
kwarg = {'class': 'tdlimpoImpar'}
shead = [tr.text for tr in table.find_all('tr', **kwarg)]
kwarg = {'class': 'tdlimpoPar'}
sbody = [tr.text for tr in table.find_all('tr', **kwarg)]
# clean up headers
shead = [re.sub(self.regex0, '', i) for i in shead]
shead = [re.sub(self.regex1, '', i) for i in shead]
shead = [re.sub(self.regex2, '', i) for i in shead]
shead = [re.sub(self.regex3,' ', i) for i in shead]
shead = [i.strip() for i in shead]
# clean up body
sbody = [re.sub(self.regex0, '', i) for i in sbody]
sbody = [re.sub(self.regex1, '', i) for i in sbody]
sbody = [re.sub(self.regex2, '', i) for i in sbody]
sbody = [re.sub(self.regex3,' ', i) for i in sbody]
sbody = [i.strip() for i in sbody]
# assign updates to dictionary
if len(shead) == len(sbody):
details = {'shead': shead, 'sbody': sbody}
else:
sbody = [i + ' ' + j for i, j in zip(sbody[::2], sbody[1::2])]
details = {'shead': shead, 'sbody': sbody}
# return dictionary of information
return details
#4 parse related cases
def parse_related_cases(self):
""" method to wrangle case decisions """
### initial objects for parser
# try catch error if table doesn't exist
try:
tables = self.tables[2:]
# define regex to find table title
regex3 = re.compile('apensad', re.IGNORECASE)
regex4 = re.compile(r'\n', re.IGNORECASE)
# find the position of tables with decisions
decisions = [i for i in range(len(tables)) if \
re.search(regex3, tables[i].td.get_text())]
# define empty list of docs
relatedcases = []
# for loop finding references to all related cases
for tr in tables[decisions[0]].find_all('tr')[1:]:
td = [td.text for td in tr.find_all('td')]
relatedcases.append(td)
# find url just in case and subset the duplicates to unique values
url = [a['href'] for a in tables[decisions[0]].find_all('a')]
url = [x for x, y in zip(url, range(len(url))) if int(y) % 2 != 0]
# append link at the end of the table
for x, i in zip(range(len(relatedcases[1:])), range(len(url))):
relatedcases[x + 1].append(url[i])
# build corrected dataset
relatedcases = pd.DataFrame(relatedcases[1:])
# remove weird characters
relatedcases = relatedcases.replace(self.regex0, ' ', regex = True)
relatedcases = relatedcases.replace(self.regex1, ' ', regex = True)
relatedcases = relatedcases.replace(self.regex2, ' ', regex = True)
relatedcases = relatedcases.replace(' +', ' ', regex = True)
# assign column names
relatedcases.columns = ['casetype', 'casenumber', 'caseurl']
# return outcome
return pd.DataFrame(relatedcases)
# throw error if table is not available
except:
            return 'There are no related cases here.'
#5 parse related documents
def parse_related_docs(self):
""" method to parse related docs into a single dataset """
### initial objects for parser
# try catch error if table doesn't exist
try:
# isolate updates and further tables
tables = self.tables[2:]
# define regex to find table title
regex3 = re.compile('Documentos', re.IGNORECASE)
regex4 = re.compile(r'\n', re.IGNORECASE)
# find the position of tables with decisions
decisions = [i for i in range(len(tables)) if \
re.search(regex3, tables[i].td.get_text())]
# define empty list of docs
docs = []
# for loop finding references to all docs
for tr in tables[decisions[0]].find_all('tr')[1:]:
td = [td.text for td in tr.find_all('td')]
docs.append(td)
# build corrected dataset
docs = pd.DataFrame(docs[1:])
# remove weird characters
docs = docs.replace(self.regex0, ' ', regex = True)
docs = docs.replace(self.regex1, ' ', regex = True)
docs = docs.replace(self.regex2, ' ', regex = True)
docs = docs.replace(' +', ' ', regex = True)
# assign column names
docs.columns = ['reference', 'type']
# return outcome
return pd.DataFrame(docs)
# throw error if table is not available
except:
return 'There are no related docs here.'
#6 return full table
def parse_all(self):
""" method to parse all tables into a single dataset """
### call other parser functions
        # parse tables we know exist; wrap the returned dicts in DataFrames so
        # that the .insert() and pd.concat() calls below work as intended
        table1 = pd.DataFrame(self.parse_summary(transpose=True))
        table2 = pd.DataFrame(self.parse_updates())
# insert column for identifying case information (updates)
table2.insert(0, 'caseinfo', 'updates')
# parse tables we are not sure exist
# try catch if tables don't exist
# table three
try:
# parse case details table
            table3 = pd.DataFrame(self.parse_details())
# insert column for identifying case information (details)
table3.insert(0, 'caseinfo', 'details')
# bind onto previous tables
table2 = pd.concat(
[table2, table3], axis = 0, ignore_index = True, sort = False
)
# skip error if table doesn't exist
except:
pass
# table four
try:
# parse related cases table
table4 = self.parse_related_cases()
# insert column for identifying case information (related cases)
table4.insert(0, 'caseinfo', 'relatedcases')
# bind onto previous tables
table2 = pd.concat(
[table2, table4], axis = 0, ignore_index = True, sort = False
)
# skip error if table doesn't exist
except:
pass
# table five
try:
# parse related docs table
table5 = self.parse_related_docs()
# insert column for identifying case information (related docs)
table5.insert(0, 'caseinfo', 'relateddocs')
# bind onto previous tables
table2 = pd.concat(
[table2, table5], axis = 0, ignore_index = True, sort = False
)
# skip error if table doesn't exist
except:
pass
# create list of column names
names = list(table1)
names.extend(list(table2))
# bind everything together
table = pd.concat([table1]*len(table2), ignore_index = True)
table = pd.concat([table, table2], axis = 1, ignore_index = True)
# reassign column names
table.columns = names
# reorder table columns
ordered = [names[9]]
ordered.extend(names[0:8])
ordered.extend(names[10:])
# change order of columns
table = table[ordered]
# return outcome
return table
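# A minimal usage sketch of the parser class above; 'decision_example.html'
# is a hypothetical file such as the one produced by scraper.decision().
def _example_parser_usage():
    """ sketch: parse a previously downloaded TSE decision page """
    doc = parser('decision_example.html')
    summary = doc.parse_summary()    # dict with case, town, judge, etc.
    updates = doc.parse_updates()    # dict of zone/date/update lists
    combined = doc.parse_all()       # single pandas DataFrame with everything
    return summary, updates, combined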
| 34.235955
| 79
| 0.58079
|
b6acef5a6cec12d411cc6506677183b2c43a63a3
| 2,659
|
py
|
Python
|
roles/openshift_health_checker/test/ovs_version_test.py
|
KoteikinyDrova/openshift-ansible
|
3db2bb10c0ad5e7ed702bfccdec03562533e8539
|
[
"Apache-2.0"
] | 1
|
2019-03-13T10:14:35.000Z
|
2019-03-13T10:14:35.000Z
|
roles/openshift_health_checker/test/ovs_version_test.py
|
KoteikinyDrova/openshift-ansible
|
3db2bb10c0ad5e7ed702bfccdec03562533e8539
|
[
"Apache-2.0"
] | 1
|
2021-09-23T23:36:29.000Z
|
2021-09-23T23:36:29.000Z
|
roles/openshift_health_checker/test/ovs_version_test.py
|
KoteikinyDrova/openshift-ansible
|
3db2bb10c0ad5e7ed702bfccdec03562533e8539
|
[
"Apache-2.0"
] | 4
|
2018-10-27T00:29:24.000Z
|
2022-01-07T07:39:51.000Z
|
import pytest
from openshift_checks.ovs_version import OvsVersion, OpenShiftCheckException
def test_openshift_version_not_supported():
def execute_module(*_):
return {}
openshift_release = '111.7.0'
task_vars = dict(
openshift=dict(common=dict(service_type='origin')),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
openshift_deployment_type='origin',
)
with pytest.raises(OpenShiftCheckException) as excinfo:
OvsVersion(execute_module, task_vars).run()
assert "no recommended version of Open vSwitch" in str(excinfo.value)
def test_invalid_openshift_release_format():
def execute_module(*_):
return {}
task_vars = dict(
openshift=dict(common=dict(service_type='origin')),
openshift_image_tag='v0',
openshift_deployment_type='origin',
)
with pytest.raises(OpenShiftCheckException) as excinfo:
OvsVersion(execute_module, task_vars).run()
assert "invalid version" in str(excinfo.value)
@pytest.mark.parametrize('openshift_release,expected_ovs_version', [
("3.5", "2.6"),
("3.6", "2.6"),
("3.4", "2.4"),
("3.3", "2.4"),
("1.0", "2.4"),
])
def test_ovs_package_version(openshift_release, expected_ovs_version):
task_vars = dict(
openshift=dict(common=dict(service_type='origin')),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
)
return_value = object()
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'rpm_version'
assert "package_list" in module_args
for pkg in module_args["package_list"]:
if pkg["name"] == "openvswitch":
assert pkg["version"] == expected_ovs_version
return return_value
result = OvsVersion(execute_module, task_vars).run()
assert result is return_value
@pytest.mark.parametrize('group_names,is_containerized,is_active', [
(['masters'], False, True),
# ensure check is skipped on containerized installs
(['masters'], True, False),
(['nodes'], False, True),
(['masters', 'nodes'], False, True),
(['masters', 'etcd'], False, True),
([], False, False),
(['etcd'], False, False),
(['lb'], False, False),
(['nfs'], False, False),
])
def test_ovs_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
task_vars = dict(
group_names=group_names,
openshift=dict(common=dict(is_containerized=is_containerized)),
)
assert OvsVersion(None, task_vars).is_active() == is_active
| 30.563218
| 93
| 0.668296
|
3f580dde470f2245e8ee507d4b55a7ef25538f22
| 1,128
|
py
|
Python
|
venv/Lib/site-packages/networkx/algorithms/tree/tests/test_operations.py
|
amelliaaas/tugastkc4
|
f442382c72379e911f3780543b95345a3b1c9407
|
[
"Apache-2.0"
] | 10,024
|
2015-01-01T13:06:43.000Z
|
2022-03-31T12:45:25.000Z
|
venv/Lib/site-packages/networkx/algorithms/tree/tests/test_operations.py
|
amelliaaas/tugastkc4
|
f442382c72379e911f3780543b95345a3b1c9407
|
[
"Apache-2.0"
] | 3,191
|
2015-01-01T18:13:11.000Z
|
2022-03-31T22:06:00.000Z
|
venv/Lib/site-packages/networkx/algorithms/tree/tests/test_operations.py
|
amelliaaas/tugastkc4
|
f442382c72379e911f3780543b95345a3b1c9407
|
[
"Apache-2.0"
] | 3,272
|
2015-01-01T05:04:53.000Z
|
2022-03-31T17:46:35.000Z
|
"""Unit tests for the :mod:`networkx.algorithms.tree.operations` module.
"""
import networkx as nx
from networkx.utils import nodes_equal, edges_equal
class TestJoin:
"""Unit tests for the :func:`networkx.tree.join` function."""
def test_empty_sequence(self):
"""Tests that joining the empty sequence results in the tree
with one node.
"""
T = nx.join([])
assert len(T) == 1
assert T.number_of_edges() == 0
def test_single(self):
"""Tests that joining just one tree yields a tree with one more
node.
"""
T = nx.empty_graph(1)
actual = nx.join([(T, 0)])
expected = nx.path_graph(2)
assert nodes_equal(list(expected), list(actual))
assert edges_equal(list(expected.edges()), list(actual.edges()))
def test_basic(self):
"""Tests for joining multiple subtrees at a root node."""
trees = [(nx.full_rary_tree(2, 2 ** 2 - 1), 0) for i in range(2)]
actual = nx.join(trees)
expected = nx.full_rary_tree(2, 2 ** 3 - 1)
assert nx.is_isomorphic(actual, expected)
| 29.684211
| 73
| 0.609043
|
ddd3b35cf94592edb9a7321bb83b41ff4d23626a
| 4,542
|
py
|
Python
|
support_functions/compute_mAP.py
|
piyalong/TAMIDS_Traffic_Management
|
0dfa3e6ae2ae018a469b5719468021888c5d3ca2
|
[
"MIT"
] | null | null | null |
support_functions/compute_mAP.py
|
piyalong/TAMIDS_Traffic_Management
|
0dfa3e6ae2ae018a469b5719468021888c5d3ca2
|
[
"MIT"
] | null | null | null |
support_functions/compute_mAP.py
|
piyalong/TAMIDS_Traffic_Management
|
0dfa3e6ae2ae018a469b5719468021888c5d3ca2
|
[
"MIT"
] | null | null | null |
import numpy as np
def iou (boxes1, boxes2):
'''
boxes1: m x 4 numpy array
boxes2: n x 4 numpy array
'''
boxes1 = np.array(boxes1, dtype='float32')
boxes2 = np.array(boxes2, dtype='float32')
m = boxes1.shape[0] # number of boxes1
n = boxes2.shape[0] # number of boxes2
boxes1_area = (boxes1[:,2]-boxes1[:,0])*(boxes1[:,3]-boxes1[:,1])
boxes1_area = boxes1_area.repeat(n).reshape((m,n)) # converts to mxn matrix
boxes2_area = (boxes2[:,2]-boxes2[:,0])*(boxes2[:,3]-boxes2[:,1])
boxes2_area = np.tile(boxes2_area, (1,m)).reshape((m,n)) # converts to mxn matrix
boxes1 = np.tile(boxes1, (1,n)).reshape((m,n,4))
boxes2 = np.tile(boxes2, (m,1)).reshape((m,n,4))
top = np.maximum(boxes1[:,:,:2],boxes2[:,:,:2])
bot = np.minimum(boxes1[:,:,2:],boxes2[:,:,2:])
diff = bot - top
diff[diff<0] = 0
intersection_area = diff[:,:,0] * diff[:,:,1]
union_area = boxes1_area + boxes2_area - intersection_area
# avoid division by zero
idx = np.logical_or(boxes1_area==0, boxes2_area==0)
union_area[idx] = 1
return intersection_area/union_area
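# Worked example for iou(), assuming boxes in [x0, y0, x1, y1] form:
# [0, 0, 2, 2] and [1, 1, 3, 3] overlap on a 1x1 square, so
# IoU = 1 / (4 + 4 - 1) = 1/7 ~ 0.1429.
def _iou_example():
    '''
    sketch: IoU of two partially overlapping boxes
    '''
    result = iou([[0, 0, 2, 2]], [[1, 1, 3, 3]])   # shape (1, 1)
    assert abs(result[0, 0] - 1.0 / 7.0) < 1e-6
    return result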
def is_TP (ious, iou_threshold=0.5):
'''
INPUT:
m x n numpy array.
- IoU between m detected boxes and n groud truth boxes
- m detected boxes are sorted in descending order of confidence
OUTPUT:
m x 1 boolean array
- indicates if corresponding detected box is true positve
'''
m, n = ious.shape
result = np.zeros(m,dtype=bool) # to store the result
for i in range(m):
idx = np.argmax( ious[i,:] ) # index of the max iou
        if ious[i,idx] >= iou_threshold:
            result[i] = True
            ious[:,idx] = -1 # turn off a ground truth box only once it has actually been matched
return result
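# Worked example for is_TP(), assuming detections sorted by descending
# confidence: two detections hitting the same ground-truth box yield one
# true positive, because the matched column is switched off after use.
def _is_tp_example():
    '''
    sketch: duplicate detections of one ground truth give [True, False]
    '''
    ious = np.array([[0.9], [0.8]])   # 2 detections vs 1 ground-truth box
    flags = is_TP(ious.copy())        # copy, since is_TP modifies its input
    assert list(flags) == [True, False]
    return flags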
def evaluate (groundtruths, detections, included_class_names):
'''
groundtruths['image_name']:
shape = (m, 1+4)
[class_id, x0, y0, x1, y1]
detections['image_name'] :
shape=(n,1+4+1)
[class_id, x0, y0, x1, y1, confidence]
'''
auc = {c: 0 for c in included_class_names}
precision = {c:[] for c in included_class_names}
recall = {c:[] for c in included_class_names}
real_precision = {c:0 for c in included_class_names}
real_recall = {c:0 for c in included_class_names}
for c in included_class_names:
detections_tps = np.array([])
detections_confs = []
num_gt = 0
num_dt = 0
for i in groundtruths:
if groundtruths[i]==0:continue
bx_gt = np.array(groundtruths[i])
bx_gt = bx_gt[ bx_gt[:,0] == c,: ]
num_gt += len(bx_gt)
bx_dt = np.array(detections[i])
if bx_dt.shape[0] == 0: continue
bx_dt = bx_dt[ bx_dt[:,0]==c,: ]
num_dt = num_dt + len(bx_dt)
if bx_dt.shape[0] == 0: continue
if bx_gt.shape[0] != 0:
ious = iou(bx_dt[:,1:5] , bx_gt[:,1:5] )
tps = is_TP(ious)
else:
tps = np.zeros(len(bx_dt))
confs = bx_dt[:,-1]
detections_tps = np.append(detections_tps,tps)
detections_confs = np.append(detections_confs,confs)
# sort
idc = np.argsort(detections_confs)[::-1]
detections_tps = detections_tps[idc]
num_tp = 0
for i, tp in enumerate(detections_tps):
if tp: num_tp += 1
            recall[c].append( num_tp/(num_gt+0.000000000001) ) # epsilon in the denominator guards against num_gt == 0
precision[c].append( num_tp/(i+1) )
if num_gt==0 or len(bx_dt)==0:
continue
else:
real_precision[c] = num_tp/(num_dt)
real_recall[c] = num_tp/(num_gt)
for i in range(len(precision[c])):
precision[c][i] = max(precision[c][i:])
for i in range(1,len(precision[c])):
auc[c] += precision[c][i] * ( recall[c][i]-recall[c][i-1] )
for c in included_class_names:
# recall[c].append(recall[c][-1])
recall[c].append(1.0)
# precision[c].append(0.0)
precision[c].append(0.0)
# real_auc ={}
# for each in auc:
# if auc[each]>0.01:
# real_auc.update({each:auc[each]})
real_auc=auc
m_a_p = sum(real_auc.values())/len(real_auc)
return m_a_p, real_auc, precision, recall,real_precision,real_recall
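# A minimal usage sketch of evaluate(); the image name, class id 0, box
# coordinates and confidence are made-up placeholders. Ground-truth rows are
# [class_id, x0, y0, x1, y1]; detection rows add a trailing confidence.
def _evaluate_example():
    '''
    sketch: mAP over a single image with a single class
    '''
    groundtruths = {'img_001': [[0, 10, 10, 50, 50]]}
    detections = {'img_001': [[0, 12, 11, 49, 52, 0.95]]}
    m_a_p, auc, precision, recall, prec, rec = evaluate(
        groundtruths, detections, included_class_names=[0])
    return m_a_p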
| 33.153285
| 85
| 0.549317
|
848ce1334fc63df1de6c30f5dcd1f0a397f8083e
| 153
|
py
|
Python
|
docassemble_base/docassemble/base/rtfng/exceptions.py
|
amsclark/docassemble
|
ae5c194831faabb52681a6c827ec30c106273eb7
|
[
"MIT"
] | 5
|
2015-01-27T08:08:08.000Z
|
2022-03-30T10:43:31.000Z
|
docassemble_base/docassemble/base/rtfng/exceptions.py
|
amsclark/docassemble
|
ae5c194831faabb52681a6c827ec30c106273eb7
|
[
"MIT"
] | null | null | null |
docassemble_base/docassemble/base/rtfng/exceptions.py
|
amsclark/docassemble
|
ae5c194831faabb52681a6c827ec30c106273eb7
|
[
"MIT"
] | 4
|
2015-12-11T07:41:24.000Z
|
2022-03-30T09:48:55.000Z
|
"""
pyrtf-ng Errors and Exceptions
"""
class RTFError(Exception):
pass
class ParseError(RTFError):
"""
Unable to parse the RTF data.
"""
| 11.769231
| 32
| 0.640523
|
2ae16b5db00ad76f88b85a2b42480ce6ae441da9
| 140
|
py
|
Python
|
peeros/peeros.py
|
crioto/guardian
|
386f3f7959e7abf73fddfc62907abb8b308f6ce6
|
[
"Apache-2.0"
] | null | null | null |
peeros/peeros.py
|
crioto/guardian
|
386f3f7959e7abf73fddfc62907abb8b308f6ce6
|
[
"Apache-2.0"
] | null | null | null |
peeros/peeros.py
|
crioto/guardian
|
386f3f7959e7abf73fddfc62907abb8b308f6ce6
|
[
"Apache-2.0"
] | null | null | null |
from guardlib import check as t
from peeros import agent_installed
t.RegisterCheck(agent_installed.Check, "Checking Subutai Installation")
| 35
| 71
| 0.842857
|
e014ca208c3bf0d84d2b37fa760e5632df2d4411
| 9,889
|
py
|
Python
|
uhppote_rfid/controller_socket.py
|
andrewvaughan/rfid
|
ff6e78074a3030dbb43b40fa47198f28e38b6026
|
[
"Apache-2.0"
] | 6
|
2017-12-06T21:36:05.000Z
|
2022-01-10T11:25:44.000Z
|
uhppote_rfid/controller_socket.py
|
andrewvaughan/uhppote-rfid
|
ff6e78074a3030dbb43b40fa47198f28e38b6026
|
[
"Apache-2.0"
] | 33
|
2017-09-16T19:01:47.000Z
|
2017-12-05T16:15:17.000Z
|
uhppote_rfid/controller_socket.py
|
andrewvaughan/rfid
|
ff6e78074a3030dbb43b40fa47198f28e38b6026
|
[
"Apache-2.0"
] | 3
|
2018-01-08T19:05:33.000Z
|
2020-09-24T12:58:04.000Z
|
# -*- coding: utf-8 -*-
"""
Provides socket and communication support for UHPPOTE RFID control boards.
:copyright: (c) 2017 by Andrew Vaughan.
:license: Apache 2.0, see LICENSE for more details.
.. module:: ControllerSocket
"""
import logging
import re
import socket
class ControllerSocket(object):
"""
Manages socket communication and transport for UHPPOTE RFID boards.
.. class:: ControllerSocket
.. versionadded:: 0.1.0
"""
def __init__(self, host, port=60000):
"""
Initialize a new Socket given an IP address and port for the control board.
:param host: the hostname or IP address of the control board
:type host: str
:param port: the port of the control board (default: 60000)
:type port: int
:raises ValueError: if provided an invalid host or port
.. versionadded:: 0.1.0
.. function:: __init__(host, port)
"""
self.logger = logging.getLogger("UHPPOTE.ControllerSocket")
self.setHost(host)
self.setPort(port)
self.connected = False
self.logger.debug("Creating socket on %s:%d (not connected)" % (self.getHost(), self.getPort()))
self.socket = socket.socket(
socket.AF_INET,
socket.SOCK_STREAM
)
def connect(self, attempts=3):
"""
Attempt to connect to the target as-configured.
:param attempts: the number of times to retry connecting before throwing an exception (default: 3)
:type attempts: int
:raises ValueError: if attempts is below 1
:raises SocketConnectionException: if unable to connect after the prescribed number of retries
.. versionadded:: 0.1.0
.. function:: connect()
"""
self.logger.debug("Connecting to %s:%d via socket" % (self.host, self.port))
if int(attempts) <= 0:
raise ValueError("Invalid number of attempts for socket connection: %d" % int(attempts))
for attempt in range(1, attempts + 1):
self.logger.debug("Attempt #%d..." % attempt)
try:
self.socket.connect((self.host, self.port))
self.connected = True
self.logger.debug("Connection successful.")
return
            except Exception as e:
self.logger.warn("Connection attempt #%d to %s:%d unsuccessful. Error message: %s" % (attempt, self.host, self.port, str(e)))
pass
raise SocketConnectionException("Unable to connect to %s:%d after %d attempts." % (self.host, self.port, int(attempts)))
def close(self):
"""
Attempt to close the open connection.
:returns: True if the socket was closed, or False if it remains open
:rtype: bool
.. versionadded:: 0.1.0
.. function:: close()
"""
self.logger.debug("Closing socket...")
self.socket.close()
self.connected = False
def send(self, msg):
"""
Send a message through a connected socket.
:param msg: the message to send through the socket
:type msg: str or bytearray or bytes
:raises ValueError: if the message being sent is in an invalid format
:raises SocketConnectionException: if the socket does not have a working connection
:raises SocketTransmitException: if the socket connection is broken during transmission
.. versionadded:: 0.1.0
.. function:: send(msg)
"""
if not isinstance(msg, (str, bytes, bytearray)):
raise ValueError("Invalid message sent to socket. Expected str, bytes, or bytearray; received %s." % type(msg))
messageLength = len(msg)
if messageLength <= 0:
raise ValueError("Expected message to be sent. Received blank message.")
if not self.isConnected():
raise SocketConnectionException("Socket not connected. Cannot send.")
self.logger.debug("Attempting to send message through socket of length %d." % messageLength)
self.logger.log(1, str(msg))
byteCount = 0
while byteCount < messageLength:
sent = self.socket.send(msg[byteCount:])
if sent == 0:
raise SocketTransmitException("Connection broken.")
self.logger.log(1, "%d bytes sent in chunk..." % sent)
byteCount += sent
self.logger.debug("Send complete (%d bytes)." % byteCount)
def receive(self, size=64):
"""
        Receive a message through a connected socket, blocking until `size` bytes have been received.
:param size: the size, in bytes, expected for the incoming message
:type size: int
:returns: the received message
:rtype: bytearray
:raises ValueError: if the size is not a positive multiple of 8
:raises SocketConnectionException: if the socket does not have a working connection
:raises SocketTransmitException: if the socket connection is broken during transmission
.. versionadded:: 0.1.0
.. function:: receive()
"""
self.logger.debug("Listening for message via socket of length %s..." % str(size))
if isinstance(size, str):
if not size.isdigit():
raise ValueError("Invalid size. Non-Integer string provided: \"%s\"." % size)
size = int(size)
if not isinstance(size, (int, long)):
raise ValueError("Invalid size. Expected positive integer; received \"%s\"." % type(size))
if size <= 0:
raise ValueError("Packet size must be a positive integer; received \"%d\"." % size)
if size % 8 != 0:
raise ValueError("Packet size must be a multiple of 8; received \"%d\"." % size)
if not self.isConnected():
raise SocketConnectionException("Socket not connected. Cannot send.")
received = bytearray()
received_bytes = 0
while received_bytes < size:
chunk = self.socket.recv(min(size, 2048))
if chunk == '':
raise SocketTransmitException("Unexpected end of connection. Received %d bytes, but expected %d." % (received_bytes, size))
received.extend(chunk)
received_bytes += len(chunk)
            self.logger.log(1, "%d bytes received in chunk..." % len(chunk))
return received
def getHost(self):
"""
Return the hostname of the socket.
:returns: the hosthame of the socket
:rtype: str
.. versionadded:: 0.1.0
.. function:: getHost()
"""
return self.host
def setHost(self, host):
"""
Set the hostname for the socket.
:param host: the hostname for the socket
:type host: str or bytearray
:raises ValueError: if provided an invalid host
.. versionadded: 0.1.0
.. function:: setHost(host)
"""
if not isinstance(host, (str, bytearray)):
raise ValueError("Invalid host provided. Expected string or bytearray; received %s." % type(host))
if isinstance(host, bytearray):
if len(host) != 4:
raise ValueError("Invalid host provided. Bytearray must contain 4 values; received %d values." % len(host))
host = "%d.%d.%d.%d" % (host[0], host[1], host[2], host[3])
if len(host) <= 0 or len(host) > 255:
raise ValueError("Invalid host provided. Length cannot be 0 or longer than 255 characters; received \"%s\"." % host)
# Valid hostnames can have one dot at the end; strip it if it exists
if host[-1] == ".":
host = host[:-1]
allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
if not all(allowed.match(c) for c in host.split(".")):
raise ValueError("Invalid host provided. Received \"%s\"." % host)
self.host = host
def getPort(self):
"""
Return the port for the socket.
:returns: the port for the socket
:rtype: int
.. versionadded:: 0.1.0
.. function:: getPort()
"""
return self.port
def setPort(self, port):
"""
Set the port for the socket.
:param port: the port for the socket
:type port: int
:raises ValueError: if provided an invalid port
.. versionadded:: 0.1.0
.. function:: setPort(port)
"""
if isinstance(port, str):
if not port.isdigit():
raise ValueError("Invalid port. Non-Integer string provided: \"%s\"." % port)
elif not isinstance(port, (int, long)):
raise ValueError("Invalid port. Expected positive integer; received \"%s\"." % type(port))
if int(port) <= 0:
raise ValueError("Invalid port. Expected positive integer; received \"%d\"." % port)
if int(port) > 65535:
raise ValueError("Invalid port. Exceeds maximum of 65535; received \"%d\"." % port)
self.port = int(port)
def isConnected(self):
"""
Return whether the socket is currently connected to a server.
:returns: whether the socket is currently connected to a server
:rtype: bool
.. versionadded: 0.1.0
.. function:: isConnected()
"""
return self.connected
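# A minimal usage sketch of ControllerSocket, assuming a UHPPOTE board is
# reachable at the made-up address 192.168.1.50 on the default port 60000 and
# that `command` already holds a framed request for the board.
def _example_socket_usage(command):
    """
    Sketch: connect, send one command frame, read a 64-byte reply.
    """
    sock = ControllerSocket("192.168.1.50")   # default port 60000
    sock.connect(attempts=3)
    try:
        sock.send(command)                    # str, bytes or bytearray
        reply = sock.receive(64)              # blocks until 64 bytes arrive
    finally:
        sock.close()
    return reply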
class SocketConnectionException(Exception):
"""
Custom exception raised if a socket fails to connect to its target.
.. versionadded:: 0.1.0
"""
pass
class SocketTransmitException(Exception):
"""
Custom exception raised if a problem occurs during transmission.
.. versionadded:: 0.1.0
"""
pass
| 31.195584
| 142
| 0.586713
|
22a1cdaf5496c0529186c45471bff77025a7eb64
| 1,150
|
py
|
Python
|
cleaner20200517.py
|
superlova/WeatherSpider
|
fa8ed79f780d8ef065a41208ed7e80f94581fac4
|
[
"MIT"
] | 1
|
2020-05-26T01:51:13.000Z
|
2020-05-26T01:51:13.000Z
|
cleaner20200517.py
|
superlova/WeatherSpider
|
fa8ed79f780d8ef065a41208ed7e80f94581fac4
|
[
"MIT"
] | null | null | null |
cleaner20200517.py
|
superlova/WeatherSpider
|
fa8ed79f780d8ef065a41208ed7e80f94581fac4
|
[
"MIT"
] | null | null | null |
import pymysql
def connect_sql():
conn = pymysql.connect(host='localhost', user='root', password='xn410a', db='zyt_spiders', charset='utf8')
return conn
def delete_old_items(conn, table_name, time):
cursor = conn.cursor()
sql = 'delete from {} where record_time < "{}";'.format(table_name, time)
#print(sql)
cursor.execute(sql)
sql_optimize = 'optimize table {}'.format(table_name)
cursor.execute(sql_optimize)
if __name__ == "__main__":
conn = connect_sql()
table_name_list = ["caiyun_15d_weather",
"caiyun_48h_weather",
"caiyun_real_weather",
"heweather_3d",
"heweather_real",
"nmc_2d_weather",
"nmc_real_weather",
"openweather_5d",
"openweather_real",
"seniverse_15d_weather",
"seniverse_real_weather"]
for table_name in table_name_list:
delete_old_items(conn=conn, table_name=table_name, time="2020-05-17 00:00:00")
print(table_name + " is cleaned.")
| 34.848485
| 110
| 0.568696
|
acb28bfe73ee9b94496cc101fa7c765014855c4e
| 163
|
py
|
Python
|
PyPortal_NeoPixel_Color_Picker/secrets.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 665
|
2017-09-27T21:20:14.000Z
|
2022-03-31T09:09:25.000Z
|
PyPortal_NeoPixel_Color_Picker/secrets.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 641
|
2017-10-03T19:46:37.000Z
|
2022-03-30T18:28:46.000Z
|
PyPortal_NeoPixel_Color_Picker/secrets.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 734
|
2017-10-02T22:47:38.000Z
|
2022-03-30T14:03:51.000Z
|
# This file is where you keep secret settings, passwords, and tokens!
# If you put them in the code you risk committing that info or sharing it
secrets = {
}
| 27.166667
| 73
| 0.723926
|
03421233341f21e8b5b50f7bb6921e54b5610889
| 1,230
|
py
|
Python
|
lab_6/GSDLabs-master/lab_5/boards/views.py
|
jennifernolan/Software-for-the-Global-Market
|
9a219dd0c0ceb284b3458cd7ad3fe103859fbfe8
|
[
"MIT"
] | null | null | null |
lab_6/GSDLabs-master/lab_5/boards/views.py
|
jennifernolan/Software-for-the-Global-Market
|
9a219dd0c0ceb284b3458cd7ad3fe103859fbfe8
|
[
"MIT"
] | null | null | null |
lab_6/GSDLabs-master/lab_5/boards/views.py
|
jennifernolan/Software-for-the-Global-Market
|
9a219dd0c0ceb284b3458cd7ad3fe103859fbfe8
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.shortcuts import render, redirect, get_object_or_404
from .forms import NewTopicForm
from .models import Board, Topic, Post
def home(request):
boards = Board.objects.all()
#Return the page home.html
return render(request, 'home.html', {'boards': boards})
def board_topics(request, pk):
board = get_object_or_404(Board, pk=pk)
return render(request, 'topics.html', {'board': board})
def new_topic(request, pk):
board = get_object_or_404(Board, pk=pk)
user = User.objects.first() # TODO: get the currently logged in user
if request.method == 'POST':
form = NewTopicForm(request.POST)
if form.is_valid():
topic = form.save(commit=False)
topic.board = board
topic.starter = user
topic.save()
post = Post.objects.create(
message=form.cleaned_data.get('message'),
topic=topic,
created_by=user
)
return redirect('board_topics', pk=board.pk) # TODO: redirect to the created topic page
else:
form = NewTopicForm()
return render(request, 'new_topic.html', {'board': board, 'form': form})
| 38.4375
| 100
| 0.635772
|
9f9ab3f3193c9f62980d69cad064630cc878980d
| 11,221
|
py
|
Python
|
django/app/views.py
|
b3j0f/debate
|
a77b61a917bb7a2ebfbc16d28211fcb1634089b8
|
[
"MIT"
] | 1
|
2017-05-04T13:50:54.000Z
|
2017-05-04T13:50:54.000Z
|
django/app/views.py
|
b3j0f/debate
|
a77b61a917bb7a2ebfbc16d28211fcb1634089b8
|
[
"MIT"
] | null | null | null |
django/app/views.py
|
b3j0f/debate
|
a77b61a917bb7a2ebfbc16d28211fcb1634089b8
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""View module."""
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib.auth import authenticate, login, logout
from core.models import Account, Topic, Space, Vote, Stat, Event
from .utils import sendemail
from uuid import uuid4 as uuid
def requirelogin(func=None):
"""Decorator for requiring login."""
nextpage = func.__name__[:-len('view')]
def _requirelogin(request, *args, **kwargs):
"""Local require login."""
if isinstance(request.user, User):
return func(request, *args, **kwargs)
else:
return redirect('login.html?next={0}'.format(nextpage))
return _requirelogin
def basecontext(request, page='home', tableofcontents=False):
"""Get base context.
:rtype: dict
"""
spacecount = Space.objects.count()
topiccount = Topic.objects.count()
accountcount = Account.objects.count()
votecount = Vote.objects.count()
eventcount = Event.objects.count()
result = {
'spacecount': spacecount, 'topiccount': topiccount,
'eventcount': eventcount,
'votecount': votecount, 'accountcount': accountcount,
'page': page,
'tableofcontents': tableofcontents,
'next': request.GET.get('next', page),
'host': settings.HOST, 'api': settings.API,
'DEBUG': settings.DEBUG
}
return result
def rendernextpage(request, context):
"""Redirect to the nextage."""
nextpage = context.pop('next', 'home') or 'home'
return render(request, '{0}.html'.format(nextpage), context=context)
def loginview(request):
"""Login view."""
username = email = request.POST.get('email')
context = basecontext(request, 'login')
result, user = None, None
if email is not None:
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user is None:
try:
user = User.objects.get(email=email, username=username)
except User.DoesNotExist:
user = User(email=email, username=username)
user.set_password(password)
user.save()
account = Account(user=user)
account.save()
else:
context['errors'] = ['Mauvais mot de passe !']
context['csrf_token'] = request.POST['csrfmiddlewaretoken']
context['username'] = username
context['email'] = email
user = None
if user is None:
result = render(request, 'login.html', context)
else:
login(request, user, 'django.contrib.auth.backends.ModelBackend')
result = redirect('/{0}'.format(request.GET.get('next', '')))
return result
def logoutview(request):
"""Login view."""
logout(request)
context = basecontext(request)
context['successes'] = ['Vous êtes déconnecté !']
return redirect('/{0}'.format(request.GET.get('next', '')))
def resetpwdview(request):
"""Reset password view."""
result = None
lost_key = request.GET.get('lost_key', request.POST.get('lost_key'))
context = basecontext(request, 'resetpwd')
if lost_key is None:
result = render(request, 'resetpwd.html', context=context)
else:
context['lost_key'] = lost_key
email = request.GET.get('email', request.POST.get('email'))
if email is None:
context['errors'] = ['Email manquant !']
context['page'] = 'home'
result = render(request, 'home.html', context=context)
else:
context['email'] = email
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
context['errors'] = [
'Email {0} non enregistré !'.format(email)
]
context['page'] = 'home'
result = render(request, 'home.html', context=context)
else:
account = user.account
password = request.POST.get('password')
if 'email' in request.GET:
result = render(request, 'resetpwd.html', context=context)
elif password is None:
lost_key = str(uuid())
account.lost_key = lost_key
account.save()
url = '{0}/resetpwd?lost_key={1}&email={2}'.format(
settings.HOST, lost_key, email
)
subject = 'Réinitialiser le mot de passe de lechangement'
msg = 'Réinitialiser le mot de passe de : {0}'.format(url)
html = '<a href="{0}">Changer mot de passe !</a>'.format(
url
)
sendemail(subject, msg, html, email)
context['successes'] = [
'Changement de mot de passe envoyé !'.format(email)
]
result = render(request, 'resetpwd.html', context=context)
else:
context['lost_key'] = lost_key
if 'lost_key' in request.POST: # reset password
password = request.POST['password']
account.user.set_password(password)
account.lost_key = None
account.save()
account.user.save()
login(
request, user,
'django.contrib.auth.backends.ModelBackend'
)
context['successes'] = ['Mot de passe changé !']
context['page'] = 'home'
result = rendernextpage(request, context=context)
elif 'lost_key' in request.GET: # reset form
result = render(
request, 'resetpwd.html', context=context
)
return result
def getuserid(request):
"""Get user id from input request.
If user is authenticated, get user id. Otherwise, get
request.COOKIES.get('userid', uuid()).
"""
if request.user.is_authenticated():
result = request.user.id
else:
result = request.COOKIES.get('userid', uuid())
return result
def appcontext(request, page='home', tableofcontents=False):
"""Get app context.
:rtype: dict
"""
result = basecontext(request, page, tableofcontents)
result['admins'] = [Account.objects.filter(administrated__isnull=False)]
if page[-1] == 's':
result['type'] = page[:-1]
return result
def searchview(request, model):
"""Search view."""
page = '{0}s'.format(model.__name__.lower())
context = appcontext(request, page=page, tableofcontents=True)
return render(request, 'search.html', context=context)
def spacesview(request):
"""View of spaces."""
return searchview(request, Space)
def eventsview(request):
"""View of events."""
return searchview(request, Event)
def topicsview(request):
"""View of Topics."""
return searchview(request, Topic)
def votesview(request):
"""View of votes."""
context = appcontext(request, page='votes', tableofcontents=True)
return render(request, 'search.html', context=context)
def accountsview(request):
"""View of accounts."""
context = appcontext(request, page='accounts', tableofcontents=True)
return render(request, 'search.html', context=context)
@requirelogin
def accountview(request):
"""Account view."""
context = appcontext(request, 'account', True)
return render(request, 'account.html', context=context)
def homeview(request):
"""Home view."""
context = basecontext(request, 'home')
return render(request, 'home.html', context=context)
@requirelogin
def topicview(request):
"""Topic view."""
return editview(request, 'topic')
@requirelogin
def eventview(request):
"""Event view."""
return editview(request, 'event')
@requirelogin
def spaceview(request):
"""Space view."""
return editview(request, 'space')
def editview(request, page):
"""Edit space/topic."""
context = appcontext(
request, page=page, tableofcontents=False
)
if 'id' not in request.POST:
instance = globals()[page.title()]()
else:
instance = globals()[page.title()].objects.get(id=request.POST['id'])
def filldefaults(*names):
"""Fill defaults."""
for name in names:
if name in request.POST:
val = request.POST[name]
setattr(instance, name, val)
if page == 'comment':
filldefaults('content', 'cited')
else:
filldefaults('name', 'description', 'public')
admins = request.POST.get('admins', '')
admins = admins.split(',') if admins else []
if request.user.id not in admins:
admins.append(request.user.id)
instance.save()
instance.admins.set(admins)
if page == 'space':
filldefaults('address', 'lon', 'lat')
instance.save()
page = request.POST.get('next', 'search')
return render(request, 'edit.html', context=context)
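# editview above resolves the model class dynamically: globals()[page.title()]
# turns a page name such as 'space' or 'topic' into the Space or Topic class
# imported from core.models. A minimal sketch of that lookup in isolation:
def _example_model_lookup(page):
    """Sketch: map a page name ('space', 'topic', 'event') to its model."""
    model_cls = globals()[page.title()]   # e.g. 'space' -> Space
    return model_cls()                    # unsaved instance of that model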
def faqview(request):
"""Faq view."""
context = basecontext(request, 'faq')
return render(request, 'faq.html', context=context)
def aboutview(request):
"""About view."""
context = basecontext(request, 'about', True)
return render(request, 'about.html', context=context)
def statsview(request):
"""Stat view."""
context = basecontext(request, 'stats', True)
context['stats'] = Stat.objects.all()
context['eventcount'] = Event.objects.count()
context['usercount'] = Account.objects.filter(uses=None).count()
return render(request, 'stats.html', context=context)
def mytopicsview(request):
"""My Topics view."""
context = appcontext(request, 'mytopics')
return render(request, 'mysearch.html', context=context)
def myvotesview(request):
"""My votes view."""
context = appcontext(request, 'myvotes')
return render(request, 'mysearch.html', context=context)
def myspacesview(request):
"""My spaces view."""
context = appcontext(request, 'myspaces')
return render(request, 'mysearch.html', context=context)
def myeventsview(request):
"""My events view."""
context = appcontext(request, 'myevents')
return render(request, 'mysearch.html', context=context)
def mycommentsview(request):
"""My comments view."""
context = appcontext(request, 'mycomments')
return render(request, 'mysearch.html', context=context)
def mystatsview(request):
"""Stat view."""
context = basecontext(request, 'stats', True)
context['stats'] = Stat.objects.all()
context['eventcount'] = Event.objects.count()
context['usercount'] = Account.objects.filter(uses=None).count()
return render(request, 'stats.html', context=context)
| 28.552163
| 78
| 0.586935
|
992ceaaf4fc3d8f4dd9582df934865feb62b5aa4
| 29,527
|
py
|
Python
|
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 25
|
2019-12-04T03:09:55.000Z
|
2022-03-08T10:52:06.000Z
|
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 29
|
2019-12-04T03:00:39.000Z
|
2022-03-02T06:25:44.000Z
|
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 33
|
2019-12-04T02:51:30.000Z
|
2022-03-24T02:47:38.000Z
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import status_params
import utils
import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
import os
import re
from ambari_commons.os_check import OSCheck
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_stack_version, get_major_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.functions import get_klist_path
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.format_jvm_option import format_jvm_option
from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions.get_architecture import get_architecture
from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
from resource_management.libraries.functions.namenode_ha_utils import get_properties_for_all_nameservices, namenode_federation_enabled
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
architecture = get_architecture()
stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
upgrade_direction = default("/commandParams/upgrade_direction", None)
rolling_restart = default("/commandParams/rolling_restart", False)
rolling_restart_safemode_exit_timeout = default("/configurations/cluster-env/namenode_rolling_restart_safemode_exit_timeout", None)
stack_version_unformatted = config['clusterLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
major_stack_version = get_major_version(stack_version_formatted)
agent_stack_retry_on_unavailability = config['ambariLevelParams']['agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/ambariLevelParams/agent_stack_retry_count", int)
# there is a stack upgrade which has not yet been finalized; it's currently suspended
upgrade_suspended = default("roleParams/upgrade_suspended", False)
# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)
# The desired role is only available during a Non-Rolling Upgrade in HA.
# The server calculates which of the two NameNodes will be the active, and the other the standby since they
# are started using different commands.
desired_namenode_role = default("/commandParams/desired_namenode_role", None)
command_timeout = default("/commandParams/command_timeout", 900)
# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)
stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
security_enabled = config['configurations']['cluster-env']['security_enabled']
hdfs_user = status_params.hdfs_user
root_user = "root"
hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
namenode_pid_file = status_params.namenode_pid_file
zkfc_pid_file = status_params.zkfc_pid_file
datanode_pid_file = status_params.datanode_pid_file
# Some datanode settings
dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
dfs_dn_ipc_address = config['configurations']['hdfs-site']['dfs.datanode.ipc.address']
secure_dn_ports_are_in_use = False
hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp")
namenode_backup_dir = default("/configurations/hadoop-env/namenode_backup_dir", "/tmp/upgrades")
# hadoop default parameters
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
hadoop_bin = stack_select.get_hadoop_dir("sbin")
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_home = stack_select.get_hadoop_dir("home")
hadoop_secure_dn_user = hdfs_user
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
hadoop_lib_home = stack_select.get_hadoop_dir("lib")
# hadoop parameters for stacks that support rolling_upgrade
if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
if not security_enabled:
hadoop_secure_dn_user = '""'
else:
dfs_dn_port = utils.get_port(dfs_dn_addr)
dfs_dn_http_port = utils.get_port(dfs_dn_http_addr)
dfs_dn_https_port = utils.get_port(dfs_dn_https_addr)
# Try to avoid the datanode being unable to start as a plain (non-root) user due to the use of root-owned (privileged) ports
if dfs_http_policy == "HTTPS_ONLY":
secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_https_port)
elif dfs_http_policy == "HTTP_AND_HTTPS":
secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port) or utils.is_secure_port(dfs_dn_https_port)
else: # params.dfs_http_policy == "HTTP_ONLY" or not defined:
secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port)
if secure_dn_ports_are_in_use:
hadoop_secure_dn_user = hdfs_user
else:
hadoop_secure_dn_user = '""'
# Parameters for upgrade packs
skip_namenode_save_namespace_express = default("/configurations/cluster-env/stack_upgrade_express_skip_namenode_save_namespace", False)
skip_namenode_namedir_backup_express = default("/configurations/cluster-env/stack_upgrade_express_skip_backup_namenode_dir", False)
ambari_libs_dir = "/var/lib/ambari-agent/lib"
limits_conf_dir = "/etc/security/limits.d"
hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
hdfs_user_nproc_limit = default("/configurations/hadoop-env/hdfs_user_nproc_limit", "65536")
create_lib_snappy_symlinks = check_stack_feature(StackFeature.SNAPPY, stack_version_formatted)
jsvc_path = "/usr/lib/bigtop-utils"
execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
ulimit_cmd = "ulimit -c unlimited ; "
snappy_so = "libsnappy.so"
so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
so_src_dir_x86 = format("{hadoop_home}/lib")
so_src_dir_x64 = format("{hadoop_home}/lib64")
so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
#security params
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
falcon_user = config['configurations']['falcon-env']['falcon_user']
#exclude file
if 'all_decommissioned_hosts' in config['commandParams']:
hdfs_exclude_file = config['commandParams']['all_decommissioned_hosts'].split(",")
else:
hdfs_exclude_file = []
exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
slave_hosts = default("/clusterHostInfo/datanode_hosts", [])
include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
hdfs_include_file = None
manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
if include_file_path and manage_include_files:
hdfs_include_file = slave_hosts
update_files_only = default("/commandParams/update_files_only",False)
command_phase = default("/commandParams/phase","")
klist_path_local = get_klist_path(default('/configurations/kerberos-env/executable_search_paths', None))
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
#hosts
hostname = config['agentLevelParams']['hostname']
rm_host = default("/clusterHostInfo/resourcemanager_hosts", [])
public_hostname = config["agentLevelParams"]["public_hostname"]
oozie_servers = default("/clusterHostInfo/oozie_server", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_hosts", [])
hive_server_host = default("/clusterHostInfo/hive_server_hosts", [])
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
hs_host = default("/clusterHostInfo/historyserver_hosts", [])
jtnode_host = default("/clusterHostInfo/jtnode_hosts", [])
namenode_host = default("/clusterHostInfo/namenode_hosts", [])
nm_host = default("/clusterHostInfo/nodemanager_hosts", [])
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", [])
journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
has_ganglia_server = not len(ganglia_server_hosts) == 0
has_namenodes = not len(namenode_host) == 0
has_jobtracker = not len(jtnode_host) == 0
has_resourcemanager = not len(rm_host) == 0
has_histroryserver = not len(hs_host) == 0
has_hbase_masters = not len(hbase_master_hosts) == 0
has_slaves = not len(slave_hosts) == 0
has_oozie_server = not len(oozie_servers) == 0
has_hcat_server_host = not len(hcat_server_hosts) == 0
has_hive_server_host = not len(hive_server_host) == 0
has_journalnode_hosts = not len(journalnode_hosts) == 0
has_zkfc_hosts = not len(zkfc_hosts) == 0
has_falcon_host = not len(falcon_host) == 0
is_namenode_master = hostname in namenode_host
is_jtnode_master = hostname in jtnode_host
is_rmnode_master = hostname in rm_host
is_hsnode_master = hostname in hs_host
is_hbase_master = hostname in hbase_master_hosts
is_slave = hostname in slave_hosts
if has_ganglia_server:
ganglia_server_host = ganglia_server_hosts[0]
#users and groups
yarn_user = config['configurations']['yarn-env']['yarn_user']
hbase_user = config['configurations']['hbase-env']['hbase_user']
oozie_user = config['configurations']['oozie-env']['oozie_user']
webhcat_user = config['configurations']['hive-env']['webhcat_user']
hive_user = config['configurations']['hive-env']['hive_user']
smoke_user = config['configurations']['cluster-env']['smokeuser']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
mapred_user = config['configurations']['mapred-env']['mapred_user']
hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
user_group = config['configurations']['cluster-env']['user_group']
root_group = "root"
proxyuser_group = config['configurations']['hadoop-env']['proxyuser_group']
#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
nfs_file_dump_dir = config['configurations']['hdfs-site']['nfs.file.dump.dir']
dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
hdfs_site = config['configurations']['hdfs-site']
if namenode_federation_enabled(hdfs_site):
jn_edits_dirs = get_properties_for_all_nameservices(hdfs_site, 'dfs.journalnode.edits.dir').values()
else:
jn_edits_dirs = [config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']]
dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
hdfs_log_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
namenode_dirs_created_stub_dir = hdfs_log_dir
namenode_dirs_stub_filename = "namenode_dirs_created"
smoke_hdfs_user_dir = format("/user/{smoke_user}")
smoke_hdfs_user_mode = 0770
hdfs_namenode_format_disabled = default("/configurations/cluster-env/hdfs_namenode_format_disabled", False)
hdfs_namenode_formatted_mark_suffix = "/namenode-formatted/"
hdfs_namenode_bootstrapped_mark_suffix = "/namenode-bootstrapped/"
namenode_formatted_old_mark_dirs = ["/var/run/hadoop/hdfs/namenode-formatted",
format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted"),
"/var/lib/hdfs/namenode/formatted"]
dfs_name_dirs = dfs_name_dir.split(",")
namenode_formatted_mark_dirs = []
namenode_bootstrapped_mark_dirs = []
for dn_dir in dfs_name_dirs:
tmp_format_mark_dir = format("{dn_dir}{hdfs_namenode_formatted_mark_suffix}")
tmp_bootstrap_mark_dir = format("{dn_dir}{hdfs_namenode_bootstrapped_mark_suffix}")
namenode_formatted_mark_dirs.append(tmp_format_mark_dir)
namenode_bootstrapped_mark_dirs.append(tmp_bootstrap_mark_dir)
# Use the namenode RPC address if configured, otherwise fall back to the default file system
namenode_address = None
if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
namenode_rpcaddress = config['configurations']['hdfs-site']['dfs.namenode.rpc-address']
namenode_address = format("hdfs://{namenode_rpcaddress}")
else:
namenode_address = config['configurations']['core-site']['fs.defaultFS']
fs_checkpoint_dirs = default("/configurations/hdfs-site/dfs.namenode.checkpoint.dir", "").split(',')
dfs_data_dirs = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
dfs_data_dirs_perm = default("/configurations/hdfs-site/dfs.datanode.data.dir.perm", "755")
dfs_data_dirs_perm = int(dfs_data_dirs_perm, base=8) # parse the octal string representation into an int
data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
# HDFS High Availability properties
dfs_ha_enabled = False
dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
if dfs_ha_nameservices is None:
dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
dfs_ha_namenode_ids_all_ns = get_properties_for_all_nameservices(hdfs_site, 'dfs.ha.namenodes')
dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
# hostname of the active HDFS HA Namenode (only used when HA is enabled)
dfs_ha_namenode_active = default("/configurations/cluster-env/dfs_ha_initial_namenode_active", None)
# hostname of the standby HDFS HA Namenode (only used when HA is enabled)
dfs_ha_namenode_standby = default("/configurations/cluster-env/dfs_ha_initial_namenode_standby", None)
ha_zookeeper_quorum = config['configurations']['core-site']['ha.zookeeper.quorum']
jaas_file = os.path.join(hadoop_conf_secure_dir, 'hdfs_jaas.conf')
zk_namespace = default('/configurations/hdfs-site/ha.zookeeper.parent-znode', '/hadoop-ha')
# Values for the current Host
namenode_id = None
namenode_rpc = None
dfs_ha_namemodes_ids_list = []
other_namenode_id = None
for ns, dfs_ha_namenode_ids in dfs_ha_namenode_ids_all_ns.iteritems():
found = False
if not is_empty(dfs_ha_namenode_ids):
dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
if dfs_ha_namenode_ids_array_len > 1:
dfs_ha_enabled = True
if dfs_ha_enabled:
for nn_id in dfs_ha_namemodes_ids_list:
nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{ns}.{nn_id}')]
if hostname.lower() in nn_host.lower() or public_hostname.lower() in nn_host.lower():
namenode_id = nn_id
namenode_rpc = nn_host
found = True
# With HA enabled namenode_address is recomputed
namenode_address = format('hdfs://{ns}')
# Calculate the namenode id of the other namenode. This is needed during RU to initiate an HA failover using ZKFC.
if namenode_id is not None and len(dfs_ha_namemodes_ids_list) == 2:
other_namenode_id = list(set(dfs_ha_namemodes_ids_list) - set([namenode_id]))[0]
if found:
break
if dfs_http_policy is not None and dfs_http_policy.upper() == "HTTPS_ONLY":
https_only = True
journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.https-address', None)
else:
https_only = False
journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
if journalnode_address:
journalnode_port = journalnode_address.split(":")[1]
if security_enabled:
dn_principal_name = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
dn_keytab = config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
dn_principal_name = dn_principal_name.replace('_HOST',hostname.lower())
dn_kinit_cmd = format("{kinit_path_local} -kt {dn_keytab} {dn_principal_name};")
nn_principal_name = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
nn_keytab = config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
nn_principal_name = nn_principal_name.replace('_HOST',hostname.lower())
nn_kinit_cmd = format("{kinit_path_local} -kt {nn_keytab} {nn_principal_name};")
jn_principal_name = default("/configurations/hdfs-site/dfs.journalnode.kerberos.principal", None)
if jn_principal_name:
jn_principal_name = jn_principal_name.replace('_HOST', hostname.lower())
jn_keytab = default("/configurations/hdfs-site/dfs.journalnode.keytab.file", None)
hdfs_kinit_cmd = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
zk_principal_name = default("/configurations/zookeeper-env/zookeeper_principal_name", "zookeeper/_HOST@EXAMPLE.COM")
zk_principal_user = zk_principal_name.split('/')[0]
else:
dn_kinit_cmd = ""
nn_kinit_cmd = ""
hdfs_kinit_cmd = ""
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/clusterLevelParams/dfs_type", "")
import functools
# Create a partial function with the arguments common to every HdfsResource call.
# To create/delete/copy-from-local HDFS directories and files, code calls params.HdfsResource.
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources(),
dfs_type = dfs_type
)
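# Illustrative usage sketch (the path, owner and mode below are hypothetical): with the
# partial above, call sites only pass the path-specific arguments, e.g.:
#
#   params.HdfsResource("/user/example",
#                       type="directory",
#                       action="create_on_execute",
#                       owner=params.hdfs_user,
#                       mode=0755)
#   params.HdfsResource(None, action="execute")  # flush the queued HDFS operations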
name_node_params = default("/commandParams/namenode", None)
java_home = config['ambariLevelParams']['java_home']
java_version = expect("/ambariLevelParams/java_version", int)
java_exec = format("{java_home}/bin/java")
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
hadoop_security_authorization = config['configurations']['core-site']['hadoop.security.authorization']
fs_default_name = config['configurations']['core-site']['fs.defaultFS']
hadoop_security_auth_to_local = config['configurations']['core-site']['hadoop.security.auth_to_local']
if security_enabled:
sn_principal_name = default("/configurations/hdfs-site/dfs.secondary.namenode.kerberos.principal", "nn/_HOST@EXAMPLE.COM")
sn_principal_name = sn_principal_name.replace('_HOST',hostname.lower())
# for curl command in ranger plugin to get db connector
jdk_location = config['ambariLevelParams']['jdk_location']
java_share_dir = '/usr/share/java'
is_https_enabled = is_https_enabled_in_hdfs(config['configurations']['hdfs-site']['dfs.http.policy'],
config['configurations']['hdfs-site']['dfs.https.enable'])
# ranger hdfs plugin section start
# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
# Ranger xml_configuration support flag: determined via a stack feature instead of depending on xml_configurations_supported in ranger-env
xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
# ambari-server hostname
ambari_server_hostname = config['ambariLevelParams']['ambari_server_host']
# ranger hdfs plugin enabled property
enable_ranger_hdfs = default("/configurations/ranger-hdfs-plugin-properties/ranger-hdfs-plugin-enabled", "No")
enable_ranger_hdfs = True if enable_ranger_hdfs.lower() == 'yes' else False
# get ranger hdfs properties if enable_ranger_hdfs is True
if enable_ranger_hdfs:
# ranger policy url
policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
if xml_configurations_supported:
policymgr_mgr_url = config['configurations']['ranger-hdfs-security']['ranger.plugin.hdfs.policy.rest.url']
if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
# ranger audit db user
xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
# ranger hdfs service name
repo_name = str(config['clusterName']) + '_hadoop'
repo_name_value = config['configurations']['ranger-hdfs-security']['ranger.plugin.hdfs.service.name']
if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
repo_name = repo_name_value
hadoop_rpc_protection = config['configurations']['ranger-hdfs-plugin-properties']['hadoop.rpc.protection']
common_name_for_certificate = config['configurations']['ranger-hdfs-plugin-properties']['common.name.for.certificate']
repo_config_username = config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
# ranger-env config
ranger_env = config['configurations']['ranger-env']
# create ranger-env config having external ranger credential properties
if not has_ranger_admin and enable_ranger_hdfs:
external_admin_username = default('/configurations/ranger-hdfs-plugin-properties/external_admin_username', 'admin')
external_admin_password = default('/configurations/ranger-hdfs-plugin-properties/external_admin_password', 'admin')
external_ranger_admin_username = default('/configurations/ranger-hdfs-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
external_ranger_admin_password = default('/configurations/ranger-hdfs-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
ranger_env = {}
ranger_env['admin_username'] = external_admin_username
ranger_env['admin_password'] = external_admin_password
ranger_env['ranger_admin_username'] = external_ranger_admin_username
ranger_env['ranger_admin_password'] = external_ranger_admin_password
ranger_plugin_properties = config['configurations']['ranger-hdfs-plugin-properties']
policy_user = config['configurations']['ranger-hdfs-plugin-properties']['policy_user']
repo_config_password = config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
xa_audit_db_password = ''
if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
downloaded_custom_connector = None
previous_jdbc_jar_name = None
driver_curl_source = None
driver_curl_target = None
previous_jdbc_jar = None
# to get db connector related properties
if has_ranger_admin and stack_supports_ranger_audit_db:
xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
driver_curl_target = format("{hadoop_lib_home}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
previous_jdbc_jar = format("{hadoop_lib_home}/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
sql_connector_jar = ''
hdfs_ranger_plugin_config = {
'username': repo_config_username,
'password': repo_config_password,
'hadoop.security.authentication': hadoop_security_authentication,
'hadoop.security.authorization': hadoop_security_authorization,
'fs.default.name': fs_default_name,
'hadoop.security.auth_to_local': hadoop_security_auth_to_local,
'hadoop.rpc.protection': hadoop_rpc_protection,
'commonNameForCertificate': common_name_for_certificate,
'dfs.datanode.kerberos.principal': dn_principal_name if security_enabled else '',
'dfs.namenode.kerberos.principal': nn_principal_name if security_enabled else '',
'dfs.secondary.namenode.kerberos.principal': sn_principal_name if security_enabled else ''
}
hdfs_ranger_plugin_repo = {
'isActive': 'true',
'config': json.dumps(hdfs_ranger_plugin_config),
'description': 'hdfs repo',
'name': repo_name,
'repositoryType': 'hdfs',
'assetType': '1'
}
custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
if len(custom_ranger_service_config) > 0:
hdfs_ranger_plugin_config.update(custom_ranger_service_config)
if stack_supports_ranger_kerberos and security_enabled:
hdfs_ranger_plugin_config['policy.download.auth.users'] = hdfs_user
hdfs_ranger_plugin_config['tag.download.auth.users'] = hdfs_user
if stack_supports_ranger_kerberos:
hdfs_ranger_plugin_config['ambari.service.check.user'] = policy_user
hdfs_ranger_plugin_repo = {
'isEnabled': 'true',
'configs': hdfs_ranger_plugin_config,
'description': 'hdfs repo',
'name': repo_name,
'type': 'hdfs'
}
xa_audit_db_is_enabled = False
if xml_configurations_supported and stack_supports_ranger_audit_db:
xa_audit_db_is_enabled = config['configurations']['ranger-hdfs-audit']['xasecure.audit.destination.db']
xa_audit_hdfs_is_enabled = config['configurations']['ranger-hdfs-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
ssl_keystore_password = config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
ssl_truststore_password = config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
# for SQLA explicitly disable audit to DB for Ranger
if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
xa_audit_db_is_enabled = False
# need this to capture cluster name from where ranger hdfs plugin is enabled
cluster_name = config['clusterName']
# ranger hdfs plugin section end
| 50.733677
| 175
| 0.800623
|
2ad7a8d02f5752d4fb18d3e9353f2f7a113fa3ac
| 1,556
|
py
|
Python
|
code/main.py
|
xp-soaring/cambridge_coffee_pot
|
477864aa3a0c0a2d31c2c18342e804109b2e9bbf
|
[
"Apache-2.0"
] | null | null | null |
code/main.py
|
xp-soaring/cambridge_coffee_pot
|
477864aa3a0c0a2d31c2c18342e804109b2e9bbf
|
[
"Apache-2.0"
] | null | null | null |
code/main.py
|
xp-soaring/cambridge_coffee_pot
|
477864aa3a0c0a2d31c2c18342e804109b2e9bbf
|
[
"Apache-2.0"
] | null | null | null |
# -------------------------------------------------------------------
#
# Python script to run sensor
#
# -------------------------------------------------------------------
import sys
import time
from sensor import Sensor
from config import Config
from sensor_utils import list_to_string
VERSION = "0.40"
# loads settings from sensor_config.json or the file given as argv[1]
CONFIG_FILENAME = "sensor_config.json"
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# main code
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
if __name__ == "__main__":
# Use default filename OR one given as argument
filename = CONFIG_FILENAME
print("main started with {} arguments {}".format(len(sys.argv), list_to_string(sys.argv)))
if len(sys.argv) > 1 :
filename = sys.argv[1]
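# Example invocations (illustrative):
#   python main.py                       -> settings read from sensor_config.json
#   python main.py my_custom_config.json -> settings read from the file named in argv[1]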
config = Config(filename)
s = Sensor(config)
# Infinite loop until killed, reading weight and sending data
try:
while True:
#----------------
# GET READING
# ---------------
# get readings from all load cells
value = s.get_weight()
# ---------------
# PROCESS READING
# ---------------
s.process_sample(time.time(), value)
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
pass
# Cleanup and quit
s.finish()
| 23.938462
| 94
| 0.410668
|
1454fd34d81a22aa5beb414059aa9947287e24a9
| 1,266
|
py
|
Python
|
setup.py
|
careyrp/github-pr-stats
|
fa8ac73682d428279ec8dd60093e0d2a7f7ed4fd
|
[
"WTFPL"
] | 63
|
2015-06-08T14:49:41.000Z
|
2022-02-25T22:30:29.000Z
|
setup.py
|
careyrp/github-pr-stats
|
fa8ac73682d428279ec8dd60093e0d2a7f7ed4fd
|
[
"WTFPL"
] | 6
|
2015-01-30T20:49:26.000Z
|
2020-04-20T13:03:12.000Z
|
setup.py
|
careyrp/github-pr-stats
|
fa8ac73682d428279ec8dd60093e0d2a7f7ed4fd
|
[
"WTFPL"
] | 19
|
2015-04-20T18:34:56.000Z
|
2021-05-10T19:26:46.000Z
|
#!/usr/bin/env python
import os
from distutils.core import setup
from github_pr_stats import VERSION
# I really prefer Markdown to reStructuredText. PyPi does not. This allows me
# to have things how I'd like, but not throw complaints when people are trying
# to install the package and they don't have pypandoc or the README in the
# right place.
try:
import pypandoc
description = pypandoc.convert('README.md', 'rst')
except (IOError, OSError, ImportError):
description = ''
try:
license = open('LICENSE').read()
except IOError:
license = 'WTFPL'
setup(
name = 'github-pr-stats',
version = VERSION,
author = 'James Pearson',
author_email = 'pearson@changedmy.name',
packages = ['github_pr_stats', 'github_pr_stats.example_plugins'],
scripts = ['bin/github-pr-stats'],
url = 'https://github.com/xiongchiamiov/github-pr-stats',
license = license,
description = 'Various statistics on the pull requests in your repo.',
long_description = description,
install_requires = [
'ascii-graph >= 0.2.0',
'docopt >= 0.6, < 0.7',
'envoy >= 0.0.2',
'github3.py >= 0.8, < 0.9',
'importlib >= 1.0',
'numpy >= 1.7, < 1.8',
'ordereddict',
'python-dateutil >= 2.2, < 2.3',
],
)
| 28.772727
| 79
| 0.655608
|
151cd9dfcc6506351f7c8150cd392262066402b4
| 4,733
|
py
|
Python
|
cysecuretools/execute/key_reader.py
|
cypresssemiconductorco/cysecuretools
|
f27b6a7a5d5829427d746bac046c496bfe2b5898
|
[
"Apache-2.0"
] | 9
|
2019-09-16T19:33:20.000Z
|
2020-11-05T00:56:20.000Z
|
cysecuretools/execute/key_reader.py
|
Infineon/cysecuretools
|
f27b6a7a5d5829427d746bac046c496bfe2b5898
|
[
"Apache-2.0"
] | 1
|
2021-04-16T08:17:16.000Z
|
2021-05-21T05:55:58.000Z
|
cysecuretools/execute/key_reader.py
|
Infineon/cysecuretools
|
f27b6a7a5d5829427d746bac046c496bfe2b5898
|
[
"Apache-2.0"
] | 1
|
2019-10-03T17:24:24.000Z
|
2019-10-03T17:24:24.000Z
|
"""
Copyright (c) 2020 Cypress Semiconductor Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import json
import logging
from jose import jwk, exceptions
from jose.constants import ALGORITHMS
import cysecuretools.execute.keygen as keygen
from cysecuretools.execute.provisioning_lib.cyprov_crypto import Crypto
from cysecuretools.execute.provisioning_lib.cyprov_pem import PemKey
from cysecuretools.execute.sys_call import get_prov_details
logger = logging.getLogger(__name__)
class KeyReaderMXS40V1:
def __init__(self, target):
self.target = target
self.policy_parser = target.policy_parser
self.policy_dir = self.policy_parser.policy_dir
def read_public_key(self, tool, key_id, key_format='jwk'):
passed, key = get_prov_details(tool, self.target.register_map, key_id)
if passed:
logger.debug(f'Public key (key_id={key_id}) read successfully')
logger.debug(f'{key}')
pub_key = json.loads(key)
if key_format == 'jwk':
return pub_key
elif key_format == 'pem':
return jwk_to_pem(pub_key)
else:
raise ValueError(f'Invalid key format \'{key_format}\'')
else:
logger.error(f'Cannot read public key (key_id={key_id})')
return None
def get_cypress_public_key(self):
"""
Gets Cypress public key from cy_auth JWT packet.
:return: Cypress public key (JWK).
"""
jwt_text = Crypto.read_jwt(self.policy_parser.get_cy_auth())
json_data = Crypto.readable_jwt(jwt_text)
return json_data["payload"]['cy_pub_key']
def jwk_to_pem(json_key, private_key=False):
pem = PemKey(json_key)
pem_key = pem.to_str(private_key)
return pem_key
def get_aes_key(key_size):
return keygen.generate_aes_key(key_size)
def load_key(key):
"""
Load JWK for certificate signing.
:param key: File that contains the key.
:return: Tuple - private key, public key
"""
priv_key = None
pub_key = None
with open(key, 'r') as f:
key_str = f.read()
key_json = json.loads(key_str)
combined = False
for item in key_json:
if 'priv_key' in item or 'pub_key' in item:
combined = True
break
if not combined:
try:
is_private = 'd' in key_json
if is_private:
if 'alg' in key_json:
priv_key_obj = jwk.construct(key_json)
else:
priv_key_obj = jwk.construct(key_json, ALGORITHMS.ES256)
pub_key_obj = priv_key_obj.public_key()
priv_key = key_json
pub_key = pub_key_obj.to_dict()
# Jose ignores 'kid' and 'use' fields in JWK, so
# copy them from private key
if 'kid' not in pub_key and 'kid' in priv_key:
pub_key['kid'] = priv_key['kid']
if 'use' not in pub_key and 'use' in priv_key:
pub_key['use'] = priv_key['use']
# Jose represents key tokens as bytes, so convert bytes to str
for k, v in pub_key.items():
if isinstance(v, bytes):
pub_key[k] = v.decode('utf-8')
else:
priv_key = None
pub_key = key_json
except exceptions.JWKError:
logger.error(f'Failed to load key {key}')
priv_key = None
pub_key = None
else:
# Input file may be JSON combined from private and public key
for item in key_json:
if 'priv_key' in item:
priv_key = key_json[item]
break
for item in key_json:
if 'pub_key' in item:
pub_key = key_json[item]
break
# Input file does not contain JWK
if not priv_key:
raise ValueError(f'Private key not found in {key}')
if not pub_key:
if priv_key:
pub_key = priv_key
del pub_key["d"]
else:
raise ValueError(f'Public key not found in {key}')
return priv_key, pub_key
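# Minimal usage sketch (illustrative; 'keys/priv_key.json' is a hypothetical path):
#   priv_jwk, pub_jwk = load_key('keys/priv_key.json')
#   pem_str = jwk_to_pem(pub_jwk)  # public JWK rendered as PEM text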
| 33.567376
| 78
| 0.60469
|
2ffdf3c78b080cfb6f9c92b5515620e2a492537c
| 199
|
py
|
Python
|
setup.py
|
sepandhaghighi/cyjson
|
3b7388f56e5db29b4fe3909de8d014eae8a03e0f
|
[
"MIT"
] | null | null | null |
setup.py
|
sepandhaghighi/cyjson
|
3b7388f56e5db29b4fe3909de8d014eae8a03e0f
|
[
"MIT"
] | null | null | null |
setup.py
|
sepandhaghighi/cyjson
|
3b7388f56e5db29b4fe3909de8d014eae8a03e0f
|
[
"MIT"
] | 1
|
2020-07-13T03:54:17.000Z
|
2020-07-13T03:54:17.000Z
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
setup(
ext_modules = cythonize([Extension("cyj", ["cyj.pyx","./cJSON/cJSON.c"])])
)
| 24.875
| 78
| 0.748744
|
2c9512c531ded0b3229842c2530801017ab19b4c
| 3,728
|
py
|
Python
|
mechroutines/models/_symm.py
|
keceli/mechdriver
|
978994ba5c77b6df00078b639c4482dacf269440
|
[
"Apache-2.0"
] | null | null | null |
mechroutines/models/_symm.py
|
keceli/mechdriver
|
978994ba5c77b6df00078b639c4482dacf269440
|
[
"Apache-2.0"
] | null | null | null |
mechroutines/models/_symm.py
|
keceli/mechdriver
|
978994ba5c77b6df00078b639c4482dacf269440
|
[
"Apache-2.0"
] | 8
|
2019-12-18T20:09:46.000Z
|
2020-11-14T16:37:28.000Z
|
""" Handle symmetry factor stuff
"""
import automol
from autofile import fs
from mechlib.amech_io import printer as ioprinter
def symmetry_factor(pf_filesystems, spc_mod_dct_i, spc_dct_i, rotors,
grxn=None, zma=None):
""" Determines the the overall (internal and external) symmetry factor for
a species or saddle point.
Function will simply take a symmetry factor provided by the user by way
of the spc_mod_dct_i, else it will calculate the symmetry factor using
the requested procedure.
For saddle points, the function ignores the possibility that two
configurations differ only in their torsional values. As a result,
the symmetry factor is a lower bound of the true value.
:param pf_filesystems:
:param grxn:
:rtype: float
"""
symm_factor = spc_dct_i.get('symm_factor')
if symm_factor is not None:
ioprinter.info_message(
' - Reading symmetry number input by user:', symm_factor)
else:
zrxn = spc_dct_i.get('zrxn', None)
if zrxn is not None:
grxn = automol.reac.relabel_for_geometry(zrxn)
else:
grxn = None
sym_model = spc_mod_dct_i['symm']['mod']
# Obtain geometry, energy, and symmetry filesystem
# Obtain the internal symmetry number using some routine
if sym_model == 'sampling':
[cnf_fs, cnf_path, min_cnf_locs, _, _] = pf_filesystems['symm']
geo = cnf_fs[-1].file.geometry.read(min_cnf_locs)
# Obtain the external symmetry number
ext_symm = automol.geom.external_symmetry_factor(geo)
# Set up the symmetry filesystem, read symmetrically similar geos
# includes minimum geo
sym_fs = fs.symmetry(cnf_path)
symm_geos = [geo]
symm_geos += [sym_fs[-1].file.geometry.read(locs)
for locs in sym_fs[-1].existing()]
# Obtain the internal symmetry number and end group factors
if rotors is not None:
ioprinter.info_message(
' - Determining internal sym number ',
'using sampling routine.')
int_symm, endgrp = automol.symm.internal_symm_from_sampling(
symm_geos, rotors, grxn=grxn, zma=zma)
else:
ioprinter.info_message(' - No torsions, internal sym is 1.0')
int_symm, endgrp = 1.0, 1.0
# Obtain overall number, reduced as needed
int_symm = automol.symm.reduce_internal_symm(
geo, int_symm, ext_symm, endgrp)
elif sym_model == 'HCO_model':
if zma is not None:
geo = automol.zmat.geometry(zma)
else:
[cnf_fs, cnf_path, min_cnf_locs, _, _] = pf_filesystems['symm']
geo = cnf_fs[-1].file.geometry.read(min_cnf_locs)
ret = automol.symm.oxygenated_hydrocarbon_symm_num(geo, grxn)
int_symm, ext_symm = ret
else:
[cnf_fs, cnf_path, min_cnf_locs, _, _] = pf_filesystems['symm']
geo = cnf_fs[-1].file.geometry.read(min_cnf_locs)
ioprinter.info_message(
'No symmetry model requested, ',
'setting internal sym factor to 1.0')
ext_symm = automol.geom.external_symmetry_factor(geo)
int_symm = 1.0
if rotors is not None:
rotor_symms = automol.rotor.symmetries(rotors, flat=True)
int_symm = automol.symm.rotor_reduced_symm_factor(
int_symm, rotor_symms)
symm_factor = ext_symm * int_symm
return symm_factor
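# Usage sketch (illustrative; the filesystem objects, model/species dicts and rotors come from the caller):
#   sfac = symmetry_factor(pf_filesystems, spc_mod_dct_i, spc_dct_i, rotors, zma=zma)
#   # sfac combines the external factor with the (possibly reduced) internal factor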
| 38.040816
| 79
| 0.60515
|
7d8cf4d58e0d5529cfe9f0bf6c80cd0860e5ce79
| 553
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/sunburst/hoverlabel/font/_size.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/sunburst/hoverlabel/font/_size.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/sunburst/hoverlabel/font/_size.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="sunburst.hoverlabel.font", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
| 32.529412
| 82
| 0.614828
|
4fbdc0b92df7cd9ee7d6feb1f946f90797bd250b
| 583
|
py
|
Python
|
Ago-Dic-2019/ERIK EDUARDO MONTOYA MARTINEZ/PRACTICA 1/CAPITULO 3/YouOwnList.py
|
Arbupa/DAS_Sistemas
|
52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1
|
[
"MIT"
] | 41
|
2017-09-26T09:36:32.000Z
|
2022-03-19T18:05:25.000Z
|
Ago-Dic-2019/ERIK EDUARDO MONTOYA MARTINEZ/PRACTICA 1/CAPITULO 3/YouOwnList.py
|
Arbupa/DAS_Sistemas
|
52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1
|
[
"MIT"
] | 67
|
2017-09-11T05:06:12.000Z
|
2022-02-14T04:44:04.000Z
|
Ago-Dic-2019/ERIK EDUARDO MONTOYA MARTINEZ/PRACTICA 1/CAPITULO 3/YouOwnList.py
|
Arbupa/DAS_Sistemas
|
52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1
|
[
"MIT"
] | 210
|
2017-09-01T00:10:08.000Z
|
2022-03-19T18:05:12.000Z
|
Automoviles=['Dodge Challeger', 'VW Gti', 'Jeep Rubicon', 'Alfa Romeo Quadro', 'Ford ST', 'Dodge RAM', 'Ford FX4']
M1="Me gustaria compar un " + Automoviles[0].title()+"."
M2="Mi vecino choco su nuevo " + Automoviles[1].title() + "."
M3="El nuevo " + Automoviles[2].title()+ " es mucho mas economico."
M4="Hay una gran diferencia entre el " + Automoviles[3].title() + " y el " + Automoviles[4].title()+"."
M5="La camioneta " + Automoviles[5].title() + " es de gasolina, mientras que la " + Automoviles[6].title() +" es de Diesel."
print(M1)
print(M2)
print(M3)
print(M4)
print(M5)
| 41.642857
| 124
| 0.653516
|
68ef67261ce9d4e7339e07eb0166c130725e01f4
| 101
|
py
|
Python
|
venv/lib/python2.7/_abcoll.py
|
ubcmist/DeployedModel
|
d254f998f1d2fc96f4ed9049bff04424e874c943
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/_abcoll.py
|
ubcmist/DeployedModel
|
d254f998f1d2fc96f4ed9049bff04424e874c943
|
[
"MIT"
] | 40
|
2019-05-04T04:46:31.000Z
|
2022-02-26T10:37:51.000Z
|
venv/lib/python2.7/_abcoll.py
|
ubcmist/DeployedModel
|
d254f998f1d2fc96f4ed9049bff04424e874c943
|
[
"MIT"
] | null | null | null |
/usr/local/Cellar/python@2/2.7.15_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/_abcoll.py
| 101
| 101
| 0.811881
|
eaba70868f481c11c06509971f2d498e367b5fad
| 26,535
|
py
|
Python
|
bauh/gems/snap/controller.py
|
DN-debug/bauh
|
83aeccae87d7fe26f6c5bf24be005288d5d54d84
|
[
"Zlib"
] | null | null | null |
bauh/gems/snap/controller.py
|
DN-debug/bauh
|
83aeccae87d7fe26f6c5bf24be005288d5d54d84
|
[
"Zlib"
] | null | null | null |
bauh/gems/snap/controller.py
|
DN-debug/bauh
|
83aeccae87d7fe26f6c5bf24be005288d5d54d84
|
[
"Zlib"
] | null | null | null |
import re
import time
import traceback
from threading import Thread
from typing import List, Set, Type, Optional, Tuple
from bauh.api.abstract.controller import SoftwareManager, SearchResult, ApplicationContext, UpgradeRequirements, \
TransactionResult, SoftwareAction
from bauh.api.abstract.disk import DiskCacheLoader
from bauh.api.abstract.handler import ProcessWatcher, TaskManager
from bauh.api.abstract.model import SoftwarePackage, PackageHistory, PackageUpdate, PackageSuggestion, \
SuggestionPriority, CustomSoftwareAction, PackageStatus
from bauh.api.abstract.view import SingleSelectComponent, SelectViewType, InputOption, ViewComponent, PanelComponent, \
FormComponent, TextInputComponent
from bauh.api.exception import NoInternetException
from bauh.commons import resource
from bauh.commons.boot import CreateConfigFile
from bauh.commons.category import CategoriesDownloader
from bauh.commons.html import bold
from bauh.commons.system import SystemProcess, ProcessHandler, new_root_subprocess, get_human_size_str
from bauh.commons.view_utils import new_select
from bauh.gems.snap import snap, URL_CATEGORIES_FILE, CATEGORIES_FILE_PATH, SUGGESTIONS_FILE, \
get_icon_path, snapd, ROOT_DIR
from bauh.gems.snap.config import SnapConfigManager
from bauh.gems.snap.model import SnapApplication
from bauh.gems.snap.snapd import SnapdClient
RE_AVAILABLE_CHANNELS = re.compile(r'(\w+)\s+(snap install.+)')
class SnapManager(SoftwareManager):
def __init__(self, context: ApplicationContext):
super(SnapManager, self).__init__(context=context)
self.i18n = context.i18n
self.api_cache = context.cache_factory.new()
context.disk_loader_factory.map(SnapApplication, self.api_cache)
self.enabled = True
self.http_client = context.http_client
self.logger = context.logger
self.ubuntu_distro = context.distro == 'ubuntu'
self.categories = {}
self.suggestions_cache = context.cache_factory.new()
self.info_path = None
self.configman = SnapConfigManager()
self.custom_actions = (
CustomSoftwareAction(i18n_status_key='snap.action.refresh.status',
i18n_label_key='snap.action.refresh.label',
icon_path=resource.get_path('img/refresh.svg', ROOT_DIR),
manager_method='refresh',
requires_root=True,
i18n_confirm_key='snap.action.refresh.confirm'),
CustomSoftwareAction(i18n_status_key='snap.action.channel.status',
i18n_label_key='snap.action.channel.label',
i18n_confirm_key='snap.action.channel.confirm',
icon_path=resource.get_path('img/refresh.svg', ROOT_DIR),
manager_method='change_channel',
requires_root=True,
requires_confirmation=False)
)
def _fill_categories(self, app: SnapApplication):
categories = self.categories.get(app.name.lower())
if categories:
app.categories = categories
if not app.is_application():
categories = app.categories
if categories is None:
categories = []
app.categories = categories
if 'runtime' not in categories:
categories.append('runtime')
def search(self, words: str, disk_loader: DiskCacheLoader, limit: int = -1, is_url: bool = False) -> SearchResult:
if is_url or (not snap.is_installed() and not snapd.is_running()):
return SearchResult([], [], 0)
snapd_client = SnapdClient(self.logger)
apps_found = snapd_client.query(words)
res = SearchResult([], [], 0)
if apps_found:
installed = self.read_installed(disk_loader).installed
for app_json in apps_found:
already_installed = None
if installed:
already_installed = [i for i in installed if i.id == app_json.get('id')]
already_installed = already_installed[0] if already_installed else None
if already_installed:
res.installed.append(already_installed)
else:
res.new.append(self._map_to_app(app_json, installed=False))
res.total = len(res.installed) + len(res.new)
return res
def read_installed(self, disk_loader: DiskCacheLoader, limit: int = -1, only_apps: bool = False, pkg_types: Set[Type[SoftwarePackage]] = None, internet_available: bool = None) -> SearchResult:
if snap.is_installed() and snapd.is_running():
snapd_client = SnapdClient(self.logger)
app_names = {a['snap'] for a in snapd_client.list_only_apps()}
installed = [self._map_to_app(app_json=appjson,
installed=True,
disk_loader=disk_loader,
is_application=app_names and appjson['name'] in app_names) for appjson in snapd_client.list_all_snaps()]
return SearchResult(installed, None, len(installed))
else:
return SearchResult([], None, 0)
def downgrade(self, pkg: SnapApplication, root_password: str, watcher: ProcessWatcher) -> bool:
if not snap.is_installed():
watcher.print("'snap' seems not to be installed")
return False
if not snapd.is_running():
watcher.print("'snapd' seems not to be running")
return False
return ProcessHandler(watcher).handle_simple(snap.downgrade_and_stream(pkg.name, root_password))[0]
def upgrade(self, requirements: UpgradeRequirements, root_password: str, watcher: ProcessWatcher) -> SystemProcess:
raise Exception(f"'upgrade' is not supported by {SnapManager.__class__.__name__}")
def uninstall(self, pkg: SnapApplication, root_password: str, watcher: ProcessWatcher, disk_loader: DiskCacheLoader) -> TransactionResult:
if snap.is_installed() and snapd.is_running():
uninstalled = ProcessHandler(watcher).handle_simple(snap.uninstall_and_stream(pkg.name, root_password))[0]
if uninstalled:
if self.suggestions_cache:
self.suggestions_cache.delete(pkg.name)
return TransactionResult(success=True, installed=None, removed=[pkg])
return TransactionResult.fail()
def get_managed_types(self) -> Set[Type[SoftwarePackage]]:
return {SnapApplication}
def clean_cache_for(self, pkg: SnapApplication):
super(SnapManager, self).clean_cache_for(pkg)
self.api_cache.delete(pkg.id)
def get_info(self, pkg: SnapApplication) -> dict:
info = {
'description': pkg.description,
'developer': pkg.developer,
'license': pkg.license,
'contact': pkg.contact,
'snap-id': pkg.id,
'name': pkg.name,
'publisher': pkg.publisher,
'revision': pkg.rev,
'tracking': pkg.tracking,
'channel': pkg.channel,
'type': pkg.type
}
if pkg.installed:
commands = [*{c['name'] for c in SnapdClient(self.logger).list_commands(pkg.name)}]
commands.sort()
info['commands'] = commands
if pkg.installed_size:
info['installed_size'] = get_human_size_str(pkg.installed_size)
elif pkg.download_size:
info['download_size'] = get_human_size_str(pkg.download_size)
return info
def get_history(self, pkg: SnapApplication) -> PackageHistory:
raise Exception(f"'get_history' is not supported by {pkg.__class__.__name__}")
def install(self, pkg: SnapApplication, root_password: str, disk_loader: DiskCacheLoader, watcher: ProcessWatcher) -> TransactionResult:
# retrieving all installed so it will be possible to know the additional installed runtimes after the operation succeeds
if not snap.is_installed():
watcher.print("'snap' seems not to be installed")
return TransactionResult.fail()
if not snapd.is_running():
watcher.print("'snapd' seems not to be running")
return TransactionResult.fail()
installed_names = {s['name'] for s in SnapdClient(self.logger).list_all_snaps()}
client = SnapdClient(self.logger)
snap_config = self.configman.get_config()
try:
channel = self._request_channel_installation(pkg=pkg, snap_config=snap_config, snapd_client=client, watcher=watcher)
pkg.channel = channel
except:
watcher.print('Aborted by user')
return TransactionResult.fail()
res, output = ProcessHandler(watcher).handle_simple(snap.install_and_stream(app_name=pkg.name,
confinement=pkg.confinement,
root_password=root_password,
channel=channel))
if 'error:' in output:
res = False
if 'not available on stable' in output:
channels = RE_AVAILABLE_CHANNELS.findall(output)
if channels:
opts = [InputOption(label=c[0], value=c[1]) for c in channels]
channel_select = SingleSelectComponent(type_=SelectViewType.RADIO, label='', options=opts, default_option=opts[0])
body = f"<p>{self.i18n['snap.install.available_channels.message'].format(bold(self.i18n['stable']), bold(pkg.name))}.</p>"
body += f"<p>{self.i18n['snap.install.available_channels.help']}:</p>"
if watcher.request_confirmation(title=self.i18n['snap.install.available_channels.title'],
body=body,
components=[channel_select],
confirmation_label=self.i18n['continue'],
deny_label=self.i18n['cancel']):
self.logger.info(f"Installing '{pkg.name}' with the custom command '{channel_select.value}'")
res = ProcessHandler(watcher).handle(SystemProcess(new_root_subprocess(channel_select.value.value.split(' '), root_password=root_password)))
return self._gen_installation_response(success=res, pkg=pkg,
installed=installed_names, disk_loader=disk_loader)
else:
self.logger.error(f"Could not find available channels in the installation output: {output}")
return self._gen_installation_response(success=res, pkg=pkg, installed=installed_names, disk_loader=disk_loader)
def _gen_installation_response(self, success: bool, pkg: SnapApplication, installed: Set[str], disk_loader: DiskCacheLoader):
if success:
new_installed = []
try:
net_available = self.context.internet_checker.is_available()
current_installed = self.read_installed(disk_loader=disk_loader, internet_available=net_available).installed
except:
new_installed = [pkg]
traceback.print_exc()
current_installed = None
if current_installed:
for p in current_installed:
if p.name == pkg.name or (not installed or p.name not in installed):
new_installed.append(p)
return TransactionResult(success=success, installed=new_installed, removed=[])
else:
return TransactionResult.fail()
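# Worked example of the diff above (hypothetical snap names): given
#   installed         = {'core', 'firefox'}                                # before the install
#   current_installed -> ['core', 'firefox', 'gimp', 'gtk-common-themes'] # read after success
# the loop collects new_installed = ['gimp', 'gtk-common-themes'], i.e. the requested
# package plus any runtimes pulled in alongside it.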
def is_enabled(self) -> bool:
return self.enabled
def set_enabled(self, enabled: bool):
self.enabled = enabled
def can_work(self) -> Tuple[bool, Optional[str]]:
return (True, None) if snap.is_installed() else (False, self.i18n['missing_dep'].format(dep=bold('snap')))
def requires_root(self, action: SoftwareAction, pkg: SnapApplication) -> bool:
return action not in (SoftwareAction.PREPARE, SoftwareAction.SEARCH)
def refresh(self, pkg: SnapApplication, root_password: str, watcher: ProcessWatcher) -> bool:
return ProcessHandler(watcher).handle_simple(snap.refresh_and_stream(pkg.name, root_password))[0]
def change_channel(self, pkg: SnapApplication, root_password: str, watcher: ProcessWatcher) -> bool:
if not self.context.internet_checker.is_available():
raise NoInternetException()
try:
channel = self._request_channel_installation(pkg=pkg,
snap_config=None,
snapd_client=SnapdClient(self.logger),
watcher=watcher,
exclude_current=True)
if not channel:
watcher.show_message(title=self.i18n['snap.action.channel.label'],
body=self.i18n['snap.action.channel.error.no_channel'])
return False
return ProcessHandler(watcher).handle_simple(snap.refresh_and_stream(app_name=pkg.name,
root_password=root_password,
channel=channel))[0]
except:
return False
def _start_category_task(self, taskman: TaskManager, create_config: CreateConfigFile, downloader: CategoriesDownloader):
if taskman:
taskman.update_progress('snap_cats', 0, self.i18n['task.waiting_task'].format(bold(create_config.task_name)))
create_config.join()
categories_exp = create_config.config['categories_exp']
downloader.expiration = categories_exp if isinstance(categories_exp, int) else None
taskman.update_progress('snap_cats', 1, None)
def _finish_category_task(self, taskman: TaskManager):
if taskman:
taskman.update_progress('snap_cats', 100, None)
taskman.finish_task('snap_cats')
def prepare(self, task_manager: TaskManager, root_password: str, internet_available: bool):
create_config = CreateConfigFile(taskman=task_manager, configman=self.configman, i18n=self.i18n,
task_icon_path=get_icon_path(), logger=self.logger)
create_config.start()
task_manager.register_task('snap_cats', self.i18n['task.download_categories'], get_icon_path())
category_downloader = CategoriesDownloader(id_='snap', manager=self, http_client=self.http_client,
logger=self.logger,
url_categories_file=URL_CATEGORIES_FILE,
categories_path=CATEGORIES_FILE_PATH,
internet_connection=internet_available,
internet_checker=self.context.internet_checker,
after=lambda: self._finish_category_task(task_manager))
category_downloader.before = lambda: self._start_category_task(task_manager, create_config, category_downloader)
category_downloader.start()
def list_updates(self, internet_available: bool) -> List[PackageUpdate]:
pass
def list_warnings(self, internet_available: bool) -> Optional[List[str]]:
if not snapd.is_running():
snap_bold = bold('Snap')
return [self.i18n['snap.notification.snapd_unavailable'].format(bold('snapd'), snap_bold),
self.i18n['snap.notification.snap.disable'].format(snap_bold,
bold(f"{self.i18n['settings'].capitalize()} > {self.i18n['core.config.tab.types']}"))]
elif internet_available:
available, output = snap.is_api_available()
if not available:
self.logger.warning(f'It seems Snap API is not available. Search output: {output}')
return [self.i18n['snap.notifications.api.unavailable'].format(bold('Snaps'), bold('Snap'))]
def _fill_suggestion(self, name: str, priority: SuggestionPriority, snapd_client: SnapdClient, out: List[PackageSuggestion]):
res = snapd_client.find_by_name(name)
if res:
if len(res) == 1:
app_json = res[0]
else:
jsons_found = [p for p in res if p['name'] == name]
app_json = jsons_found[0] if jsons_found else None
if app_json:
sug = PackageSuggestion(self._map_to_app(app_json, False), priority)
self.suggestions_cache.add(name, sug)
out.append(sug)
return
self.logger.warning(f"Could not retrieve suggestion '{name}'")
def _map_to_app(self, app_json: dict, installed: bool, disk_loader: Optional[DiskCacheLoader] = None, is_application: bool = False) -> SnapApplication:
app = SnapApplication(id=app_json.get('id'),
name=app_json.get('name'),
license=app_json.get('license'),
version=app_json.get('version'),
latest_version=app_json.get('version'),
description=app_json.get('description', app_json.get('summary')),
installed=installed,
rev=app_json.get('revision'),
publisher=app_json['publisher'].get('display-name', app_json['publisher'].get('username')),
verified_publisher=app_json['publisher'].get('validation') == 'verified',
icon_url=app_json.get('icon'),
screenshots={m['url'] for m in app_json.get('media', []) if m['type'] == 'screenshot'},
download_size=app_json.get('download-size'),
channel=app_json.get('channel'),
confinement=app_json.get('confinement'),
app_type=app_json.get('type'),
app=is_application,
installed_size=app_json.get('installed-size'),
extra_actions=self.custom_actions)
if disk_loader and app.installed:
disk_loader.fill(app)
self._fill_categories(app)
app.status = PackageStatus.READY
return app
def list_suggestions(self, limit: int, filter_installed: bool) -> List[PackageSuggestion]:
res = []
if snapd.is_running():
self.logger.info(f'Downloading suggestions file {SUGGESTIONS_FILE}')
file = self.http_client.get(SUGGESTIONS_FILE)
if not file or not file.text:
self.logger.warning(f"No suggestion found in {SUGGESTIONS_FILE}")
return res
else:
self.logger.info('Mapping suggestions')
suggestions, threads = [], []
snapd_client = SnapdClient(self.logger)
installed = {s['name'].lower() for s in snapd_client.list_all_snaps()}
for l in file.text.split('\n'):
if l:
if limit <= 0 or len(suggestions) < limit:
sug = l.strip().split('=')
name = sug[1]
if not installed or name not in installed:
cached_sug = self.suggestions_cache.get(name)
if cached_sug:
res.append(cached_sug)
else:
t = Thread(target=self._fill_suggestion, args=(name, SuggestionPriority(int(sug[0])), snapd_client, res))
t.start()
threads.append(t)
time.sleep(0.001) # to avoid being blocked
else:
break
for t in threads:
t.join()
res.sort(key=lambda s: s.priority.value, reverse=True)
return res
def is_default_enabled(self) -> bool:
return True
def launch(self, pkg: SnapApplication):
commands = SnapdClient(self.logger).list_commands(pkg.name)
if commands:
if len(commands) == 1:
cmd = commands[0]['name']
else:
desktop_cmd = [c for c in commands if 'desktop-file' in c]
if desktop_cmd:
cmd = desktop_cmd[0]['name']
else:
cmd = commands[0]['name']
self.logger.info(f"Running '{pkg.name}': {cmd}")
snap.run(cmd)
def get_screenshots(self, pkg: SnapApplication) -> List[str]:
return pkg.screenshots if pkg.has_screenshots() else []
def get_settings(self, screen_width: int, screen_height: int) -> Optional[ViewComponent]:
snap_config = self.configman.get_config()
max_width = 200
install_channel = new_select(label=self.i18n['snap.config.install_channel'],
opts=[(self.i18n['yes'].capitalize(), True, None),
(self.i18n['no'].capitalize(), False, None)],
value=bool(snap_config['install_channel']),
id_='snap_install_channel',
max_width=max_width,
tip=self.i18n['snap.config.install_channel.tip'])
categories_exp = TextInputComponent(id_='snap_cat_exp',
value=snap_config['categories_exp'] if isinstance(snap_config['categories_exp'], int) else '',
max_width=max_width,
only_int=True,
label=self.i18n['snap.config.categories_exp'],
tooltip=self.i18n['snap.config.categories_exp.tip'])
return PanelComponent([FormComponent([install_channel, categories_exp], self.i18n['installation'].capitalize())])
def save_settings(self, component: ViewComponent) -> Tuple[bool, Optional[List[str]]]:
snap_config = self.configman.get_config()
panel = component.components[0]
snap_config['install_channel'] = panel.get_component('snap_install_channel').get_selected()
snap_config['categories_exp'] = panel.get_component('snap_cat_exp').get_int_value()
try:
self.configman.save_config(snap_config)
return True, None
        except Exception:
return False, [traceback.format_exc()]
def _request_channel_installation(self, pkg: SnapApplication, snap_config: Optional[dict], snapd_client: SnapdClient, watcher: ProcessWatcher, exclude_current: bool = False) -> Optional[str]:
if snap_config is None or snap_config['install_channel']:
try:
data = [r for r in snapd_client.find_by_name(pkg.name) if r['name'] == pkg.name]
            except Exception:
self.logger.warning(f"snapd client could not retrieve channels for '{pkg.name}'")
return
if not data:
                self.logger.warning(f"snapd client could not find a match for name '{pkg.name}' when retrieving its channels")
else:
if not data[0].get('channels'):
self.logger.info(f"No channel available for '{pkg.name}'. Skipping selection.")
else:
if pkg.channel:
current_channel = pkg.channel if '/' in pkg.channel else f'latest/{pkg.channel}'
else:
current_channel = f"latest/{data[0].get('channel', 'stable')}"
opts = []
def_opt = None
for channel in sorted(data[0]['channels'].keys()):
if exclude_current:
if channel != current_channel:
opts.append(InputOption(label=channel, value=channel))
else:
op = InputOption(label=channel, value=channel)
opts.append(op)
if not def_opt and channel == current_channel:
def_opt = op
if not opts:
self.logger.info(f"No different channel available for '{pkg.name}'. Skipping selection.")
return
select = SingleSelectComponent(label='',
options=opts,
default_option=def_opt if def_opt else opts[0],
type_=SelectViewType.RADIO)
if not watcher.request_confirmation(title=self.i18n['snap.install.available_channels.title'],
body=self.i18n['snap.install.channel.body'] + ':',
components=[select],
confirmation_label=self.i18n['proceed'].capitalize(),
deny_label=self.i18n['cancel'].capitalize()):
raise Exception('aborted')
else:
return select.get_selected()
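# Hedged usage sketch (added for illustration, not part of the original controller):
# list_suggestions above expects each non-empty line of the downloaded suggestions
# file to look like "<priority>=<snap name>", where <priority> must map to a valid
# SuggestionPriority value. The helper below only mirrors that parsing step.
def _parse_suggestion_line_example(line: str):
    priority_str, name = line.strip().split('=', 1)
    return name, SuggestionPriority(int(priority_str))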
| 50.160681
| 196
| 0.566723
|
cb547a246bc3d51050b68ca3440223860830862e
| 7,325
|
py
|
Python
|
kubernetes_asyncio/client/models/v1beta1_custom_resource_definition_condition.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1beta1_custom_resource_definition_condition.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1beta1_custom_resource_definition_condition.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.14.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1beta1CustomResourceDefinitionCondition(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_transition_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_transition_time': 'lastTransitionTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None): # noqa: E501
"""V1beta1CustomResourceDefinitionCondition - a model defined in OpenAPI""" # noqa: E501
self._last_transition_time = None
self._message = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
if last_transition_time is not None:
self.last_transition_time = last_transition_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
self.status = status
self.type = type
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
Last time the condition transitioned from one status to another. # noqa: E501
:return: The last_transition_time of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1beta1CustomResourceDefinitionCondition.
Last time the condition transitioned from one status to another. # noqa: E501
:param last_transition_time: The last_transition_time of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def message(self):
"""Gets the message of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
Human-readable message indicating details about last transition. # noqa: E501
:return: The message of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1beta1CustomResourceDefinitionCondition.
Human-readable message indicating details about last transition. # noqa: E501
:param message: The message of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
Unique, one-word, CamelCase reason for the condition's last transition. # noqa: E501
:return: The reason of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1beta1CustomResourceDefinitionCondition.
Unique, one-word, CamelCase reason for the condition's last transition. # noqa: E501
:param reason: The reason of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
:type: str
"""
self._reason = reason
@property
def status(self):
"""Gets the status of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
Status is the status of the condition. Can be True, False, Unknown. # noqa: E501
:return: The status of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1beta1CustomResourceDefinitionCondition.
Status is the status of the condition. Can be True, False, Unknown. # noqa: E501
:param status: The status of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
:type: str
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def type(self):
"""Gets the type of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
Type is the type of the condition. # noqa: E501
:return: The type of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1beta1CustomResourceDefinitionCondition.
Type is the type of the condition. # noqa: E501
:param type: The type of this V1beta1CustomResourceDefinitionCondition. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1CustomResourceDefinitionCondition):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
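# Minimal usage sketch (added for illustration; not part of the generated client):
# construct a condition and serialize it with to_dict(). The field values below are
# hypothetical; 'status' and 'type' are required by the setters defined above.
def _example_condition_dict():
    condition = V1beta1CustomResourceDefinitionCondition(
        status="True",
        type="Established",
        reason="InitialNamesAccepted",
        message="the initial names have been accepted",
    )
    return condition.to_dict()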
| 31.9869
| 125
| 0.6243
|
e7031f2375ce2016636f2a483e15e759eff2d0de
| 7,448
|
py
|
Python
|
thirdweb/core/sdk.py
|
nftlabs/nftlabs-sdk-python
|
ea533142dc0881872b347cd8ce635dc0bfff3153
|
[
"Apache-2.0"
] | 30
|
2021-10-31T13:17:58.000Z
|
2022-02-04T13:41:13.000Z
|
thirdweb/core/sdk.py
|
nftlabs/nftlabs-sdk-python
|
ea533142dc0881872b347cd8ce635dc0bfff3153
|
[
"Apache-2.0"
] | 36
|
2021-11-03T20:30:38.000Z
|
2022-02-14T10:15:40.000Z
|
thirdweb/core/sdk.py
|
nftlabs/nftlabs-sdk-python
|
ea533142dc0881872b347cd8ce635dc0bfff3153
|
[
"Apache-2.0"
] | 10
|
2021-11-10T19:59:41.000Z
|
2022-01-21T21:26:55.000Z
|
from eth_account import Account
from thirdweb.abi.thirdweb_contract import ThirdwebContract
from thirdweb.common.feature_detection import (
fetch_contract_metadata_from_address,
)
from thirdweb.constants.urls import get_provider_for_network
from thirdweb.contracts import Marketplace
from thirdweb.contracts.custom import CustomContract
from thirdweb.contracts.edition_drop import EditionDrop
from thirdweb.contracts.nft_drop import NFTDrop
from thirdweb.contracts.multiwrap import Multiwrap
from thirdweb.core.classes.contract_deployer import ContractDeployer
from thirdweb.core.classes.ipfs_storage import IpfsStorage
from thirdweb.core.classes.provider_handler import ProviderHandler
from thirdweb.contracts import Token, Edition, NFTCollection
from eth_account.account import LocalAccount
from typing import Any, Dict, Optional, Type, Union, cast
from web3 import Web3
from thirdweb.types.sdk import SDKOptions
class ThirdwebSDK(ProviderHandler):
"""
The main entry point for the Thirdweb SDK.
"""
__contract_cache: Dict[
str,
Union[
NFTCollection,
Edition,
Token,
Marketplace,
NFTDrop,
EditionDrop,
Multiwrap,
],
] = {}
storage: IpfsStorage
deployer: ContractDeployer
@staticmethod
def from_private_key(
private_key: str,
network: str,
options: SDKOptions = SDKOptions(),
) -> "ThirdwebSDK":
signer = Account.from_key(private_key)
sdk = ThirdwebSDK(network, signer, options)
return sdk
def __init__(
self,
network: str,
signer: Optional[LocalAccount] = None,
options: SDKOptions = SDKOptions(),
storage: IpfsStorage = IpfsStorage(),
):
"""
Initialize the thirdweb SDK.
:param provider: web3 provider instance to use for getting on-chain data
:param signer: signer to use for sending transactions
:param options: optional SDK configuration options
:param storage: optional IPFS storage instance to use for storing data
"""
provider = get_provider_for_network(network)
super().__init__(provider, signer, options)
self.storage = storage
self.deployer = ContractDeployer(provider, signer, options, storage)
def get_nft_collection(self, address: str) -> NFTCollection:
"""
Returns an NFT Collection contract SDK instance
:param address: address of the NFT Collection contract
:returns: NFT Collection contract SDK instance
"""
return cast(NFTCollection, self._get_contract(address, NFTCollection))
def get_edition(self, address: str) -> Edition:
"""
Returns an Edition contract SDK instance
:param address: address of the Edition contract
:returns: Edition contract SDK instance
"""
return cast(Edition, self._get_contract(address, Edition))
def get_token(self, address: str) -> Token:
"""
Returns a Token contract SDK instance
:param address: address of the Token contract
:returns: Token contract SDK instance
"""
return cast(Token, self._get_contract(address, Token))
def get_marketplace(self, address: str) -> Marketplace:
"""
Returns a Marketplace contract SDK instance
:param address: address of the Marketplace contract
:returns: Marketplace contract SDK instance
"""
return cast(Marketplace, self._get_contract(address, Marketplace))
def get_nft_drop(self, address: str) -> NFTDrop:
"""
Returns an NFT Drop contract SDK instance
:param address: address of the NFT Drop contract
:returns: NFT Drop contract SDK instance
"""
return cast(NFTDrop, self._get_contract(address, NFTDrop))
def get_edition_drop(self, address: str) -> EditionDrop:
"""
Returns an Edition Drop contract SDK instance
:param address: address of the Edition Drop contract
:returns: Edition Drop contract SDK instance
"""
return cast(EditionDrop, self._get_contract(address, EditionDrop))
def get_multiwrap(self, address: str) -> Multiwrap:
"""
Returns a multiwrap contract SDK instance
:param address: address of the multiwrap contract
:returns: multiwrap contract SDK instance
"""
return cast(Multiwrap, self._get_contract(address, Multiwrap))
def get_contract(self, address: str) -> CustomContract:
"""
Returns a custom contract SDK instance
:param address: address of the custom contract
:returns: custom contract SDK instance
"""
if address in self.__contract_cache:
return cast(CustomContract, self.__contract_cache[address])
try:
provider = self.get_provider()
abi = fetch_contract_metadata_from_address(address, provider, self.storage)
return self.get_contract_from_abi(address, abi)
        except Exception:
raise Exception(f"Error fetching ABI for this contract\n{address}")
def get_contract_from_abi(self, address: str, abi: str) -> CustomContract:
"""
Returns a custom contract SDK instance given the contract ABI
:param address: address of the custom contract
:param abi: abi of the custom contract
:returns: custom contract SDK instance
"""
if address in self.__contract_cache:
return cast(CustomContract, self.__contract_cache[address])
provider = self.get_provider()
contract = CustomContract(
provider,
address,
abi,
self.storage,
self.get_signer(),
self.get_options(),
)
self.__contract_cache[address] = cast(Any, contract)
return contract
def update_provider(self, provider: Web3):
"""
Update the provider instance used by the SDK.
:param provider: web3 provider instance to use for getting on-chain data
"""
super().update_provider(provider)
for contract in self.__contract_cache.values():
contract.on_provider_updated(provider)
def update_signer(self, signer: Optional[LocalAccount] = None):
"""
Update the signer instance used by the SDK.
:param signer: signer to use for sending transactions
"""
super().update_signer(signer)
for contract in self.__contract_cache.values():
contract.on_signer_updated(signer)
def _get_contract(
self,
address: str,
contract_type: Union[
Type[NFTCollection],
Type[Edition],
Type[Token],
Type[Marketplace],
Type[NFTDrop],
Type[EditionDrop],
Type[Multiwrap],
],
) -> Union[
NFTCollection, Edition, Token, Marketplace, NFTDrop, EditionDrop, Multiwrap
]:
if address in self.__contract_cache:
return self.__contract_cache[address]
contract = contract_type(
self.get_provider(),
address,
self.storage,
self.get_signer(),
self.get_options(),
)
self.__contract_cache[address] = contract
return contract
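# Hedged usage sketch (added for illustration; not part of the original SDK module).
# The network name and contract address are placeholders, not values this module
# guarantees to exist: from_private_key builds a signer-backed SDK instance and the
# get_* helpers return cached, typed contract wrappers.
def _example_sdk_usage(private_key: str, contract_address: str):
    sdk = ThirdwebSDK.from_private_key(private_key, "mumbai")  # placeholder network name
    nft_collection = sdk.get_nft_collection(contract_address)
    return nft_collection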
| 31.033333
| 87
| 0.646079
|
800fd44b1e59e239943b5c532dfba1595d4fbf96
| 423
|
py
|
Python
|
tests/metadata/latest/test_template.py
|
OpenEnergyPlatform/metadata
|
b4e10fa57de1a45e7c866df6605015cad0b54957
|
[
"MIT"
] | 2
|
2020-05-06T17:20:10.000Z
|
2020-05-06T17:20:34.000Z
|
tests/metadata/latest/test_template.py
|
OpenEnergyPlatform/metadata
|
b4e10fa57de1a45e7c866df6605015cad0b54957
|
[
"MIT"
] | 21
|
2019-10-31T16:26:59.000Z
|
2020-05-13T15:12:41.000Z
|
tests/metadata/latest/test_template.py
|
OpenEnergyPlatform/metadata
|
b4e10fa57de1a45e7c866df6605015cad0b54957
|
[
"MIT"
] | null | null | null |
def test_if_template_json_loads_successfully():
from metadata.latest.template import OEMETADATA_LATEST_TEMPLATE
def test_template_against_schema_which_should_succeed():
import jsonschema
from metadata.latest.template import OEMETADATA_LATEST_TEMPLATE
from metadata.latest.schema import OEMETADATA_LATEST_SCHEMA
    assert jsonschema.validate(OEMETADATA_LATEST_TEMPLATE, OEMETADATA_LATEST_SCHEMA) is None
| 38.454545
| 92
| 0.8487
|
cbf93a6145c3836d313033b4e03e9cd1c06403d4
| 7,543
|
py
|
Python
|
p1/eightpuzzle.py
|
Mewzyk/stephen_AI
|
9e1cc920619581318c35959a3d888808edf6f959
|
[
"MIT"
] | 1
|
2018-01-09T21:53:58.000Z
|
2018-01-09T21:53:58.000Z
|
p1/eightpuzzle.py
|
Mewzyk/stephen_AI
|
9e1cc920619581318c35959a3d888808edf6f959
|
[
"MIT"
] | null | null | null |
p1/eightpuzzle.py
|
Mewzyk/stephen_AI
|
9e1cc920619581318c35959a3d888808edf6f959
|
[
"MIT"
] | null | null | null |
# eightpuzzle.py
# --------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
import search
import random
# Module Classes
class EightPuzzleState:
"""
The Eight Puzzle is described in the course textbook on
page 64.
This class defines the mechanics of the puzzle itself. The
task of recasting this puzzle as a search problem is left to
the EightPuzzleSearchProblem class.
"""
def __init__( self, numbers ):
"""
Constructs a new eight puzzle from an ordering of numbers.
numbers: a list of integers from 0 to 8 representing an
instance of the eight puzzle. 0 represents the blank
space. Thus, the list
[1, 0, 2, 3, 4, 5, 6, 7, 8]
represents the eight puzzle:
-------------
| 1 | | 2 |
-------------
| 3 | 4 | 5 |
-------------
| 6 | 7 | 8 |
------------
The configuration of the puzzle is stored in a 2-dimensional
list (a list of lists) 'cells'.
"""
self.cells = []
numbers = numbers[:] # Make a copy so as not to cause side-effects.
numbers.reverse()
for row in range( 3 ):
self.cells.append( [] )
for col in range( 3 ):
self.cells[row].append( numbers.pop() )
if self.cells[row][col] == 0:
self.blankLocation = row, col
def isGoal( self ):
"""
Checks to see if the puzzle is in its goal state.
-------------
| | 1 | 2 |
-------------
| 3 | 4 | 5 |
-------------
| 6 | 7 | 8 |
-------------
>>> EightPuzzleState([0, 1, 2, 3, 4, 5, 6, 7, 8]).isGoal()
True
>>> EightPuzzleState([1, 0, 2, 3, 4, 5, 6, 7, 8]).isGoal()
False
"""
current = 0
for row in range( 3 ):
for col in range( 3 ):
if current != self.cells[row][col]:
return False
current += 1
return True
def legalMoves( self ):
"""
Returns a list of legal moves from the current state.
Moves consist of moving the blank space up, down, left or right.
These are encoded as 'up', 'down', 'left' and 'right' respectively.
>>> EightPuzzleState([0, 1, 2, 3, 4, 5, 6, 7, 8]).legalMoves()
['down', 'right']
"""
moves = []
row, col = self.blankLocation
if(row != 0):
moves.append('up')
if(row != 2):
moves.append('down')
if(col != 0):
moves.append('left')
if(col != 2):
moves.append('right')
return moves
def result(self, move):
"""
Returns a new eightPuzzle with the current state and blankLocation
updated based on the provided move.
The move should be a string drawn from a list returned by legalMoves.
Illegal moves will raise an exception, which may be an array bounds
exception.
NOTE: This function *does not* change the current object. Instead,
it returns a new object.
"""
row, col = self.blankLocation
if(move == 'up'):
newrow = row - 1
newcol = col
elif(move == 'down'):
newrow = row + 1
newcol = col
elif(move == 'left'):
newrow = row
newcol = col - 1
elif(move == 'right'):
newrow = row
newcol = col + 1
else:
            raise ValueError("Illegal Move")
# Create a copy of the current eightPuzzle
newPuzzle = EightPuzzleState([0, 0, 0, 0, 0, 0, 0, 0, 0])
newPuzzle.cells = [values[:] for values in self.cells]
# And update it to reflect the move
newPuzzle.cells[row][col] = self.cells[newrow][newcol]
newPuzzle.cells[newrow][newcol] = self.cells[row][col]
newPuzzle.blankLocation = newrow, newcol
return newPuzzle
# Utilities for comparison and display
def __eq__(self, other):
"""
Overloads '==' such that two eightPuzzles with the same configuration
are equal.
>>> EightPuzzleState([0, 1, 2, 3, 4, 5, 6, 7, 8]) == \
EightPuzzleState([1, 0, 2, 3, 4, 5, 6, 7, 8]).result('left')
True
"""
for row in range( 3 ):
if self.cells[row] != other.cells[row]:
return False
return True
def __hash__(self):
return hash(str(self.cells))
def __getAsciiString(self):
"""
Returns a display string for the maze
"""
lines = []
horizontalLine = ('-' * (13))
lines.append(horizontalLine)
for row in self.cells:
rowLine = '|'
for col in row:
if col == 0:
col = ' '
rowLine = rowLine + ' ' + col.__str__() + ' |'
lines.append(rowLine)
lines.append(horizontalLine)
return '\n'.join(lines)
def __str__(self):
return self.__getAsciiString()
# TODO: Implement the methods in this class
class EightPuzzleSearchProblem(search.SearchProblem):
"""
Implementation of a SearchProblem for the Eight Puzzle domain
Each state is represented by an instance of an eightPuzzle.
"""
def __init__(self,puzzle):
"Creates a new EightPuzzleSearchProblem which stores search information."
self.puzzle = puzzle
def startingState(self):
        return self.puzzle
def isGoal(self,state):
return state.isGoal()
def successorStates(self,state):
"""
Returns list of (successor, action, stepCost) pairs where
        each successor is either left, right, up, or down
from the original state and the cost is 1.0 for each
"""
succ = []
for a in state.legalMoves():
succ.append((state.result(a), a, 1))
return succ
def actionsCost(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions. The sequence must
be composed of legal moves
"""
return len(actions)
EIGHT_PUZZLE_DATA = [[1, 0, 2, 3, 4, 5, 6, 7, 8],
[1, 7, 8, 2, 3, 4, 5, 6, 0],
[4, 3, 2, 7, 0, 5, 1, 6, 8],
[5, 1, 3, 4, 0, 2, 6, 7, 8],
[1, 2, 5, 7, 6, 8, 0, 4, 3],
[0, 3, 1, 6, 8, 2, 7, 5, 4]]
def loadEightPuzzle(puzzleNumber):
"""
puzzleNumber: The number of the eight puzzle to load.
Returns an eight puzzle object generated from one of the
provided puzzles in EIGHT_PUZZLE_DATA.
puzzleNumber can range from 0 to 5.
    >>> print(loadEightPuzzle(0))
-------------
| 1 | | 2 |
-------------
| 3 | 4 | 5 |
-------------
| 6 | 7 | 8 |
-------------
"""
return EightPuzzleState(EIGHT_PUZZLE_DATA[puzzleNumber])
def createRandomEightPuzzle(moves=100):
"""
moves: number of random moves to apply
Creates a random eight puzzle by applying
a series of 'moves' random moves to a solved
puzzle.
"""
puzzle = EightPuzzleState([0,1,2,3,4,5,6,7,8])
for i in range(moves):
# Execute a random legal move
puzzle = puzzle.result(random.sample(puzzle.legalMoves(), 1)[0])
return puzzle
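# Hedged usage sketch (added for illustration; not part of the original assignment
# code): solve one of the bundled puzzles with the same search.breadthFirstSearch
# call that the __main__ demo below uses, then replay the returned path.
def exampleSolveLoadedPuzzle(puzzleNumber=0):
    puzzle = loadEightPuzzle(puzzleNumber)
    problem = EightPuzzleSearchProblem(puzzle)
    path = search.breadthFirstSearch(problem)
    state = puzzle
    for move in path:
        state = state.result(move)  # result() returns a new state; the original is unchanged
    return state.isGoal(), path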
if __name__ == '__main__':
puzzle = createRandomEightPuzzle(25)
print('A random puzzle:')
print(puzzle)
problem = EightPuzzleSearchProblem(puzzle)
path = search.breadthFirstSearch(problem)
print('BFS found a path of %d moves: %s' % (len(path), str(path)))
curr = puzzle
i = 1
for a in path:
curr = curr.result(a)
print('After %d move%s: %s' % (i, ("", "s")[i>1], a))
print(curr)
        input("Press return for the next state...") # wait for key stroke
i += 1
| 27.32971
| 95
| 0.592602
|
63c38a70ce4e15c40d02e5f6461b08549020771a
| 2,456
|
py
|
Python
|
openstack/tests/unit/identity/v3/test_trust.py
|
NeCTAR-RC/openstacksdk
|
60a24f6c4717a1f9a0e545c9a07e68afaedc5a27
|
[
"Apache-2.0"
] | 99
|
2018-03-28T15:41:45.000Z
|
2022-01-23T17:22:13.000Z
|
openstack/tests/unit/identity/v3/test_trust.py
|
NeCTAR-RC/openstacksdk
|
60a24f6c4717a1f9a0e545c9a07e68afaedc5a27
|
[
"Apache-2.0"
] | 5
|
2018-05-25T16:54:23.000Z
|
2021-11-21T02:27:16.000Z
|
openstack/tests/unit/identity/v3/test_trust.py
|
NeCTAR-RC/openstacksdk
|
60a24f6c4717a1f9a0e545c9a07e68afaedc5a27
|
[
"Apache-2.0"
] | 104
|
2018-04-06T14:33:54.000Z
|
2022-03-01T01:58:09.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit import base
from openstack.identity.v3 import trust
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'allow_redelegation': False,
'expires_at': '2016-03-09T12:14:57.233772',
'id': IDENTIFIER,
'impersonation': True,
'links': {'self': 'fake_link'},
'project_id': '1',
'redelegated_trust_id': None,
'redelegation_count': '0',
'remaining_uses': 10,
'role_links': {'self': 'other_fake_link'},
'trustee_user_id': '2',
'trustor_user_id': '3',
'roles': [{'name': 'test-role'}],
}
class TestTrust(base.TestCase):
def test_basic(self):
sot = trust.Trust()
self.assertEqual('trust', sot.resource_key)
self.assertEqual('trusts', sot.resources_key)
self.assertEqual('/OS-TRUST/trusts', sot.base_path)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = trust.Trust(**EXAMPLE)
self.assertEqual(EXAMPLE['allow_redelegation'], sot.allow_redelegation)
self.assertEqual(EXAMPLE['expires_at'], sot.expires_at)
self.assertEqual(EXAMPLE['id'], sot.id)
self.assertTrue(sot.is_impersonation)
self.assertEqual(EXAMPLE['links'], sot.links)
self.assertEqual(EXAMPLE['project_id'], sot.project_id)
self.assertEqual(EXAMPLE['role_links'], sot.role_links)
self.assertEqual(EXAMPLE['redelegated_trust_id'],
sot.redelegated_trust_id)
self.assertEqual(EXAMPLE['remaining_uses'], sot.remaining_uses)
self.assertEqual(EXAMPLE['trustee_user_id'], sot.trustee_user_id)
self.assertEqual(EXAMPLE['trustor_user_id'], sot.trustor_user_id)
self.assertEqual(EXAMPLE['roles'], sot.roles)
self.assertEqual(EXAMPLE['redelegation_count'], sot.redelegation_count)
| 38.984127
| 79
| 0.690147
|
d996ebeb6dd7dd2012acd0d8518dc8925bf8cc38
| 2,674
|
py
|
Python
|
onnx2keras/padding_layers.py
|
mxz297/onnx2keras
|
934cf9783b4910c601bb6a7af5c684e0f13d0933
|
[
"MIT"
] | 2
|
2021-04-09T04:02:21.000Z
|
2022-01-25T17:50:50.000Z
|
onnx2keras/padding_layers.py
|
mxz297/onnx2keras
|
934cf9783b4910c601bb6a7af5c684e0f13d0933
|
[
"MIT"
] | 4
|
2021-03-19T20:12:41.000Z
|
2022-01-25T17:43:36.000Z
|
onnx2keras/padding_layers.py
|
mxz297/onnx2keras
|
934cf9783b4910c601bb6a7af5c684e0f13d0933
|
[
"MIT"
] | 2
|
2021-03-17T13:21:01.000Z
|
2021-03-19T15:50:51.000Z
|
from tensorflow import keras
import logging
from .utils import ensure_tf_type
def convert_padding(node, params, layers, node_name, keras_name):
"""
    Convert Padding layer
:param node: current operation node
:param params: operation attributes
:param layers: available keras layers
:param node_name: internal converter name
:param keras_name: resulting layer name
:return: None
"""
# It's binary by-default
logger = logging.getLogger("onnx2keras:padding")
params['mode'] = params['mode'].decode('ascii')
input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)
pads = params['pads']
if params['mode'] == 'constant':
# raise AssertionError('Cannot convert non-constant padding')
if params['value'] != 0.0:
raise AssertionError('Cannot convert non-zero padding')
# Magic ordering
if len(pads) == 8:
padding_layer = keras.layers.ZeroPadding2D(
padding=((pads[2], pads[6]), (pads[3], pads[7])),
name=keras_name
)
else:
logger.warning("Caution - no test yet")
padding_layer = keras.layers.ZeroPadding3D(
padding=((pads[2], pads[7]), (pads[3], pads[8]), (pads[4], pads[9])),
name=keras_name
)
layers[node_name] = padding_layer(input_0)
elif params['mode'] == 'reflect':
def target_layer(x, pads=pads):
import tensorflow as tf
if len(pads) == 8:
layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'REFLECT')
else:
logger.warning("Caution - no test yet")
layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'REFLECT')
return layer
lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
layers[node_name] = lambda_layer(input_0)
elif params['mode'] == 'edge':
def target_layer(x, pads=pads):
import tensorflow as tf
if len(pads) == 8: # TODO not tested yet
layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'SYMMETRIC')
else:
logger.warning("Caution - no test yet")
layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'SYMMETRIC')
return layer
lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
layers[node_name] = lambda_layer(input_0)
else:
raise AttributeError('Unknown padding')
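# Hedged usage sketch (added for illustration; not part of the original converter).
# It shows the attribute shapes convert_padding expects for the 2D 'constant' case:
# an 8-element ONNX 'pads' list and a byte-string 'mode'. SimpleNamespace stands in
# for a real ONNX node, and `layers` must already map input_name to an input tensor.
def _example_convert_zero_padding(layers, input_name, output_name='pad_0'):
    from types import SimpleNamespace
    node = SimpleNamespace(input=[input_name])  # stand-in for an ONNX NodeProto
    params = {
        'mode': b'constant',
        'value': 0.0,
        'pads': [0, 0, 1, 1, 0, 0, 1, 1],  # pad the two spatial axes by 1 on each side
    }
    convert_padding(node, params, layers, output_name, output_name)
    return layers[output_name]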
| 37.661972
| 124
| 0.566193
|
d5245be656f76d2dad1df4dc74c990adbac8b1c1
| 134
|
py
|
Python
|
python/inspectory.py
|
glennkentwell/code
|
874827791b13682b5ae4203fd1a9dcf1b33cc7b3
|
[
"Unlicense"
] | null | null | null |
python/inspectory.py
|
glennkentwell/code
|
874827791b13682b5ae4203fd1a9dcf1b33cc7b3
|
[
"Unlicense"
] | null | null | null |
python/inspectory.py
|
glennkentwell/code
|
874827791b13682b5ae4203fd1a9dcf1b33cc7b3
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python
import inspect as i
def ii(obj):
for p in dir(obj):
        print(p)
        print(i.getdoc(obj.__getattribute__(p)))
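# Example usage (illustrative): print every attribute of the built-in str type
# together with the docstring inspect can recover for it.
if __name__ == '__main__':
    ii(str)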
| 14.888889
| 41
| 0.604478
|
9b1f50c67f724d7cd2c8e54a4ed4d9fe72ec4337
| 319
|
py
|
Python
|
setup.py
|
gatoravi/guesswho
|
68c3213c50709968ba5f172ae080e414101efc02
|
[
"MIT"
] | null | null | null |
setup.py
|
gatoravi/guesswho
|
68c3213c50709968ba5f172ae080e414101efc02
|
[
"MIT"
] | null | null | null |
setup.py
|
gatoravi/guesswho
|
68c3213c50709968ba5f172ae080e414101efc02
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(name='guesswho',
version='0.1',
description='Infer location based on tweets.',
url='http://github.com/gatoravi/guesswho',
author='Avi Ramu',
author_email='avinash3003@yahoo.co.in',
license='MIT',
packages=['guesswho'],
zip_safe=False)
| 26.583333
| 52
| 0.639498
|
ba5ecf460a43519b032abce97923ffcf7b598153
| 13,988
|
py
|
Python
|
moonv4/moon_utilities/moon_utilities/exceptions.py
|
hashnfv/hashnfv-moon
|
daaba34fa2ed4426bc0fde359e54a5e1b872208c
|
[
"Apache-2.0"
] | null | null | null |
moonv4/moon_utilities/moon_utilities/exceptions.py
|
hashnfv/hashnfv-moon
|
daaba34fa2ed4426bc0fde359e54a5e1b872208c
|
[
"Apache-2.0"
] | null | null | null |
moonv4/moon_utilities/moon_utilities/exceptions.py
|
hashnfv/hashnfv-moon
|
daaba34fa2ed4426bc0fde359e54a5e1b872208c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
# This software is distributed under the terms and conditions of the 'Apache-2.0'
# license which can be found in the file 'LICENSE' in this package distribution
# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
from oslo_log import log as logging
from werkzeug.exceptions import HTTPException
LOG = logging.getLogger("moon.utilities.exceptions")
_ = str
class MoonErrorMetaClass(type):
def __init__(cls, name, bases, dct):
super(MoonErrorMetaClass, cls).__init__(name, bases, dct)
cls.hierarchy += "/"+str(name)
class MoonError(HTTPException):
__metaclass__ = MoonErrorMetaClass
hierarchy = ""
description = _("There is an error requesting the Moon platform.")
code = 400
title = 'Moon Error'
logger = "ERROR"
def __init__(self, message="", status_code=None, payload=""):
if message:
self.description = message
if status_code:
self.code = status_code
self.payload = payload
super(MoonError, self).__init__()
def __str__(self):
return "{}: {}".format(self.code, self.title)
def __del__(self):
message = "{} ({}) {}".format(self.hierarchy, self.description, self.payload)
if self.logger == "ERROR":
try:
LOG.error(message)
except AttributeError:
LOG.error(message)
elif self.logger == "WARNING":
try:
LOG.warning(message)
except AttributeError:
LOG.warning(message)
elif self.logger == "CRITICAL":
try:
LOG.critical(message)
except AttributeError:
LOG.critical(message)
elif self.logger == "AUTHZ":
try:
LOG.authz(self.hierarchy)
LOG.error(message)
except AttributeError:
LOG.error(message)
else:
try:
LOG.info(message)
except AttributeError:
LOG.info(message)
# def to_dict(self):
# rv = dict(self.payload or ())
# rv['message'] = "{} ({})".format(self.hierarchy, self.description)
# rv['title'] = self.title
# rv['code'] = self.code
# return rv
# Exceptions for Tenant
class TenantException(MoonError):
description = _("There is an error requesting this tenant.")
code = 400
title = 'Tenant Error'
logger = "ERROR"
class TenantUnknown(TenantException):
description = _("The tenant is unknown.")
code = 400
title = 'Tenant Unknown'
logger = "ERROR"
class TenantAddedNameExisting(TenantException):
description = _("The tenant name is existing.")
code = 400
title = 'Added Tenant Name Existing'
logger = "ERROR"
class TenantNoIntraExtension(TenantException):
description = _("The tenant has not intra_extension.")
code = 400
title = 'Tenant No Intra_Extension'
logger = "ERROR"
class TenantNoIntraAuthzExtension(TenantNoIntraExtension):
description = _("The tenant has not intra_admin_extension.")
code = 400
title = 'Tenant No Intra_Admin_Extension'
logger = "ERROR"
# Exceptions for IntraExtension
class IntraExtensionException(MoonError):
description = _("There is an error requesting this IntraExtension.")
code = 400
title = 'Extension Error'
class IntraExtensionUnknown(IntraExtensionException):
description = _("The intra_extension is unknown.")
code = 400
title = 'Intra Extension Unknown'
    logger = "ERROR"
class ModelUnknown(MoonError):
description = _("The model is unknown.")
code = 400
title = 'Model Unknown'
    logger = "ERROR"
class ModelExisting(MoonError):
description = _("The model already exists.")
code = 409
title = 'Model Error'
    logger = "ERROR"
class RootExtensionUnknown(IntraExtensionUnknown):
description = _("The root_extension is unknown.")
code = 400
title = 'Root Extension Unknown'
    logger = "ERROR"
class RootPDPNotInitialized(IntraExtensionException):
description = _("The root_extension is not initialized.")
code = 400
title = 'Root Extension Not Initialized'
    logger = "ERROR"
class IntraExtensionCreationError(IntraExtensionException):
description = _("The arguments for the creation of this Extension were malformed.")
code = 400
title = 'Intra Extension Creation Error'
# Authz exceptions
class AuthzException(MoonError):
description = _("There is an authorization error requesting this IntraExtension.")
code = 403
title = 'Authz Exception'
logger = "AUTHZ"
# Auth exceptions
class AuthException(MoonError):
description = _("There is an authentication error requesting this API. "
"You must provide a valid token from Keystone.")
code = 401
title = 'Auth Exception'
logger = "AUTHZ"
# Admin exceptions
class AdminException(MoonError):
description = _("There is an error requesting this Authz IntraExtension.")
code = 400
title = 'Authz Exception'
logger = "AUTHZ"
class AdminMetaData(AdminException):
code = 400
title = 'Metadata Exception'
class AdminPerimeter(AdminException):
code = 400
title = 'Perimeter Exception'
class AdminScope(AdminException):
code = 400
title = 'Scope Exception'
class AdminAssignment(AdminException):
code = 400
title = 'Assignment Exception'
class AdminMetaRule(AdminException):
code = 400
title = 'Aggregation Algorithm Exception'
class AdminRule(AdminException):
code = 400
title = 'Rule Exception'
class SubjectCategoryNameExisting(AdminMetaData):
description = _("The given subject category name already exists.")
code = 409
title = 'Subject Category Name Existing'
logger = "ERROR"
class SubjectCategoryExisting(AdminMetaData):
description = _("The given subject category already exists.")
code = 409
title = 'Subject Category Existing'
logger = "ERROR"
class ObjectCategoryNameExisting(AdminMetaData):
description = _("The given object category name already exists.")
code = 409
title = 'Object Category Name Existing'
logger = "ERROR"
class ObjectCategoryExisting(AdminMetaData):
description = _("The given object category already exists.")
code = 409
title = 'Object Category Existing'
logger = "ERROR"
class ActionCategoryNameExisting(AdminMetaData):
description = _("The given action category name already exists.")
code = 409
title = 'Action Category Name Existing'
logger = "ERROR"
class ActionCategoryExisting(AdminMetaData):
description = _("The given action category already exists.")
code = 409
title = 'Action Category Existing'
logger = "ERROR"
class SubjectCategoryUnknown(AdminMetaData):
description = _("The given subject category is unknown.")
code = 400
title = 'Subject Category Unknown'
logger = "ERROR"
class ObjectCategoryUnknown(AdminMetaData):
description = _("The given object category is unknown.")
code = 400
title = 'Object Category Unknown'
logger = "ERROR"
class ActionCategoryUnknown(AdminMetaData):
description = _("The given action category is unknown.")
code = 400
title = 'Action Category Unknown'
logger = "ERROR"
class SubjectUnknown(AdminPerimeter):
description = _("The given subject is unknown.")
code = 400
title = 'Subject Unknown'
logger = "ERROR"
class ObjectUnknown(AdminPerimeter):
description = _("The given object is unknown.")
code = 400
title = 'Object Unknown'
logger = "ERROR"
class ActionUnknown(AdminPerimeter):
description = _("The given action is unknown.")
code = 400
title = 'Action Unknown'
logger = "ERROR"
class SubjectNameExisting(AdminPerimeter):
description = _("The given subject name is existing.")
code = 400
title = 'Subject Name Existing'
logger = "ERROR"
class ObjectNameExisting(AdminPerimeter):
description = _("The given object name is existing.")
code = 400
title = 'Object Name Existing'
logger = "ERROR"
class ActionNameExisting(AdminPerimeter):
description = _("The given action name is existing.")
code = 400
title = 'Action Name Existing'
logger = "ERROR"
class ObjectsWriteNoAuthorized(AdminPerimeter):
description = _("The modification on Objects is not authorized.")
code = 400
title = 'Objects Write No Authorized'
logger = "AUTHZ"
class ActionsWriteNoAuthorized(AdminPerimeter):
description = _("The modification on Actions is not authorized.")
code = 400
title = 'Actions Write No Authorized'
logger = "AUTHZ"
class SubjectScopeUnknown(AdminScope):
description = _("The given subject scope is unknown.")
code = 400
title = 'Subject Scope Unknown'
logger = "ERROR"
class ObjectScopeUnknown(AdminScope):
description = _("The given object scope is unknown.")
code = 400
title = 'Object Scope Unknown'
logger = "ERROR"
class ActionScopeUnknown(AdminScope):
description = _("The given action scope is unknown.")
code = 400
title = 'Action Scope Unknown'
logger = "ERROR"
class SubjectScopeNameExisting(AdminScope):
description = _("The given subject scope name is existing.")
code = 400
title = 'Subject Scope Name Existing'
logger = "ERROR"
class ObjectScopeNameExisting(AdminScope):
description = _("The given object scope name is existing.")
code = 400
title = 'Object Scope Name Existing'
logger = "ERROR"
class ActionScopeNameExisting(AdminScope):
description = _("The given action scope name is existing.")
code = 400
title = 'Action Scope Name Existing'
logger = "ERROR"
class SubjectAssignmentUnknown(AdminAssignment):
description = _("The given subject assignment value is unknown.")
code = 400
title = 'Subject Assignment Unknown'
logger = "ERROR"
class ObjectAssignmentUnknown(AdminAssignment):
description = _("The given object assignment value is unknown.")
code = 400
title = 'Object Assignment Unknown'
logger = "ERROR"
class ActionAssignmentUnknown(AdminAssignment):
description = _("The given action assignment value is unknown.")
code = 400
title = 'Action Assignment Unknown'
logger = "ERROR"
class SubjectAssignmentExisting(AdminAssignment):
description = _("The given subject assignment value is existing.")
code = 400
title = 'Subject Assignment Existing'
logger = "ERROR"
class ObjectAssignmentExisting(AdminAssignment):
description = _("The given object assignment value is existing.")
code = 400
title = 'Object Assignment Existing'
logger = "ERROR"
class ActionAssignmentExisting(AdminAssignment):
description = _("The given action assignment value is existing.")
code = 400
title = 'Action Assignment Existing'
logger = "ERROR"
class AggregationAlgorithmNotExisting(AdminMetaRule):
description = _("The given aggregation algorithm is not existing.")
code = 400
title = 'Aggregation Algorithm Not Existing'
logger = "ERROR"
class AggregationAlgorithmUnknown(AdminMetaRule):
description = _("The given aggregation algorithm is unknown.")
code = 400
title = 'Aggregation Algorithm Unknown'
logger = "ERROR"
class SubMetaRuleAlgorithmNotExisting(AdminMetaRule):
description = _("The given sub_meta_rule algorithm is unknown.")
code = 400
title = 'Sub_meta_rule Algorithm Unknown'
logger = "ERROR"
class MetaRuleUnknown(AdminMetaRule):
description = _("The given sub meta rule is unknown.")
code = 400
title = 'Sub Meta Rule Unknown'
logger = "ERROR"
class SubMetaRuleNameExisting(AdminMetaRule):
description = _("The sub meta rule name already exists.")
code = 400
title = 'Sub Meta Rule Name Existing'
logger = "ERROR"
class MetaRuleExisting(AdminMetaRule):
description = _("The sub meta rule already exists.")
code = 400
title = 'Sub Meta Rule Existing'
logger = "ERROR"
class RuleExisting(AdminRule):
description = _("The rule already exists.")
code = 400
title = 'Rule Existing'
logger = "ERROR"
class RuleUnknown(AdminRule):
description = _("The rule for that request doesn't exist.")
code = 400
title = 'Rule Unknown'
logger = "ERROR"
# Keystone exceptions
class KeystoneError(MoonError):
description = _("There is an error connecting to Keystone.")
code = 400
title = 'Keystone error'
logger = "ERROR"
class KeystoneProjectError(KeystoneError):
description = _("There is an error retrieving projects from the Keystone service.")
code = 400
title = 'Keystone project error'
logger = "ERROR"
class KeystoneUserError(KeystoneError):
description = _("There is an error retrieving users from the Keystone service.")
code = 400
title = 'Keystone user error'
logger = "ERROR"
class KeystoneUserConflict(KeystoneUserError):
description = _("A user with that name already exist.")
code = 400
title = 'Keystone user error'
logger = "ERROR"
# Consul exceptions
class ConsulError(MoonError):
description = _("There is an error connecting to Consul.")
code = 400
title = 'Consul error'
logger = "ERROR"
class ConsulComponentNotFound(ConsulError):
description = _("The component do not exist in Consul database.")
code = 500
title = 'Consul error'
logger = "WARNING"
# Containers exceptions
class DockerError(MoonError):
description = _("There is an error with Docker.")
code = 400
title = 'Docker error'
logger = "ERROR"
class ContainerMissing(DockerError):
description = _("Some containers are missing.")
code = 400
title = 'Container missing'
logger = "ERROR"
| 25.808118
| 87
| 0.676937
|
3cb1b7ddb69bc45282b9dbb532ebaaaccf5408e7
| 18,186
|
py
|
Python
|
discpy/invite.py
|
AryamanSrii/DiscPy
|
0ba89da9ca184f0dfaebeedd4e9b7bc3099a0353
|
[
"MIT"
] | null | null | null |
discpy/invite.py
|
AryamanSrii/DiscPy
|
0ba89da9ca184f0dfaebeedd4e9b7bc3099a0353
|
[
"MIT"
] | null | null | null |
discpy/invite.py
|
AryamanSrii/DiscPy
|
0ba89da9ca184f0dfaebeedd4e9b7bc3099a0353
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2021 The DiscPy Developers
Copyright (c) 2015-2021 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import List, Optional, Type, TypeVar, Union, TYPE_CHECKING
from .asset import Asset
from .utils import parse_time, snowflake_time, _get_as_snowflake
from .object import Object
from .mixins import Hashable
from .enums import ChannelType, VerificationLevel, InviteTarget, try_enum
from .appinfo import PartialAppInfo
__all__ = (
"PartialInviteChannel",
"PartialInviteGuild",
"Invite",
)
if TYPE_CHECKING:
from .types.invite import (
Invite as InvitePayload,
InviteGuild as InviteGuildPayload,
GatewayInvite as GatewayInvitePayload,
)
from .types.channel import (
PartialChannel as InviteChannelPayload,
)
from .state import ConnectionState
from .guild import Guild
from .abc import GuildChannel
from .user import User
InviteGuildType = Union[Guild, "PartialInviteGuild", Object]
InviteChannelType = Union[GuildChannel, "PartialInviteChannel", Object]
import datetime
class PartialInviteChannel:
"""Represents a "partial" invite channel.
This model will be given when the user is not part of the
guild the :class:`Invite` resolves to.
.. container:: operations
.. describe:: x == y
Checks if two partial channels are the same.
.. describe:: x != y
Checks if two partial channels are not the same.
.. describe:: hash(x)
Return the partial channel's hash.
.. describe:: str(x)
Returns the partial channel's name.
Attributes
-----------
name: :class:`str`
The partial channel's name.
id: :class:`int`
The partial channel's ID.
type: :class:`ChannelType`
The partial channel's type.
"""
__slots__ = ("id", "name", "type")
def __init__(self, data: InviteChannelPayload):
self.id: int = int(data["id"])
self.name: str = data["name"]
self.type: ChannelType = try_enum(ChannelType, data["type"])
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return (
f"<PartialInviteChannel id={self.id} name={self.name} type={self.type!r}>"
)
@property
def mention(self) -> str:
""":class:`str`: The string that allows you to mention the channel."""
return f"<#{self.id}>"
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the channel's creation time in UTC."""
return snowflake_time(self.id)
class PartialInviteGuild:
"""Represents a "partial" invite guild.
This model will be given when the user is not part of the
guild the :class:`Invite` resolves to.
.. container:: operations
.. describe:: x == y
Checks if two partial guilds are the same.
.. describe:: x != y
Checks if two partial guilds are not the same.
.. describe:: hash(x)
Return the partial guild's hash.
.. describe:: str(x)
Returns the partial guild's name.
Attributes
-----------
name: :class:`str`
The partial guild's name.
id: :class:`int`
The partial guild's ID.
verification_level: :class:`VerificationLevel`
The partial guild's verification level.
features: List[:class:`str`]
A list of features the guild has. See :attr:`Guild.features` for more information.
description: Optional[:class:`str`]
The partial guild's description.
"""
__slots__ = (
"_state",
"features",
"_icon",
"_banner",
"id",
"name",
"_splash",
"verification_level",
"description",
)
def __init__(self, state: ConnectionState, data: InviteGuildPayload, id: int):
self._state: ConnectionState = state
self.id: int = id
self.name: str = data["name"]
self.features: List[str] = data.get("features", [])
self._icon: Optional[str] = data.get("icon")
self._banner: Optional[str] = data.get("banner")
self._splash: Optional[str] = data.get("splash")
self.verification_level: VerificationLevel = try_enum(
VerificationLevel, data.get("verification_level")
)
self.description: Optional[str] = data.get("description")
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return (
f"<{self.__class__.__name__} id={self.id} name={self.name!r} features={self.features} "
f"description={self.description!r}>"
)
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the guild's creation time in UTC."""
return snowflake_time(self.id)
@property
def icon(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's icon asset, if available."""
if self._icon is None:
return None
return Asset._from_guild_icon(self._state, self.id, self._icon)
@property
def banner(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's banner asset, if available."""
if self._banner is None:
return None
return Asset._from_guild_image(
self._state, self.id, self._banner, path="banners"
)
@property
def splash(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's invite splash asset, if available."""
if self._splash is None:
return None
return Asset._from_guild_image(
self._state, self.id, self._splash, path="splashes"
)
I = TypeVar("I", bound="Invite")
class Invite(Hashable):
r"""Represents a Discord :class:`Guild` or :class:`abc.GuildChannel` invite.
Depending on the way this object was created, some of the attributes can
have a value of ``None``.
.. container:: operations
.. describe:: x == y
Checks if two invites are equal.
.. describe:: x != y
Checks if two invites are not equal.
.. describe:: hash(x)
Returns the invite hash.
.. describe:: str(x)
Returns the invite URL.
The following table illustrates what methods will obtain the attributes:
+------------------------------------+------------------------------------------------------------+
| Attribute | Method |
+====================================+============================================================+
| :attr:`max_age` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`max_uses` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`created_at` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`temporary` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`uses` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`approximate_member_count` | :meth:`Client.fetch_invite` with `with_counts` enabled |
+------------------------------------+------------------------------------------------------------+
| :attr:`approximate_presence_count` | :meth:`Client.fetch_invite` with `with_counts` enabled |
+------------------------------------+------------------------------------------------------------+
| :attr:`expires_at` | :meth:`Client.fetch_invite` with `with_expiration` enabled |
+------------------------------------+------------------------------------------------------------+
If it's not in the table above then it is available by all methods.
Attributes
-----------
max_age: :class:`int`
How long before the invite expires in seconds.
A value of ``0`` indicates that it doesn't expire.
code: :class:`str`
The URL fragment used for the invite.
guild: Optional[Union[:class:`Guild`, :class:`Object`, :class:`PartialInviteGuild`]]
The guild the invite is for. Can be ``None`` if it's from a group direct message.
revoked: :class:`bool`
Indicates if the invite has been revoked.
created_at: :class:`datetime.datetime`
An aware UTC datetime object denoting the time the invite was created.
temporary: :class:`bool`
Indicates that the invite grants temporary membership.
If ``True``, members who joined via this invite will be kicked upon disconnect.
uses: :class:`int`
How many times the invite has been used.
max_uses: :class:`int`
How many times the invite can be used.
A value of ``0`` indicates that it has unlimited uses.
inviter: Optional[:class:`User`]
The user who created the invite.
approximate_member_count: Optional[:class:`int`]
The approximate number of members in the guild.
approximate_presence_count: Optional[:class:`int`]
The approximate number of members currently active in the guild.
This includes idle, dnd, online, and invisible members. Offline members are excluded.
expires_at: Optional[:class:`datetime.datetime`]
The expiration date of the invite. If the value is ``None`` when received through
`Client.fetch_invite` with `with_expiration` enabled, the invite will never expire.
.. versionadded:: 2.0
channel: Union[:class:`abc.GuildChannel`, :class:`Object`, :class:`PartialInviteChannel`]
The channel the invite is for.
target_type: :class:`InviteTarget`
The type of target for the voice channel invite.
.. versionadded:: 2.0
target_user: Optional[:class:`User`]
The user whose stream to display for this invite, if any.
.. versionadded:: 2.0
target_application: Optional[:class:`PartialAppInfo`]
The embedded application the invite targets, if any.
.. versionadded:: 2.0
"""
__slots__ = (
"max_age",
"code",
"guild",
"revoked",
"created_at",
"uses",
"temporary",
"max_uses",
"inviter",
"channel",
"target_user",
"target_type",
"_state",
"approximate_member_count",
"approximate_presence_count",
"target_application",
"expires_at",
)
BASE = "https://discord.gg"
def __init__(
self,
*,
state: ConnectionState,
data: InvitePayload,
guild: Optional[Union[PartialInviteGuild, Guild]] = None,
channel: Optional[Union[PartialInviteChannel, GuildChannel]] = None,
):
self._state: ConnectionState = state
self.max_age: Optional[int] = data.get("max_age")
self.code: str = data["code"]
self.guild: Optional[InviteGuildType] = self._resolve_guild(
data.get("guild"), guild
)
self.revoked: Optional[bool] = data.get("revoked")
self.created_at: Optional[datetime.datetime] = parse_time(
data.get("created_at")
)
self.temporary: Optional[bool] = data.get("temporary")
self.uses: Optional[int] = data.get("uses")
self.max_uses: Optional[int] = data.get("max_uses")
self.approximate_presence_count: Optional[int] = data.get(
"approximate_presence_count"
)
self.approximate_member_count: Optional[int] = data.get(
"approximate_member_count"
)
expires_at = data.get("expires_at", None)
self.expires_at: Optional[datetime.datetime] = (
parse_time(expires_at) if expires_at else None
)
inviter_data = data.get("inviter")
self.inviter: Optional[User] = (
None if inviter_data is None else self._state.create_user(inviter_data)
)
self.channel: Optional[InviteChannelType] = self._resolve_channel(
data.get("channel"), channel
)
target_user_data = data.get("target_user")
self.target_user: Optional[User] = (
None
if target_user_data is None
else self._state.create_user(target_user_data)
)
self.target_type: InviteTarget = try_enum(
InviteTarget, data.get("target_type", 0)
)
application = data.get("target_application")
self.target_application: Optional[PartialAppInfo] = (
PartialAppInfo(data=application, state=state) if application else None
)
@classmethod
def from_incomplete(
cls: Type[I], *, state: ConnectionState, data: InvitePayload
) -> I:
guild: Optional[Union[Guild, PartialInviteGuild]]
try:
guild_data = data["guild"]
except KeyError:
# If we're here, then this is a group DM
guild = None
else:
guild_id = int(guild_data["id"])
guild = state._get_guild(guild_id)
if guild is None:
# If it's not cached, then it has to be a partial guild
guild = PartialInviteGuild(state, guild_data, guild_id)
# As far as I know, invites always need a channel
# So this should never raise.
channel: Union[PartialInviteChannel, GuildChannel] = PartialInviteChannel(
data["channel"]
)
if guild is not None and not isinstance(guild, PartialInviteGuild):
# Upgrade the partial data if applicable
channel = guild.get_channel(channel.id) or channel
return cls(state=state, data=data, guild=guild, channel=channel)
@classmethod
def from_gateway(
cls: Type[I], *, state: ConnectionState, data: GatewayInvitePayload
) -> I:
guild_id: Optional[int] = _get_as_snowflake(data, "guild_id")
guild: Optional[Union[Guild, Object]] = state._get_guild(guild_id)
channel_id = int(data["channel_id"])
if guild is not None:
channel = guild.get_channel(channel_id) or Object(id=channel_id) # type: ignore
else:
guild = Object(id=guild_id) if guild_id is not None else None
channel = Object(id=channel_id)
return cls(state=state, data=data, guild=guild, channel=channel) # type: ignore
def _resolve_guild(
self,
data: Optional[InviteGuildPayload],
guild: Optional[Union[Guild, PartialInviteGuild]] = None,
) -> Optional[InviteGuildType]:
if guild is not None:
return guild
if data is None:
return None
guild_id = int(data["id"])
return PartialInviteGuild(self._state, data, guild_id)
def _resolve_channel(
self,
data: Optional[InviteChannelPayload],
channel: Optional[Union[PartialInviteChannel, GuildChannel]] = None,
) -> Optional[InviteChannelType]:
if channel is not None:
return channel
if data is None:
return None
return PartialInviteChannel(data)
def __str__(self) -> str:
return self.url
def __repr__(self) -> str:
return (
f"<Invite code={self.code!r} guild={self.guild!r} "
f"online={self.approximate_presence_count} "
f"members={self.approximate_member_count}>"
)
def __hash__(self) -> int:
return hash(self.code)
@property
def id(self) -> str:
""":class:`str`: Returns the proper code portion of the invite."""
return self.code
@property
def url(self) -> str:
""":class:`str`: A property that retrieves the invite URL."""
return self.BASE + "/" + self.code
async def delete(self, *, reason: Optional[str] = None):
"""|coro|
Revokes the instant invite.
You must have the :attr:`~Permissions.manage_channels` permission to do this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this invite. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to revoke invites.
NotFound
The invite is invalid or expired.
HTTPException
Revoking the invite failed.
"""
await self._state.http.delete_invite(self.code, reason=reason)
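# A minimal usage sketch (hypothetical: assumes an already-connected ``client`` and a
# valid invite code, neither of which is defined in this module):
#
#     invite = await client.fetch_invite("hypothetical-code", with_expiration=True)
#     print(invite.url, invite.expires_at, invite.approximate_member_count)
#     await invite.delete(reason="Cleaning up old invites")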
| 34.706107 | 103 | 0.584351 |
d1dfc34c725ca6bb2ed614df2838c1c8a8504741 | 772 | py | Python | energyplot.py | Druciaki/Energy | 5f475ff924dbd27ac67f82d5f53575196ac297f3 | ["BSD-3-Clause"] | null | null | null | energyplot.py | Druciaki/Energy | 5f475ff924dbd27ac67f82d5f53575196ac297f3 | ["BSD-3-Clause"] | null | null | null | energyplot.py | Druciaki/Energy | 5f475ff924dbd27ac67f82d5f53575196ac297f3 | ["BSD-3-Clause"] | null | null | null |
import matplotlib.pyplot as plt
from decimal import Decimal
def buildgraphic(guesses, outputfile="output/CargaEletrica.png"):
    """Plot every series in ``guesses`` (name -> (x, y)) and save the figure to ``outputfile``."""
    plt.clf()
    for name in guesses:
        data = guesses[name]
        # Hotfix - ensure both series are plain Python lists (TODO: review)
        x = data[0] if isinstance(data[0], list) else data[0].tolist()
        y = data[1] if isinstance(data[1], list) else data[1].tolist()
        # Scale the x values down by 1,000,000 and convert them to Decimal before plotting
        for i in range(len(data[0])):
            x[i] = Decimal(data[0][i] / 1000000)
        plt.plot(x, y, label=name)
    plt.xlabel('Tempo')                          # time
    plt.ylabel('Carga')                          # load
    plt.title('Previsao via Regressao Linear')   # prediction via linear regression
    plt.legend()
    plt.savefig(outputfile)
    # plt.show()
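# A minimal usage sketch (hypothetical sample data; assumes the output/ directory exists):
if __name__ == "__main__":
    sample_guesses = {
        "Regressao Linear": ([1000000, 2000000, 3000000], [10.0, 12.5, 15.1]),
    }
    buildgraphic(sample_guesses)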
| 24.903226 | 72 | 0.544041 |