Dataset schema (one entry per column; ⌀ marks a nullable column):

hexsha: string (40 chars)
size: int64 (4 to 1.02M)
ext: string (8 classes)
lang: string (1 class)
max_stars_repo_path: string (4 to 209 chars)
max_stars_repo_name: string (5 to 121 chars)
max_stars_repo_head_hexsha: string (40 chars)
max_stars_repo_licenses: list (1 to 10 entries)
max_stars_count: int64 (1 to 191k) ⌀
max_stars_repo_stars_event_min_datetime: string (24 chars) ⌀
max_stars_repo_stars_event_max_datetime: string (24 chars) ⌀
max_issues_repo_path: string (4 to 209 chars)
max_issues_repo_name: string (5 to 121 chars)
max_issues_repo_head_hexsha: string (40 chars)
max_issues_repo_licenses: list (1 to 10 entries)
max_issues_count: int64 (1 to 67k) ⌀
max_issues_repo_issues_event_min_datetime: string (24 chars) ⌀
max_issues_repo_issues_event_max_datetime: string (24 chars) ⌀
max_forks_repo_path: string (4 to 209 chars)
max_forks_repo_name: string (5 to 121 chars)
max_forks_repo_head_hexsha: string (40 chars)
max_forks_repo_licenses: list (1 to 10 entries)
max_forks_count: int64 (1 to 105k) ⌀
max_forks_repo_forks_event_min_datetime: string (24 chars) ⌀
max_forks_repo_forks_event_max_datetime: string (24 chars) ⌀
content: string (4 to 1.02M chars)
avg_line_length: float64 (1.07 to 66.1k)
max_line_length: int64 (4 to 266k)
alphanum_fraction: float64 (0.01 to 1)
adaa9aea54e52fe3101af3cc0f1408e70edf2923
| 6,682
|
py
|
Python
|
Chapter17/lib/i2a.py
|
feiwang20/DRLHandsOn-Playground
|
b940c4959993d3ad7c6a2b0429b155d21cafe845
|
[
"MIT"
] | 2,497
|
2018-05-25T09:45:54.000Z
|
2022-03-31T14:30:28.000Z
|
Chapter17/lib/i2a.py
|
feiwang20/DRLHandsOn-Playground
|
b940c4959993d3ad7c6a2b0429b155d21cafe845
|
[
"MIT"
] | 94
|
2018-09-02T11:53:00.000Z
|
2022-03-02T07:36:31.000Z
|
Chapter17/lib/i2a.py
|
feiwang20/DRLHandsOn-Playground
|
b940c4959993d3ad7c6a2b0429b155d21cafe845
|
[
"MIT"
] | 1,254
|
2018-05-23T11:21:24.000Z
|
2022-03-30T16:56:13.000Z
|
import ptan
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import common
ROLLOUT_HIDDEN = 256
EM_OUT_SHAPE = (1, ) + common.IMG_SHAPE[1:]
class EnvironmentModel(nn.Module):
def __init__(self, input_shape, n_actions):
super(EnvironmentModel, self).__init__()
self.input_shape = input_shape
self.n_actions = n_actions
# input color planes will be equal to frames plus one-hot encoded actions
n_planes = input_shape[0] + n_actions
self.conv1 = nn.Sequential(
nn.Conv2d(n_planes, 64, kernel_size=4, stride=4, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, padding=1),
nn.ReLU(),
)
self.conv2 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, padding=1),
nn.ReLU()
)
# output is one single frame with delta from the current frame
self.deconv = nn.ConvTranspose2d(64, 1, kernel_size=4, stride=4, padding=0)
self.reward_conv = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3),
nn.MaxPool2d(2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3),
nn.MaxPool2d(2),
nn.ReLU()
)
rw_conv_out = self._get_reward_conv_out((n_planes, ) + input_shape[1:])
self.reward_fc = nn.Sequential(
nn.Linear(rw_conv_out, 128),
nn.ReLU(),
nn.Linear(128, 1)
)
def _get_reward_conv_out(self, shape):
o = self.conv1(torch.zeros(1, *shape))
o = self.reward_conv(o)
return int(np.prod(o.size()))
def forward(self, imgs, actions):
batch_size = actions.size()[0]
act_planes_v = torch.FloatTensor(batch_size, self.n_actions, *self.input_shape[1:]).zero_().to(actions.device)
act_planes_v[range(batch_size), actions] = 1.0
comb_input_v = torch.cat((imgs, act_planes_v), dim=1)
c1_out = self.conv1(comb_input_v)
c2_out = self.conv2(c1_out)
c2_out += c1_out
img_out = self.deconv(c2_out)
rew_conv = self.reward_conv(c2_out).view(batch_size, -1)
rew_out = self.reward_fc(rew_conv)
return img_out, rew_out
class RolloutEncoder(nn.Module):
def __init__(self, input_shape, hidden_size=ROLLOUT_HIDDEN):
super(RolloutEncoder, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU(),
)
conv_out_size = self._get_conv_out(input_shape)
self.rnn = nn.LSTM(input_size=conv_out_size+1, hidden_size=hidden_size, batch_first=False)
def _get_conv_out(self, shape):
o = self.conv(torch.zeros(1, *shape))
return int(np.prod(o.size()))
def forward(self, obs_v, reward_v):
"""
Input is in (time, batch, *) order
"""
n_time = obs_v.size()[0]
n_batch = obs_v.size()[1]
n_items = n_time * n_batch
obs_flat_v = obs_v.view(n_items, *obs_v.size()[2:])
conv_out = self.conv(obs_flat_v)
conv_out = conv_out.view(n_time, n_batch, -1)
rnn_in = torch.cat((conv_out, reward_v), dim=2)
_, (rnn_hid, _) = self.rnn(rnn_in)
return rnn_hid.view(-1)
class I2A(nn.Module):
def __init__(self, input_shape, n_actions, net_em, net_policy, rollout_steps):
super(I2A, self).__init__()
self.n_actions = n_actions
self.rollout_steps = rollout_steps
self.conv = nn.Sequential(
nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU(),
)
conv_out_size = self._get_conv_out(input_shape)
fc_input = conv_out_size + ROLLOUT_HIDDEN * n_actions
self.fc = nn.Sequential(
nn.Linear(fc_input, 512),
nn.ReLU()
)
self.policy = nn.Linear(512, n_actions)
self.value = nn.Linear(512, 1)
# used for rollouts
self.encoder = RolloutEncoder(EM_OUT_SHAPE)
self.action_selector = ptan.actions.ProbabilityActionSelector()
# save refs without registering
object.__setattr__(self, "net_em", net_em)
object.__setattr__(self, "net_policy", net_policy)
def _get_conv_out(self, shape):
o = self.conv(torch.zeros(1, *shape))
return int(np.prod(o.size()))
def forward(self, x):
fx = x.float() / 255
enc_rollouts = self.rollouts_batch(fx)
conv_out = self.conv(fx).view(fx.size()[0], -1)
fc_in = torch.cat((conv_out, enc_rollouts), dim=1)
fc_out = self.fc(fc_in)
return self.policy(fc_out), self.value(fc_out)
def rollouts_batch(self, batch):
batch_size = batch.size()[0]
batch_rest = batch.size()[1:]
if batch_size == 1:
obs_batch_v = batch.expand(batch_size * self.n_actions, *batch_rest)
else:
obs_batch_v = batch.unsqueeze(1)
obs_batch_v = obs_batch_v.expand(batch_size, self.n_actions, *batch_rest)
obs_batch_v = obs_batch_v.contiguous().view(-1, *batch_rest)
actions = np.tile(np.arange(0, self.n_actions, dtype=np.int64), batch_size)
step_obs, step_rewards = [], []
for step_idx in range(self.rollout_steps):
actions_t = torch.tensor(actions, dtype=torch.int64).to(batch.device)
obs_next_v, reward_v = self.net_em(obs_batch_v, actions_t)
step_obs.append(obs_next_v.detach())
step_rewards.append(reward_v.detach())
# don't need actions for the last step
if step_idx == self.rollout_steps-1:
break
# combine the delta from EM into new observation
cur_plane_v = obs_batch_v[:, 1:2]
new_plane_v = cur_plane_v + obs_next_v
obs_batch_v = torch.cat((cur_plane_v, new_plane_v), dim=1)
# select actions
logits_v, _ = self.net_policy(obs_batch_v)
probs_v = F.softmax(logits_v, dim=1)
probs = probs_v.data.cpu().numpy()
actions = self.action_selector(probs)
step_obs_v = torch.stack(step_obs)
step_rewards_v = torch.stack(step_rewards)
flat_enc_v = self.encoder(step_obs_v, step_rewards_v)
return flat_enc_v.view(batch_size, -1)
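The one-hot action-plane construction that EnvironmentModel.forward performs ("input color planes will be equal to frames plus one-hot encoded actions") is easiest to see with concrete tensors. A minimal sketch, assuming two stacked 84x84 frames and four discrete actions; these sizes are illustrative and are not taken from lib.common:
import torch

batch_size, n_frames, n_actions, h, w = 3, 2, 4, 84, 84   # assumed sizes, for illustration only

imgs = torch.rand(batch_size, n_frames, h, w)       # stacked observation frames
actions = torch.tensor([0, 2, 3])                    # one action index per sample

# Broadcast each action into a set of constant one-hot planes,
# as done at the start of EnvironmentModel.forward().
act_planes = torch.zeros(batch_size, n_actions, h, w)
act_planes[torch.arange(batch_size), actions] = 1.0

# Frames and action planes are stacked along the channel dimension before conv1.
combined = torch.cat((imgs, act_planes), dim=1)
print(combined.shape)   # torch.Size([3, 6, 84, 84]) == (batch, n_frames + n_actions, H, W)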
| 35.924731
| 118
| 0.601317
|
9db11f5f8c2a174e7114bb8ca435531af53462f1
| 400
|
py
|
Python
|
gram/migrations/0005_auto_20200604_2316.py
|
dgkilolo/Gram
|
7242cfa412d9e4014ee9bc5e1cbf76234d7633aa
|
[
"RSA-MD"
] | null | null | null |
gram/migrations/0005_auto_20200604_2316.py
|
dgkilolo/Gram
|
7242cfa412d9e4014ee9bc5e1cbf76234d7633aa
|
[
"RSA-MD"
] | 6
|
2021-03-19T04:20:58.000Z
|
2021-09-22T19:07:33.000Z
|
gram/migrations/0005_auto_20200604_2316.py
|
dgkilolo/Gram
|
7242cfa412d9e4014ee9bc5e1cbf76234d7633aa
|
[
"RSA-MD"
] | null | null | null |
# Generated by Django 3.0.6 on 2020-06-04 20:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gram', '0004_auto_20200604_2250'),
]
operations = [
migrations.AlterField(
model_name='image',
name='picture',
field=models.ImageField(blank=True, upload_to='gram/'),
),
]
| 21.052632
| 67
| 0.5975
|
59bcb5ae74b2a247fb9d5ddb215b6d682f6eaf0f
| 2,301
|
py
|
Python
|
Apriori/Apriori.py
|
GreetingSit/MachineLearningAlgorithm
|
28bd6e38d08372ddb456428ba408929e30b2afd1
|
[
"MIT"
] | null | null | null |
Apriori/Apriori.py
|
GreetingSit/MachineLearningAlgorithm
|
28bd6e38d08372ddb456428ba408929e30b2afd1
|
[
"MIT"
] | null | null | null |
Apriori/Apriori.py
|
GreetingSit/MachineLearningAlgorithm
|
28bd6e38d08372ddb456428ba408929e30b2afd1
|
[
"MIT"
] | null | null | null |
#Copyright (c) 2017 ChenyChen
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.0
import numpy as np


class Apriori(object):
    def __init__(self, submin, dataset):
        self.submin = submin        # minimum support threshold
        self.dataset = dataset      # list of transactions, each a list of items
        self.dict = {}              # candidate itemsets -> support
        self.predict = {}           # most recent level of frequent itemsets

    def initdict(self):
        # Seed the candidates with every single item (frozenset keys are hashable).
        for item in self.dataset:
            for k in item:
                key = frozenset([k])
                if key not in self.dict:
                    self.dict[key] = 0.0

    def union(self, setA, setB):
        return frozenset(setA) | frozenset(setB)

    def apriori(self):
        if len(self.dict) == 0:
            return
        # Support = fraction of transactions that contain the candidate itemset.
        for i in self.dict.keys():
            for item in self.dataset:
                if all(c in item for c in i):
                    self.dict[i] += 1.0 / len(self.dataset)
        # Drop infrequent itemsets (iterate over a copy so popping is safe).
        for i in list(self.dict.keys()):
            if self.dict[i] <= self.submin:
                self.dict.pop(i)
        if self.dict:
            self.predict = self.dict
        # Join surviving itemsets to build candidates that are one item larger.
        temp = {}
        for i in self.dict.keys():
            for j in self.dict.keys():
                candidate = self.union(i, j)
                if candidate not in temp and len(candidate) == len(i) + 1:
                    temp[candidate] = 0.0
        self.dict = temp
        self.apriori()

    def print(self):
        print(self.predict)
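For comparison, here is a minimal, self-contained frequent-itemset sketch of the same idea, using frozensets as dictionary keys (plain lists are unhashable) and measuring support as a fraction of transactions. It is only an illustration, not the class above, and it omits the classic Apriori subset-pruning step:
from itertools import combinations

def frequent_itemsets(transactions, min_support):
    """Return {itemset: support} for every itemset with support >= min_support."""
    transactions = [frozenset(t) for t in transactions]
    n = len(transactions)
    candidates = {frozenset([i]) for t in transactions for i in t}
    result = {}
    k = 1
    while candidates:
        # Count the support of each candidate itemset.
        counts = {c: sum(1 for t in transactions if c <= t) / n for c in candidates}
        frequent = {c: s for c, s in counts.items() if s >= min_support}
        result.update(frequent)
        # Join frequent k-itemsets to form (k+1)-item candidates.
        keys = list(frequent)
        candidates = {a | b for a, b in combinations(keys, 2) if len(a | b) == k + 1}
        k += 1
    return result

print(frequent_itemsets([["a", "b"], ["a", "c"], ["a", "b", "c"]], min_support=0.5))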
| 38.35
| 145
| 0.644502
|
0d3580a606efab2c77423805567d303a495a1a5d
| 934
|
py
|
Python
|
test/fit_tests/tests/redfish10/test_redfish10_api_sessionservice.py
|
smiller171/RackHD
|
02a6406bb50d8b49efbc2b04ecd4435e0341aa65
|
[
"Apache-2.0"
] | null | null | null |
test/fit_tests/tests/redfish10/test_redfish10_api_sessionservice.py
|
smiller171/RackHD
|
02a6406bb50d8b49efbc2b04ecd4435e0341aa65
|
[
"Apache-2.0"
] | null | null | null |
test/fit_tests/tests/redfish10/test_redfish10_api_sessionservice.py
|
smiller171/RackHD
|
02a6406bb50d8b49efbc2b04ecd4435e0341aa65
|
[
"Apache-2.0"
] | null | null | null |
'''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import os
import sys
import subprocess
sys.path.append(subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip("\n") + "/test/fit_tests/common")
import fit_common
# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class redfish10_api_sessionservice(fit_common.unittest.TestCase):
def test_redfish_v1_sessionservice(self):
api_data = fit_common.rackhdapi('/redfish/v1/SessionService')
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
def test_redfish_v1_sessionservice_sessions(self):
api_data = fit_common.rackhdapi('/redfish/v1/SessionService/Sessions')
self.assertIn(api_data['status'], [200, 501], "Incorrect HTTP return code")
if __name__ == '__main__':
fit_common.unittest.main()
| 31.133333
| 125
| 0.737687
|
817ce2dbedda023ff60b30652d003ef7cfb4005c
| 1,730
|
py
|
Python
|
setup.py
|
stitchfix/caravel
|
087c47a37e45bb9c2608bbb2279188b59547e449
|
[
"Apache-2.0"
] | 6
|
2016-06-07T14:07:34.000Z
|
2020-02-17T17:20:18.000Z
|
setup.py
|
stitchfix/caravel
|
087c47a37e45bb9c2608bbb2279188b59547e449
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
stitchfix/caravel
|
087c47a37e45bb9c2608bbb2279188b59547e449
|
[
"Apache-2.0"
] | 3
|
2016-09-27T14:13:02.000Z
|
2022-02-04T07:03:36.000Z
|
import imp, os
from setuptools import setup, find_packages
version = imp.load_source(
'version', os.path.join('caravel', 'version.py'))
setup(
name='caravel',
description=(
"A interactive data visualization platform build on SqlAlchemy "
"and druid.io"),
version=version.VERSION_STRING,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
scripts=['caravel/bin/caravel'],
install_requires=[
'alembic>=0.8.5, <0.9.0',
'babel==2.3.4',
'cryptography>=1.1.1, <2.0.0',
'flask-appbuilder>=1.6.2, <2.0.0',
'Flask-BabelPkg==0.9.6',
'flask-cache>=0.13.1, <0.14.0',
'flask-migrate>=1.5.1, <2.0.0',
'flask-script>=2.0.5, <3.0.0',
'flask-sqlalchemy==2.0.0',
'flask-testing>=0.4.2, <0.5.0',
'flask>=0.10.1, <1.0.0',
'humanize>=0.5.1, <0.6.0',
'gunicorn>=19.3.0, <20.0.0',
'markdown>=2.6.2, <3.0.0',
'pandas==0.18.0',
'parsedatetime==2.0.0',
'pydruid==0.3.0, <0.4.0',
'python-dateutil>=2.4.2, <3.0.0',
'requests>=2.7.0, <3.0.0',
'sqlalchemy>=1.0.12, <2.0.0',
'sqlalchemy-utils>=0.31.3, <0.32.0',
'sqlparse>=0.1.16, <0.2.0',
'werkzeug>=0.11.2, <0.12.0',
],
tests_require=['coverage'],
author='Maxime Beauchemin',
author_email='maximebeauchemin@gmail.com',
url='https://github.com/airbnb/caravel',
download_url=(
'https://github.com/airbnb/caravel/tarball/' + version.VERSION_STRING),
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| 32.037037
| 79
| 0.550867
|
906ee299a04c521b0fcc502b24b2c29ca4251341
| 3,090
|
py
|
Python
|
tools/verifyplaceholders.py
|
CyberFlameGO/dragonfly
|
4d106476132f94661554779e8c13851f050b4a8f
|
[
"ECL-2.0",
"Apache-2.0"
] | 58
|
2015-01-09T16:32:43.000Z
|
2021-11-25T04:38:18.000Z
|
tools/verifyplaceholders.py
|
CyberFlameGO/dragonfly
|
4d106476132f94661554779e8c13851f050b4a8f
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2015-01-23T22:10:00.000Z
|
2018-04-30T18:55:21.000Z
|
tools/verifyplaceholders.py
|
CyberFlameGO/dragonfly
|
4d106476132f94661554779e8c13851f050b4a8f
|
[
"ECL-2.0",
"Apache-2.0"
] | 24
|
2015-01-22T15:10:51.000Z
|
2021-10-31T21:45:06.000Z
|
# TODO
#
# This script is just a quick draft.
# The functionality should probably be
# included in existing scripts.
# The purpose is to check if any of the place holders
# in the master string file also exists in the generated
# language file and if the place-holders are correct and complete.
#
import os
import re
import codecs
LANGS = [
"be",
"bg",
"cs",
"da",
"de",
"el",
"en-GB",
"es-ES",
"es-LA",
"et",
"fi",
"fr",
"fr-CA",
"fy",
"gd",
"hi",
"hr",
"hu",
"id",
"it",
"ja",
"ka",
"ko",
"lt",
"mk",
"nb",
"nl",
"nn",
"pl",
"pt",
"pt-BR",
"ro",
"ru",
"sk",
"sr",
"sv",
"ta",
"te",
"tr",
"uk",
"vi",
"zh-cn",
"zh-tw"
]
LANGDIR = "./strings"
MASTER = "./src/ui-strings/ui_strings-en.js"
langfiles = ["ui_strings-%s.js" % l for l in LANGS]
files = os.listdir(LANGDIR)
for f in files:
if not f in langfiles:
os.unlink(os.path.join(LANGDIR, f))
reid = re.compile(r"ui_strings\.([A-Z0-9_]+)\s*=")
repl = re.compile(r"(%(?:\([^\)]*\))?s)")
def get_placeholders(path):
pls = {}
with codecs.open(path, "r", "utf_8_sig") as f:
for n, l in enumerate(f, 1):
m = reid.search(l)
if m:
placeholders = repl.findall(l)
if placeholders:
pls[m.groups()[0]] = {'line': l.strip(),
'line-number': n,
'placeholders': placeholders}
return pls
def check_pls(m_pls, pls):
missing = []
error = []
for pl in m_pls:
if pl in pls:
c_pl = pls.pop(pls.index(pl))
if not pl == c_pl:
error.append((pl, c_pl))
else:
missing.append(pl)
return missing, error
master_pls = get_placeholders(MASTER)
for l in langfiles:
pls = get_placeholders(os.path.join(LANGDIR, l))
print "checking:", l
for id in master_pls:
if id in pls:
missing, error = check_pls(master_pls[id]['placeholders'], pls[id]['placeholders'])
if missing:
for m in missing:
print "missing placeholder"
print "file:", l
print "\tid:", id
print "\tline number:", pls[id]['line-number']
print "\tline master:", master_pls[id]['line']
print "\tline check: ", pls[id]['line']
print "\tplaceholder:", m
if error:
for e in error:
print "broken placeholder"
print "file:", l
print "\tid:", id
print "broken placeholder"
print "\tline number :", master_pls[id]['line-number']
print "\tline: ", master_pls[id]['line']
print "\tplaceholder expeceted:", e[0]
print "\tplaceholder actual :", e[1]
| 24.330709
| 95
| 0.467961
|
ada475d9fce3aa96cad53e13884e5b191fba5aa3
| 236
|
py
|
Python
|
head-first-python/countfromby.py
|
ornichola/learning-new
|
e567218d8887805e38b1361715d5e3bd51a6bcaf
|
[
"Unlicense"
] | 2
|
2019-05-24T20:10:16.000Z
|
2020-07-11T06:06:43.000Z
|
head-first-python/countfromby.py
|
ornichola/learning-new
|
e567218d8887805e38b1361715d5e3bd51a6bcaf
|
[
"Unlicense"
] | null | null | null |
head-first-python/countfromby.py
|
ornichola/learning-new
|
e567218d8887805e38b1361715d5e3bd51a6bcaf
|
[
"Unlicense"
] | 21
|
2019-03-11T20:25:05.000Z
|
2022-02-28T13:53:10.000Z
|
class CountFromBy:
def __init__(self, v: int, i: int) -> None:
self.val = v
self.incr = i
def increase(self) -> None:
self.val += self.incr
def __repr__(self) -> str:
return str(self.val)
| 19.666667
| 47
| 0.542373
|
b795e2be10afad669680f1e6317b371bde88a819
| 928
|
py
|
Python
|
app/user/views.py
|
sunnyrpandya/recipe-app-api
|
92fbefb9bd80e967cd1111ddc25c3c8da5980c39
|
[
"MIT"
] | null | null | null |
app/user/views.py
|
sunnyrpandya/recipe-app-api
|
92fbefb9bd80e967cd1111ddc25c3c8da5980c39
|
[
"MIT"
] | null | null | null |
app/user/views.py
|
sunnyrpandya/recipe-app-api
|
92fbefb9bd80e967cd1111ddc25c3c8da5980c39
|
[
"MIT"
] | null | null | null |
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
"""Create a new user in the system"""
serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
"""Create a new auth token for user"""
serializer_class = AuthTokenSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserview(generics.RetrieveUpdateAPIView):
""" Manage the authenticated user"""
serializer_class = UserSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
""" Retrieve and return authenticated user"""
return self.request.user
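These views still need URL routing to be reachable; a hypothetical urls.py wiring is sketched below (the route names and paths are illustrative assumptions, not taken from the project):
from django.urls import path

from user import views

app_name = 'user'

urlpatterns = [
    path('create/', views.CreateUserView.as_view(), name='create'),
    path('token/', views.CreateTokenView.as_view(), name='token'),
    path('me/', views.ManageUserview.as_view(), name='me'),
]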
| 33.142857
| 66
| 0.786638
|
cb4f348e12f69bbc3010dc06c9cc20d3acd0e066
| 1,115
|
py
|
Python
|
TCP_Socket/tcp_server.py
|
LeandroTeodoroRJ/ExemplosPython
|
a1e91beeb55599b5bf0e8e876d2fc1c541d821cb
|
[
"MIT"
] | 1
|
2021-12-24T20:46:29.000Z
|
2021-12-24T20:46:29.000Z
|
TCP_Socket/tcp_server.py
|
LeandroTeodoroRJ/ExemplosPython
|
a1e91beeb55599b5bf0e8e876d2fc1c541d821cb
|
[
"MIT"
] | null | null | null |
TCP_Socket/tcp_server.py
|
LeandroTeodoroRJ/ExemplosPython
|
a1e91beeb55599b5bf0e8e876d2fc1c541d821cb
|
[
"MIT"
] | 1
|
2021-12-24T20:46:20.000Z
|
2021-12-24T20:46:20.000Z
|
# EXAMPLE TCP SERVER IN PYTHON
'''
References:
https://docs.python.org/3/library/socket.html
https://realpython.com/python-sockets/
'''
import socket
import time

servidor = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# AF_INET -> IPv4 protocol
# SOCK_STREAM -> TCP
# SOCK_DGRAM -> UDP
servidor.bind(('0.0.0.0', 1300))    # Listen on port 1300 of the local machine
servidor.listen()
while True:    # Daemon loop
    conn, addr = servidor.accept()    # conn is a socket object
    with conn:
        data = conn.recv(1024)    # Receive at most 1024 bytes from the buffer
        if not data:    # If no data was received, close the socket...
            conn.close()    # ...but keep listening on the port
            continue
        else:
            conn.sendall(data)    # Echo the data back to the client
            print('Received data: ', data)
            print('Connected to IP: ', addr)
            time.sleep(10 / 1000)    # 10 ms delay
conn.close()    # Communication finished, close the socket
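A minimal client sketch for exercising the echo server above, assuming it is already running on the local machine on port 1300 (host and test payload are illustrative):
import socket

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
    client.connect(('127.0.0.1', 1300))
    client.sendall(b'hello')
    print('echoed back:', client.recv(1024))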
| 32.794118
| 89
| 0.584753
|
12b88bf0665f206b4dd7895db1d940cc251bb614
| 4,294
|
py
|
Python
|
solve_captchas_with_model2.py
|
storopoli/Solving-Captchas
|
1d861da0c40bee63dae1b33d51ac31071ff376e3
|
[
"MIT"
] | 1
|
2019-03-20T12:20:02.000Z
|
2019-03-20T12:20:02.000Z
|
solve_captchas_with_model2.py
|
storopoli/Solving-Captchas
|
1d861da0c40bee63dae1b33d51ac31071ff376e3
|
[
"MIT"
] | null | null | null |
solve_captchas_with_model2.py
|
storopoli/Solving-Captchas
|
1d861da0c40bee63dae1b33d51ac31071ff376e3
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras.models import load_model
from helpers import resize_to_fit
from imutils import paths
import numpy as np
import imutils
import cv2
import pickle
MODEL_FILENAME = "captcha_model.hdf5"
MODEL_LABELS_FILENAME = "model_labels.dat"
CAPTCHA_IMAGE_FOLDER = "generated_captcha_images2"
# Load up the model labels (so we can translate model predictions to actual letters)
with open(MODEL_LABELS_FILENAME, "rb") as f:
lb = pickle.load(f)
# Load the trained neural network
model = load_model(MODEL_FILENAME)
# Grab some random CAPTCHA images to test against.
# In the real world, you'd replace this section with code to grab a real
# CAPTCHA image from a live website.
captcha_image_files = list(paths.list_images(CAPTCHA_IMAGE_FOLDER))
#captcha_image_files = np.random.choice(captcha_image_files, size=(10,), replace=False)
# loop over the image paths
for image_file in captcha_image_files:
# Load the image and convert it to grayscale
image = cv2.imread(image_file)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Add some extra padding around the image
image = cv2.copyMakeBorder(image, 20, 20, 20, 20, cv2.BORDER_REPLICATE)
# threshold the image (convert it to pure black and white)
thresh = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
# find the contours (continuous blobs of pixels) in the image
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Hack for compatibility with different OpenCV versions
contours = contours[0] if imutils.is_cv2() else contours[1]
letter_image_regions = []
# Now we can loop through each of the four contours and extract the letter
# inside of each one
for contour in contours:
# Get the rectangle that contains the contour
(x, y, w, h) = cv2.boundingRect(contour)
# Compare the width and height of the contour to detect letters that
# are conjoined into one chunk
if w / h > 1.25:
# This contour is too wide to be a single letter!
# Split it in half into two letter regions!
half_width = int(w / 2)
letter_image_regions.append((x, y, half_width, h))
letter_image_regions.append((x + half_width, y, half_width, h))
else:
# This is a normal letter by itself
letter_image_regions.append((x, y, w, h))
# If we found more or less than 4 letters in the captcha, our letter extraction
# didn't work correctly. Skip the image instead of saving bad training data!
# Sort the detected letter images based on the x coordinate to make sure
# we are processing them from left-to-right so we match the right image
# with the right letter
letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])
# Create an output image and a list to hold our predicted letters
output = cv2.merge([image] * 3)
predictions = []
# loop over the letters
for letter_bounding_box in letter_image_regions:
# Grab the coordinates of the letter in the image
x, y, w, h = letter_bounding_box
# Extract the letter from the original image with a 2-pixel margin around the edge
letter_image = image[y - 2:y + h + 2, x - 2:x + w + 2]
# Re-size the letter image to 20x20 pixels to match training data
letter_image = resize_to_fit(letter_image, 20, 20)
# Turn the single image into a 4d list of images to make Keras happy
letter_image = np.expand_dims(letter_image, axis=2)
letter_image = np.expand_dims(letter_image, axis=0)
# Ask the neural network to make a prediction
prediction = model.predict(letter_image)
# Convert the one-hot-encoded prediction back to a normal letter
letter = lb.inverse_transform(prediction)[0]
predictions.append(letter)
# draw the prediction on the output image
cv2.rectangle(output, (x - 2, y - 2), (x + w + 4, y + h + 4), (0, 255, 0), 1)
cv2.putText(output, letter, (x - 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
# Print the captcha's text
captcha_text = "".join(predictions)
print("CAPTCHA text is: {}".format(captcha_text))
| 40.130841
| 99
| 0.693759
|
2523a35ec1f335dee390590fcf62e1bf039a4bce
| 578
|
py
|
Python
|
examples/dummy_plugin/dummy_plugin/migrations/0003_anotherdummymodel__custom_field_data.py
|
psmware-ltd/nautobot
|
ac516287fb8edcc3482bd011839de837c6bbf0df
|
[
"Apache-2.0"
] | 384
|
2021-02-24T01:40:40.000Z
|
2022-03-30T10:30:59.000Z
|
examples/dummy_plugin/dummy_plugin/migrations/0003_anotherdummymodel__custom_field_data.py
|
psmware-ltd/nautobot
|
ac516287fb8edcc3482bd011839de837c6bbf0df
|
[
"Apache-2.0"
] | 1,067
|
2021-02-24T00:58:08.000Z
|
2022-03-31T23:38:23.000Z
|
examples/dummy_plugin/dummy_plugin/migrations/0003_anotherdummymodel__custom_field_data.py
|
psmware-ltd/nautobot
|
ac516287fb8edcc3482bd011839de837c6bbf0df
|
[
"Apache-2.0"
] | 128
|
2021-02-24T02:45:16.000Z
|
2022-03-20T18:48:36.000Z
|
# Generated by Django 3.1.13 on 2021-11-02 13:53
import django.core.serializers.json
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("dummy_plugin", "0002_anotherdummymodel"),
]
operations = [
migrations.AddField(
model_name="anotherdummymodel",
name="_custom_field_data",
field=models.JSONField(
blank=True,
default=dict,
encoder=django.core.serializers.json.DjangoJSONEncoder,
),
),
]
| 24.083333
| 71
| 0.596886
|
12d32c202c4e9a94e5f09a775d6dec99897bf71f
| 3,211
|
py
|
Python
|
requirements/compile.py
|
tolomea/django-perf-rec
|
308b1fc8e4d614a1a707b776c1e255946aa2dc2e
|
[
"MIT"
] | null | null | null |
requirements/compile.py
|
tolomea/django-perf-rec
|
308b1fc8e4d614a1a707b776c1e255946aa2dc2e
|
[
"MIT"
] | null | null | null |
requirements/compile.py
|
tolomea/django-perf-rec
|
308b1fc8e4d614a1a707b776c1e255946aa2dc2e
|
[
"MIT"
] | 1
|
2020-08-18T10:44:22.000Z
|
2020-08-18T10:44:22.000Z
|
#!/usr/bin/env python
import os
import subprocess
import sys
from pathlib import Path
if __name__ == "__main__":
os.chdir(Path(__file__).parent)
os.environ["CUSTOM_COMPILE_COMMAND"] = "requirements/compile.py"
os.environ.pop("PIP_REQUIRE_VIRTUALENV")
common_args = ["-m", "piptools", "compile", "--generate-hashes"] + sys.argv[1:]
subprocess.run(
[
"python3.5",
*common_args,
"-P",
"Django>=2.2,<2.3",
"-o",
"py35-django22.txt",
],
check=True,
)
subprocess.run(
[
"python3.6",
*common_args,
"-P",
"Django>=2.2,<2.3",
"-o",
"py36-django22.txt",
],
check=True,
)
subprocess.run(
[
"python3.6",
*common_args,
"-P",
"Django>=3.0a1,<3.1",
"-o",
"py36-django30.txt",
],
check=True,
)
subprocess.run(
[
"python3.6",
*common_args,
"-P",
"Django>=3.1a1,<3.2",
"-o",
"py36-django31.txt",
],
check=True,
)
subprocess.run(
[
"python3.7",
*common_args,
"-P",
"Django>=2.2,<2.3",
"-o",
"py37-django22.txt",
],
check=True,
)
subprocess.run(
[
"python3.7",
*common_args,
"-P",
"Django>=3.0a1,<3.1",
"-o",
"py37-django30.txt",
],
check=True,
)
subprocess.run(
[
"python3.7",
*common_args,
"-P",
"Django>=3.1a1,<3.2",
"-o",
"py37-django31.txt",
],
check=True,
)
subprocess.run(
[
"python3.8",
*common_args,
"-P",
"Django>=2.2,<2.3",
"-o",
"py38-django22.txt",
],
check=True,
)
subprocess.run(
[
"python3.8",
*common_args,
"-P",
"Django>=3.0a1,<3.1",
"-o",
"py38-django30.txt",
],
check=True,
)
subprocess.run(
[
"python3.8",
*common_args,
"-P",
"Django>=3.1a1,<3.2",
"-o",
"py38-django31.txt",
],
check=True,
)
subprocess.run(
[
"python3.9",
*common_args,
"-P",
"Django>=2.2,<2.3",
"-o",
"py39-django22.txt",
],
check=True,
)
subprocess.run(
[
"python3.9",
*common_args,
"-P",
"Django>=3.0a1,<3.1",
"-o",
"py39-django30.txt",
],
check=True,
)
subprocess.run(
[
"python3.9",
*common_args,
"-P",
"Django>=3.1a1,<3.2",
"-o",
"py39-django31.txt",
],
check=True,
)
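The thirteen near-identical subprocess.run calls above could equally be driven from a data table; the sketch below is an equivalent, more compact formulation offered as a refactoring suggestion, not what the script currently does:
#!/usr/bin/env python
import itertools
import subprocess
import sys

if __name__ == "__main__":
    common_args = ["-m", "piptools", "compile", "--generate-hashes"] + sys.argv[1:]
    django_pins = {
        "22": "Django>=2.2,<2.3",
        "30": "Django>=3.0a1,<3.1",
        "31": "Django>=3.1a1,<3.2",
    }
    # python3.5 is only compiled for Django 2.2; 3.6 through 3.9 get all three pins.
    targets = [("3.5", "22")] + list(
        itertools.product(["3.6", "3.7", "3.8", "3.9"], django_pins)
    )
    for py, dj in targets:
        subprocess.run(
            ["python" + py, *common_args,
             "-P", django_pins[dj],
             "-o", "py{}-django{}.txt".format(py.replace(".", ""), dj)],
            check=True,
        )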
| 20.716129
| 83
| 0.35721
|
f5e2362075928ddb174cadf0af766c6901899377
| 2,675
|
py
|
Python
|
mayan/apps/reviewer_form/apps.py
|
dsduenas/fall-2021-hw2-team-name
|
c00e06c00611aab7f42dbcdb463eef4d604b9853
|
[
"Apache-2.0"
] | 3
|
2021-09-15T20:27:41.000Z
|
2021-09-25T21:35:14.000Z
|
mayan/apps/reviewer_form/apps.py
|
dsduenas/fall-2021-hw2-team-name
|
c00e06c00611aab7f42dbcdb463eef4d604b9853
|
[
"Apache-2.0"
] | 24
|
2021-09-27T20:30:08.000Z
|
2021-10-02T03:35:03.000Z
|
mayan/apps/reviewer_form/apps.py
|
dsduenas/fall-2021-hw2-team-name
|
c00e06c00611aab7f42dbcdb463eef4d604b9853
|
[
"Apache-2.0"
] | 3
|
2021-09-15T20:32:00.000Z
|
2021-09-25T21:32:36.000Z
|
from django.apps import apps
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.classes import ModelPermission
from mayan.apps.acls.permissions import permission_acl_edit, permission_acl_view
from mayan.apps.common.apps import MayanAppConfig
from mayan.apps.common.classes import ModelCopy, ModelQueryFields
from mayan.apps.events.classes import EventModelRegistry, ModelEventType
from mayan.apps.navigation.classes import SourceColumn
from .events import (
event_cabinet_edited, event_cabinet_document_added,
event_cabinet_document_removed
)
from .html_widgets import DocumentCabinetWidget
from .menus import menu_cabinets
from .methods import method_document_get_cabinets
class ReviewerFormApp(MayanAppConfig):
app_namespace = 'reviewer_form'
app_url = 'reviewer_form'
has_rest_api = True
has_static_media = True
has_tests = False
name = 'mayan.apps.reviewer_form'
verbose_name = _('reviewer_form')
def ready(self):
super().ready()
ReviewerForm = self.get_model(model_name='ReviewerForm')
EventModelRegistry.register(model=ReviewerForm)
ModelCopy(
model=ReviewerForm,
bind_link=True, register_permission=True
).add_fields(
# YAJIN: to be modified
field_names=('label', 'documents')
)
ModelEventType.register(
model=ReviewerForm, event_types=(
reviewer_form_created,
)
)
# ModelPermission.register(
# model=Document, permissions=(
# permission_cabinet_add_document,
# permission_cabinet_remove_document, permission_cabinet_view,
# permission_events_view
# )
# )
# ModelPermission.register(
# model=Cabinet, permissions=(
# permission_acl_edit, permission_acl_view,
# permission_cabinet_delete, permission_cabinet_edit,
# permission_cabinet_view, permission_cabinet_add_document,
# permission_cabinet_remove_document
# )
# )
SourceColumn(
attribute='label', is_identifier=True, is_sortable=True,
source=Cabinet
)
SourceColumn(
attribute='label', is_identifier=True, is_sortable=True,
source=CabinetSearchResult
)
SourceColumn(
attribute='get_full_path', source=CabinetSearchResult
)
SourceColumn(
label=_('Cabinets'), order=1, source=Document,
widget=DocumentCabinetWidget
)
| 29.076087
| 80
| 0.655327
|
223180928860ccbaec6649c0937be679756a0167
| 8,140
|
py
|
Python
|
prototxt_basic.py
|
ewrfcas/MXNet2Caffe
|
383ed9773f82ef4909f6674ab4b4398f98190f93
|
[
"MIT"
] | 1
|
2018-03-22T06:34:14.000Z
|
2018-03-22T06:34:14.000Z
|
prototxt_basic.py
|
ewrfcas/MXNet2Caffe
|
383ed9773f82ef4909f6674ab4b4398f98190f93
|
[
"MIT"
] | null | null | null |
prototxt_basic.py
|
ewrfcas/MXNet2Caffe
|
383ed9773f82ef4909f6674ab4b4398f98190f93
|
[
"MIT"
] | null | null | null |
# prototxt_basic
import sys
import pprint
attrstr = "attrs"
def data(txt_file, info):
txt_file.write('name: "mxnet-mdoel"\n')
txt_file.write('layer {\n')
txt_file.write(' name: "data"\n')
txt_file.write(' type: "Input"\n')
txt_file.write(' top: "data"\n')
txt_file.write(' input_param {\n')
txt_file.write(' shape: { dim: 1 dim: 3 dim: 64 dim: 64 }\n') # TODO
txt_file.write(' }\n')
txt_file.write('}\n')
txt_file.write('\n')
def fuzzy_haskey(d, key):
for eachkey in d:
if key in eachkey:
return True
return False
def Convolution(txt_file, info):
#if info['attrs']['no_bias'] == 'True':
#bias_term = 'false'
#else:
#bias_term = 'true'
#if info['top'] == 'conv1_1':
#pprint.pprint(info)
if fuzzy_haskey(info['params'], 'bias'):
bias_term = 'true'
elif info[attrstr].has_key('no_bias') and info['attrs']['no_bias'] == 'True':
bias_term = 'false'
else:
bias_term = 'false'
txt_file.write('layer {\n')
txt_file.write(' bottom: "%s"\n' % info['bottom'][0])
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "Convolution"\n')
txt_file.write(' convolution_param {\n')
txt_file.write(' num_output: %s\n' % info[attrstr]['num_filter'])
txt_file.write(' kernel_size: %s\n' % info[attrstr]['kernel'].split('(')[1].split(',')[0]) # TODO
if info[attrstr].has_key('pad'):
txt_file.write(' pad: %s\n' % info[attrstr]['pad'].split('(')[1].split(',')[0]) # TODO
if info[attrstr].has_key('num_group'):
txt_file.write(' group: %s\n' % info[attrstr]['num_group'])
if info[attrstr].has_key('stride'):
txt_file.write(' stride: %s\n' % info[attrstr]['stride'].split('(')[1].split(',')[0])
txt_file.write(' bias_term: %s\n' % bias_term)
txt_file.write(' }\n')
if 'share' in info.keys() and info['share']:
txt_file.write(' param {\n')
txt_file.write(' name: "%s"\n' % info['params'][0])
txt_file.write(' }\n')
txt_file.write('}\n')
txt_file.write('\n')
def ChannelwiseConvolution(txt_file, info):
Convolution(txt_file, info)
def BatchNorm(txt_file, info):
pprint.pprint(info)
txt_file.write('layer {\n')
txt_file.write(' bottom: "%s"\n' % info['bottom'][0])
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "BatchNorm"\n')
txt_file.write(' batch_norm_param {\n')
txt_file.write(' use_global_stats: true\n') # TODO
if info[attrstr].has_key('momentum'):
txt_file.write(' moving_average_fraction: %s\n' % info[attrstr]['momentum'])
else:
txt_file.write(' moving_average_fraction: 0.9\n')
if info[attrstr].has_key('eps'):
txt_file.write(' eps: %s\n' % info[attrstr]['eps'])
else:
txt_file.write(' eps: 0.001\n')
txt_file.write(' }\n')
txt_file.write('}\n')
# if info['fix_gamma'] is "False": # TODO
txt_file.write('layer {\n')
txt_file.write(' bottom: "%s"\n' % info['top'])
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' name: "%s_scale"\n' % info['top'])
txt_file.write(' type: "Scale"\n')
txt_file.write(' scale_param { bias_term: true }\n')
txt_file.write('}\n')
txt_file.write('\n')
pass
def Activation(txt_file, info):
txt_file.write('layer {\n')
txt_file.write(' bottom: "%s"\n' % info['bottom'][0])
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "ReLU"\n') # TODO
txt_file.write('}\n')
txt_file.write('\n')
pass
def Concat(txt_file, info):
txt_file.write('layer {\n')
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "Concat"\n')
for bottom_i in info['bottom']:
txt_file.write(' bottom: "%s"\n' % bottom_i)
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write('}\n')
txt_file.write('\n')
pass
def ElementWiseSum(txt_file, info):
txt_file.write('layer {\n')
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "Eltwise"\n')
for bottom_i in info['bottom']:
txt_file.write(' bottom: "%s"\n' % bottom_i)
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' eltwise_param { operation: SUM }\n')
txt_file.write('}\n')
txt_file.write('\n')
pass
def Pooling(txt_file, info):
pool_type = 'AVE' if info[attrstr]['pool_type'] == 'avg' else 'MAX'
txt_file.write('layer {\n')
txt_file.write(' bottom: "%s"\n' % info['bottom'][0])
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "Pooling"\n')
txt_file.write(' pooling_param {\n')
txt_file.write(' pool: %s\n' % pool_type) # TODO
txt_file.write(' kernel_size: %s\n' % info[attrstr]['kernel'].split('(')[1].split(',')[0])
txt_file.write(' stride: %s\n' % info[attrstr]['stride'].split('(')[1].split(',')[0])
if info[attrstr].has_key('pad'):
txt_file.write(' pad: %s\n' % info[attrstr]['pad'].split('(')[1].split(',')[0])
txt_file.write(' }\n')
txt_file.write('}\n')
txt_file.write('\n')
pass
def FullyConnected(txt_file, info):
txt_file.write('layer {\n')
txt_file.write(' bottom: "%s"\n' % info['bottom'][0])
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "InnerProduct"\n')
txt_file.write(' inner_product_param {\n')
txt_file.write(' num_output: %s\n' % info[attrstr]['num_hidden'])
txt_file.write(' }\n')
txt_file.write('}\n')
txt_file.write('\n')
pass
def Flatten(txt_file, info):
pass
def SoftmaxOutput(txt_file, info):
pass
def LeakyReLU(txt_file, info):
if info[attrstr]['act_type'] == 'elu':
txt_file.write('layer {\n')
txt_file.write(' bottom: "%s"\n' % info['bottom'][0])
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' name: "%s"\n' % info['top'])
txt_file.write(' type: "ELU"\n')
txt_file.write(' elu_param { alpha: 0.25 }\n')
txt_file.write('}\n')
txt_file.write('\n')
else:
raise Exception("unsupported Activation")
def Eltwise(txt_file, info, op):
txt_file.write('layer {\n')
txt_file.write(' type: "Eltwise"\n')
txt_file.write(' top: "%s"\n' % info['top'])
txt_file.write(' name: "%s"\n' % info['top'])
for btom in info['bottom']:
txt_file.write(' bottom: "%s"\n' % btom)
txt_file.write(' eltwise_param { operation: %s }\n' % op)
txt_file.write('}\n')
txt_file.write('\n')
# ----------------------------------------------------------------
def write_node(txt_file, info):
if 'label' in info['name']:
return
if info['op'] == 'null' and info['name'] == 'data':
data(txt_file, info)
elif info['op'] == 'Convolution':
Convolution(txt_file, info)
elif info['op'] == 'ChannelwiseConvolution':
ChannelwiseConvolution(txt_file, info)
elif info['op'] == 'BatchNorm':
BatchNorm(txt_file, info)
elif info['op'] == 'Activation':
Activation(txt_file, info)
elif info['op'] == 'ElementWiseSum':
ElementWiseSum(txt_file, info)
elif info['op'] == '_Plus':
ElementWiseSum(txt_file, info)
elif info['op'] == 'Concat':
Concat(txt_file, info)
elif info['op'] == 'Pooling':
Pooling(txt_file, info)
elif info['op'] == 'Flatten':
Flatten(txt_file, info)
elif info['op'] == 'FullyConnected':
FullyConnected(txt_file, info)
elif info['op'] == 'SoftmaxOutput':
SoftmaxOutput(txt_file, info)
elif info['op'] == 'LeakyReLU':
LeakyReLU(txt_file, info)
elif info['op'] == 'elemwise_add':
ElementWiseSum(txt_file, info)
else:
pprint.pprint(info)
sys.exit("Warning! Unknown mxnet op:{}".format(info['op']))
| 35.701754
| 101
| 0.573096
|
5185247a46848c8afe9efa8321aa0bc1cdfa2121
| 1,537
|
py
|
Python
|
src/pypimod/sources/pypi_web.py
|
yeraydiazdiaz/pypimod
|
e456bc30f8c4d21158b80bd46eadd853e420aea6
|
[
"Apache-2.0"
] | 2
|
2019-10-06T21:40:08.000Z
|
2019-10-07T00:18:03.000Z
|
src/pypimod/sources/pypi_web.py
|
yeraydiazdiaz/pypimod
|
e456bc30f8c4d21158b80bd46eadd853e420aea6
|
[
"Apache-2.0"
] | 8
|
2019-10-26T16:12:04.000Z
|
2021-12-13T20:27:11.000Z
|
src/pypimod/sources/pypi_web.py
|
yeraydiazdiaz/pypimod
|
e456bc30f8c4d21158b80bd46eadd853e420aea6
|
[
"Apache-2.0"
] | null | null | null |
from urllib.parse import urljoin
from typing import List
from bs4 import BeautifulSoup
import httpx
from pypimod import exceptions, constants
async def get_pypi_urls(project_name: str) -> List[str]:
"""Fetch the projects PyPI page and return author email and maintainer URLs."""
project_url = constants.PYPI_PROJECT_URL.format(name=project_name)
soup = await fetch_project_page_soup(project_url)
return get_pypi_maintainer_urls_from_body_soup(soup)
async def fetch_project_page_soup(project_url: str) -> BeautifulSoup:
async with httpx.AsyncClient() as client:
response = await client.get(project_url)
try:
response.raise_for_status()
except httpx.exceptions.HTTPError as e:
raise exceptions.PyPIWebError(
"Error fetching project URL {}: {}".format(
project_url, response.status_code
)
) from e
return BeautifulSoup(response.text, "lxml")
def get_pypi_maintainer_urls_from_body_soup(response_soup: BeautifulSoup) -> List[str]:
return [
urljoin(constants.BASE_PYPI_URL, maintainer["href"])
for maintainer in response_soup.find(
string="Maintainers"
).parent.parent.find_all("a")
]
def get_pypi_author_email_from_body_soup(response_soup: BeautifulSoup) -> str:
author = response_soup.find(string="Author:")
if author is None:
return ""
author_href = author.parent.parent.find("a")["href"]
_, author_email = author_href.split(":")
return author_email
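Hypothetical usage of the module above from a small script; the project name is illustrative only, and it assumes the pypimod constants and exceptions packages are importable:
import asyncio

async def main():
    urls = await get_pypi_urls("sampleproject")
    print(urls)

if __name__ == "__main__":
    asyncio.run(main())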
| 30.74
| 87
| 0.707222
|
a4c96c44c4b36252593adc8c8b3a75235731dda3
| 21,260
|
py
|
Python
|
pyspeckit/spectrum/parinfo.py
|
glangsto/pyspeckit
|
346b24fb828d1d33c7891cdde7609723e51af34c
|
[
"MIT"
] | null | null | null |
pyspeckit/spectrum/parinfo.py
|
glangsto/pyspeckit
|
346b24fb828d1d33c7891cdde7609723e51af34c
|
[
"MIT"
] | 1
|
2021-05-14T19:17:41.000Z
|
2021-05-14T19:17:41.000Z
|
pyspeckit/spectrum/parinfo.py
|
glangsto/pyspeckit
|
346b24fb828d1d33c7891cdde7609723e51af34c
|
[
"MIT"
] | 1
|
2018-10-02T15:11:17.000Z
|
2018-10-02T15:11:17.000Z
|
from __future__ import print_function
from astropy.extern.six.moves import xrange
try:
from lmfit import Parameters, Parameter
LMFIT_PARAMETERS_INSTALLED = True
except ImportError:
LMFIT_PARAMETERS_INSTALLED = False
class ParinfoList(list):
"""
Store a list of model parameter values and their associated metadata (name,
error, order, limits, etc.) in a class-friendly manner
"""
def __init__(self, *args, **kwargs):
"""
Create a parinfolist from either a list of parameters
or a list or a tuple
Parameters
----------
preserve_order : bool
Keep the p['n'] values for each parameter?
Default to false, so par #'s will go from 0 to n(pars)-1
"""
if LMFIT_PARAMETERS_INSTALLED:
list.__init__(self,[])
if len(args) == 1 and isinstance(args[0],Parameters):
self._from_Parameters(args[0])
self._dict = dict([(pp['parname'],pp) for pp in self])
return
list.__init__(self, *args)
preserve_order = kwargs.pop('preserve_order',False)
# re-order the parameters from 0 to n-1 unless told otherwise
if not preserve_order:
self._set_numbers()
self._check_names()
self._set_attributes()
self._dict = dict([(pp['parname'],pp) for pp in self])
def _set_numbers(self):
""" Set the parameters in order by their current order in the list """
for ii,pp in enumerate(self):
if pp.n != ii:
pp.n = ii
def __str__(self):
return "\n".join([repr(p) for p in self])
def _getter(attributename):
def getattribute(self):
return [v[attributename] for v in self]
return getattribute
def _setter(attributename):
def setattribute(self, values):
if len(values) == len(self):
for parinf,newval in zip(self,values):
parinf[attributename] = newval
else:
raise ValueError("Must have len(new values) = %i (was %i)" % (len(self),len(values)))
return setattribute
def keys(self):
""" Dictionary-like behavior """
return self.parnames
def items(self):
""" Dictionary-like behavior """
return zip(self.parnames, self[:])
#def values(self):
# """ Dictionary-like behavior """
# return [v['value'] for v in self]
names = property(fget=_getter('parname'), fset=_setter('parname'))
parnames=names
shortnames = property(fget=_getter('shortparname'), fset=_setter('shortparname'))
shortparnames=shortnames
values = property(fget=_getter('value'), fset=_setter('value'))
errors = property(fget=_getter('error'), fset=_setter('error'))
n = property(fget=_getter('n'), fset=_setter('n'))
order=n
fixed = property(fget=_getter('fixed'), fset=_setter('fixed'))
limits = property(fget=_getter('limits'), fset=_setter('limits'))
limited = property(fget=_getter('limited'), fset=_setter('limited'))
tied = property(fget=_getter('tied'), fset=_setter('tied'))
def __getitem__(self, key):
if type(key) in (int, slice):
return super(ParinfoList,self).__getitem__(key)
else:
return self._dict[key]
def __setitem__(self, key, val):
"""
DO NOT allow items to be replaced/overwritten,
instead use their own setters
"""
# if key already exists, use its setter
if key in self._dict or (type(key) is int and key < len(self)):
self[key] = val
elif type(key) is int:
# can't set a new list element this way
raise IndexError("Index %i out of range" % key)
elif isinstance(val,Parinfo):
# otherwise, add the item
self.__dict__[key] = val
else:
raise TypeError("Can only add Parinfo items to ParinfoLists")
def _set_attributes(self):
self.__dict__.update(dict([(pp['parname'],pp) for pp in self]))
def _check_names(self):
"""
Make sure all names are unique. If they're not, append #'s at the end
(but strip #'s first)
"""
name_counter = {}
names_stripped = [name.strip('0123456789') for name in self.names]
for ii,name in enumerate(names_stripped):
if names_stripped.count(name) > 1:
if name in name_counter:
name_counter[name] += 1
self[ii]['parname'] = self[ii]['parname'].strip('0123456789')+ "{0}".format(name_counter[name])
else:
name_counter[name] = 0
self[ii]['parname'] = self[ii]['parname'].strip('0123456789')+ "{0}".format(name_counter[name])
# remove un-numbered versions if numbered versions are now being used
if name in self.__dict__:
self.__dict__.pop(name)
def append(self, value, renumber=None):
"""
Append to the list. Will renumber the parameter if its number already
exists in the list unless renumber == False
"""
if hasattr(value,'n') and value.n in self.n and renumber is not False:
# indexed from 0, so len(self) = max(self.n)+1
value.n = len(self)
super(ParinfoList, self).append(value)
self._check_names()
self._set_attributes()
def as_Parameters(self):
"""
Convert a ParinfoList to an lmfit Parameters class
"""
if LMFIT_PARAMETERS_INSTALLED:
P = Parameters()
for par in self:
P.add(name=par.parname,
value=par.value,
vary=not(par.fixed),
expr=par.tied if par.tied != '' else None,
min=par.limits[0] if par.limited[0] else None,
max=par.limits[1] if par.limited[1] else None)
return P
def _from_Parameters(self, lmpars):
"""
Read from an lmfit Parameters instance
"""
if len(lmpars) == len(self):
for P in lmpars.values():
self[P.name].value = P.value
self[P.name].error = P.stderr
self[P.name].limits = (P.min,P.max)
self[P.name].limited = (P.min is not None,P.max is not None)
self[P.name].expr = '' if P.expr is None else P.expr
else:
for par in lmpars.values():
self.append(Parinfo(par))
def tableprint(self, item_length=15, numbered=True):
"""
Print data in table-friendly format
Parameters
----------
item_length : int
Number of characters per item printed
numbered : bool
Are the parameters numbered? In pyspeckit, they will always be,
but this was included for compatibility with generic fitters
"""
stripped_names = list(set([par.parname.strip("0123456789") for par in self]))
nlines = len(self.n) / len(stripped_names)
strformat = "%" + str(item_length) + "s"
fltformat = "%" + str(item_length) + "g"
print(" ".join([strformat % name for name in stripped_names]))
if numbered:
for ii in xrange(nlines):
print(" ".join([fltformat % (self[name+"%i" % ii].value) for
name in stripped_names]))
else:
print(" ".join([fltformat % (self[name].value) for name in
stripped_names]))
class Parinfo(dict):
"""
A class for storing attributes of a fitted model parameter. It is based on
mpfit's parinfo dictionary, which is just a dictionary containing a few set
values. This implements them as 'gettable' attributes instead, but far
more importantly, includes sanity checks when setting values.
Attributes
----------
value: number
The value of the parameter. Arithmetic operations (*,/,+,-,**) will
use this value
error: number
The error on the value
n: int
The order of the parameter in the model or ParinfoList
fixed: bool
Can the value change? If False, error should be 0.
limits: (min,max)
The limits on the value of the parameter. Only applied
if limited
limited: (bool, bool)
Is the parameter value limited?
step: number
from MPFIT: the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is computed
automatically. Ignored when AUTODERIVATIVE=0.
scaleable: bool
Is the value scaled with the data? Important for normalization
procedures
tied: string
mpfit/lmift parameter. Allows you to specify arbitrary expressions for
how the parameter depends on other parameters
mpfit:
a string expression which "ties" the parameter to other free or
fixed parameters. Any expression involving constants and the
parameter array P are permitted. Example: if parameter 2 is always
to be twice parameter 1 then use the following: parinfo(2).tied =
'2 * p(1)'. Since they are totally constrained, tied parameters
are considered to be fixed; no errors are computed for them.
NOTE: the PARNAME can't be used in expressions.
parname: string
The parameter name
shortparname: string (tex)
A shortened version of the parameter name for plotting display
"""
def __init__(self, values=None, **kwargs):
dict.__init__(self, {'value':0.0, 'error':0.0, 'n':0, 'fixed':False,
'limits':(0.0,0.0), 'limited':(False,False),
'step':False, 'scaleable':False, 'tied':'',
'parname':'', 'shortparname':''}, **kwargs)
if LMFIT_PARAMETERS_INSTALLED:
if isinstance(values,Parameter):
self._from_Parameter(values)
self.__dict__ = self
return
if values is not None:
self.update(values)
self.__dict__ = self
def __repr__(self):
try:
reprint = "Param #%i %12s = %12g" % (self.n, self.parname, self.value)
if self.fixed:
reprint += " (fixed)"
elif self.error is not None:
reprint += " +/- %15g " % (self.error)
if any(self.limited):
lolim = "[%g," % self.limits[0] if self.limited[0] else "(-inf,"
uplim = "%g]" % self.limits[1] if self.limited[1] else "inf)"
myrange = lolim + uplim
reprint += " Range:%10s" % myrange
if self.tied != '':
reprint += " Tied: %s" % self.tied
if self.shortparname != '':
reprint += " Shortparname: %s" % self.shortparname
return reprint
except AttributeError:
return super(Parinfo,self).__repr__()
def __deepcopy__(self, memo):
copy = Parinfo(self)
copy.__dict__ = copy
return copy
def __copy__(self):
copy = Parinfo(self)
copy.__dict__ = copy
return copy
@property
def max(self):
return self.limits[1]
@max.setter
def max(self, value):
self.limits = (self.limits[0], value)
@property
def min(self):
return self.limits[0]
@min.setter
def min(self, value):
self.limits = (value, self.limits[1])
@property
def vary(self):
return not self.fixed
@vary.setter
def vary(self, value):
self.fixed = not value
@property
def expr(self):
return self.tied
@expr.setter
def expr(self, value):
self._check_OK('tied',value)
self.tied = value
def __setattr__(self, key, value):
# DEBUG print "Setting attribute %s = %s" % (key,value)
self._check_OK(key,value)
return super(Parinfo, self).__setattr__(key, value)
def __setitem__(self, key, value):
# DEBUG print "Setting item %s = %s" % (key,value)
self._check_OK(key,value)
return super(Parinfo, self).__setitem__(key, value)
def _check_OK(self,key,value):
# DEBUG print "Checking whether %s's value %s is OK" % (key,value)
if key == 'value':
if hasattr(self,'limited') and hasattr(self,'limits'):
if self.limited[0] and value < self.limits[0]:
raise ValueError('Set parameter value %r < limit value %r' % (value,self.limits[0]))
if self.limited[1] and value > self.limits[1]:
raise ValueError('Set parameter value %r > limit value %r' % (value,self.limits[1]))
if key in ('limits','limited'):
try:
if len(value) != 2:
raise ValueError("%s must be a 2-tuple" % key)
except TypeError: # if the input was scalar
raise ValueError("%s must be a 2-tuple" % key)
if key in ('parname','tied','shortparname'):
if type(value) is not str:
raise TypeError("%s must be a string" % key)
if key in ('fixed',):
try:
value = bool(value)
except:
raise ValueError("%s had value %s, which could not be converted to boolean" % (key,value))
def update(self, *args, **kwargs):
if args:
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
other = dict(args[0])
for key in other:
self[key] = other[key]
for key in kwargs:
self[key] = kwargs[key]
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def _from_Parameter(self, lmpar):
"""
Read a Parinfo instance from and lmfit Parameter
"""
self['limits'] = lmpar.min,lmpar.max
self['limited'] = (lmpar.min not in (None,False),lmpar.max not in (None,False))
self['value'] = lmpar.value
self['error'] = lmpar.stderr
self['parname'] = lmpar.name
self['fixed'] = not(lmpar.vary)
def _operation_wrapper(operation, reverse=False):
"""
Perform an operation (addition, subtraction, mutiplication, division, etc.)
"""
def ofunc(self, other):
""" Operation Function """
intypes = type(other),type(self.value)
try:
returnval = getattr(self.value,'__%s__' % operation)(other)
if type(returnval) not in intypes:
raise TypeError("return value had wrong type: %s" % str(type(returnval)))
else:
return returnval
except TypeError as err: # integers don't have defined operations with floats
#print err
#print "TypeError1: ",self, self.value, other
try:
if hasattr(other,'__r%s__' % operation):
#print "r",operation,": ",self, self.value, other
return getattr(other,'__r%s__' % operation)(self.value)
elif hasattr(other,'__%s__' % operation[1:]):
#print operation,": ",self, self.value, other
return getattr(other,'__%s__' % operation[1:])(self.value)
except:
raise TypeError("Neither side of the operation has a %s attribute!" % operation)
return ofunc
__add__ = _operation_wrapper('add')
__radd__ = _operation_wrapper('radd')
__sub__ = _operation_wrapper('sub')
__rsub__ = _operation_wrapper('rsub')
__mul__ = _operation_wrapper('mul')
__rmul__ = _operation_wrapper('rmul')
__div__ = _operation_wrapper('div')
__rdiv__ = _operation_wrapper('rdiv')
__pow__ = _operation_wrapper('pow')
__rpow__ = _operation_wrapper('rpow')
try:
def __array__(self):
import numpy as np
return np.array(self.value)
except ImportError:
pass
if __name__=="__main__":
import unittest
def check_failure(value):
""" Raise a ValueError if value not in (0,1) """
P = Parinfo()
P.value = 1
P.limited = (True,True)
P.limits = (0,1)
P.value = value
def check_tied(value):
P = Parinfo()
P.tied = value
def check_limits(value):
P = Parinfo()
P.limits = value
def check_index(key):
PL = ParinfoList([Parinfo({'parname':'HEIGHT'}),
Parinfo({'value':15,'parname':'AMPLITUDE'}),
Parinfo({'value':3,'parname':'WIDTH'}),
Parinfo({'value':4,'parname':'WIDTH'})])
return PL[key]
def check_set_list(values):
PL = ParinfoList([Parinfo({'parname':'HEIGHT'}),
Parinfo({'value':15,'parname':'AMPLITUDE'}),
Parinfo({'value':3,'parname':'WIDTH','limits':(0,5),'limited':(True,True)}),
Parinfo({'value':4,'parname':'WIDTH'})])
PL.shortparnames = ['a','b','c','d']
PL.values = values
return PL.values
class MyTestCase(unittest.TestCase):
def __init__(self, methodName='runTest', param=None):
super(MyTestCase, self).__init__(methodName)
self.param = param
def test_checks_value_fail(self):
check_failure(0.5)
self.assertRaises(ValueError, check_failure, 5)
self.assertRaises(ValueError, check_failure, -5)
def test_checks_tied_fail(self):
check_tied('p[0]')
self.assertRaises(TypeError, check_tied, 5)
self.assertRaises(TypeError, check_tied, (1,2,3))
def test_checks_limits_fail(self):
check_limits((1,2))
self.assertRaises(ValueError, check_limits, -5)
self.assertRaises(ValueError, check_limits, (1,2,3))
def test_indexing(self):
self.assertEqual(check_index(0), check_index('HEIGHT'))
self.assertEqual(check_index(1), check_index('AMPLITUDE'))
self.assertEqual(check_index(2), check_index('WIDTH0'))
def test_set_list(self):
self.assertEqual(check_set_list([1,2,3,4]),[1,2,3,4])
self.assertRaises(ValueError,check_set_list,[1,2,10,4])
def test_arithmetic(self):
value = 25
par = Parinfo({'parname':'TEST', 'value': value})
assert par+5 == value+5
assert par-5 == value-5
assert par/5 == value/5
assert par*5 == value*5
def test_arithmetic2(self):
value = 25.
par = Parinfo({'parname':'TEST', 'value': value})
assert par+5 == value+5
assert par-5 == value-5
assert par/5 == value/5
assert par*5 == value*5
def test_arithmetic3(self):
value = 25.
par = Parinfo({'parname':'TEST', 'value': value})
assert par+5. == value+5.
assert par-5. == value-5.
assert par/5. == value/5.
assert par*5. == value*5.
def test_arithmetic4(self):
value = 25
par = Parinfo({'parname':'TEST', 'value': value})
assert par+5. == value+5.
assert par-5. == value-5.
assert par/5. == value/5.
assert par*5. == value*5.
def test_arithmetic5(self):
value = 25.
par = Parinfo({'parname':'TEST', 'value': value})
assert 5.+par == 5.+value
assert 5.-par == 5.-value
assert 5./par == 5./value
assert 5.*par == 5.*value
def test_arithmetic6(self):
value = 25
par = Parinfo({'parname':'TEST', 'value': value})
assert 5.+par == 5.+value
assert 5.-par == 5.-value
assert 5./par == 5./value
assert 5.*par == 5.*value
def test_arithmetic7(self):
value = 25.
par = Parinfo({'parname':'TEST', 'value': value})
assert 5+par == 5+value
assert 5-par == 5-value
assert 5/par == 5/value
assert 5*par == 5*value
def test_arithmetic8(self):
value = 25
par = Parinfo({'parname':'TEST', 'value': value})
assert 5+par == 5+value
assert 5-par == 5-value
assert 5/par == 5/value
assert 5*par == 5*value
def test_copy(self):
import copy
value = 25
par = Parinfo({'parname':'TEST', 'value': value})
parcopy = copy.copy(par)
assert parcopy.value == value
parcopy = copy.deepcopy(par)
assert parcopy.value == value
unittest.main()
| 36.095076
| 115
| 0.551317
|
17e3626046149463c52b424413476b007645fde0
| 3,319
|
py
|
Python
|
VMEncryption/main/oscrypto/rhel_72_lvm/encryptstates/PrereqState.py
|
taoyama/azure-linux-extensions
|
f46377658783ae933fe7007af65ced311259fb4a
|
[
"Apache-2.0"
] | 2
|
2021-11-02T00:16:29.000Z
|
2022-02-17T12:08:42.000Z
|
VMEncryption/main/oscrypto/rhel_72_lvm/encryptstates/PrereqState.py
|
mbreakey3/azure-linux-extensions
|
3b58a1390fd0bc010e08f447368913bce5a21a8c
|
[
"Apache-2.0"
] | 3
|
2019-07-29T20:25:09.000Z
|
2019-08-13T00:00:45.000Z
|
VMEncryption/main/oscrypto/rhel_72_lvm/encryptstates/PrereqState.py
|
ChrisCoe/azure-linux-extensions
|
1ca6fce15eca3ddefc33651b094c9a4b4e52fa31
|
[
"Apache-2.0"
] | 1
|
2017-07-17T18:52:10.000Z
|
2017-07-17T18:52:10.000Z
|
#!/usr/bin/env python
#
# VM Encryption extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import re                                      # used by _patch_waagent below
from OSEncryptionState import *
from pprint import pprint
class PrereqState(OSEncryptionState):
def __init__(self, context):
super(PrereqState, self).__init__('PrereqState', context)
def should_enter(self):
self.context.logger.log("Verifying if machine should enter prereq state")
if not super(PrereqState, self).should_enter():
return False
self.context.logger.log("Performing enter checks for prereq state")
return True
def enter(self):
if not self.should_enter():
return
self.context.logger.log("Entering prereq state")
distro_info = self.context.distro_patcher.distro_info
self.context.logger.log("Distro info: {0}".format(distro_info))
if (((distro_info[0] == 'centos' and distro_info[1] == '7.3.1611') or
(distro_info[0] == 'centos' and distro_info[1].startswith('7.4')) or
(distro_info[0] == 'centos' and distro_info[1].startswith('7.5')) or
(distro_info[0] == 'centos' and distro_info[1].startswith('7.6')) or
(distro_info[0] == 'redhat' and distro_info[1] == '7.3') or
(distro_info[0] == 'redhat' and distro_info[1] == '7.4') or
(distro_info[0] == 'redhat' and distro_info[1] == '7.5') or
(distro_info[0] == 'redhat' and distro_info[1] == '7.6')) and
self.disk_util.is_os_disk_lvm()):
self.context.logger.log("Enabling OS volume encryption on {0} {1}".format(distro_info[0],
distro_info[1]))
else:
raise Exception("RHEL72LVMEncryptionStateMachine called for distro {0} {1}".format(distro_info[0],
distro_info[1]))
self.context.distro_patcher.install_extras()
self._patch_waagent()
self.command_executor.Execute('systemctl daemon-reload', True)
def should_exit(self):
self.context.logger.log("Verifying if machine should exit prereq state")
return super(PrereqState, self).should_exit()
def _patch_waagent(self):
self.context.logger.log("Patching waagent")
contents = None
with open('/usr/lib/systemd/system/waagent.service', 'r') as f:
contents = f.read()
contents = re.sub(r'\[Service\]\n', '[Service]\nKillMode=process\n', contents)
with open('/usr/lib/systemd/system/waagent.service', 'w') as f:
f.write(contents)
self.context.logger.log("waagent patched successfully")
| 38.149425
| 110
| 0.619464
|
50d10cfd02ac3877419d8dc6f9a331ee27ff8199
| 3,842
|
py
|
Python
|
funcs/help.py
|
ridhwan-aziz/zipra_bot
|
de328e518aa6876fa91e96e007c198aae8ff2fd4
|
[
"MIT"
] | 4
|
2021-11-11T03:44:21.000Z
|
2022-03-26T14:32:20.000Z
|
funcs/help.py
|
ridhwan-aziz/zipra_bot
|
de328e518aa6876fa91e96e007c198aae8ff2fd4
|
[
"MIT"
] | 2
|
2021-11-23T07:02:35.000Z
|
2022-02-11T13:49:07.000Z
|
funcs/help.py
|
ridhwan-aziz/zipra_bot
|
de328e518aa6876fa91e96e007c198aae8ff2fd4
|
[
"MIT"
] | 1
|
2021-11-25T13:30:24.000Z
|
2021-11-25T13:30:24.000Z
|
import logging
from utils.lang import Language
from utils.helper import ol_generator
from telethon import __version__
from telethon.tl.custom.message import Message
from telethon.tl.types import (
InputMessageEntityMentionName,
InputUser,
InputUserSelf,
KeyboardButtonCallback,
KeyboardButtonRow,
KeyboardButtonUrl,
MessageEntityCode,
ReplyInlineMarkup,
User
)
async def main(*args):
logging.debug("[HelpHandler] Setting the required variables")
event: Message = args[0]
logging.debug("[HelpHandler] Parsing options")
params = args[1].get_args().raw_text
me: User = args[2]
lang = Language(event)
logging.debug("[HelpHandler] Call event.get_sender")
sender = await event.get_sender()
logging.info("[HelpHandler] Checking chat type")
if not event.is_private:
logging.info("[HelpHandler] Chat type is not private")
logging.info("[HelpHandler] Not replying help message")
return await event.reply(
(await lang.get('non_private_error')),
buttons=ReplyInlineMarkup(
[KeyboardButtonRow([
KeyboardButtonUrl(
text=await lang.get('click_here', True),
url=f"https://t.me/{me.username}?start=help"
)
])]
)
)
else:
logging.info("[HelpHandler] Chat type is private")
logging.info("[HelpHandler] Sending help message")
if params is not None:
logging.warning(f"[HelpHandler] Someone trying accessing help with param: {params}")
return await event.respond(f"Menu {params} is still in development")
logging.debug("Getting help_message string and telethon version")
help_message = await lang.get("help_message")
version = f"v{__version__}"
logging.debug("[HelpHandler] Setting up variables for formatting_entities")
# vars
variables = ['name', 'bot_name', 'version']
results = [sender.first_name, me.first_name, version]
offs, lens = ol_generator(help_message, variables, results)
logging.debug("[HelpHandler] Sending message with custom formatting_entities")
return await event.respond(
help_message.format(
name=sender.first_name,
bot_name=me.first_name,
version=version
),
formatting_entities=[
InputMessageEntityMentionName(
offset=offs[0],
length=lens[0],
user_id=InputUser(
sender.id,
sender.access_hash
)
),
InputMessageEntityMentionName(
offset=offs[1],
length=lens[1],
user_id=InputUserSelf()
),
MessageEntityCode(
offset=offs[2],
length=lens[2],
)
],
buttons=ReplyInlineMarkup(
rows=[KeyboardButtonRow(
buttons=[KeyboardButtonCallback(
text=await lang.get('about_me', True),
data=f'help_{sender.id}_about'.encode()
),
KeyboardButtonCallback(
text=await lang.get('usage', True),
data=f'help_{sender.id}_usage'.encode()
)]
), KeyboardButtonRow(
buttons=[KeyboardButtonCallback(
text=await lang.get('privacy_terms', True),
data=f'help_{sender.id}_pnt'.encode()
)]
)]
)
)
| 36.590476
| 96
| 0.542166
|
27a9e9fc6a5f46c188be6c4942baa2520dbebf5f
| 5,993
|
py
|
Python
|
examples/pwr_run/checkpointing/jobs_max_par/job19.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/jobs_max_par/job19.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/jobs_max_par/job19.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.set_defaults(resume=False)
args = parser.parse_args()
# Training parameters
batch_size = 256
args_lr = 0.0007
args_model = 'vgg19'
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_param/' + job_name + '*'
total_epochs = 45
starting_epoch = 0
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_max_param/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
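# Note (added; not in the original script): the external scheduler is assumed to
# checkpoint a running job by sending SIGTERM, e.g. `kill -15 <pid>` from a shell
# or os.kill(pid, signal.SIGTERM) from a supervising process; the handler above
# then writes '<job_name>_<current_epoch>.h5' and exits.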
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
param_dict = {}
modify = False
with open('param.json', 'r') as fp:
param_dict = json.load(fp)
if job_name not in param_dict:
param_dict[job_name] = trainable_count
modify = True
elif param_dict[job_name] != trainable_count:
param_dict[job_name] = trainable_count
modify = True
if modify:
with open('param.json', 'w') as fp:
json.dump(param_dict, fp)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=total_epochs,
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch
#verbose=0
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
finish_dict = {}
with open('finish.json', 'r') as fp:
finish_dict = json.load(fp)
finish_dict[job_name] = 1
with open('finish.json', 'w') as fp:
json.dump(finish_dict, fp)
| 30.733333
| 118
| 0.703988
|
0b414380aeded17edeeb4565993f20349970f536
| 2,360
|
py
|
Python
|
portal_src/bin/utils/typesystem.py
|
bug00r/portal-py
|
388067bb571455f0249daffd04bb51606dc46194
|
[
"MIT"
] | null | null | null |
portal_src/bin/utils/typesystem.py
|
bug00r/portal-py
|
388067bb571455f0249daffd04bb51606dc46194
|
[
"MIT"
] | null | null | null |
portal_src/bin/utils/typesystem.py
|
bug00r/portal-py
|
388067bb571455f0249daffd04bb51606dc46194
|
[
"MIT"
] | null | null | null |
import types, sys, builtins
class Assembler(object):
def __init__(self, descriptor):
self.__descriptor = descriptor
def get_attribute(self, _module, _typename):
curmodule = _module
for part in _typename.split("."):
if hasattr(curmodule, part):
curmodule = getattr(curmodule, part)
else:
break
return curmodule
def create_type(self,_module, _type):
baselist = []
if hasattr(_type, 'base') and isinstance(_type.base, list):
for _typename in _type.base:
if hasattr(builtins, _typename):
baselist.append(getattr(builtins, _typename))
else:
clz = self.get_attribute(_module, _typename)
if clz is not None:
baselist.append(clz)
clazz = type(_type.name, tuple(baselist), dict())
return clazz
def create_module(self, parentmodul, _module):
module = types.ModuleType(_module.name)
if hasattr(_module, 'types'):
for _type in _module.types:
setattr(module, _type.name, self.create_type(parentmodul, _type))
if hasattr(_module, 'modules'):
for submodule in _module.modules:
setattr(module, submodule.name, self.create_module(module, submodule))
return module
def create(self):
        sys.modules[self.__descriptor.name] = self.create_module(None, self.__descriptor)
if __name__ == '__main__':
from portal_src.bin.utils.lang import json2obj
test_config = """
{
"name": "dsa41",
"types": [
{ "name": "Text", "base": ["str"] },
{ "name": "Float", "base": ["float"] },
{ "name": "Integer", "base": ["int"] }
],
"modules": [
{
"name": "extend",
"types": [
{ "name": "NextFloat", "base": ["Float"] }
]
}
]
}
"""
descriptor = json2obj(test_config)
assembler = Assembler(descriptor)
assembler.create()
import dsa41
print(dsa41.Text("argh"))
print(dsa41.Integer(10))
print(dsa41.Float(10e-3))
print(dsa41.extend.NextFloat(34.666666))
| 31.891892
| 90
| 0.527966
|
f5dd9f8e47d4ce7055d4decf5f2448a53139c04d
| 728
|
py
|
Python
|
tests/image/test_image_filters.py
|
kungfuai/kaishi
|
e20360170ccac2111cab61fcd71b81be3c2a7468
|
[
"MIT"
] | 10
|
2020-04-01T16:46:25.000Z
|
2021-02-09T15:56:42.000Z
|
tests/image/test_image_filters.py
|
kungfuai/kaishi
|
e20360170ccac2111cab61fcd71b81be3c2a7468
|
[
"MIT"
] | 14
|
2020-03-23T13:32:35.000Z
|
2021-12-07T19:30:23.000Z
|
tests/image/test_image_filters.py
|
kungfuai/kaishi
|
e20360170ccac2111cab61fcd71b81be3c2a7468
|
[
"MIT"
] | 2
|
2020-08-14T07:23:06.000Z
|
2021-12-06T18:20:42.000Z
|
from kaishi.image.file_group import ImageFileGroup
def test_invalid_file_extensions():
test = ImageFileGroup("tests/data/image", recursive=True)
test.configure_pipeline(["FilterInvalidFileExtensions"])
test.run_pipeline()
assert len(test.filtered["unsupported_extension"]) > 0
def test_invalid_image_headers():
test = ImageFileGroup("tests/data/image", recursive=True)
test.configure_pipeline(["FilterInvalidImageHeaders"])
test.run_pipeline()
assert len(test.filtered["invalid_header"]) > 0
def test_similar():
test = ImageFileGroup("tests/data/image", recursive=True)
test.configure_pipeline(["FilterSimilar"])
test.run_pipeline()
assert len(test.filtered["similar"]) > 0
| 31.652174
| 61
| 0.744505
|
7d3ae4dfde9b111aa72dc4eedfdff4e5fbc9e95c
| 1,500
|
py
|
Python
|
StanCode Projects/campy_drawings/my_drawing.py
|
chengti-wang/stanCode-Projects
|
dec4ebb548e0b8a77478056775dca697ee1e11be
|
[
"MIT"
] | null | null | null |
StanCode Projects/campy_drawings/my_drawing.py
|
chengti-wang/stanCode-Projects
|
dec4ebb548e0b8a77478056775dca697ee1e11be
|
[
"MIT"
] | null | null | null |
StanCode Projects/campy_drawings/my_drawing.py
|
chengti-wang/stanCode-Projects
|
dec4ebb548e0b8a77478056775dca697ee1e11be
|
[
"MIT"
] | null | null | null |
"""
File:
Name:
----------------------
TODO:
"""
from campy.graphics.gobjects import GOval, GRect, GPolygon
from campy.graphics.gwindow import GWindow
def main():
window = GWindow(width=500, height=500, title="drawing")
ear1 = GOval(100, 100, x=90, y=90)
paint(ear1, (184, 115, 51))
window.add(ear1)
ear3 = GOval(80, 80, x=100, y=100)
paint(ear3, (0, 0, 0))
window.add(ear3)
ear2 = GOval(100, 100, x=310, y=90)
paint(ear2, (184, 115, 51))
window.add(ear2)
ear2 = GOval(80, 80, x=320, y=100)
paint(ear2, (0,0,0))
window.add(ear2)
face = GOval(300, 310, x=100, y=100)
face.filled = True
face.fill_color = (184, 115, 51)
window.add(face)
eye1 = GOval(28, 40, x=200, y=215)
eye1.filled = True
paint(eye1, (0,0,0))
window.add(eye1)
eye2 = GOval(28, 40, x=270, y=215)
paint(eye2, (0,0,0))
window.add(eye2)
pupil1 = GOval(12, 12, x=212,y=223)
paint(pupil1, (255, 255, 255))
window.add(pupil1)
pupil2 = GOval(12, 12, x=282, y=223)
paint(pupil2, (255, 255, 255))
window.add(pupil2)
mouth = GOval(170, 110, x=165, y=290)
paint(mouth, (222,184,135))
window.add(mouth)
mouth1 = GOval(50, 50, x=225, y=300)
paint(mouth1, (0, 0, 0))
window.add(mouth1)
mouth2 = GOval(20, 20, x=220, y=350)
paint(mouth2, (0, 0, 0))
window.add(mouth2)
def paint(obj, color):
obj.filled = True
obj.fill_color = color
if __name__ == '__main__':
main()
| 22.058824
| 60
| 0.577333
|
733bdc8f9d8dee9619c17476c15d1cfa4e14489c
| 8,539
|
py
|
Python
|
kubernetes/client/apis/authentication_v1beta1_api.py
|
anemerovsky-essextec/python
|
6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/apis/authentication_v1beta1_api.py
|
anemerovsky-essextec/python
|
6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/apis/authentication_v1beta1_api.py
|
anemerovsky-essextec/python
|
6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class AuthenticationV1beta1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_token_review(self, body, **kwargs):
"""
create a TokenReview
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_token_review(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1beta1TokenReview body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1TokenReview
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_token_review_with_http_info(body, **kwargs)
else:
(data) = self.create_token_review_with_http_info(body, **kwargs)
return data
def create_token_review_with_http_info(self, body, **kwargs):
"""
create a TokenReview
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_token_review_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1beta1TokenReview body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1TokenReview
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_token_review" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_token_review`")
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/authentication.k8s.io/v1beta1/tokenreviews', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1TokenReview',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/authentication.k8s.io/v1beta1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 37.951111
| 119
| 0.565289
|
00187aa93d685a6be66c4e1962e845d382be2cbe
| 999
|
py
|
Python
|
src/QuoteEngine/QuoteModel.py
|
WinCanton/meme-generator
|
b53963f932731584ea998236960f58fd39fa3a40
|
[
"MIT"
] | null | null | null |
src/QuoteEngine/QuoteModel.py
|
WinCanton/meme-generator
|
b53963f932731584ea998236960f58fd39fa3a40
|
[
"MIT"
] | null | null | null |
src/QuoteEngine/QuoteModel.py
|
WinCanton/meme-generator
|
b53963f932731584ea998236960f58fd39fa3a40
|
[
"MIT"
] | null | null | null |
"""A class that defines and constructs Quote object.
This class is used as part of the process of extracting Quote
data from various types of input files. Having managed to extract
data, ingestors shall utilise this class definition to construct
Quote objects.
"""
class QuoteModel:
"""A class container that defines and construct a QuoteModel.
This class will be used by ingestor classes to construct Quote
objects following extraction of data from various different
file types, performed uniquely and independently by different
ingestor classes.
"""
def __init__(self, body, author):
"""Initialise Quote object using provided information.
This object constructor uses `body` and `author` information
passed to it to create a Quote object.
"""
self.body = body
self.author = author
def __repr__(self):
"""Define object representation in a string format."""
return f'{self.body} ({self.author})'
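# --- Illustrative usage (added; not part of the original module) -------------
# A minimal sketch of how an ingestor might turn parsed (body, author) pairs
# into QuoteModel objects; the sample data below is invented for illustration.
if __name__ == '__main__':
    parsed_rows = [('To be or not to be', 'Shakespeare'),
                   ('Simplicity is the soul of efficiency', 'Austin Freeman')]
    quotes = [QuoteModel(body, author) for body, author in parsed_rows]
    for quote in quotes:
        print(quote)    # prints "<body> (<author>)" via __repr__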
| 32.225806
| 68
| 0.706707
|
f5d74223eaccdfe1953e65b5340b645368f8dc6b
| 883
|
py
|
Python
|
setup.py
|
vaizki/python-tm35fin
|
e591e97f686a5ccc1ad62d0ab265c536573d9726
|
[
"MIT"
] | null | null | null |
setup.py
|
vaizki/python-tm35fin
|
e591e97f686a5ccc1ad62d0ab265c536573d9726
|
[
"MIT"
] | null | null | null |
setup.py
|
vaizki/python-tm35fin
|
e591e97f686a5ccc1ad62d0ab265c536573d9726
|
[
"MIT"
] | null | null | null |
"""
Package setup script for ETRS-TM35FIN coordinate system helper classes
"""
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="tm35fin",
version="0.1.1",
author="Jukka Vaisanen",
author_email="vaizki@vaizki.fi",
description="ETRS-TM35FIN coordinate system",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/vaizki/python-tm35fin",
project_urls={
"Bug Tracker": "https://github.com/vaizki/python-tm35fin/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "."},
packages=setuptools.find_packages(where="."),
python_requires=">=3.8",
)
| 28.483871
| 74
| 0.654587
|
0b7f18c12ced8027b85cbb3b9310491a3f69e1f1
| 159
|
py
|
Python
|
PyTester/visitor/Visitor.py
|
Sildra/PyTester
|
ebe16dc4dc169416ee839adc03e42806d8d57620
|
[
"Apache-2.0"
] | null | null | null |
PyTester/visitor/Visitor.py
|
Sildra/PyTester
|
ebe16dc4dc169416ee839adc03e42806d8d57620
|
[
"Apache-2.0"
] | null | null | null |
PyTester/visitor/Visitor.py
|
Sildra/PyTester
|
ebe16dc4dc169416ee839adc03e42806d8d57620
|
[
"Apache-2.0"
] | null | null | null |
class Visitor:
"""description of class"""
@staticmethod
def visit(obj):
pass
@staticmethod
def leave(parent, child):
pass
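# --- Illustrative usage (added; not part of the original module) -------------
# A hypothetical subclass showing how the hooks are meant to be overridden; the
# PrintingVisitor name and the objects passed to it are assumptions only.
class PrintingVisitor(Visitor):
    """Example visitor that prints each object it enters and leaves."""
    @staticmethod
    def visit(obj):
        print('visit:', obj)
    @staticmethod
    def leave(parent, child):
        print('leave:', child, '-> back to', parent)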
| 15.9
| 30
| 0.578616
|
feb462b48f41816c61eaf7e7020fa7939eea648a
| 2,249
|
py
|
Python
|
openpeerpower/components/daikin/switch.py
|
pcaston/Open-Peer-Power
|
81805d455c548e0f86b0f7fedc793b588b2afdfd
|
[
"Apache-2.0"
] | null | null | null |
openpeerpower/components/daikin/switch.py
|
pcaston/Open-Peer-Power
|
81805d455c548e0f86b0f7fedc793b588b2afdfd
|
[
"Apache-2.0"
] | null | null | null |
openpeerpower/components/daikin/switch.py
|
pcaston/Open-Peer-Power
|
81805d455c548e0f86b0f7fedc793b588b2afdfd
|
[
"Apache-2.0"
] | 1
|
2019-04-24T14:10:08.000Z
|
2019-04-24T14:10:08.000Z
|
"""Support for Daikin AirBase zones."""
import logging
from openpeerpower.helpers.entity import ToggleEntity
from . import DOMAIN as DAIKIN_DOMAIN
_LOGGER = logging.getLogger(__name__)
ZONE_ICON = "mdi:home-circle"
async def async_setup_platform(opp, config, async_add_entities, discovery_info=None):
"""Old way of setting up the platform.
Can only be called when a user accidentally mentions the platform in their
config. But even in that case it would have been ignored.
"""
pass
async def async_setup_entry(opp, entry, async_add_entities):
"""Set up Daikin climate based on config_entry."""
daikin_api = opp.data[DAIKIN_DOMAIN][entry.entry_id]
zones = daikin_api.device.zones
if zones:
async_add_entities(
[
DaikinZoneSwitch(daikin_api, zone_id)
for zone_id, zone in enumerate(zones)
if zone != ("-", "0")
]
)
class DaikinZoneSwitch(ToggleEntity):
"""Representation of a zone."""
def __init__(self, daikin_api, zone_id):
"""Initialize the zone."""
self._api = daikin_api
self._zone_id = zone_id
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self._api.mac}-zone{self._zone_id}"
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ZONE_ICON
@property
def name(self):
"""Return the name of the sensor."""
return "{} {}".format(self._api.name, self._api.device.zones[self._zone_id][0])
@property
def is_on(self):
"""Return the state of the sensor."""
return self._api.device.zones[self._zone_id][1] == "1"
@property
def device_info(self):
"""Return a device description for device registry."""
return self._api.device_info
async def async_update(self):
"""Retrieve latest state."""
await self._api.async_update()
async def async_turn_on(self, **kwargs):
"""Turn the zone on."""
await self._api.device.set_zone(self._zone_id, "1")
async def async_turn_off(self, **kwargs):
"""Turn the zone off."""
await self._api.device.set_zone(self._zone_id, "0")
| 28.1125
| 87
| 0.63317
|
ec41406f8b74144344bb0548faa8fceb2ed424f8
| 2,094
|
py
|
Python
|
tests/providers/google/cloud/operators/test_video_intelligence_system.py
|
naweedraja/airflow
|
4bec1cc489f5d19daf7450c75c3e8057c9709dbd
|
[
"Apache-2.0"
] | 2
|
2020-03-24T14:47:18.000Z
|
2020-03-24T14:48:17.000Z
|
tests/providers/google/cloud/operators/test_video_intelligence_system.py
|
naweedraja/airflow
|
4bec1cc489f5d19daf7450c75c3e8057c9709dbd
|
[
"Apache-2.0"
] | 1
|
2021-09-29T17:37:13.000Z
|
2021-09-29T17:37:13.000Z
|
tests/providers/google/cloud/operators/test_video_intelligence_system.py
|
naweedraja/airflow
|
4bec1cc489f5d19daf7450c75c3e8057c9709dbd
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_AI_KEY, GCP_GCS_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCP_BUCKET_NAME = os.environ.get("GCP_VIDEO_INTELLIGENCE_BUCKET_NAME", "test-bucket-name")
GCP_VIDEO_SOURCE_URL = os.environ.get("GCP_VIDEO_INTELLIGENCE_VIDEO_SOURCE_URL", "http://nasa.gov")
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_AI_KEY)
class CloudVideoIntelligenceExampleDagsTest(GoogleSystemTest):
@provide_gcp_context(GCP_AI_KEY)
def setUp(self):
self.create_gcs_bucket(GCP_BUCKET_NAME, location="europe-north1")
self.execute_with_ctx(
cmd=[
"bash",
"-c",
f"curl {GCP_VIDEO_SOURCE_URL} | gsutil cp - gs://{GCP_BUCKET_NAME}/video.mp4"
], key=GCP_GCS_KEY
)
super().setUp()
@provide_gcp_context(GCP_AI_KEY)
def tearDown(self):
self.delete_gcs_bucket(GCP_BUCKET_NAME)
super().tearDown()
@provide_gcp_context(GCP_AI_KEY)
    def test_run_example_dag_video_intelligence(self):
self.run_dag('example_gcp_video_intelligence', CLOUD_DAG_FOLDER)
| 38.777778
| 103
| 0.74212
|
62059442a8fcd86b36fea5f73be366a16f11ac1e
| 3,378
|
py
|
Python
|
wagtail/core/migrations/0062_comment_models_and_pagesubscription.py
|
brownaa/wagtail
|
c97bc56c6822eb1b6589d5c33e07f71acfc48845
|
[
"BSD-3-Clause"
] | 2
|
2020-10-19T13:10:14.000Z
|
2020-11-29T05:17:13.000Z
|
wagtail/core/migrations/0062_comment_models_and_pagesubscription.py
|
brownaa/wagtail
|
c97bc56c6822eb1b6589d5c33e07f71acfc48845
|
[
"BSD-3-Clause"
] | 3
|
2015-03-10T12:19:01.000Z
|
2021-10-14T22:24:06.000Z
|
wagtail/core/migrations/0062_comment_models_and_pagesubscription.py
|
brownaa/wagtail
|
c97bc56c6822eb1b6589d5c33e07f71acfc48845
|
[
"BSD-3-Clause"
] | 2
|
2021-08-23T01:41:30.000Z
|
2021-08-23T17:02:32.000Z
|
# Generated by Django 3.0.3 on 2021-04-19 13:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('wagtailcore', '0061_change_promote_tab_helpt_text_and_verbose_names'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('contentpath', models.TextField()),
('position', models.TextField(blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('resolved_at', models.DateTimeField(blank=True, null=True)),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='wagtailcore.Page')),
('resolved_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='comments_resolved', to=settings.AUTH_USER_MODEL)),
('revision_created', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_comments', to='wagtailcore.PageRevision')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'comment',
'verbose_name_plural': 'comments',
},
),
migrations.CreateModel(
name='CommentReply',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('comment', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='replies', to='wagtailcore.Comment')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment_replies', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'comment reply',
'verbose_name_plural': 'comment replies',
},
),
migrations.CreateModel(
name='PageSubscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment_notifications', models.BooleanField()),
('page', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscribers', to='wagtailcore.Page')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='page_subscriptions', to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('page', 'user')},
},
),
]
| 51.969231
| 188
| 0.621078
|
7c5095fb3a55a4c2b9be64af91c0533c67af5a29
| 854
|
py
|
Python
|
app/tests_pre_start.py
|
tsatsujnr139/fastapi-role-based-access-control-auth-service
|
6c6addb04edad80e167424e39574697008eb0e64
|
[
"MIT"
] | 47
|
2021-03-06T14:49:43.000Z
|
2022-03-05T12:18:59.000Z
|
app/tests_pre_start.py
|
tsatsujnr139/fastapi-role-based-access-control-auth-service
|
6c6addb04edad80e167424e39574697008eb0e64
|
[
"MIT"
] | 5
|
2021-09-19T15:16:49.000Z
|
2022-01-26T15:47:48.000Z
|
app/tests_pre_start.py
|
tsatsujnr139/fastapi-role-based-access-control-auth-service
|
6c6addb04edad80e167424e39574697008eb0e64
|
[
"MIT"
] | 15
|
2021-03-08T07:54:32.000Z
|
2022-03-09T13:57:23.000Z
|
import logging
from tenacity import (
after_log,
before_log,
retry,
stop_after_attempt,
wait_fixed,
)
from app.db.session import TestingSessionLocal
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
max_tries = 60 * 5 # 5 minutes
wait_seconds = 1
@retry(
stop=stop_after_attempt(max_tries),
wait=wait_fixed(wait_seconds),
before=before_log(logger, logging.INFO),
after=after_log(logger, logging.WARN),
)
def init() -> None:
try:
# Try to create session to check if DB is awake
db = TestingSessionLocal()
db.execute("SELECT 1")
except Exception as e:
logger.error(e)
raise e
def main() -> None:
logger.info("Initializing service")
init()
logger.info("Service finished initializing")
if __name__ == "__main__":
main()
| 19.409091
| 55
| 0.67096
|
a3306833e957bd68f633257b013954b9b0e8b670
| 4,290
|
py
|
Python
|
intg/src/test/python/test_ranger_client.py
|
mudit-97/ranger
|
cbe28e250fbb0ef578a76ba3ebd0e6efb92f3366
|
[
"Apache-2.0"
] | 1
|
2021-01-18T06:17:04.000Z
|
2021-01-18T06:17:04.000Z
|
intg/src/test/python/test_ranger_client.py
|
mudit-97/ranger
|
cbe28e250fbb0ef578a76ba3ebd0e6efb92f3366
|
[
"Apache-2.0"
] | null | null | null |
intg/src/test/python/test_ranger_client.py
|
mudit-97/ranger
|
cbe28e250fbb0ef578a76ba3ebd0e6efb92f3366
|
[
"Apache-2.0"
] | 1
|
2021-04-05T16:54:54.000Z
|
2021-04-05T16:54:54.000Z
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.mock import patch
from apache_ranger.exceptions import RangerServiceException
from apache_ranger.model.ranger_service import RangerService
from apache_ranger.client.ranger_client import API, HttpMethod, HTTPStatus, RangerClient
class MockResponse:
def __init__(self, status_code, response=None, content=None):
self.status_code = status_code
self.response = response
self.content = content
return
def json(self):
return self.response
def text(self):
return str(self.content)
class TestRangerClient(unittest.TestCase):
URL = "url"
AUTH = ("user", "password")
@patch('apache_ranger.client.ranger_client.Session')
def test_get_service_unavailable(self, mock_session):
mock_session.return_value.get.return_value = MockResponse(HTTPStatus.SERVICE_UNAVAILABLE)
result = RangerClient(TestRangerClient.URL, TestRangerClient.AUTH).find_services()
self.assertTrue(result is None)
@patch('apache_ranger.client.ranger_client.Session')
def test_get_success(self, mock_session):
response = [ RangerService() ]
mock_session.return_value.get.return_value = MockResponse(HTTPStatus.OK, response=response, content='Success')
result = RangerClient(TestRangerClient.URL, TestRangerClient.AUTH).find_services()
self.assertEqual(response, result)
@patch('apache_ranger.client.ranger_client.Session')
@patch('apache_ranger.client.ranger_client.Response')
def test_get_unexpected_status_code(self, mock_response, mock_session):
content = 'Internal Server Error'
mock_response.text = content
mock_response.content = content
mock_response.status_code = HTTPStatus.INTERNAL_SERVER_ERROR
mock_session.return_value.get.return_value = mock_response
try:
RangerClient(TestRangerClient.URL, TestRangerClient.AUTH).find_services()
except RangerServiceException as e:
            self.assertEqual(HTTPStatus.INTERNAL_SERVER_ERROR, e.statusCode)
@patch('apache_ranger.client.ranger_client.RangerClient.FIND_SERVICES')
def test_unexpected_http_method(self, mock_api):
mock_api.method.return_value = "PATCH"
mock_api.url = TestRangerClient.URL
mock_api.path = RangerClient.URI_SERVICE
try:
RangerClient(TestRangerClient.URL, TestRangerClient.AUTH).find_services()
except RangerServiceException as e:
self.assertTrue('Unsupported HTTP Method' in repr(e))
def test_url_missing_format(self):
params = {'arg1': 1, 'arg2': 2}
try:
API("{arg1}test{arg2}path{arg3}", HttpMethod.GET, HTTPStatus.OK).format_path(params)
self.fail("Supposed to fail")
except KeyError as e:
self.assertTrue('KeyError' in repr(e))
def test_url_invalid_format(self):
params = {'1', '2'}
try:
API("{}test{}path{}", HttpMethod.GET, HTTPStatus.OK).format_path(params)
self.fail("Supposed to fail")
except TypeError as e:
self.assertTrue('TypeError' in repr(e))
if __name__ == '__main__':
unittest.main()
| 37.964602
| 126
| 0.668998
|
0aab2f8d20a6cebbaf7c3678ed5835b523ed8417
| 5,133
|
py
|
Python
|
onlinecourse/models.py
|
VenkataLeela02/Django-online-course
|
a8eaaebd7cd47bf7f539f2894f9e46e61d0083c5
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/models.py
|
VenkataLeela02/Django-online-course
|
a8eaaebd7cd47bf7f539f2894f9e46e61d0083c5
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/models.py
|
VenkataLeela02/Django-online-course
|
a8eaaebd7cd47bf7f539f2894f9e46e61d0083c5
|
[
"Apache-2.0"
] | null | null | null |
import sys
from django.utils.timezone import now
try:
from django.db import models
except Exception:
print("There was an error loading django modules. Do you have django installed?")
sys.exit()
from django.conf import settings
import uuid
# Instructor model
class Instructor(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
full_time = models.BooleanField(default=True)
total_learners = models.IntegerField()
def __str__(self):
return self.user.username
# Learner model
class Learner(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
STUDENT = 'student'
DEVELOPER = 'developer'
DATA_SCIENTIST = 'data_scientist'
DATABASE_ADMIN = 'dba'
OCCUPATION_CHOICES = [
(STUDENT, 'Student'),
(DEVELOPER, 'Developer'),
(DATA_SCIENTIST, 'Data Scientist'),
(DATABASE_ADMIN, 'Database Admin')
]
occupation = models.CharField(
null=False,
max_length=20,
choices=OCCUPATION_CHOICES,
default=STUDENT
)
social_link = models.URLField(max_length=200)
def __str__(self):
return self.user.username + "," + \
self.occupation
# Course model
class Course(models.Model):
name = models.CharField(null=False, max_length=30, default='online course')
image = models.ImageField(upload_to='course_images/')
description = models.CharField(max_length=1000)
pub_date = models.DateField(null=True)
instructors = models.ManyToManyField(Instructor)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, through='Enrollment')
total_enrollment = models.IntegerField(default=0)
is_enrolled = False
def __str__(self):
return "Name: " + self.name + "," + \
"Description: " + self.description
# Lesson model
class Lesson(models.Model):
title = models.CharField(max_length=200, default="title")
order = models.IntegerField(default=0)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
content = models.TextField()
# Enrollment model
# <HINT> Once a user enrolled a class, an enrollment entry should be created between the user and course
# And we could use the enrollment to track information such as exam submissions
class Enrollment(models.Model):
AUDIT = 'audit'
HONOR = 'honor'
BETA = 'BETA'
COURSE_MODES = [
(AUDIT, 'Audit'),
(HONOR, 'Honor'),
(BETA, 'BETA')
]
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
date_enrolled = models.DateField(default=now)
mode = models.CharField(max_length=5, choices=COURSE_MODES, default=AUDIT)
rating = models.FloatField(default=5.0)
# <HINT> Create a Question Model with:
# Used to persist question content for a course
# Has a One-To-Many (or Many-To-Many if you want to reuse questions) relationship with course
# Has a grade point for each question
# Has question content
# Other fields and methods you would like to design
#class Question(models.Model):
# Foreign key to lesson
# question text
# question grade/mark
# <HINT> A sample model method to calculate if learner get the score of the question
def is_get_score(self, selected_ids):
all_answers = self.choice_set.filter(is_correct=True).count()
selected_correct = self.choice_set.filter(is_correct=True, id__in=selected_ids).count()
if all_answers == selected_correct:
return True
else:
return False
class Question(models.Model):
lesson_id = models.ForeignKey(Lesson, on_delete=models.CASCADE)
grade = models.FloatField(default=0)
question_text = models.TextField()
def is_get_score(self, selected_ids):
all_answers = self.choice_set.filter(is_correct=True).count()
selected_correct = self.choice_set.filter(is_correct=True, id__in=selected_ids).count()
if all_answers == selected_correct:
return True
else:
return False
# <HINT> Create a Choice Model with:
# Used to persist choice content for a question
# One-To-Many (or Many-To-Many if you want to reuse choices) relationship with Question
# Choice content
# Indicate if this choice of the question is a correct one or not
# Other fields and methods you would like to design
# class Choice(models.Model):
class Choice(models.Model):
question_id = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.TextField()
is_correct = models.BooleanField(default=False)
# <HINT> The submission model
# One enrollment could have multiple submission
# One submission could have multiple choices
# One choice could belong to multiple submissions
class Submission(models.Model):
enrollment = models.ForeignKey(Enrollment, on_delete=models.CASCADE)
choices = models.ManyToManyField(Choice)
# Other fields and methods you would like to design
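# --- Illustrative usage (added; not part of the original assignment code) ----
# The hints above describe how a Submission links an Enrollment to the Choices a
# learner selected. The helper below is one hypothetical way a view could grade
# such a submission with Question.is_get_score; the function name and the
# percentage scale are assumptions for illustration only.
def example_grade_submission(submission):
    """Return the learner's score for a submission as a percentage (sketch)."""
    selected_ids = submission.choices.values_list('id', flat=True)
    questions = Question.objects.filter(lesson_id__course=submission.enrollment.course)
    total = sum(q.grade for q in questions)
    earned = sum(q.grade for q in questions if q.is_get_score(selected_ids))
    return 100 * earned / total if total else 0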
| 33.54902
| 104
| 0.698617
|
884653da3b0fbd5a0149b03ebc26425732eb1b2a
| 412
|
py
|
Python
|
lostik/lostik.py
|
anarkiwi/radiostuff
|
8586747b50603e0cad567de7c7ce231fd94a36c5
|
[
"BSD-3-Clause"
] | 10
|
2018-12-25T23:49:24.000Z
|
2021-10-03T00:05:25.000Z
|
lostik/lostik.py
|
anarkiwi/radiostuff
|
8586747b50603e0cad567de7c7ce231fd94a36c5
|
[
"BSD-3-Clause"
] | 3
|
2018-02-18T11:55:54.000Z
|
2021-11-08T22:09:32.000Z
|
lostik/lostik.py
|
anarkiwi/radiostuff
|
8586747b50603e0cad567de7c7ce231fd94a36c5
|
[
"BSD-3-Clause"
] | 8
|
2017-04-09T13:07:30.000Z
|
2021-09-10T01:25:12.000Z
|
#!/usr/bin/env python3
def send_cmd(ser, cmd, checker=None):
print("SEND> %s" % cmd)
ser.write((cmd + "\r\n").encode('utf-8'))
ret = read_line(ser)
if checker is not None:
if ret != checker:
raise Exception("bad reply to <%s>: Want <%s>, got <%s>" % (cmd, checker, ret))
return ret
def read_line(ser):
reply = ser.readline()
return reply.strip().decode('utf-8')
| 27.466667
| 91
| 0.57767
|
ed3219e6ca62c15df24225139d7b567630908a94
| 152
|
py
|
Python
|
tensorclan/dataset/__init__.py
|
extensive-vision-ai/TheTensorClan
|
54b50fcb8f309909478547f37f171d022a838167
|
[
"MIT"
] | null | null | null |
tensorclan/dataset/__init__.py
|
extensive-vision-ai/TheTensorClan
|
54b50fcb8f309909478547f37f171d022a838167
|
[
"MIT"
] | 11
|
2020-07-31T02:26:29.000Z
|
2022-02-08T18:59:59.000Z
|
tensorclan/dataset/__init__.py
|
extensive-vision-ai/TheTensorClan
|
54b50fcb8f309909478547f37f171d022a838167
|
[
"MIT"
] | 1
|
2020-11-24T17:02:54.000Z
|
2020-11-24T17:02:54.000Z
|
from .base_dataset import BaseDataset
from .zoo import *
from .dataset import dataset, get_dataset, get_dataset_cls
from .utils import get_mean_and_std
| 30.4
| 58
| 0.835526
|
0a6867f891e66a7b37b3eef0552b07d1915c6ead
| 2,399
|
py
|
Python
|
py_scripts/dtw.py
|
yu-sakana/dartspose
|
7d65beaf037840511fac18cde8da07e5656c94e6
|
[
"Apache-2.0"
] | null | null | null |
py_scripts/dtw.py
|
yu-sakana/dartspose
|
7d65beaf037840511fac18cde8da07e5656c94e6
|
[
"Apache-2.0"
] | null | null | null |
py_scripts/dtw.py
|
yu-sakana/dartspose
|
7d65beaf037840511fac18cde8da07e5656c94e6
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pylab as plt
import seaborn as sns
import pandas as pd
d = lambda a,b: (a - b)**2
first = lambda x: x[0]
second = lambda x: x[1]
def Normalization(p):
min_p = p.min()
max_p = p.max()
nor = (p - min_p) / (max_p - min_p)
return nor
def minVal(v1, v2, v3):
if first(v1) <= min(first(v2), first(v3)):
return v1, 0
elif first(v2) <= first(v3):
return v2, 1
else:
return v3, 2
def calc_dtw(A, B):
S = len(A)
T = len(B)
m = [[0 for j in range(T)] for i in range(S)]
m[0][0] = (d(A[0],B[0]), (-1,-1))
for i in range(1,S):
m[i][0] = (m[i-1][0][0] + d(A[i], B[0]), (i-1,0))
for j in range(1,T):
m[0][j] = (m[0][j-1][0] + d(A[0], B[j]), (0,j-1))
for i in range(1,S):
for j in range(1,T):
minimum, index = minVal(m[i-1][j], m[i][j-1], m[i-1][j-1])
indexes = [(i-1,j), (i,j-1), (i-1,j-1)]
m[i][j] = (first(minimum)+d(A[i], B[j]), indexes[index])
return m
def backward(m):
path = []
path.append([len(m)-1, len(m[0])-1])
while True:
path.append(m[path[-1][0]][path[-1][1]][1])
if path[-1]==(0,0):
break
path = np.array(path)
return path
import matplotlib.gridspec as gridspec
def plot_path(path, A, B, m):                   # m: DTW cost matrix returned by calc_dtw
gs = gridspec.GridSpec(2, 2,
width_ratios=[1,5],
height_ratios=[5,1]
)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax4 = plt.subplot(gs[3])
list_d = [[t[0] for t in row] for row in m]
list_d = np.array(list_d)
ax2.pcolor(list_d, cmap=plt.cm.Blues)
ax2.plot(path[:,1], path[:,0], c="C3")
ax1.plot(A, range(len(A)))
ax1.invert_xaxis()
ax4.plot(B, c="C1")
plt.show()
for line in path:
plt.plot(line, [A[line[0]], B[line[1]]], linewidth=0.2, c="gray")
plt.plot(A)
plt.plot(B)
plt.show()
if __name__ == '__main__':
df = pd.read_csv('test.csv',header=None)
# o = df[0]
o = np.array(df[0])
# s = df[1]
s = np.array(df[1])
s = s[~np.isnan(s)]
o = Normalization(o)
s = Normalization(s)
#print(s)
#print(df)
    #print("your score: ",100*(1-m[-1][-1][0]))
    m = calc_dtw(o, s)                          # compute the DTW cost matrix once
    print(m[-1][-1][0])                         # accumulated DTW distance between the two series
path = backward(m)
    plot_path(path, o, s, m)
| 24.989583
| 73
| 0.498124
|
57742affd77e4d3bd0013ca61905d9fb7fe33b54
| 991
|
py
|
Python
|
test/test_linear.py
|
oshToy/Character-Aware-LM-WER
|
555e04e4ad0acdbc32c7b57a75b0ec72d2d1b762
|
[
"MIT"
] | 240
|
2016-09-24T06:51:47.000Z
|
2021-05-31T12:19:48.000Z
|
test/test_linear.py
|
oshToy/Character-Aware-LM-WER
|
555e04e4ad0acdbc32c7b57a75b0ec72d2d1b762
|
[
"MIT"
] | 21
|
2016-08-26T19:40:56.000Z
|
2019-06-19T14:46:19.000Z
|
test/test_linear.py
|
oshToy/Character-Aware-LM-WER
|
555e04e4ad0acdbc32c7b57a75b0ec72d2d1b762
|
[
"MIT"
] | 105
|
2016-10-04T09:51:59.000Z
|
2021-11-21T12:10:54.000Z
|
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import numpy as np
from model import linear
class TestLinear(tf.test.TestCase):
def test(self):
with self.test_session() as sess:
m = tf.constant(np.array([
[1.0, 2.0],
[2.0, 0.0]
], dtype=np.float32))
l = linear(m, 4)
result = sess.run(l, {
'SimpleLinear/Matrix:0': np.array([
[1.0, 2.0],
[1.0, 2.0],
[1.0, 2.0],
[1.0, 2.0],
]),
'SimpleLinear/Bias:0': np.array([
0.0,
1.0,
2.0,
3.0,
]),
})
self.assertAllClose(result, np.array([
[5.0, 6.0, 7.0, 8.0],
[2.0, 3.0, 4.0, 5.0],
]))
print(result)
| 23.595238
| 51
| 0.382442
|
cad70b83929b07e7b357bbc36c463aa14f554d88
| 5,240
|
py
|
Python
|
tools/get_photo.py
|
bokunimowakaru/m5nicr
|
e41d4ca722cc335986863e0d599349c43fc9c43e
|
[
"MIT"
] | 2
|
2020-11-17T22:11:45.000Z
|
2020-12-27T09:53:06.000Z
|
tools/get_photo.py
|
bokunimowakaru/m5nicr
|
e41d4ca722cc335986863e0d599349c43fc9c43e
|
[
"MIT"
] | null | null | null |
tools/get_photo.py
|
bokunimowakaru/m5nicr
|
e41d4ca722cc335986863e0d599349c43fc9c43e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
################################################################################
# Fetch the still images published by the camera - Python version
#
################################################################################
# Free to use, modify, and redistribute, but provided without any warranty.
# The rights notice must not be altered.
# Copyright (c) 2016-2021 Wataru KUNINO
################################################################################
DEVICE1 = 'cam_a_5'                                 # publishing device name (camera)
DEVICE2 = 'pir_s_5'                                 # publishing device name (PIR motion sensor)
SAVETO = 'photo'                                    # folder where photos are saved
IP_CAM = None                                       # IP address of the camera
PORT = 1024                                         # UDP port number, set to 1024
import pathlib                                      # file-path handling
import socket                                       # socket communication
import urllib.request                               # HTTP client library
import datetime                                     # date and time handling
import time                                         # epoch time
def cam(ip, filename = 'cam.jpg'):                  # IoT camera
    filename = SAVETO + '/' + filename
    url_s = 'http://' + ip                          # target URL in url_s
    s = '/cam.jpg'                                  # query string in variable s
    try:
        res = urllib.request.urlopen(url_s + s)     # trigger a capture on the IoT camera
        if res.headers['content-type'].lower().find('image/jpeg') < 0:
            print('Error content-type :',res.headers['content-type'])
            return None                             # return None when the reply is not JPEG
    except urllib.error.URLError:                   # on URL error
        print('Error urllib :',url_s)               # show the error
        return None                                 # leave the function
    data = res.read()                               # read the content (JPEG)
    try:
        fp = open(filename, 'wb')                   # open the output file
    except Exception as e:                          # on exception
        print(e)                                    # show the error details
        return None                                 # leave the function
    fp.write(data)                                  # write the photo to the file
    fp.close()                                      # close the file
    print('saved file :',filename)                  # show the saved file name
    return filename                                 # return the file name
print('Get Photo for Python')                       # show the title
pathlib.Path(SAVETO).mkdir(exist_ok=True)           # create the output folder
time_start = time.time()                            # remember the start time
print('Listening UDP port', PORT, '...')            # show the port number
try:
    sock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)# create a socket
    sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1) # socket option
    sock.bind(('', PORT))                           # bind the socket
except Exception as e:                              # on exception
    print(e)                                        # show the error details
    exit()                                          # terminate the program
while sock:                                         # loop forever
    try:
        udp, udp_from = sock.recvfrom(128)          # receive a UDP packet
        udp = udp.decode()                          # decode the UDP data to a string
    except Exception as e:                          # on exception
        print(e)                                    # show the error details
        continue                                    # back to the top of the loop
    s=''                                            # string variable s for display
    for c in udp:                                   # for each character in the packet
        if ord(c) > ord(' ') and ord(c) <= ord('~'): # printable characters only
            s += c                                  # append to string s
    date = datetime.datetime.today()                # get the current date and time
    date_s = date.strftime('%Y/%m/%d %H:%M:%S')     # convert the date to a string
    print(date_s + ', ', end='')                    # print the date
    print(udp_from[0], end='')                      # print the sender address
    print(', ' + s, flush=True)                     # print the received data
    if s[5] != '_' or s[7] != ',':                  # not in the expected format
        continue                                    # back to the top of the loop
    device = s[0:7]                                 # first 7 characters are the device name
    value = s.split(',')                            # split the CSV data
    if device == DEVICE1:                           # received from the camera
        if (time.time() - time_start < 300):        # within 5 minutes of start-up
            IP_CAM = udp_from[0]                    # keep the camera IP address
            print('IP_CAM =',IP_CAM)                # show it
        elif IP_CAM != udp_from[0]:                 # address mismatch
            print('More than 5 minutes since start-up; the destination is not updated')
            continue                                # back to the top of the loop
    if device == DEVICE1 or device == DEVICE2:      # packet from a target device
        if IP_CAM is not None:                      # when the camera address is known
            date_f = date.strftime('%Y%m%d-%H%M%S') # timestamp format for the file name
            cam(IP_CAM,'cam_' + date_f + '.jpg')    # take a photo (save it to a file)
sock.close()                                        # close the socket
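# Note (added; not in the original script): each device is expected to publish a
# short CSV line whose first field is its 7-character device name, for example
# "cam_a_5, 201" or "pir_s_5, 1, 27" (sample values invented here); the
# s[5] == '_' and s[7] == ',' checks above simply drop packets that do not match.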
| 57.582418
| 80
| 0.358779
|
fb7163197de27fd924acf409a0b96c455d15352c
| 9,766
|
py
|
Python
|
yandex/cloud/mdb/mongodb/v1/resource_preset_service_pb2.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 36
|
2018-12-23T13:51:50.000Z
|
2022-03-25T07:48:24.000Z
|
yandex/cloud/mdb/mongodb/v1/resource_preset_service_pb2.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 15
|
2019-02-28T04:55:09.000Z
|
2022-03-06T23:17:24.000Z
|
yandex/cloud/mdb/mongodb/v1/resource_preset_service_pb2.py
|
korsar182/python-sdk
|
873bf2a9b136a8f2faae72e86fae1f5b5c3d896a
|
[
"MIT"
] | 18
|
2019-02-23T07:10:57.000Z
|
2022-03-28T14:41:08.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/mdb/mongodb/v1/resource_preset_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from yandex.cloud import validation_pb2 as yandex_dot_cloud_dot_validation__pb2
from yandex.cloud.mdb.mongodb.v1 import resource_preset_pb2 as yandex_dot_cloud_dot_mdb_dot_mongodb_dot_v1_dot_resource__preset__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/mdb/mongodb/v1/resource_preset_service.proto',
package='yandex.cloud.mdb.mongodb.v1',
syntax='proto3',
serialized_options=b'\n\037yandex.cloud.api.mdb.mongodb.v1ZGgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1;mongodb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n9yandex/cloud/mdb/mongodb/v1/resource_preset_service.proto\x12\x1byandex.cloud.mdb.mongodb.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x1dyandex/cloud/validation.proto\x1a\x31yandex/cloud/mdb/mongodb/v1/resource_preset.proto\"<\n\x18GetResourcePresetRequest\x12 \n\x12resource_preset_id\x18\x01 \x01(\tB\x04\xe8\xc7\x31\x01\"Z\n\x1aListResourcePresetsRequest\x12\x1d\n\tpage_size\x18\x02 \x01(\x03\x42\n\xfa\xc7\x31\x06<=1000\x12\x1d\n\npage_token\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=100\"}\n\x1bListResourcePresetsResponse\x12\x45\n\x10resource_presets\x18\x01 \x03(\x0b\x32+.yandex.cloud.mdb.mongodb.v1.ResourcePreset\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\xee\x02\n\x15ResourcePresetService\x12\xab\x01\n\x03Get\x12\x35.yandex.cloud.mdb.mongodb.v1.GetResourcePresetRequest\x1a+.yandex.cloud.mdb.mongodb.v1.ResourcePreset\"@\x82\xd3\xe4\x93\x02:\x12\x38/managed-mongodb/v1/resourcePresets/{resource_preset_id}\x12\xa6\x01\n\x04List\x12\x37.yandex.cloud.mdb.mongodb.v1.ListResourcePresetsRequest\x1a\x38.yandex.cloud.mdb.mongodb.v1.ListResourcePresetsResponse\"+\x82\xd3\xe4\x93\x02%\x12#/managed-mongodb/v1/resourcePresetsBj\n\x1fyandex.cloud.api.mdb.mongodb.v1ZGgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mongodb/v1;mongodbb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,yandex_dot_cloud_dot_validation__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mongodb_dot_v1_dot_resource__preset__pb2.DESCRIPTOR,])
_GETRESOURCEPRESETREQUEST = _descriptor.Descriptor(
name='GetResourcePresetRequest',
full_name='yandex.cloud.mdb.mongodb.v1.GetResourcePresetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_preset_id', full_name='yandex.cloud.mdb.mongodb.v1.GetResourcePresetRequest.resource_preset_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=202,
serialized_end=262,
)
_LISTRESOURCEPRESETSREQUEST = _descriptor.Descriptor(
name='ListResourcePresetsRequest',
full_name='yandex.cloud.mdb.mongodb.v1.ListResourcePresetsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='page_size', full_name='yandex.cloud.mdb.mongodb.v1.ListResourcePresetsRequest.page_size', index=0,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\006<=1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_token', full_name='yandex.cloud.mdb.mongodb.v1.ListResourcePresetsRequest.page_token', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\005<=100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=264,
serialized_end=354,
)
_LISTRESOURCEPRESETSRESPONSE = _descriptor.Descriptor(
name='ListResourcePresetsResponse',
full_name='yandex.cloud.mdb.mongodb.v1.ListResourcePresetsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_presets', full_name='yandex.cloud.mdb.mongodb.v1.ListResourcePresetsResponse.resource_presets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='yandex.cloud.mdb.mongodb.v1.ListResourcePresetsResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=356,
serialized_end=481,
)
_LISTRESOURCEPRESETSRESPONSE.fields_by_name['resource_presets'].message_type = yandex_dot_cloud_dot_mdb_dot_mongodb_dot_v1_dot_resource__preset__pb2._RESOURCEPRESET
DESCRIPTOR.message_types_by_name['GetResourcePresetRequest'] = _GETRESOURCEPRESETREQUEST
DESCRIPTOR.message_types_by_name['ListResourcePresetsRequest'] = _LISTRESOURCEPRESETSREQUEST
DESCRIPTOR.message_types_by_name['ListResourcePresetsResponse'] = _LISTRESOURCEPRESETSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetResourcePresetRequest = _reflection.GeneratedProtocolMessageType('GetResourcePresetRequest', (_message.Message,), {
'DESCRIPTOR' : _GETRESOURCEPRESETREQUEST,
'__module__' : 'yandex.cloud.mdb.mongodb.v1.resource_preset_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mongodb.v1.GetResourcePresetRequest)
})
_sym_db.RegisterMessage(GetResourcePresetRequest)
ListResourcePresetsRequest = _reflection.GeneratedProtocolMessageType('ListResourcePresetsRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTRESOURCEPRESETSREQUEST,
'__module__' : 'yandex.cloud.mdb.mongodb.v1.resource_preset_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mongodb.v1.ListResourcePresetsRequest)
})
_sym_db.RegisterMessage(ListResourcePresetsRequest)
ListResourcePresetsResponse = _reflection.GeneratedProtocolMessageType('ListResourcePresetsResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTRESOURCEPRESETSRESPONSE,
'__module__' : 'yandex.cloud.mdb.mongodb.v1.resource_preset_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mongodb.v1.ListResourcePresetsResponse)
})
_sym_db.RegisterMessage(ListResourcePresetsResponse)
DESCRIPTOR._options = None
_GETRESOURCEPRESETREQUEST.fields_by_name['resource_preset_id']._options = None
_LISTRESOURCEPRESETSREQUEST.fields_by_name['page_size']._options = None
_LISTRESOURCEPRESETSREQUEST.fields_by_name['page_token']._options = None
_RESOURCEPRESETSERVICE = _descriptor.ServiceDescriptor(
name='ResourcePresetService',
full_name='yandex.cloud.mdb.mongodb.v1.ResourcePresetService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=484,
serialized_end=850,
methods=[
_descriptor.MethodDescriptor(
name='Get',
full_name='yandex.cloud.mdb.mongodb.v1.ResourcePresetService.Get',
index=0,
containing_service=None,
input_type=_GETRESOURCEPRESETREQUEST,
output_type=yandex_dot_cloud_dot_mdb_dot_mongodb_dot_v1_dot_resource__preset__pb2._RESOURCEPRESET,
serialized_options=b'\202\323\344\223\002:\0228/managed-mongodb/v1/resourcePresets/{resource_preset_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='List',
full_name='yandex.cloud.mdb.mongodb.v1.ResourcePresetService.List',
index=1,
containing_service=None,
input_type=_LISTRESOURCEPRESETSREQUEST,
output_type=_LISTRESOURCEPRESETSRESPONSE,
serialized_options=b'\202\323\344\223\002%\022#/managed-mongodb/v1/resourcePresets',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_RESOURCEPRESETSERVICE)
DESCRIPTOR.services_by_name['ResourcePresetService'] = _RESOURCEPRESETSERVICE
# @@protoc_insertion_point(module_scope)
| 46.504762
| 1,274
| 0.799508
|
3c6e880213c0d51be5e1c62ee1aff7cca1debb53
| 453
|
py
|
Python
|
server/src/models/abstract_models/meal_model.py
|
minhman727/miao-nutrition-assistant-
|
9cdd54d932f2018d404236bbc5874003e0c6f666
|
[
"MIT"
] | null | null | null |
server/src/models/abstract_models/meal_model.py
|
minhman727/miao-nutrition-assistant-
|
9cdd54d932f2018d404236bbc5874003e0c6f666
|
[
"MIT"
] | null | null | null |
server/src/models/abstract_models/meal_model.py
|
minhman727/miao-nutrition-assistant-
|
9cdd54d932f2018d404236bbc5874003e0c6f666
|
[
"MIT"
] | null | null | null |
from mongoengine import *
from src.models.abstract_models.meal_detail_model import MealDetail
from src.constants.enums import MealType
from src.models.abstract_models.nutrition_base_model import NutritionBase
class Meal(NutritionBase):
mealType = EnumField(MealType, required=True)
mealDetailId = ListField(GenericReferenceField(), required=True)
totalQuantity = IntField(required=True, min_value=1)
meta = {
"abstract": True
}
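# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Because meta marks Meal as abstract, MongoDB only gets a collection once a
# concrete subclass is declared. The class and extra field below are hypothetical
# and only show how the abstract base would be reused.
class ExampleLoggedMeal(Meal):
    consumedAt = DateTimeField(required=True)   # hypothetical field; DateTimeField comes from mongoengine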
| 34.846154
| 69
| 0.779249
|
4d411363d2b2fcfc6cd8125608af4c038926c03d
| 463
|
py
|
Python
|
cride/circles/permissions/circles.py
|
Bruno321/cride
|
bfd911694e3a22f70272f17cde464f5d665d2033
|
[
"MIT"
] | null | null | null |
cride/circles/permissions/circles.py
|
Bruno321/cride
|
bfd911694e3a22f70272f17cde464f5d665d2033
|
[
"MIT"
] | null | null | null |
cride/circles/permissions/circles.py
|
Bruno321/cride
|
bfd911694e3a22f70272f17cde464f5d665d2033
|
[
"MIT"
] | null | null | null |
from rest_framework.permissions import BasePermission
from cride.circles.models import Membership
class IsCircleAdmin(BasePermission):
    def has_object_permission(self, request, view, obj):
try:
Membership.objects.get(
user=request.user,
circle=obj,
is_admin=True,
is_active=True
)
except Membership.DoesNotExist:
return False
return True
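# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A BasePermission subclass is activated by listing it in a view's permission_classes;
# the viewset below is hypothetical and only shows the wiring.
from rest_framework import viewsets
class ExampleCircleViewSet(viewsets.ModelViewSet):  # hypothetical name, for illustration only
    permission_classes = [IsCircleAdmin]            # object-level check runs on detail routes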
| 28.9375
| 53
| 0.609071
|
2a5c525bd4444d189b8d37e8ec4d4876f7771675
| 20,473
|
py
|
Python
|
chapter_10/app_v10_1.py
|
shane-kercheval/Interactive-Dashboards-and-Data-Apps-with-Plotly-and-Dash
|
3652e6fc5da31afe91410d7db140129152e08289
|
[
"MIT"
] | null | null | null |
chapter_10/app_v10_1.py
|
shane-kercheval/Interactive-Dashboards-and-Data-Apps-with-Plotly-and-Dash
|
3652e6fc5da31afe91410d7db140129152e08289
|
[
"MIT"
] | null | null | null |
chapter_10/app_v10_1.py
|
shane-kercheval/Interactive-Dashboards-and-Data-Apps-with-Plotly-and-Dash
|
3652e6fc5da31afe91410d7db140129152e08289
|
[
"MIT"
] | null | null | null |
import re
from typing import Collection
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Output, Input, State
from dash.exceptions import PreventUpdate
from dash_table import DataTable
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.COSMO])
server = app.server
poverty_data = pd.read_csv('../data/PovStatsData.csv')
poverty = pd.read_csv('../data/poverty.csv', low_memory=False)
series = pd.read_csv('../data/PovStatsSeries.csv')
gini = 'GINI index (World Bank estimate)'
gini_df = poverty[poverty[gini].notna()]
regions = ['East Asia & Pacific', 'Europe & Central Asia',
'Fragile and conflict affected situations', 'High income',
'IDA countries classified as fragile situations', 'IDA total',
'Latin America & Caribbean', 'Low & middle income', 'Low income',
'Lower middle income', 'Middle East & North Africa',
'Middle income', 'South Asia', 'Sub-Saharan Africa',
'Upper middle income', 'World']
population_df = poverty_data[~poverty_data['Country Name'].isin(regions) &
(poverty_data['Indicator Name']== 'Population, total')]
income_share_df = poverty.filter(regex='Country Name|^year$|Income share.*?20').dropna()
income_share_df = income_share_df.rename(columns={
'Income share held by lowest 20%': '1 Income share held by lowest 20%',
'Income share held by second 20%': '2 Income share held by second 20%',
'Income share held by third 20%': '3 Income share held by third 20%',
'Income share held by fourth 20%': '4 Income share held by fourth 20%',
'Income share held by highest 20%': '5 Income share held by highest 20%'
}).sort_index(axis=1)
income_share_df.columns = [re.sub(r'\d Income share held by ', '', col).title()
for col in income_share_df.columns]
income_share_cols = income_share_df.columns[:-2]
perc_pov_cols = poverty.filter(regex='Poverty gap').columns
perc_pov_df = poverty[poverty['is_country']].dropna(subset=perc_pov_cols)
perc_pov_years = sorted(set(perc_pov_df['year']))
cividis0 = px.colors.sequential.Cividis[0]
def make_empty_fig():
fig = go.Figure()
fig.layout.paper_bgcolor = '#E5ECF6'
fig.layout.plot_bgcolor = '#E5ECF6'
return fig
def multiline_indicator(indicator):
final = []
split = indicator.split()
for i in range(0, len(split), 3):
final.append(' '.join(split[i:i+3]))
return '<br>'.join(final)
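# For example (editor's note): multiline_indicator('Income share held by lowest 20%')
# returns 'Income share held<br>by lowest 20%', i.e. a line break after every three words.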
app.layout = html.Div([
dbc.Col([
html.Br(),
html.H1('Poverty And Equity Database'),
html.H2('The World Bank'),
], style={'textAlign': 'center'}),
html.Br(),
dbc.Row([
dbc.Col(lg=2),
dbc.Col([
dbc.Tabs([
dbc.Tab([
html.Br(),
dcc.Dropdown(id='indicator_dropdown',
value='GINI index (World Bank estimate)',
options=[{'label': indicator,
'value': indicator}
for indicator in poverty.columns[3:54]]),
dcc.Graph(id='indicator_map_chart'),
dcc.Markdown(id='indicator_map_details_md',
style={'backgroundColor': '#E5ECF6'})
], label='Explore Metrics'),
dbc.Tab([
html.Br(),
dbc.Row([
dbc.Col(lg=1),
dbc.Col([
dbc.Label('Select the year:'),
dcc.Slider(id='year_cluster_slider',
min=1974, max=2018, step=1, included=False,
value=2018,
marks={year: str(year)
for year in range(1974, 2019, 5)})
], lg=6, md=12),
dbc.Col([
dbc.Label('Select the number of clusters:'),
dcc.Slider(id='ncluster_cluster_slider',
min=2, max=15, step=1, included=False,
value=4,
marks={n: str(n) for n in range(2, 16)}),
], lg=4, md=12)
]),
html.Br(),
dbc.Row([
dbc.Col(lg=1),
dbc.Col([
dbc.Label('Select Indicators:'),
dcc.Dropdown(id='cluster_indicator_dropdown',optionHeight=40,
multi=True,
value=['GINI index (World Bank estimate)'],
options=[{'label': indicator, 'value': indicator}
for indicator in poverty.columns[3:54]]),
], lg=6),
dbc.Col([
dbc.Label(''),html.Br(),
dbc.Button("Submit", id='clustering_submit_button'),
]),
]),
dcc.Loading([
dcc.Graph(id='clustered_map_chart')
])
], label='Cluster Countries'),
]),
], lg=8)
]),
html.Br(),
html.Br(),
html.Hr(),
dbc.Row([
dbc.Col(lg=2),
dbc.Col([
dbc.Label('Indicator:'),
dcc.Dropdown(id='hist_indicator_dropdown',optionHeight=40,
value='GINI index (World Bank estimate)',
options=[{'label': indicator, 'value': indicator}
for indicator in poverty.columns[3:54]]),
], lg=5),
dbc.Col([
dbc.Label('Years:'),
dcc.Dropdown(id='hist_multi_year_selector',
multi=True,
value=[2015],
placeholder='Select one or more years',
options=[{'label': year, 'value': year}
for year in poverty['year'].drop_duplicates().sort_values()]),
], lg=3),
]),
html.Br(),
dbc.Row([
dbc.Col(lg=2),
dbc.Col([
html.Br(),
dbc.Label('Modify number of bins:'),
dcc.Slider(id='hist_bins_slider',
dots=True, min=0, max=100, step=5, included=False,
marks={x: str(x) for x in range(0, 105, 5)}),
dcc.Graph(id='indicator_year_histogram',figure=make_empty_fig()),
], lg=8)
]),
dbc.Row([
dbc.Col(lg=2),
dbc.Col([
html.Div(id='table_histogram_output'),
html.Br(), html.Br(),
], lg=8)
]),
html.H2('Gini Index - World Bank Data', style={'textAlign': 'center'}),
html.Br(),
dbc.Row([
dbc.Col(lg=1),
dbc.Col([
dbc.Label('Year'),
dcc.Dropdown(id='gini_year_dropdown',
placeholder='Select a year',
options=[{'label': year, 'value': year}
for year in gini_df['year'].drop_duplicates().sort_values()]),
html.Br(),
dcc.Graph(id='gini_year_barchart',
figure=make_empty_fig())
], md=12, lg=5),
dbc.Col([
dbc.Label('Countries'),
dcc.Dropdown(id='gini_country_dropdown',
placeholder='Select one or more countries',
multi=True,
options=[{'label': country, 'value': country}
for country in gini_df['Country Name'].unique()]),
html.Br(),
dcc.Graph(id='gini_country_barchart',
figure=make_empty_fig())
], md=12, lg=5),
]),
dbc.Row([
dbc.Col(lg=2),
dbc.Col([
html.Br(),
html.H2('Income Share Distribution', style={'textAlign': 'center'}),
html.Br(),
dbc.Label('Country'),
dcc.Dropdown(id='income_share_country_dropdown',
placeholder='Select a country',
options=[{'label': country, 'value': country}
for country in income_share_df['Country Name'].unique()]),
dcc.Graph(id='income_share_country_barchart',
figure=make_empty_fig())
], lg=8)
]),
html.Br(),
html.H2('Poverty Gap at $1.9, $3.2, and $5.5 (% of population)',
style={'textAlign': 'center'}),
html.Br(),html.Br(),
dbc.Row([
dbc.Col(lg=2),
dbc.Col([
dbc.Label('Select poverty level:'),
dcc.Slider(id='perc_pov_indicator_slider',
min=0,
max=2,
step=1,
included=False,
value=0,
marks={0: {'label': '$1.9', 'style': {'color': cividis0, 'fontWeight': 'bold', 'fontSize': 15}},
1: {'label': '$3.2', 'style': {'color': cividis0, 'fontWeight': 'bold', 'fontSize': 15}},
2: {'label': '$5.5', 'style': {'color': cividis0, 'fontWeight': 'bold', 'fontSize': 15}}}),
], lg=2),
dbc.Col([
dbc.Label('Select year:'),
dcc.Slider(id='perc_pov_year_slider',
min=perc_pov_years[0],
max=perc_pov_years[-1],
step=1,
included=False,
value=2018,
marks={year: {'label': str(year),
'style': {'color': cividis0, 'fontSize': 14}}
for year in perc_pov_years[::5]}),
], lg=5),
]),
dbc.Row([
dbc.Col(lg=1),
dbc.Col([
dcc.Graph(id='perc_pov_scatter_chart',
figure=make_empty_fig())
], lg=10)
]),
dbc.Tabs([
dbc.Tab([
html.Ul([
html.Br(),
html.Li('Number of Economies: 170'),
html.Li('Temporal Coverage: 1974 - 2019'),
html.Li('Update Frequency: Quarterly'),
html.Li('Last Updated: March 18, 2020'),
html.Li([
'Source: ',
html.A('https://datacatalog.worldbank.org/dataset/poverty-and-equity-database',
href='https://datacatalog.worldbank.org/dataset/poverty-and-equity-database')
])
])
], label='Key Facts'),
dbc.Tab([
html.Ul([
html.Br(),
html.Li('Book title: Interactive Dashboards and Data Apps with Plotly and Dash'),
html.Li(['GitHub repo: ',
html.A('https://github.com/PacktPublishing/Interactive-Dashboards-and-Data-Apps-with-Plotly-and-Dash',
href='https://github.com/PacktPublishing/Interactive-Dashboards-and-Data-Apps-with-Plotly-and-Dash')
])
])
        ], label='Project Info')
]),
], style={'backgroundColor': '#E5ECF6'})
@app.callback(Output('indicator_map_chart', 'figure'),
Output('indicator_map_details_md', 'children'),
Input('indicator_dropdown', 'value'))
def display_generic_map_chart(indicator):
if indicator is None:
raise PreventUpdate
df = poverty[poverty['is_country']]
fig = px.choropleth(df, locations='Country Code',
color=indicator,
title=indicator,
hover_name='Country Name',
color_continuous_scale='cividis',
animation_frame='year', height=650)
fig.layout.geo.showframe = False
fig.layout.geo.showcountries = True
fig.layout.geo.projection.type = 'natural earth'
fig.layout.geo.lataxis.range = [-53, 76]
fig.layout.geo.lonaxis.range = [-138, 167]
fig.layout.geo.landcolor = 'white'
fig.layout.geo.bgcolor = '#E5ECF6'
fig.layout.paper_bgcolor = '#E5ECF6'
fig.layout.geo.countrycolor = 'gray'
fig.layout.geo.coastlinecolor = 'gray'
fig.layout.coloraxis.colorbar.title = multiline_indicator(indicator)
series_df = series[series['Indicator Name'].eq(indicator)]
if series_df.empty:
markdown = "No details available on this indicator"
else:
limitations = series_df['Limitations and exceptions'].fillna('N/A').str.replace('\n\n', ' ').values[0]
markdown = f"""
## {series_df['Indicator Name'].values[0]}
{series_df['Long definition'].values[0]}
* **Unit of measure** {series_df['Unit of measure'].fillna('count').values[0]}
* **Periodicity** {series_df['Periodicity'].fillna('N/A').values[0]}
* **Source** {series_df['Source'].values[0]}
### Limitations and exceptions:
{limitations}
"""
return fig, markdown
@app.callback(Output('gini_year_barchart', 'figure'),
Input('gini_year_dropdown', 'value'))
def plot_gini_year_barchart(year):
if not year:
raise PreventUpdate
df = gini_df[gini_df['year'].eq(year)].sort_values(gini).dropna(subset=[gini])
n_countries = len(df['Country Name'])
fig = px.bar(df,
x=gini,
y='Country Name',
orientation='h',
height=200 + (n_countries*20),
width=650,
title=gini + ' ' + str(year))
fig.layout.paper_bgcolor = '#E5ECF6'
return fig
@app.callback(Output('gini_country_barchart', 'figure'), Input('gini_country_dropdown', 'value'))
def plot_gini_country_barchart(countries):
if not countries:
raise PreventUpdate
df = gini_df[gini_df['Country Name'].isin(countries)].dropna(subset=[gini])
fig = px.bar(df,
x='year',
y=gini,
height=100 + (250*len(countries)),
facet_row='Country Name',
color='Country Name',
labels={gini: 'Gini Index'},
title=''.join([gini, '<br><b>', ', '.join(countries), '</b>']))
fig.layout.paper_bgcolor = '#E5ECF6'
return fig
@app.callback(Output('income_share_country_barchart', 'figure'), Input('income_share_country_dropdown', 'value'))
def plot_income_share_barchart(country):
if country is None:
raise PreventUpdate
fig = px.bar(income_share_df[income_share_df['Country Name']==country].dropna(),
x=income_share_cols,
y='Year',
barmode='stack',
height=600,
hover_name='Country Name',
title=f'Income Share Quintiles - {country}',
orientation='h')
fig.layout.legend.title = None
fig.layout.legend.orientation = 'h'
fig.layout.legend.x = 0.2
fig.layout.legend.y = -0.15
fig.layout.xaxis.title = 'Percent of Total Income'
fig.layout.paper_bgcolor = '#E5ECF6'
fig.layout.plot_bgcolor = '#E5ECF6'
return fig
@app.callback(Output('perc_pov_scatter_chart', 'figure'),
Input('perc_pov_year_slider', 'value'),
Input('perc_pov_indicator_slider', 'value'))
def plot_perc_pov_chart(year, indicator):
indicator = perc_pov_cols[indicator]
df = (perc_pov_df
[perc_pov_df['year'].eq(year)]
.dropna(subset=[indicator])
.sort_values(indicator))
if df.empty:
raise PreventUpdate
fig = px.scatter(df,
x=indicator,
y='Country Name',
color='Population, total',
size=[30]*len(df),
size_max=15,
hover_name='Country Name',
height=250 +(20*len(df)),
color_continuous_scale='cividis',
title=indicator + '<b>: ' + f'{year}' +'</b>')
fig.layout.paper_bgcolor = '#E5ECF6'
fig.layout.xaxis.ticksuffix = '%'
return fig
@app.callback(Output('indicator_year_histogram', 'figure'),
Output('table_histogram_output', 'children'),
Input('hist_multi_year_selector', 'value'),
Input('hist_indicator_dropdown', 'value'),
Input('hist_bins_slider', 'value'))
def display_histogram(years, indicator, nbins):
if (not years) or (not indicator):
raise PreventUpdate
df = poverty[poverty['year'].isin(years) & poverty['is_country']]
fig = px.histogram(df, x=indicator, facet_col='year', color='year',
title=indicator + ' Histogram',
nbins=nbins,
facet_col_wrap=4, height=700)
fig.for_each_xaxis(lambda axis: axis.update(title=''))
fig.add_annotation(text=indicator, x=0.5, y=-0.12, xref='paper', yref='paper', showarrow=False)
fig.layout.paper_bgcolor = '#E5ECF6'
    # no trailing comma after the closing parenthesis, otherwise table becomes a one-element tuple
    table = DataTable(columns=[{'name': col, 'id': col}
                               for col in df[['Country Name', 'year', indicator]].columns],
                      data=df[['Country Name', 'year', indicator]].to_dict('records'),
                      style_header={'whiteSpace': 'normal'},
                      fixed_rows={'headers': True},
                      virtualization=True,
                      style_table={'height': '400px'},
                      sort_action='native',
                      filter_action='native',
                      export_format='csv',
                      style_cell={'minWidth': '150px'})
return fig, table
@app.callback(Output('clustered_map_chart', 'figure'),
Input('clustering_submit_button', 'n_clicks'),
State('year_cluster_slider', 'value'),
State('ncluster_cluster_slider', 'value'),
State('cluster_indicator_dropdown', 'value'))
def clustered_map(n_clicks, year, n_clusters, indicators):
if not indicators:
raise PreventUpdate
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
scaler = StandardScaler()
kmeans = KMeans(n_clusters=n_clusters)
df = poverty[poverty['is_country'] & poverty['year'].eq(year)][indicators + ['Country Name', 'year']]
data = df[indicators]
if df.isna().all().any():
return px.scatter(title='No available data for the selected combination of year/indicators.')
data_no_na = imp.fit_transform(data)
scaled_data = scaler.fit_transform(data_no_na)
kmeans.fit(scaled_data)
fig = px.choropleth(df,
locations='Country Name',
locationmode='country names',
color=[str(x) for x in kmeans.labels_],
labels={'color': 'Cluster'},
hover_data=indicators,
height=650,
title=f'Country clusters - {year}. Number of clusters: {n_clusters}<br>Inertia: {kmeans.inertia_:,.2f}',
color_discrete_sequence=px.colors.qualitative.T10)
fig.add_annotation(x=-0.1, y=-0.15,
xref='paper', yref='paper',
text='Indicators:<br>' + "<br>".join(indicators),
showarrow=False)
fig.layout.geo.showframe = False
fig.layout.geo.showcountries = True
fig.layout.geo.projection.type = 'natural earth'
fig.layout.geo.lataxis.range = [-53, 76]
fig.layout.geo.lonaxis.range = [-137, 168]
fig.layout.geo.landcolor = 'white'
fig.layout.geo.bgcolor = '#E5ECF6'
fig.layout.paper_bgcolor = '#E5ECF6'
fig.layout.geo.countrycolor = 'gray'
fig.layout.geo.coastlinecolor = 'gray'
return fig
if __name__ == '__main__':
app.run_server(debug=True, port=8070)
| 40.621032
| 132
| 0.52093
|
4a81bfc91d2b574a11a99cfa8add7a75f11933f3
| 2,825
|
py
|
Python
|
msautotest/mspython/test_mapio.py
|
MapServer-backport-bot/MapServer
|
bec3033ed54949b075fbd0646b3d7bda26ea1b8a
|
[
"Unlicense"
] | null | null | null |
msautotest/mspython/test_mapio.py
|
MapServer-backport-bot/MapServer
|
bec3033ed54949b075fbd0646b3d7bda26ea1b8a
|
[
"Unlicense"
] | 1
|
2021-06-07T17:49:05.000Z
|
2021-06-07T18:07:15.000Z
|
msautotest/mspython/test_mapio.py
|
MapServer-backport-bot/MapServer
|
bec3033ed54949b075fbd0646b3d7bda26ea1b8a
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: MapServer
# Purpose: Regression test for mapio
# Author: Even Rouault
#
###############################################################################
# Copyright (c) 2017, Even Rouault,<even.rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import pytest
mapscript_available = False
try:
import mapscript
mapscript_available = True
except ImportError:
pass
pytestmark = pytest.mark.skipif(not mapscript_available, reason="mapscript not available")
def get_relpath_to_this(filename):
return os.path.join(os.path.dirname(__file__), filename)
###############################################################################
#
def test_msIO_getAndStripStdoutBufferMimeHeaders():
if 'SUPPORTS=WMS' not in mapscript.msGetVersion():
pytest.skip()
map = mapscript.mapObj(get_relpath_to_this('test_mapio.map'))
request = mapscript.OWSRequest()
mapscript.msIO_installStdoutToBuffer()
request.loadParamsFromURL('service=WMS&version=1.1.1&request=GetMap&layers=grey&srs=EPSG:4326&bbox=-180,-90,180,90&format=image/png&width=80&height=40')
status = map.OWSDispatch(request)
assert status == 0
headers = mapscript.msIO_getAndStripStdoutBufferMimeHeaders()
assert headers is not None
assert 'Content-Type' in headers
assert headers['Content-Type'] == 'image/png'
assert 'Cache-Control' in headers
assert headers['Cache-Control'] == 'max-age=86400'
result = mapscript.msIO_getStdoutBufferBytes()
assert result is not None
assert result[1:4] == b'PNG'
| 39.788732
| 156
| 0.660531
|
940df2a8dd4ef74dcdb6ee05f2ff644b7d1cd136
| 102,719
|
py
|
Python
|
pysnmp-with-texts/RADIO-BRIDGE-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/RADIO-BRIDGE-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/RADIO-BRIDGE-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module RADIO-BRIDGE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/RADIO-BRIDGE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:44:43 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
dot1agCfmMepEntry, = mibBuilder.importSymbols("IEEE8021-CFM-MIB", "dot1agCfmMepEntry")
ieee8021QBridgeTpFdbEntry, = mibBuilder.importSymbols("IEEE8021-Q-BRIDGE-MIB", "ieee8021QBridgeTpFdbEntry")
ifIndex, InterfaceIndex = mibBuilder.importSymbols("IF-MIB", "ifIndex", "InterfaceIndex")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, NotificationType, Counter64, Gauge32, TimeTicks, Bits, ObjectIdentity, Integer32, ModuleIdentity, MibIdentifier, enterprises, IpAddress, Unsigned32, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "NotificationType", "Counter64", "Gauge32", "TimeTicks", "Bits", "ObjectIdentity", "Integer32", "ModuleIdentity", "MibIdentifier", "enterprises", "IpAddress", "Unsigned32", "iso")
DisplayString, RowStatus, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TextualConvention", "TruthValue")
radioBridgeRoot = MibIdentifier((1, 3, 6, 1, 4, 1, 31926))
radioBridgeSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 1))
radioBridgeRf = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 2))
radioBridgeTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 3))
radioBridgeRefClock = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 4))
radioBridgeEthernet = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 5))
radioBridgeQosClassifier = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 6))
radioBridgeQosIngressQueue = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 7))
radioBridgeQosEgressQueue = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 8))
radioBridgeIp = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 9))
radioBridgeCfm = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 10))
radioBridgeAlarms = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 11))
radioBridgeScheduler = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 12))
radioBridgeEncryption = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 13))
radioBridgeMeter = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 14))
radioBridgeEventConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 15))
radioBridgeSnmp = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 17))
radioBridgeLldp = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 19))
radioBridgeWred = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 20))
radioBridgeAuthentication = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 21))
radioBridgeQuota = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 22))
radioBridgePcpProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 23))
radioBridgeSyslog = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 24))
radioBridgeNtp = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 25))
radioBridgeLicense = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 26))
rbSysVoltage = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbSysVoltage.setStatus('current')
if mibBuilder.loadTexts: rbSysVoltage.setDescription('')
rbSysTemperature = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbSysTemperature.setStatus('current')
if mibBuilder.loadTexts: rbSysTemperature.setDescription('')
rbSysSaveConfiguration = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbSysSaveConfiguration.setStatus('current')
if mibBuilder.loadTexts: rbSysSaveConfiguration.setDescription('')
rbSysReset = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbSysReset.setStatus('current')
if mibBuilder.loadTexts: rbSysReset.setDescription('Read the variable value and then write this value for reset')
rbSwBank1Version = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbSwBank1Version.setStatus('current')
if mibBuilder.loadTexts: rbSwBank1Version.setDescription('')
rbSwBank2Version = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbSwBank2Version.setStatus('current')
if mibBuilder.loadTexts: rbSwBank2Version.setDescription('')
rbSwBank1Running = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noRunning", 1), ("running", 2), ("running-wait-accept", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbSwBank1Running.setStatus('current')
if mibBuilder.loadTexts: rbSwBank1Running.setDescription('')
rbSwBank2Running = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noRunning", 1), ("running", 2), ("running-wait-accept", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbSwBank2Running.setStatus('current')
if mibBuilder.loadTexts: rbSwBank2Running.setDescription('')
rbSwBank1ScheduledToRunNextReset = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 9), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbSwBank1ScheduledToRunNextReset.setStatus('current')
if mibBuilder.loadTexts: rbSwBank1ScheduledToRunNextReset.setDescription('')
rbSwBank2ScheduledToRunNextReset = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 10), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbSwBank2ScheduledToRunNextReset.setStatus('current')
if mibBuilder.loadTexts: rbSwBank2ScheduledToRunNextReset.setDescription('')
rbSystemUpAbsoluteTime = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbSystemUpAbsoluteTime.setStatus('current')
if mibBuilder.loadTexts: rbSystemUpAbsoluteTime.setDescription('since the Epoch (00:00:00 UTC, January 1, 1970), measured in seconds.')
rbSystemAuthenticationMode = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("local", 1), ("radius", 2), ("tacacs", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbSystemAuthenticationMode.setStatus('current')
if mibBuilder.loadTexts: rbSystemAuthenticationMode.setDescription('')
rbSystemAuthenticationSecret = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 13), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbSystemAuthenticationSecret.setStatus('current')
if mibBuilder.loadTexts: rbSystemAuthenticationSecret.setDescription('')
rbSystemCapabilities = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 14), Bits().clone(namedValues=NamedValues(("nmsFtp", 0)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbSystemCapabilities.setStatus('current')
if mibBuilder.loadTexts: rbSystemCapabilities.setDescription('')
rbDate = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 15), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbDate.setStatus('current')
if mibBuilder.loadTexts: rbDate.setDescription('')
rbTime = MibScalar((1, 3, 6, 1, 4, 1, 31926, 1, 16), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbTime.setStatus('current')
if mibBuilder.loadTexts: rbTime.setDescription('')
rbRfTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 2, 1), )
if mibBuilder.loadTexts: rbRfTable.setStatus('current')
if mibBuilder.loadTexts: rbRfTable.setDescription('')
rbRfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rfIndex"))
if mibBuilder.loadTexts: rbRfEntry.setStatus('current')
if mibBuilder.loadTexts: rbRfEntry.setDescription('')
rfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: rfIndex.setStatus('current')
if mibBuilder.loadTexts: rfIndex.setDescription('')
rfNumOfChannels = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(1, 1), ValueRangeConstraint(2, 2), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfNumOfChannels.setStatus('current')
if mibBuilder.loadTexts: rfNumOfChannels.setDescription('')
rfChannelWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("rfWidth250", 1), ("rfWidth500", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfChannelWidth.setStatus('current')
if mibBuilder.loadTexts: rfChannelWidth.setDescription('')
rfOperationalFrequency = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfOperationalFrequency.setStatus('current')
if mibBuilder.loadTexts: rfOperationalFrequency.setDescription('')
rfRole = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("rfMaster", 1), ("rfSlave", 2), ("rfAuto", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfRole.setStatus('current')
if mibBuilder.loadTexts: rfRole.setDescription('')
rfModeSelector = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("rfModeAdaptive", 1), ("rfModeStatic", 2), ("rfModeAlign", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfModeSelector.setStatus('current')
if mibBuilder.loadTexts: rfModeSelector.setDescription('')
rfModulationType = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("rfModulationQPSK", 1), ("rfModulationQAM-16", 2), ("rfModulationQAM-64", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfModulationType.setStatus('current')
if mibBuilder.loadTexts: rfModulationType.setDescription('')
rfNumOfSubchannels = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfNumOfSubchannels.setStatus('current')
if mibBuilder.loadTexts: rfNumOfSubchannels.setDescription('')
rfNumOfRepetitions = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(1, 1), ValueRangeConstraint(2, 2), ValueRangeConstraint(4, 4), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfNumOfRepetitions.setStatus('current')
if mibBuilder.loadTexts: rfNumOfRepetitions.setDescription('')
rfFecRate = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("rfFEC-05", 1), ("rfFEC-067", 2), ("rfFEC-08", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfFecRate.setStatus('current')
if mibBuilder.loadTexts: rfFecRate.setDescription('')
rfOperationalState = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 17), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfOperationalState.setStatus('current')
if mibBuilder.loadTexts: rfOperationalState.setDescription('')
rfAverageCinr = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfAverageCinr.setStatus('current')
if mibBuilder.loadTexts: rfAverageCinr.setDescription('')
rfAverageRssi = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfAverageRssi.setStatus('current')
if mibBuilder.loadTexts: rfAverageRssi.setDescription('')
rfTxSynthLock = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("txSynthUnlock", 0), ("txSynthLock", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfTxSynthLock.setStatus('current')
if mibBuilder.loadTexts: rfTxSynthLock.setDescription('')
rfRxSynthLock = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("rxSynthUnlock", 0), ("rxSynthLock", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfRxSynthLock.setStatus('current')
if mibBuilder.loadTexts: rfRxSynthLock.setDescription('')
rfRxLinkId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 22), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfRxLinkId.setStatus('current')
if mibBuilder.loadTexts: rfRxLinkId.setDescription('')
rfTxLinkId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 23), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfTxLinkId.setStatus('current')
if mibBuilder.loadTexts: rfTxLinkId.setDescription('')
rfTxState = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("rf-sync", 1), ("rf-searchCountdown", 2), ("rf-foundCountdown", 3), ("rf-normal", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfTxState.setStatus('current')
if mibBuilder.loadTexts: rfTxState.setDescription('')
rfRxState = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("rf-sync", 1), ("rf-searchCountdown", 2), ("rf-foundCountdown", 3), ("rf-normal", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfRxState.setStatus('current')
if mibBuilder.loadTexts: rfRxState.setDescription('')
rfTemperature = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 26), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfTemperature.setStatus('current')
if mibBuilder.loadTexts: rfTemperature.setDescription('')
rfAsymmetry = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("rf-asymmetry-25tx-75rx", 1), ("rf-asymmetry-50tx-50rx", 2), ("rf-asymmetry-75tx-25rx", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfAsymmetry.setStatus('current')
if mibBuilder.loadTexts: rfAsymmetry.setDescription('')
rfLowestModulationType = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("rfModulationQPSK", 1), ("rfModulationQAM-16", 2), ("rfModulationQAM-64", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfLowestModulationType.setStatus('current')
if mibBuilder.loadTexts: rfLowestModulationType.setDescription('')
rfLowestNumOfSubchannels = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 31), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfLowestNumOfSubchannels.setStatus('current')
if mibBuilder.loadTexts: rfLowestNumOfSubchannels.setDescription('')
rfLowestNumOfRepetitions = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(1, 1), ValueRangeConstraint(2, 2), ValueRangeConstraint(4, 4), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfLowestNumOfRepetitions.setStatus('current')
if mibBuilder.loadTexts: rfLowestNumOfRepetitions.setDescription('')
rfLowestFecRate = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 33), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("rfFEC-05", 1), ("rfFEC-067", 2), ("rfFEC-08", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfLowestFecRate.setStatus('current')
if mibBuilder.loadTexts: rfLowestFecRate.setDescription('')
rfTxMute = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 34), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfTxMute.setStatus('current')
if mibBuilder.loadTexts: rfTxMute.setDescription('')
rfRoleStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 35), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("rfMaster", 1), ("rfSlave", 2), ("rfAuto", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfRoleStatus.setStatus('current')
if mibBuilder.loadTexts: rfRoleStatus.setDescription('')
rfLoopModeSelector = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 36), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("rfLoopDisabled", 1), ("rfLoopInternalMacSwap", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfLoopModeSelector.setStatus('current')
if mibBuilder.loadTexts: rfLoopModeSelector.setDescription('')
rfLoopModulationType = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 37), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("rfModulationQPSK", 1), ("rfModulationQAM-16", 2), ("rfModulationQAM-64", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfLoopModulationType.setStatus('current')
if mibBuilder.loadTexts: rfLoopModulationType.setDescription('')
rfLoopNumOfSubchannels = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 38), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfLoopNumOfSubchannels.setStatus('current')
if mibBuilder.loadTexts: rfLoopNumOfSubchannels.setDescription('')
rfLoopNumOfRepetitions = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 39), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(1, 1), ValueRangeConstraint(2, 2), ValueRangeConstraint(4, 4), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfLoopNumOfRepetitions.setStatus('current')
if mibBuilder.loadTexts: rfLoopNumOfRepetitions.setDescription('')
rfLoopFecRate = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 40), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("rfFEC-05", 1), ("rfFEC-067", 2), ("rfFEC-08", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfLoopFecRate.setStatus('current')
if mibBuilder.loadTexts: rfLoopFecRate.setDescription('')
rfLoopTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 41), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfLoopTimeout.setStatus('current')
if mibBuilder.loadTexts: rfLoopTimeout.setDescription('')
rfTxPower = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 42), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-35, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfTxPower.setStatus('current')
if mibBuilder.loadTexts: rfTxPower.setDescription('')
rfTxMuteTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 43), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfTxMuteTimeout.setStatus('current')
if mibBuilder.loadTexts: rfTxMuteTimeout.setDescription('')
rfAlignmentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 44), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("rfAlignmentInactive", 0), ("rfAlignmentActive", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfAlignmentStatus.setStatus('current')
if mibBuilder.loadTexts: rfAlignmentStatus.setDescription('')
rfLoopDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 1, 1, 45), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("rfLoop-tx", 1), ("rfLoop-rx", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rfLoopDirection.setStatus('current')
if mibBuilder.loadTexts: rfLoopDirection.setDescription('')
rbRfStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 2, 2), )
if mibBuilder.loadTexts: rbRfStatisticsTable.setStatus('current')
if mibBuilder.loadTexts: rbRfStatisticsTable.setDescription('')
rbRfStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rfIndex"))
if mibBuilder.loadTexts: rbRfStatisticsEntry.setStatus('current')
if mibBuilder.loadTexts: rbRfStatisticsEntry.setDescription('')
rfInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfInOctets.setStatus('current')
if mibBuilder.loadTexts: rfInOctets.setDescription('')
rfInIdleOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfInIdleOctets.setStatus('current')
if mibBuilder.loadTexts: rfInIdleOctets.setDescription('')
rfInGoodOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfInGoodOctets.setStatus('current')
if mibBuilder.loadTexts: rfInGoodOctets.setDescription('')
rfInErroredOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfInErroredOctets.setStatus('current')
if mibBuilder.loadTexts: rfInErroredOctets.setDescription('')
rfOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfOutOctets.setStatus('current')
if mibBuilder.loadTexts: rfOutOctets.setDescription('')
rfOutIdleOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfOutIdleOctets.setStatus('current')
if mibBuilder.loadTexts: rfOutIdleOctets.setDescription('')
rfInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfInPkts.setStatus('current')
if mibBuilder.loadTexts: rfInPkts.setDescription('')
rfInGoodPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfInGoodPkts.setStatus('current')
if mibBuilder.loadTexts: rfInGoodPkts.setDescription('')
rfInErroredPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfInErroredPkts.setStatus('current')
if mibBuilder.loadTexts: rfInErroredPkts.setDescription('')
rfInLostPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfInLostPkts.setStatus('current')
if mibBuilder.loadTexts: rfInLostPkts.setDescription('')
rfOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfOutPkts.setStatus('current')
if mibBuilder.loadTexts: rfOutPkts.setDescription('')
rfMinCinr = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfMinCinr.setStatus('current')
if mibBuilder.loadTexts: rfMinCinr.setDescription('')
rfMaxCinr = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 16), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfMaxCinr.setStatus('current')
if mibBuilder.loadTexts: rfMaxCinr.setDescription('')
rfMinRssi = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 17), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfMinRssi.setStatus('current')
if mibBuilder.loadTexts: rfMinRssi.setDescription('')
rfMaxRssi = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 18), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfMaxRssi.setStatus('current')
if mibBuilder.loadTexts: rfMaxRssi.setDescription('')
rfMinModulation = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 19), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfMinModulation.setStatus('current')
if mibBuilder.loadTexts: rfMinModulation.setDescription(' byte # 3: see rfModulationType; byte # 2: see rfNumOfSubchannels; byte # 1: see rfNumOfRepetitions; byte # 0: see rfFecRate; ')
rfMaxModulation = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 20), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfMaxModulation.setStatus('current')
if mibBuilder.loadTexts: rfMaxModulation.setDescription(' byte # 3: see rfModulationType; byte # 2: see rfNumOfSubchannels; byte # 1: see rfNumOfRepetitions; byte # 0: see rfFecRate; ')
rfValid = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 21), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfValid.setStatus('current')
if mibBuilder.loadTexts: rfValid.setDescription('')
rfArqInLoss = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 22), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfArqInLoss.setStatus('current')
if mibBuilder.loadTexts: rfArqInLoss.setDescription('')
rfArqOutLoss = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 2, 1, 23), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfArqOutLoss.setStatus('current')
if mibBuilder.loadTexts: rfArqOutLoss.setDescription('')
rbRfStatisticsDaysTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 2, 3), )
if mibBuilder.loadTexts: rbRfStatisticsDaysTable.setStatus('current')
if mibBuilder.loadTexts: rbRfStatisticsDaysTable.setDescription('')
rbRfStatisticsDaysEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rfIndex"), (0, "RADIO-BRIDGE-MIB", "rfDayIndex"))
if mibBuilder.loadTexts: rbRfStatisticsDaysEntry.setStatus('current')
if mibBuilder.loadTexts: rbRfStatisticsDaysEntry.setDescription('')
rfDayIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 50), Integer32())
if mibBuilder.loadTexts: rfDayIndex.setStatus('current')
if mibBuilder.loadTexts: rfDayIndex.setDescription('')
rfDaysStart = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 51), TimeTicks())
if mibBuilder.loadTexts: rfDaysStart.setStatus('current')
if mibBuilder.loadTexts: rfDaysStart.setDescription('')
rfDaysInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysInOctets.setStatus('current')
if mibBuilder.loadTexts: rfDaysInOctets.setDescription('')
rfDaysInIdleOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysInIdleOctets.setStatus('current')
if mibBuilder.loadTexts: rfDaysInIdleOctets.setDescription('')
rfDaysInGoodOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysInGoodOctets.setStatus('current')
if mibBuilder.loadTexts: rfDaysInGoodOctets.setDescription('')
rfDaysInErroredOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysInErroredOctets.setStatus('current')
if mibBuilder.loadTexts: rfDaysInErroredOctets.setDescription('')
rfDaysOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysOutOctets.setStatus('current')
if mibBuilder.loadTexts: rfDaysOutOctets.setDescription('')
rfDaysOutIdleOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysOutIdleOctets.setStatus('current')
if mibBuilder.loadTexts: rfDaysOutIdleOctets.setDescription('')
rfDaysInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysInPkts.setStatus('current')
if mibBuilder.loadTexts: rfDaysInPkts.setDescription('')
rfDaysInGoodPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysInGoodPkts.setStatus('current')
if mibBuilder.loadTexts: rfDaysInGoodPkts.setDescription('')
rfDaysInErroredPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysInErroredPkts.setStatus('current')
if mibBuilder.loadTexts: rfDaysInErroredPkts.setDescription('')
rfDaysInLostPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysInLostPkts.setStatus('current')
if mibBuilder.loadTexts: rfDaysInLostPkts.setDescription('')
rfDaysOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysOutPkts.setStatus('current')
if mibBuilder.loadTexts: rfDaysOutPkts.setDescription('')
rfDaysMinCinr = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysMinCinr.setStatus('current')
if mibBuilder.loadTexts: rfDaysMinCinr.setDescription('')
rfDaysMaxCinr = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 16), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysMaxCinr.setStatus('current')
if mibBuilder.loadTexts: rfDaysMaxCinr.setDescription('')
rfDaysMinRssi = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 17), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysMinRssi.setStatus('current')
if mibBuilder.loadTexts: rfDaysMinRssi.setDescription('')
rfDaysMaxRssi = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 18), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysMaxRssi.setStatus('current')
if mibBuilder.loadTexts: rfDaysMaxRssi.setDescription('')
rfDaysMinModulation = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 19), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysMinModulation.setStatus('current')
if mibBuilder.loadTexts: rfDaysMinModulation.setDescription(' byte # 3: see rfModulationType; byte # 2: see rfNumOfSubchannels; byte # 1: see rfNumOfRepetitions; byte # 0: see rfFecRate; ')
rfDaysMaxModulation = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 20), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysMaxModulation.setStatus('current')
if mibBuilder.loadTexts: rfDaysMaxModulation.setDescription(' byte # 3: see rfModulationType; byte # 2: see rfNumOfSubchannels; byte # 1: see rfNumOfRepetitions; byte # 0: see rfFecRate; ')
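# rfDaysMinModulation / rfDaysMaxModulation above use the same packed byte layout
# as rfMinModulation / rfMaxModulation, so _decode_modulation_fields() applies here as well.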
rfDaysValid = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 21), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysValid.setStatus('current')
if mibBuilder.loadTexts: rfDaysValid.setDescription('')
rfDaysArqInLoss = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 22), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysArqInLoss.setStatus('current')
if mibBuilder.loadTexts: rfDaysArqInLoss.setDescription('')
rfDaysArqOutLoss = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 2, 3, 1, 23), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rfDaysArqOutLoss.setStatus('current')
if mibBuilder.loadTexts: rfDaysArqOutLoss.setDescription('')
rbRefClockTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 4, 1), )
if mibBuilder.loadTexts: rbRefClockTable.setStatus('current')
if mibBuilder.loadTexts: rbRefClockTable.setDescription('')
rbRefClockEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 4, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rbRefClockEntry.setStatus('current')
if mibBuilder.loadTexts: rbRefClockEntry.setDescription('')
refClockPrio = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: refClockPrio.setStatus('current')
if mibBuilder.loadTexts: refClockPrio.setDescription('')
refClockStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 4, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("down", 0), ("active", 1), ("backup-1", 2), ("backup-2", 3), ("backup-3", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: refClockStatus.setStatus('current')
if mibBuilder.loadTexts: refClockStatus.setDescription('')
refClockQualityLevelActual = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: refClockQualityLevelActual.setStatus('current')
if mibBuilder.loadTexts: refClockQualityLevelActual.setDescription('')
refClockQualityLevelConfig = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: refClockQualityLevelConfig.setStatus('current')
if mibBuilder.loadTexts: refClockQualityLevelConfig.setDescription('')
refClockQualityLevelMode = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 4, 1, 1, 5), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: refClockQualityLevelMode.setStatus('current')
if mibBuilder.loadTexts: refClockQualityLevelMode.setDescription('')
refClockSsmCvid = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 4, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: refClockSsmCvid.setStatus('current')
if mibBuilder.loadTexts: refClockSsmCvid.setDescription('')
refClockRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 4, 1, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: refClockRowStatus.setStatus('current')
if mibBuilder.loadTexts: refClockRowStatus.setDescription('')
rbEthernetTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 5, 1), )
if mibBuilder.loadTexts: rbEthernetTable.setStatus('current')
if mibBuilder.loadTexts: rbEthernetTable.setDescription('')
rbEthernetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 5, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rbEthernetEntry.setStatus('current')
if mibBuilder.loadTexts: rbEthernetEntry.setDescription('')
ethernetAlarmPropagation = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 5, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("disabled", 0), ("backward", 1), ("forward", 2), ("both-direct", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ethernetAlarmPropagation.setStatus('current')
if mibBuilder.loadTexts: ethernetAlarmPropagation.setDescription('')
ethernetLoopMode = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 5, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("disabled", 0), ("external", 1), ("external-mac-swap", 2), ("internal", 3), ("internal-mac-swap", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ethernetLoopMode.setStatus('current')
if mibBuilder.loadTexts: ethernetLoopMode.setDescription('')
ethernetLoopTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 5, 1, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ethernetLoopTimeout.setStatus('current')
if mibBuilder.loadTexts: ethernetLoopTimeout.setDescription('')
ethernetNetworkType = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 5, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("provider-nni", 1), ("customer-uni", 2), ("customer-nni", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ethernetNetworkType.setStatus('current')
if mibBuilder.loadTexts: ethernetNetworkType.setDescription('')
ethernetPcpWriteProfileId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 5, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ethernetPcpWriteProfileId.setStatus('current')
if mibBuilder.loadTexts: ethernetPcpWriteProfileId.setDescription('Id of the PCP write profile, or none (0).')
ethernetClassifierMode = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 5, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("classifier-mode-dscp", 1), ("classifier-mode-pcp-dscp", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ethernetClassifierMode.setStatus('current')
if mibBuilder.loadTexts: ethernetClassifierMode.setDescription('')
rbClassifierCosTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 6, 1), )
if mibBuilder.loadTexts: rbClassifierCosTable.setStatus('current')
if mibBuilder.loadTexts: rbClassifierCosTable.setDescription('')
rbClassifierCosEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 6, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "classifierCosId"))
if mibBuilder.loadTexts: rbClassifierCosEntry.setStatus('current')
if mibBuilder.loadTexts: rbClassifierCosEntry.setDescription('')
classifierCosId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 248)))
if mibBuilder.loadTexts: classifierCosId.setStatus('current')
if mibBuilder.loadTexts: classifierCosId.setDescription('')
classifierCosPortList = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 1, 1, 2), OctetString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierCosPortList.setStatus('current')
if mibBuilder.loadTexts: classifierCosPortList.setDescription('')
classifierCosPrecedence = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierCosPrecedence.setStatus('current')
if mibBuilder.loadTexts: classifierCosPrecedence.setDescription('')
classifierCosVidList = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 1, 1, 4), OctetString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierCosVidList.setStatus('current')
if mibBuilder.loadTexts: classifierCosVidList.setDescription('')
classifierCosPcpList = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 1, 1, 5), OctetString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierCosPcpList.setStatus('current')
if mibBuilder.loadTexts: classifierCosPcpList.setDescription('')
classifierCosCos = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierCosCos.setStatus('current')
if mibBuilder.loadTexts: classifierCosCos.setDescription('')
classifierCosIpCosType = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ip-cos-dscp", 1), ("ip-cos-mpls", 2), ("ip-cos-dont-care", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierCosIpCosType.setStatus('current')
if mibBuilder.loadTexts: classifierCosIpCosType.setDescription('')
classifierCosIpCosList = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 1, 1, 8), OctetString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierCosIpCosList.setStatus('current')
if mibBuilder.loadTexts: classifierCosIpCosList.setDescription('')
classifierCosRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 1, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierCosRowStatus.setStatus('current')
if mibBuilder.loadTexts: classifierCosRowStatus.setDescription('')
classifierCosPacketType = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unicast", 1), ("non-unicast", 2), ("all", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierCosPacketType.setStatus('current')
if mibBuilder.loadTexts: classifierCosPacketType.setDescription('')
rbClassifierEvcTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 6, 2), )
if mibBuilder.loadTexts: rbClassifierEvcTable.setStatus('current')
if mibBuilder.loadTexts: rbClassifierEvcTable.setDescription('')
rbClassifierEvcEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 6, 2, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "classifierEvcId"))
if mibBuilder.loadTexts: rbClassifierEvcEntry.setStatus('current')
if mibBuilder.loadTexts: rbClassifierEvcEntry.setDescription('')
classifierEvcId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 248)))
if mibBuilder.loadTexts: classifierEvcId.setStatus('current')
if mibBuilder.loadTexts: classifierEvcId.setDescription('')
classifierEvcPortList = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 2, 1, 2), OctetString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierEvcPortList.setStatus('current')
if mibBuilder.loadTexts: classifierEvcPortList.setDescription('')
classifierEvcPrecedence = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierEvcPrecedence.setStatus('current')
if mibBuilder.loadTexts: classifierEvcPrecedence.setDescription('')
classifierEvcVidList = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 2, 1, 4), OctetString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierEvcVidList.setStatus('current')
if mibBuilder.loadTexts: classifierEvcVidList.setDescription('')
classifierEvcPcpList = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 2, 1, 5), OctetString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierEvcPcpList.setStatus('current')
if mibBuilder.loadTexts: classifierEvcPcpList.setDescription('')
classifierEvcEvc = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierEvcEvc.setStatus('current')
if mibBuilder.loadTexts: classifierEvcEvc.setDescription('')
classifierEvcIpCosType = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ip-cos-dscp", 1), ("ip-cos-mpls", 2), ("ip-cos-dont-care", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierEvcIpCosType.setStatus('current')
if mibBuilder.loadTexts: classifierEvcIpCosType.setDescription('')
classifierEvcIpCosList = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 2, 1, 8), OctetString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierEvcIpCosList.setStatus('current')
if mibBuilder.loadTexts: classifierEvcIpCosList.setDescription('')
classifierEvcRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 2, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierEvcRowStatus.setStatus('current')
if mibBuilder.loadTexts: classifierEvcRowStatus.setDescription('')
classifierEvcPacketType = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 6, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unicast", 1), ("non-unicast", 2), ("all", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: classifierEvcPacketType.setStatus('current')
if mibBuilder.loadTexts: classifierEvcPacketType.setDescription('')
rbQosIngressQueueTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 7, 1), )
if mibBuilder.loadTexts: rbQosIngressQueueTable.setStatus('current')
if mibBuilder.loadTexts: rbQosIngressQueueTable.setDescription('')
rbQosIngressQueueEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 7, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "qosIngressQueueEvcId"), (0, "RADIO-BRIDGE-MIB", "qosIngressQueueCosId"))
if mibBuilder.loadTexts: rbQosIngressQueueEntry.setStatus('current')
if mibBuilder.loadTexts: rbQosIngressQueueEntry.setDescription('')
qosIngressQueueEvcId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 7, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: qosIngressQueueEvcId.setStatus('current')
if mibBuilder.loadTexts: qosIngressQueueEvcId.setDescription('')
qosIngressQueueCosId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 7, 1, 1, 2), Integer32())
if mibBuilder.loadTexts: qosIngressQueueCosId.setStatus('current')
if mibBuilder.loadTexts: qosIngressQueueCosId.setDescription('')
qosIngressQueueMeterId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 7, 1, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: qosIngressQueueMeterId.setStatus('current')
if mibBuilder.loadTexts: qosIngressQueueMeterId.setDescription('')
qosIngressQueueMarking = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 7, 1, 1, 4), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: qosIngressQueueMarking.setStatus('current')
if mibBuilder.loadTexts: qosIngressQueueMarking.setDescription('')
qosIngressQueueRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 7, 1, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: qosIngressQueueRowStatus.setStatus('current')
if mibBuilder.loadTexts: qosIngressQueueRowStatus.setDescription('This object indicates the status of this entry.')
rbQosEgressQueueTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 8, 1), )
if mibBuilder.loadTexts: rbQosEgressQueueTable.setStatus('current')
if mibBuilder.loadTexts: rbQosEgressQueueTable.setDescription('')
rbQosEgressQueueEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 8, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "qosEgressQueuePortNum"), (0, "RADIO-BRIDGE-MIB", "qosEgressQueueCosId"))
if mibBuilder.loadTexts: rbQosEgressQueueEntry.setStatus('current')
if mibBuilder.loadTexts: rbQosEgressQueueEntry.setDescription('')
qosEgressQueuePortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 8, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: qosEgressQueuePortNum.setStatus('current')
if mibBuilder.loadTexts: qosEgressQueuePortNum.setDescription('')
qosEgressQueueCosId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 8, 1, 1, 2), Integer32())
if mibBuilder.loadTexts: qosEgressQueueCosId.setStatus('current')
if mibBuilder.loadTexts: qosEgressQueueCosId.setDescription('')
qosEgressQueueWfqWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 8, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: qosEgressQueueWfqWeight.setStatus('current')
if mibBuilder.loadTexts: qosEgressQueueWfqWeight.setDescription('')
qosEgressQueueCir = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 8, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: qosEgressQueueCir.setStatus('current')
if mibBuilder.loadTexts: qosEgressQueueCir.setDescription('')
qosEgressQueueMode = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 8, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("strictPriority", 1), ("wfg", 2), ("priority-shaper", 3), ("wfq-shaper", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: qosEgressQueueMode.setStatus('current')
if mibBuilder.loadTexts: qosEgressQueueMode.setDescription('')
qosEgressQueueColorDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 8, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("color-aware", 1), ("color-drop", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: qosEgressQueueColorDrop.setStatus('current')
if mibBuilder.loadTexts: qosEgressQueueColorDrop.setDescription('')
qosEgressDropMode = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 8, 1, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: qosEgressDropMode.setStatus('current')
if mibBuilder.loadTexts: qosEgressDropMode.setDescription('If negative, the value is a WRED id; otherwise it is the queue length in microseconds.')
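
# Illustrative sketch of interpreting a qosEgressDropMode reading, based only on
# the DESCRIPTION above.  Treating the WRED id as the absolute value of a
# negative setting is our assumption, not something the MIB states.
def _interpret_egress_drop_mode(value):
    """Return a (kind, value) pair for a qosEgressDropMode reading."""
    if value < 0:
        return ('wred-id', -value)           # negative values select a WRED profile
    return ('queue-length-us', value)        # non-negative values are a queue length in microseconds
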
rbIpTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 9, 1), )
if mibBuilder.loadTexts: rbIpTable.setStatus('current')
if mibBuilder.loadTexts: rbIpTable.setDescription('')
rbIpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 9, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rbIpIndex"))
if mibBuilder.loadTexts: rbIpEntry.setStatus('current')
if mibBuilder.loadTexts: rbIpEntry.setDescription('')
rbIpIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 9, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: rbIpIndex.setStatus('current')
if mibBuilder.loadTexts: rbIpIndex.setDescription('')
rbIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 9, 1, 1, 2), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbIpAddress.setStatus('current')
if mibBuilder.loadTexts: rbIpAddress.setDescription('')
rbIpPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 9, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbIpPrefixLen.setStatus('current')
if mibBuilder.loadTexts: rbIpPrefixLen.setDescription('')
rbIpVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 9, 1, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbIpVlanId.setStatus('current')
if mibBuilder.loadTexts: rbIpVlanId.setDescription('')
rbIpRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 9, 1, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbIpRowStatus.setStatus('current')
if mibBuilder.loadTexts: rbIpRowStatus.setDescription('This object indicates the status of this entry.')
rbIpType = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 9, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ip-static", 1), ("ip-dhcp", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbIpType.setStatus('current')
if mibBuilder.loadTexts: rbIpType.setDescription('')
rbIpGateway = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 9, 1, 1, 7), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbIpGateway.setStatus('current')
if mibBuilder.loadTexts: rbIpGateway.setDescription('')
rbPeerMep = MibTable((1, 3, 6, 1, 4, 1, 31926, 10, 1), )
if mibBuilder.loadTexts: rbPeerMep.setStatus('current')
if mibBuilder.loadTexts: rbPeerMep.setDescription('')
rbPeerMepEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 10, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rbMdIndex"), (0, "RADIO-BRIDGE-MIB", "rbMaIndex"), (0, "RADIO-BRIDGE-MIB", "rbMepId"), (0, "RADIO-BRIDGE-MIB", "rbPeerMepId"))
if mibBuilder.loadTexts: rbPeerMepEntry.setStatus('current')
if mibBuilder.loadTexts: rbPeerMepEntry.setDescription('')
rbMdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: rbMdIndex.setStatus('current')
if mibBuilder.loadTexts: rbMdIndex.setDescription('')
rbMaIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 1, 1, 2), Integer32())
if mibBuilder.loadTexts: rbMaIndex.setStatus('current')
if mibBuilder.loadTexts: rbMaIndex.setDescription('')
rbMepId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 1, 1, 3), Integer32())
if mibBuilder.loadTexts: rbMepId.setStatus('current')
if mibBuilder.loadTexts: rbMepId.setDescription('')
rbPeerMepId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 1, 1, 4), Integer32())
if mibBuilder.loadTexts: rbPeerMepId.setStatus('current')
if mibBuilder.loadTexts: rbPeerMepId.setDescription('')
rbPeerMepFarEndLoss = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 1, 1, 5), Counter64())
if mibBuilder.loadTexts: rbPeerMepFarEndLoss.setStatus('current')
if mibBuilder.loadTexts: rbPeerMepFarEndLoss.setDescription('')
rbPeerMepNearEndLoss = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 1, 1, 6), Counter64())
if mibBuilder.loadTexts: rbPeerMepNearEndLoss.setStatus('current')
if mibBuilder.loadTexts: rbPeerMepNearEndLoss.setDescription('')
rbPeerMepTotalTxFarEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 1, 1, 7), Counter64())
if mibBuilder.loadTexts: rbPeerMepTotalTxFarEnd.setStatus('current')
if mibBuilder.loadTexts: rbPeerMepTotalTxFarEnd.setDescription('')
rbPeerMepTotalTxNearEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 1, 1, 8), Counter64())
if mibBuilder.loadTexts: rbPeerMepTotalTxNearEnd.setStatus('current')
if mibBuilder.loadTexts: rbPeerMepTotalTxNearEnd.setDescription('')
rbPeerMepFrameDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 1, 1, 9), Counter64())
if mibBuilder.loadTexts: rbPeerMepFrameDelay.setStatus('current')
if mibBuilder.loadTexts: rbPeerMepFrameDelay.setDescription('')
rbPeerMepFrameDelayVariation = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 1, 1, 10), Counter64())
if mibBuilder.loadTexts: rbPeerMepFrameDelayVariation.setStatus('current')
if mibBuilder.loadTexts: rbPeerMepFrameDelayVariation.setDescription('')
rbMep = MibTable((1, 3, 6, 1, 4, 1, 31926, 10, 2), )
if mibBuilder.loadTexts: rbMep.setStatus('current')
if mibBuilder.loadTexts: rbMep.setDescription('')
rbMepEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 10, 2, 1), )
dot1agCfmMepEntry.registerAugmentions(("RADIO-BRIDGE-MIB", "rbMepEntry"))
rbMepEntry.setIndexNames(*dot1agCfmMepEntry.getIndexNames())
if mibBuilder.loadTexts: rbMepEntry.setStatus('current')
if mibBuilder.loadTexts: rbMepEntry.setDescription('')
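# Note: the registerAugmentions()/setIndexNames(*...getIndexNames()) pair above is the
# generated-code equivalent of the SMI AUGMENTS clause: rbMepEntry shares the indices of
# dot1agCfmMepEntry and adds the rbMepAis* columns defined below.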
rbMepAisEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 2, 1, 1), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbMepAisEnable.setStatus('current')
if mibBuilder.loadTexts: rbMepAisEnable.setDescription('')
rbMepAisPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 6))).clone(namedValues=NamedValues(("aisPeriod-1-sec", 4), ("aisPeriod-1-min", 6)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbMepAisPeriod.setStatus('current')
if mibBuilder.loadTexts: rbMepAisPeriod.setDescription('')
rbMepAisSuppress = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 2, 1, 3), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbMepAisSuppress.setStatus('current')
if mibBuilder.loadTexts: rbMepAisSuppress.setDescription('')
rbMepAisLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 2, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbMepAisLevel.setStatus('current')
if mibBuilder.loadTexts: rbMepAisLevel.setDescription('')
rbMepAisDefects = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 10, 2, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbMepAisDefects.setStatus('current')
if mibBuilder.loadTexts: rbMepAisDefects.setDescription('')
class AlarmSeverity(TextualConvention, Integer32):
description = ''
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("critical", 1), ("major", 2), ("minor", 3), ("warning", 4), ("no-alarm", 5))
class AlarmType(TextualConvention, Integer32):
description = ''
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))
namedValues = NamedValues(("link-down", 1), ("temperature-out-of-range", 2), ("synthesizer-unlock", 3), ("pow-low", 4), ("cfm-mep-defect", 5), ("loopback-active", 6), ("tx-mute", 7), ("ql-eec1-or-worse", 8), ("poe-incompatible", 9), ("rssi-out-of-range", 10), ("cinr-out-of-range", 11), ("lowest-modulation", 12))
rbAlarmsCommon = MibIdentifier((1, 3, 6, 1, 4, 1, 31926, 11, 1))
rbCurrentAlarmChangeCounter = MibScalar((1, 3, 6, 1, 4, 1, 31926, 11, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmChangeCounter.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmChangeCounter.setDescription('The counter is initialized to a random number on power-up and incremented on each change in the current alarms table: alarm addition or deletion.')
rbCurrentAlarmMostSevere = MibScalar((1, 3, 6, 1, 4, 1, 31926, 11, 1, 2), AlarmSeverity()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmMostSevere.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmMostSevere.setDescription('The severity of the most severe alarm in the system')
rbCurrentAlarmLastIndex = MibScalar((1, 3, 6, 1, 4, 1, 31926, 11, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmLastIndex.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmLastIndex.setDescription('The counter is initialized to a random number on power-up and incremented when an alarm is added to the alarms table. It is used as the alarm index in the current alarms table.')
rbCurrentAlarmLastTrapType = MibScalar((1, 3, 6, 1, 4, 1, 31926, 11, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("alarm-up", 1), ("alarm-down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmLastTrapType.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmLastTrapType.setDescription('Type of last alarm trap.')
rbCurrentAlarmSourceAddr = MibScalar((1, 3, 6, 1, 4, 1, 31926, 11, 1, 10), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmSourceAddr.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmSourceAddr.setDescription('Alarm source IP Address.')
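
# Illustrative sketch: per the rbCurrentAlarmChangeCounter DESCRIPTION above, a manager
# only needs to re-read rbCurrentAlarmTable when this counter changes.  The host/community
# arguments and the use of pysnmp's synchronous hlapi are assumptions of this example,
# not requirements of the MIB.
def _read_alarm_change_counter(host, community='public', port=161):
    """Fetch rbCurrentAlarmChangeCounter.0 from an agent and return it as an int."""
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, getCmd)
    error_indication, error_status, _, var_binds = next(getCmd(
        SnmpEngine(), CommunityData(community),
        UdpTransportTarget((host, port)), ContextData(),
        ObjectType(ObjectIdentity('RADIO-BRIDGE-MIB', 'rbCurrentAlarmChangeCounter', 0))))
    if error_indication or error_status:
        raise RuntimeError(error_indication or error_status)
    return int(var_binds[0][1])
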
rbCurrentAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 11, 2), )
if mibBuilder.loadTexts: rbCurrentAlarmTable.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmTable.setDescription('Current alarms table.')
rbCurrentAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 11, 2, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rbCurrentAlarmIndex"))
if mibBuilder.loadTexts: rbCurrentAlarmEntry.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmEntry.setDescription('')
rbCurrentAlarmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 11, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmIndex.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmIndex.setDescription('Value of rbCurrentAlarmLastIndex at the moment the alarm is inserted into the table.')
rbCurrentAlarmType = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 11, 2, 1, 2), AlarmType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmType.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmType.setDescription('see AlarmType definition')
rbCurrentAlarmTypeName = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 11, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmTypeName.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmTypeName.setDescription('String representation of rbCurrentAlarmType.')
rbCurrentAlarmSource = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 11, 2, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmSource.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmSource.setDescription('Name of the managed object originating the alarm, e.g. eth host, system, vlan s1 5, etc.')
rbCurrentAlarmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 11, 2, 1, 5), AlarmSeverity()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmSeverity.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmSeverity.setDescription('see AlarmSeverity definition')
rbCurrentAlarmRaisedTime = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 11, 2, 1, 6), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmRaisedTime.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmRaisedTime.setDescription('')
rbCurrentAlarmDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 11, 2, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmDesc.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmDesc.setDescription('alarm description')
rbCurrentAlarmCause = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 11, 2, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmCause.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmCause.setDescription('alarm probable cause')
rbCurrentAlarmAction = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 11, 2, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmAction.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmAction.setDescription('alarm corrective actions')
rbCurrentAlarmIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 11, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbCurrentAlarmIfIndex.setStatus('current')
if mibBuilder.loadTexts: rbCurrentAlarmIfIndex.setDescription('port ifIndex if port is the alarm source, -1 otherwise')
trapModulationChange = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 1)).setObjects(("RADIO-BRIDGE-MIB", "rfModulationType"), ("RADIO-BRIDGE-MIB", "rfNumOfSubchannels"), ("RADIO-BRIDGE-MIB", "rfNumOfRepetitions"), ("RADIO-BRIDGE-MIB", "rfFecRate"))
if mibBuilder.loadTexts: trapModulationChange.setStatus('current')
if mibBuilder.loadTexts: trapModulationChange.setDescription('')
trapTemperatureOutOfRange = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 2))
if mibBuilder.loadTexts: trapTemperatureOutOfRange.setStatus('current')
if mibBuilder.loadTexts: trapTemperatureOutOfRange.setDescription('')
trapTemperatureInRange = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 3))
if mibBuilder.loadTexts: trapTemperatureInRange.setStatus('current')
if mibBuilder.loadTexts: trapTemperatureInRange.setDescription('')
trapSfpIn = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 4)).setObjects(("IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: trapSfpIn.setStatus('current')
if mibBuilder.loadTexts: trapSfpIn.setDescription('')
trapSfpOut = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 5)).setObjects(("IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: trapSfpOut.setStatus('current')
if mibBuilder.loadTexts: trapSfpOut.setDescription('')
trapRefClockChanged = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 6)).setObjects(("IF-MIB", "ifIndex"), ("RADIO-BRIDGE-MIB", "refClockQualityLevelActual"))
if mibBuilder.loadTexts: trapRefClockChanged.setStatus('current')
if mibBuilder.loadTexts: trapRefClockChanged.setDescription('')
trapCurrentAlarm = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 11)).setObjects(("RADIO-BRIDGE-MIB", "rbCurrentAlarmChangeCounter"), ("RADIO-BRIDGE-MIB", "rbCurrentAlarmMostSevere"), ("RADIO-BRIDGE-MIB", "rbCurrentAlarmType"), ("RADIO-BRIDGE-MIB", "rbCurrentAlarmTypeName"), ("RADIO-BRIDGE-MIB", "rbCurrentAlarmSourceAddr"), ("RADIO-BRIDGE-MIB", "rbCurrentAlarmSource"), ("RADIO-BRIDGE-MIB", "rbCurrentAlarmSeverity"), ("RADIO-BRIDGE-MIB", "rbCurrentAlarmRaisedTime"), ("RADIO-BRIDGE-MIB", "rbCurrentAlarmIfIndex"), ("RADIO-BRIDGE-MIB", "rbCurrentAlarmLastTrapType"))
if mibBuilder.loadTexts: trapCurrentAlarm.setStatus('current')
if mibBuilder.loadTexts: trapCurrentAlarm.setDescription('')
trapLoopEnabled = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 12)).setObjects(("IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: trapLoopEnabled.setStatus('current')
if mibBuilder.loadTexts: trapLoopEnabled.setDescription('')
trapLoopDisabled = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 13)).setObjects(("IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: trapLoopDisabled.setStatus('current')
if mibBuilder.loadTexts: trapLoopDisabled.setDescription('')
trapTxMuteEnabled = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 14))
if mibBuilder.loadTexts: trapTxMuteEnabled.setStatus('current')
if mibBuilder.loadTexts: trapTxMuteEnabled.setDescription('')
trapTxMuteDisabled = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 15))
if mibBuilder.loadTexts: trapTxMuteDisabled.setStatus('current')
if mibBuilder.loadTexts: trapTxMuteDisabled.setDescription('')
trapCinrOutOfRange = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 19))
if mibBuilder.loadTexts: trapCinrOutOfRange.setStatus('current')
if mibBuilder.loadTexts: trapCinrOutOfRange.setDescription('')
trapCinrInRange = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 20))
if mibBuilder.loadTexts: trapCinrInRange.setStatus('current')
if mibBuilder.loadTexts: trapCinrInRange.setDescription('')
trapRssiOutOfRange = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 21))
if mibBuilder.loadTexts: trapRssiOutOfRange.setStatus('current')
if mibBuilder.loadTexts: trapRssiOutOfRange.setDescription('')
trapRssiInRange = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 22))
if mibBuilder.loadTexts: trapRssiInRange.setStatus('current')
if mibBuilder.loadTexts: trapRssiInRange.setDescription('')
trapLowestModulation = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 23))
if mibBuilder.loadTexts: trapLowestModulation.setStatus('current')
if mibBuilder.loadTexts: trapLowestModulation.setDescription('')
trapNoLowestModulation = NotificationType((1, 3, 6, 1, 4, 1, 31926, 3, 24))
if mibBuilder.loadTexts: trapNoLowestModulation.setStatus('current')
if mibBuilder.loadTexts: trapNoLowestModulation.setDescription('')
rbSchedulerMode = MibScalar((1, 3, 6, 1, 4, 1, 31926, 12, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("strictPriority", 1), ("wfg", 2), ("priority-shaper", 3), ("wfq-shaper", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbSchedulerMode.setStatus('current')
if mibBuilder.loadTexts: rbSchedulerMode.setDescription('')
rbMeterTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 14, 1), )
if mibBuilder.loadTexts: rbMeterTable.setStatus('current')
if mibBuilder.loadTexts: rbMeterTable.setDescription('')
rbMeterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 14, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rbMeterId"))
if mibBuilder.loadTexts: rbMeterEntry.setStatus('current')
if mibBuilder.loadTexts: rbMeterEntry.setDescription('')
rbMeterId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 14, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 248)))
if mibBuilder.loadTexts: rbMeterId.setStatus('current')
if mibBuilder.loadTexts: rbMeterId.setDescription('')
rbMeterCir = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 14, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbMeterCir.setStatus('current')
if mibBuilder.loadTexts: rbMeterCir.setDescription('')
rbMeterCbs = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 14, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(9216, 50000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbMeterCbs.setStatus('current')
if mibBuilder.loadTexts: rbMeterCbs.setDescription('')
rbMeterEir = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 14, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbMeterEir.setStatus('current')
if mibBuilder.loadTexts: rbMeterEir.setDescription('')
rbMeterEbs = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 14, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(9216, 100000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbMeterEbs.setStatus('current')
if mibBuilder.loadTexts: rbMeterEbs.setDescription('')
rbMeterColorMode = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 14, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("color-aware", 1), ("color-blind", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbMeterColorMode.setStatus('current')
if mibBuilder.loadTexts: rbMeterColorMode.setDescription('')
rbMeterRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 14, 1, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbMeterRowStatus.setStatus('current')
if mibBuilder.loadTexts: rbMeterRowStatus.setDescription('')
rbEventConfigTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 15, 1), )
if mibBuilder.loadTexts: rbEventConfigTable.setStatus('current')
if mibBuilder.loadTexts: rbEventConfigTable.setDescription('')
rbEventConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 15, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rbEventConfigIndex"))
if mibBuilder.loadTexts: rbEventConfigEntry.setStatus('current')
if mibBuilder.loadTexts: rbEventConfigEntry.setDescription('')
rbEventConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 15, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: rbEventConfigIndex.setStatus('current')
if mibBuilder.loadTexts: rbEventConfigIndex.setDescription('')
rbEventConfigId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 15, 1, 1, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbEventConfigId.setStatus('current')
if mibBuilder.loadTexts: rbEventConfigId.setDescription('')
rbEventConfigMask = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 15, 1, 1, 3), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbEventConfigMask.setStatus('current')
if mibBuilder.loadTexts: rbEventConfigMask.setDescription('')
rbRfEncryption = MibScalar((1, 3, 6, 1, 4, 1, 31926, 13, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbRfEncryption.setStatus('current')
if mibBuilder.loadTexts: rbRfEncryption.setDescription('')
rbRfStaticKey = MibScalar((1, 3, 6, 1, 4, 1, 31926, 13, 2), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbRfStaticKey.setStatus('current')
if mibBuilder.loadTexts: rbRfStaticKey.setDescription('')
rbRfAuthenticationString = MibScalar((1, 3, 6, 1, 4, 1, 31926, 13, 3), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbRfAuthenticationString.setStatus('current')
if mibBuilder.loadTexts: rbRfAuthenticationString.setDescription('')
rbAgentReadCommunity = MibScalar((1, 3, 6, 1, 4, 1, 31926, 17, 1), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbAgentReadCommunity.setStatus('current')
if mibBuilder.loadTexts: rbAgentReadCommunity.setDescription('')
rbAgentWriteCommunity = MibScalar((1, 3, 6, 1, 4, 1, 31926, 17, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbAgentWriteCommunity.setStatus('current')
if mibBuilder.loadTexts: rbAgentWriteCommunity.setDescription('')
rbAgentSnmpVersion = MibScalar((1, 3, 6, 1, 4, 1, 31926, 17, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3))).clone(namedValues=NamedValues(("v2c", 2), ("v3", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbAgentSnmpVersion.setStatus('current')
if mibBuilder.loadTexts: rbAgentSnmpVersion.setDescription('')
rbSysFileOperationTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 18), )
if mibBuilder.loadTexts: rbSysFileOperationTable.setStatus('current')
if mibBuilder.loadTexts: rbSysFileOperationTable.setDescription('This table has a permanent row with index 1. The row is not creatable; fileSessionRowStatus is used to activate the file operation process once the necessary variables are assigned.')
rbSysFileOperationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 18, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "fileSessionIndex"))
if mibBuilder.loadTexts: rbSysFileOperationEntry.setStatus('current')
if mibBuilder.loadTexts: rbSysFileOperationEntry.setDescription('')
fileSessionIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 18, 1, 1), Integer32())
if mibBuilder.loadTexts: fileSessionIndex.setStatus('current')
if mibBuilder.loadTexts: fileSessionIndex.setDescription('')
fileSessionCommand = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 18, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13))).clone(namedValues=NamedValues(("copySwFromRemote", 1), ("copyLicenseFromRemote", 2), ("copyFileFromRemoteToLocal", 3), ("copyFileFromLocalToRemote", 4), ("acceptSw", 5), ("runSw", 6), ("copyDirToRemote", 7), ("copyEventLog", 9), ("copyUserActivityLog", 10), ("runScript", 11), ("copyInventory", 12), ("copyStatsHistory", 13)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fileSessionCommand.setStatus('current')
if mibBuilder.loadTexts: fileSessionCommand.setDescription('')
fileSessionLocalParams = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 18, 1, 3), DisplayString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fileSessionLocalParams.setStatus('current')
if mibBuilder.loadTexts: fileSessionLocalParams.setDescription('')
fileSessionRemotePath = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 18, 1, 4), DisplayString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fileSessionRemotePath.setStatus('current')
if mibBuilder.loadTexts: fileSessionRemotePath.setDescription('')
fileSessionServer = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 18, 1, 5), DisplayString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fileSessionServer.setStatus('current')
if mibBuilder.loadTexts: fileSessionServer.setDescription('')
fileSessionUser = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 18, 1, 6), DisplayString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fileSessionUser.setStatus('current')
if mibBuilder.loadTexts: fileSessionUser.setDescription('')
fileSessionPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 18, 1, 7), DisplayString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fileSessionPassword.setStatus('current')
if mibBuilder.loadTexts: fileSessionPassword.setDescription('')
fileSessionResult = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 18, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fileSessionResult.setStatus('current')
if mibBuilder.loadTexts: fileSessionResult.setDescription('')
fileSessionState = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 18, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("running", 1), ("terminated-ok", 2), ("terminated-error", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fileSessionState.setStatus('current')
if mibBuilder.loadTexts: fileSessionState.setDescription('')
fileSessionRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 18, 1, 10), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: fileSessionRowStatus.setStatus('current')
if mibBuilder.loadTexts: fileSessionRowStatus.setDescription('see rbSysFileOperationTable description')
fileSessionProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 18, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ftp", 1), ("sftp", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fileSessionProtocol.setStatus('current')
if mibBuilder.loadTexts: fileSessionProtocol.setDescription('')
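
# Illustrative sketch of driving rbSysFileOperationTable (see its DESCRIPTION above):
# the table has a single permanent row with index 1, and the row-status column is used
# to start the operation once the other columns are assigned.  The command value
# copySwFromRemote(1), RowStatus active(1) and the exact set of columns written here
# are assumptions of this example; check the device documentation before use.
def _start_file_operation(host, server, remote_path, user, password,
                          community='private', port=161):
    """Assign the fileSession* columns of row 1 and activate the operation."""
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              Integer32, OctetString, setCmd)
    def col(name, value):
        # All columns of the permanent row use index 1 (see the table DESCRIPTION).
        return ObjectType(ObjectIdentity('RADIO-BRIDGE-MIB', name, 1), value)
    error_indication, error_status, _, _ = next(setCmd(
        SnmpEngine(), CommunityData(community),
        UdpTransportTarget((host, port)), ContextData(),
        col('fileSessionCommand', Integer32(1)),             # copySwFromRemote (assumed example command)
        col('fileSessionRemotePath', OctetString(remote_path)),
        col('fileSessionServer', OctetString(server)),
        col('fileSessionUser', OctetString(user)),
        col('fileSessionPassword', OctetString(password)),
        col('fileSessionRowStatus', Integer32(1))))           # active(1): assumed to start the operation
    if error_indication or error_status:
        raise RuntimeError(error_indication or error_status)
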
rbLldpPortExtensionTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 19, 1), )
if mibBuilder.loadTexts: rbLldpPortExtensionTable.setStatus('current')
if mibBuilder.loadTexts: rbLldpPortExtensionTable.setDescription('extends lldpV2PortConfigTable')
rbLldpPortExtensionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 19, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rbLldpPortIfIndex"), (0, "RADIO-BRIDGE-MIB", "rbLldpPortDestAddressIndex"))
if mibBuilder.loadTexts: rbLldpPortExtensionEntry.setStatus('current')
if mibBuilder.loadTexts: rbLldpPortExtensionEntry.setDescription('')
rbLldpPortIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 19, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: rbLldpPortIfIndex.setStatus('current')
if mibBuilder.loadTexts: rbLldpPortIfIndex.setDescription('equal to lldpV2PortConfigIfIndex of the corresponding lldpV2PortConfigEntry')
rbLldpPortDestAddressIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 19, 1, 1, 2), Unsigned32())
if mibBuilder.loadTexts: rbLldpPortDestAddressIndex.setStatus('current')
if mibBuilder.loadTexts: rbLldpPortDestAddressIndex.setDescription('equal to lldpV2PortConfigDestAddressIndex of the corresponding lldpV2PortConfigEntry')
rbLldpPortVid = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 19, 1, 1, 3), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbLldpPortVid.setStatus('current')
if mibBuilder.loadTexts: rbLldpPortVid.setDescription('')
rbWredTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 20, 1), )
if mibBuilder.loadTexts: rbWredTable.setStatus('current')
if mibBuilder.loadTexts: rbWredTable.setDescription('')
rbWredEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 20, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rbWredId"))
if mibBuilder.loadTexts: rbWredEntry.setStatus('current')
if mibBuilder.loadTexts: rbWredEntry.setDescription('')
rbWredId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 20, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: rbWredId.setStatus('current')
if mibBuilder.loadTexts: rbWredId.setDescription('')
rbWredNfactor = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 20, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbWredNfactor.setStatus('current')
if mibBuilder.loadTexts: rbWredNfactor.setDescription('')
rbWredMinThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 20, 1, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbWredMinThreshold.setStatus('current')
if mibBuilder.loadTexts: rbWredMinThreshold.setDescription('')
rbWredMaxThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 20, 1, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbWredMaxThreshold.setStatus('current')
if mibBuilder.loadTexts: rbWredMaxThreshold.setDescription('')
rbWredProbability = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 20, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbWredProbability.setStatus('current')
if mibBuilder.loadTexts: rbWredProbability.setDescription('')
rbWredMinThresholdYellow = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 20, 1, 1, 6), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbWredMinThresholdYellow.setStatus('current')
if mibBuilder.loadTexts: rbWredMinThresholdYellow.setDescription('')
rbWredMaxThresholdYellow = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 20, 1, 1, 7), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbWredMaxThresholdYellow.setStatus('current')
if mibBuilder.loadTexts: rbWredMaxThresholdYellow.setDescription('')
rbWredProbabilityYellow = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 20, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbWredProbabilityYellow.setStatus('current')
if mibBuilder.loadTexts: rbWredProbabilityYellow.setDescription('')
rbWredRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 20, 1, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbWredRowStatus.setStatus('current')
if mibBuilder.loadTexts: rbWredRowStatus.setDescription('')
rbAuthServersTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 21, 1), )
if mibBuilder.loadTexts: rbAuthServersTable.setStatus('current')
if mibBuilder.loadTexts: rbAuthServersTable.setDescription('')
rbAuthServersEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 21, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rbAuthServerId"))
if mibBuilder.loadTexts: rbAuthServersEntry.setStatus('current')
if mibBuilder.loadTexts: rbAuthServersEntry.setDescription('')
rbAuthServerId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 21, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 5)))
if mibBuilder.loadTexts: rbAuthServerId.setStatus('current')
if mibBuilder.loadTexts: rbAuthServerId.setDescription('')
rbAuthServerIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 21, 1, 1, 2), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbAuthServerIpAddress.setStatus('current')
if mibBuilder.loadTexts: rbAuthServerIpAddress.setDescription('')
rbAuthServerPort = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 21, 1, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbAuthServerPort.setStatus('current')
if mibBuilder.loadTexts: rbAuthServerPort.setDescription('')
rbAuthServerRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 21, 1, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbAuthServerRowStatus.setStatus('current')
if mibBuilder.loadTexts: rbAuthServerRowStatus.setDescription('')
rbFdbQuotaTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 22, 1), )
if mibBuilder.loadTexts: rbFdbQuotaTable.setStatus('current')
if mibBuilder.loadTexts: rbFdbQuotaTable.setDescription('')
rbFdbQuotaEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 22, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rbFdbQuotaId"))
if mibBuilder.loadTexts: rbFdbQuotaEntry.setStatus('current')
if mibBuilder.loadTexts: rbFdbQuotaEntry.setDescription('')
rbFdbQuotaId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 22, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)))
if mibBuilder.loadTexts: rbFdbQuotaId.setStatus('current')
if mibBuilder.loadTexts: rbFdbQuotaId.setDescription('')
rbFdbQuotaSize = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 22, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbFdbQuotaSize.setStatus('current')
if mibBuilder.loadTexts: rbFdbQuotaSize.setDescription('')
rbFdbQuotaRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 22, 1, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbFdbQuotaRowStatus.setStatus('current')
if mibBuilder.loadTexts: rbFdbQuotaRowStatus.setDescription('')
rbFdbQuotaMaxSize = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 22, 1, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbFdbQuotaMaxSize.setStatus('current')
if mibBuilder.loadTexts: rbFdbQuotaMaxSize.setDescription('')
rbFdbQuotaUsedEntries = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 22, 1, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbFdbQuotaUsedEntries.setStatus('current')
if mibBuilder.loadTexts: rbFdbQuotaUsedEntries.setDescription('')
rbFdbQuotaStaticEntries = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 22, 1, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbFdbQuotaStaticEntries.setStatus('current')
if mibBuilder.loadTexts: rbFdbQuotaStaticEntries.setDescription('')
rbFdbQuotaDynamicEntries = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 22, 1, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbFdbQuotaDynamicEntries.setStatus('current')
if mibBuilder.loadTexts: rbFdbQuotaDynamicEntries.setDescription('')
rbFdbQuotaUnusedEntries = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 22, 1, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbFdbQuotaUnusedEntries.setStatus('current')
if mibBuilder.loadTexts: rbFdbQuotaUnusedEntries.setDescription('')
rbFdbEvcQuotaTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 22, 2), )
if mibBuilder.loadTexts: rbFdbEvcQuotaTable.setStatus('current')
if mibBuilder.loadTexts: rbFdbEvcQuotaTable.setDescription('')
rbFdbEvcQuotaEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 22, 2, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rbFdbEvcQuotaId"))
if mibBuilder.loadTexts: rbFdbEvcQuotaEntry.setStatus('current')
if mibBuilder.loadTexts: rbFdbEvcQuotaEntry.setDescription('')
rbFdbEvcQuotaId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 22, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)))
if mibBuilder.loadTexts: rbFdbEvcQuotaId.setStatus('current')
if mibBuilder.loadTexts: rbFdbEvcQuotaId.setDescription('')
rbRefEvcId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 22, 2, 1, 2), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbRefEvcId.setStatus('current')
if mibBuilder.loadTexts: rbRefEvcId.setDescription('')
rbRefFdbQuotaId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 22, 2, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbRefFdbQuotaId.setStatus('current')
if mibBuilder.loadTexts: rbRefFdbQuotaId.setDescription('')
rbFdbEvcQuotaRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 22, 2, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbFdbEvcQuotaRowStatus.setStatus('current')
if mibBuilder.loadTexts: rbFdbEvcQuotaRowStatus.setDescription('')
rbFdbExtensionTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 22, 3), )
if mibBuilder.loadTexts: rbFdbExtensionTable.setStatus('current')
if mibBuilder.loadTexts: rbFdbExtensionTable.setDescription('extends the ieee8021QBridgeTpFdbTable')
rbFdbExtensionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 22, 3, 1), )
ieee8021QBridgeTpFdbEntry.registerAugmentions(("RADIO-BRIDGE-MIB", "rbFdbExtensionEntry"))
rbFdbExtensionEntry.setIndexNames(*ieee8021QBridgeTpFdbEntry.getIndexNames())
if mibBuilder.loadTexts: rbFdbExtensionEntry.setStatus('current')
if mibBuilder.loadTexts: rbFdbExtensionEntry.setDescription('An entry containing additional management information applicable to a fdb entry.')
rbRefExtFdbQuotaId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 22, 3, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbRefExtFdbQuotaId.setStatus('current')
if mibBuilder.loadTexts: rbRefExtFdbQuotaId.setDescription('')
rbPcpWriteProfileTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 23, 1), )
if mibBuilder.loadTexts: rbPcpWriteProfileTable.setStatus('current')
if mibBuilder.loadTexts: rbPcpWriteProfileTable.setDescription('')
rbPcpWriteProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 23, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rbPcpWriteProfileId"))
if mibBuilder.loadTexts: rbPcpWriteProfileEntry.setStatus('current')
if mibBuilder.loadTexts: rbPcpWriteProfileEntry.setDescription('')
rbPcpWriteProfileId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 23, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)))
if mibBuilder.loadTexts: rbPcpWriteProfileId.setStatus('current')
if mibBuilder.loadTexts: rbPcpWriteProfileId.setDescription('')
rbPcpWriteProfilePcp = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 23, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbPcpWriteProfilePcp.setStatus('current')
if mibBuilder.loadTexts: rbPcpWriteProfilePcp.setDescription('')
rbPcpWriteProfileRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 23, 1, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbPcpWriteProfileRowStatus.setStatus('current')
if mibBuilder.loadTexts: rbPcpWriteProfileRowStatus.setDescription('')
rbSyslogTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 24, 1), )
if mibBuilder.loadTexts: rbSyslogTable.setStatus('current')
if mibBuilder.loadTexts: rbSyslogTable.setDescription('')
rbSyslogEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 24, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rbSyslogId"))
if mibBuilder.loadTexts: rbSyslogEntry.setStatus('current')
if mibBuilder.loadTexts: rbSyslogEntry.setDescription('')
rbSyslogId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 24, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: rbSyslogId.setStatus('current')
if mibBuilder.loadTexts: rbSyslogId.setDescription('')
rbSyslogServerIp = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 24, 1, 1, 2), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbSyslogServerIp.setStatus('current')
if mibBuilder.loadTexts: rbSyslogServerIp.setDescription('')
rbSyslogRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 24, 1, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbSyslogRowStatus.setStatus('current')
if mibBuilder.loadTexts: rbSyslogRowStatus.setDescription('')
rbNtpTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 25, 1), )
if mibBuilder.loadTexts: rbNtpTable.setStatus('current')
if mibBuilder.loadTexts: rbNtpTable.setDescription('')
rbNtpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 25, 1, 1), ).setIndexNames((0, "RADIO-BRIDGE-MIB", "rbNtpId"))
if mibBuilder.loadTexts: rbNtpEntry.setStatus('current')
if mibBuilder.loadTexts: rbNtpEntry.setDescription('')
rbNtpId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 25, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: rbNtpId.setStatus('current')
if mibBuilder.loadTexts: rbNtpId.setDescription('')
rbNtpServerIp = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 25, 1, 1, 2), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbNtpServerIp.setStatus('current')
if mibBuilder.loadTexts: rbNtpServerIp.setDescription('')
rbNtpSecondaryServerIp = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 25, 1, 1, 3), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbNtpSecondaryServerIp.setStatus('current')
if mibBuilder.loadTexts: rbNtpSecondaryServerIp.setDescription('')
rbNtpTmz = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 25, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-12, 14))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbNtpTmz.setStatus('current')
if mibBuilder.loadTexts: rbNtpTmz.setDescription('')
rbNtpRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 25, 1, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rbNtpRowStatus.setStatus('current')
if mibBuilder.loadTexts: rbNtpRowStatus.setDescription('')
rbLicenseTable = MibTable((1, 3, 6, 1, 4, 1, 31926, 26, 1), )
if mibBuilder.loadTexts: rbLicenseTable.setStatus('current')
if mibBuilder.loadTexts: rbLicenseTable.setDescription('')
rbLicenseEntry = MibTableRow((1, 3, 6, 1, 4, 1, 31926, 26, 1, 1), ).setIndexNames((1, "RADIO-BRIDGE-MIB", "rbLicenseId"))
if mibBuilder.loadTexts: rbLicenseEntry.setStatus('current')
if mibBuilder.loadTexts: rbLicenseEntry.setDescription('')
rbLicenseId = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 26, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32)))
if mibBuilder.loadTexts: rbLicenseId.setStatus('current')
if mibBuilder.loadTexts: rbLicenseId.setDescription('')
rbLicenseCurrentValue = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 26, 1, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rbLicenseCurrentValue.setStatus('current')
if mibBuilder.loadTexts: rbLicenseCurrentValue.setDescription('for data-rate means data rate value, for enable similar to TruthValue')
rbLicenseMaxValue = MibTableColumn((1, 3, 6, 1, 4, 1, 31926, 26, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rbLicenseMaxValue.setStatus('current')
if mibBuilder.loadTexts: rbLicenseMaxValue.setDescription('')
mibBuilder.exportSymbols("RADIO-BRIDGE-MIB", fileSessionResult=fileSessionResult, trapCurrentAlarm=trapCurrentAlarm, rbMepEntry=rbMepEntry, radioBridgeSyslog=radioBridgeSyslog, rfTxMute=rfTxMute, rbSystemAuthenticationSecret=rbSystemAuthenticationSecret, rbSyslogTable=rbSyslogTable, rfDaysMinModulation=rfDaysMinModulation, radioBridgeQosEgressQueue=radioBridgeQosEgressQueue, rfDaysStart=rfDaysStart, rbRfStatisticsDaysTable=rbRfStatisticsDaysTable, trapTemperatureOutOfRange=trapTemperatureOutOfRange, rbPeerMepEntry=rbPeerMepEntry, rbEventConfigMask=rbEventConfigMask, rbPeerMep=rbPeerMep, rbFdbEvcQuotaEntry=rbFdbEvcQuotaEntry, rbMeterColorMode=rbMeterColorMode, rfDaysMaxCinr=rfDaysMaxCinr, rfLoopDirection=rfLoopDirection, qosEgressQueuePortNum=qosEgressQueuePortNum, rbLldpPortVid=rbLldpPortVid, rbMeterCir=rbMeterCir, rbNtpRowStatus=rbNtpRowStatus, trapTxMuteEnabled=trapTxMuteEnabled, rbEventConfigId=rbEventConfigId, rbSwBank2ScheduledToRunNextReset=rbSwBank2ScheduledToRunNextReset, rbMepAisEnable=rbMepAisEnable, rbFdbQuotaTable=rbFdbQuotaTable, rbAgentSnmpVersion=rbAgentSnmpVersion, rbWredProbabilityYellow=rbWredProbabilityYellow, rbAuthServerId=rbAuthServerId, rfTxSynthLock=rfTxSynthLock, fileSessionIndex=fileSessionIndex, rbWredTable=rbWredTable, rbPcpWriteProfileEntry=rbPcpWriteProfileEntry, fileSessionUser=fileSessionUser, rbIpTable=rbIpTable, rfDaysInPkts=rfDaysInPkts, rbClassifierCosTable=rbClassifierCosTable, classifierEvcIpCosType=classifierEvcIpCosType, rbAuthServersEntry=rbAuthServersEntry, rfAverageRssi=rfAverageRssi, rbCurrentAlarmTypeName=rbCurrentAlarmTypeName, rfRoleStatus=rfRoleStatus, rbPcpWriteProfileRowStatus=rbPcpWriteProfileRowStatus, rbNtpSecondaryServerIp=rbNtpSecondaryServerIp, rfDaysMinRssi=rfDaysMinRssi, rbLicenseTable=rbLicenseTable, radioBridgeRefClock=radioBridgeRefClock, radioBridgeTraps=radioBridgeTraps, rfInErroredPkts=rfInErroredPkts, classifierEvcIpCosList=classifierEvcIpCosList, rbRfStatisticsTable=rbRfStatisticsTable, rfInOctets=rfInOctets, radioBridgeSystem=radioBridgeSystem, classifierCosPacketType=classifierCosPacketType, classifierCosPcpList=classifierCosPcpList, rbWredMinThreshold=rbWredMinThreshold, trapSfpOut=trapSfpOut, rfDaysInErroredOctets=rfDaysInErroredOctets, trapTxMuteDisabled=trapTxMuteDisabled, rbNtpEntry=rbNtpEntry, rbQosEgressQueueEntry=rbQosEgressQueueEntry, rbSystemUpAbsoluteTime=rbSystemUpAbsoluteTime, rbRfStaticKey=rbRfStaticKey, rfLowestFecRate=rfLowestFecRate, rfModeSelector=rfModeSelector, classifierCosIpCosList=classifierCosIpCosList, rfOutIdleOctets=rfOutIdleOctets, qosIngressQueueMeterId=qosIngressQueueMeterId, rbMepAisDefects=rbMepAisDefects, rfArqOutLoss=rfArqOutLoss, rfDayIndex=rfDayIndex, ethernetNetworkType=ethernetNetworkType, rbMepAisLevel=rbMepAisLevel, AlarmSeverity=AlarmSeverity, rbLicenseCurrentValue=rbLicenseCurrentValue, rbSysFileOperationEntry=rbSysFileOperationEntry, radioBridgeWred=radioBridgeWred, trapSfpIn=trapSfpIn, rbSysFileOperationTable=rbSysFileOperationTable, trapLowestModulation=trapLowestModulation, rfDaysInErroredPkts=rfDaysInErroredPkts, rfRole=rfRole, rbPeerMepNearEndLoss=rbPeerMepNearEndLoss, rfMaxModulation=rfMaxModulation, classifierCosId=classifierCosId, rfLoopNumOfSubchannels=rfLoopNumOfSubchannels, radioBridgeSnmp=radioBridgeSnmp, radioBridgeAlarms=radioBridgeAlarms, radioBridgePcpProfile=radioBridgePcpProfile, radioBridgeRf=radioBridgeRf, rbCurrentAlarmChangeCounter=rbCurrentAlarmChangeCounter, rbSyslogEntry=rbSyslogEntry, ethernetLoopMode=ethernetLoopMode, 
ethernetPcpWriteProfileId=ethernetPcpWriteProfileId, rfLoopNumOfRepetitions=rfLoopNumOfRepetitions, rbRefFdbQuotaId=rbRefFdbQuotaId, rbLicenseMaxValue=rbLicenseMaxValue, rbLldpPortIfIndex=rbLldpPortIfIndex, rfInErroredOctets=rfInErroredOctets, rbLldpPortExtensionEntry=rbLldpPortExtensionEntry, classifierCosIpCosType=classifierCosIpCosType, radioBridgeMeter=radioBridgeMeter, rbIpEntry=rbIpEntry, rfOutPkts=rfOutPkts, rbFdbQuotaRowStatus=rbFdbQuotaRowStatus, radioBridgeQosIngressQueue=radioBridgeQosIngressQueue, rbIpVlanId=rbIpVlanId, radioBridgeEncryption=radioBridgeEncryption, rfTemperature=rfTemperature, rbMepAisPeriod=rbMepAisPeriod, rfLoopFecRate=rfLoopFecRate, rfLoopModeSelector=rfLoopModeSelector, rbQosIngressQueueEntry=rbQosIngressQueueEntry, trapTemperatureInRange=trapTemperatureInRange, rfLowestNumOfSubchannels=rfLowestNumOfSubchannels, rbIpGateway=rbIpGateway, rfLoopModulationType=rfLoopModulationType, rfDaysOutIdleOctets=rfDaysOutIdleOctets, rbCurrentAlarmEntry=rbCurrentAlarmEntry, rbWredProbability=rbWredProbability, rbSwBank1ScheduledToRunNextReset=rbSwBank1ScheduledToRunNextReset, refClockStatus=refClockStatus, rfInGoodPkts=rfInGoodPkts, rbLldpPortExtensionTable=rbLldpPortExtensionTable, rbSwBank1Running=rbSwBank1Running, rbCurrentAlarmCause=rbCurrentAlarmCause, rbRefEvcId=rbRefEvcId, rbFdbExtensionEntry=rbFdbExtensionEntry, rfRxState=rfRxState, rfNumOfChannels=rfNumOfChannels, trapModulationChange=trapModulationChange, rbMeterCbs=rbMeterCbs, rbMaIndex=rbMaIndex, rbWredEntry=rbWredEntry, rfDaysInOctets=rfDaysInOctets, rfLowestNumOfRepetitions=rfLowestNumOfRepetitions, rbCurrentAlarmIfIndex=rbCurrentAlarmIfIndex, rbCurrentAlarmIndex=rbCurrentAlarmIndex, rfDaysArqOutLoss=rfDaysArqOutLoss, radioBridgeScheduler=radioBridgeScheduler, radioBridgeNtp=radioBridgeNtp, rfDaysMinCinr=rfDaysMinCinr, rbCurrentAlarmRaisedTime=rbCurrentAlarmRaisedTime, rbSysTemperature=rbSysTemperature, trapCinrInRange=trapCinrInRange, rbSyslogRowStatus=rbSyslogRowStatus, rbCurrentAlarmMostSevere=rbCurrentAlarmMostSevere, rfIndex=rfIndex, rfDaysInLostPkts=rfDaysInLostPkts, rbRfEncryption=rbRfEncryption, classifierEvcPrecedence=classifierEvcPrecedence, rfAsymmetry=rfAsymmetry, rfDaysInIdleOctets=rfDaysInIdleOctets, rbIpIndex=rbIpIndex, trapLoopDisabled=trapLoopDisabled, fileSessionRemotePath=fileSessionRemotePath, classifierEvcPortList=classifierEvcPortList, rbEventConfigEntry=rbEventConfigEntry, rfChannelWidth=rfChannelWidth, rbMeterEbs=rbMeterEbs, rbEventConfigIndex=rbEventConfigIndex, rbMdIndex=rbMdIndex, rbIpType=rbIpType, rbPeerMepFrameDelay=rbPeerMepFrameDelay, rfMinCinr=rfMinCinr, trapNoLowestModulation=trapNoLowestModulation, trapRefClockChanged=trapRefClockChanged, rbCurrentAlarmSeverity=rbCurrentAlarmSeverity, ethernetLoopTimeout=ethernetLoopTimeout, rbCurrentAlarmSource=rbCurrentAlarmSource, rbLicenseId=rbLicenseId, rfValid=rfValid, rbSyslogServerIp=rbSyslogServerIp, rbAgentWriteCommunity=rbAgentWriteCommunity, fileSessionServer=fileSessionServer, rbSchedulerMode=rbSchedulerMode, rfDaysValid=rfDaysValid, rbMep=rbMep, rbFdbQuotaDynamicEntries=rbFdbQuotaDynamicEntries, rbSwBank1Version=rbSwBank1Version, rbFdbQuotaStaticEntries=rbFdbQuotaStaticEntries, rbFdbEvcQuotaRowStatus=rbFdbEvcQuotaRowStatus, trapRssiOutOfRange=trapRssiOutOfRange, rbFdbQuotaEntry=rbFdbQuotaEntry, rfRxSynthLock=rfRxSynthLock, rbNtpTmz=rbNtpTmz, rfInPkts=rfInPkts, rfNumOfRepetitions=rfNumOfRepetitions, rbFdbQuotaMaxSize=rbFdbQuotaMaxSize, rbPeerMepTotalTxFarEnd=rbPeerMepTotalTxFarEnd, rbWredRowStatus=rbWredRowStatus, 
rbCurrentAlarmSourceAddr=rbCurrentAlarmSourceAddr, fileSessionState=fileSessionState, rfInLostPkts=rfInLostPkts, rbClassifierEvcTable=rbClassifierEvcTable, classifierEvcPacketType=classifierEvcPacketType, rbQosIngressQueueTable=rbQosIngressQueueTable, rbWredId=rbWredId, radioBridgeCfm=radioBridgeCfm, rbWredMaxThresholdYellow=rbWredMaxThresholdYellow, radioBridgeEventConfig=radioBridgeEventConfig, rbRfStatisticsDaysEntry=rbRfStatisticsDaysEntry, rfMaxCinr=rfMaxCinr, qosEgressQueueCir=qosEgressQueueCir, rbRfAuthenticationString=rbRfAuthenticationString, radioBridgeQosClassifier=radioBridgeQosClassifier, rbPeerMepId=rbPeerMepId, rbLicenseEntry=rbLicenseEntry, rbCurrentAlarmType=rbCurrentAlarmType, rbSwBank2Running=rbSwBank2Running, classifierEvcId=classifierEvcId, radioBridgeLicense=radioBridgeLicense, rbIpAddress=rbIpAddress, rfInGoodOctets=rfInGoodOctets, qosEgressQueueWfqWeight=qosEgressQueueWfqWeight, rfFecRate=rfFecRate, rfLoopTimeout=rfLoopTimeout, rbSwBank2Version=rbSwBank2Version, rbNtpId=rbNtpId, rbAuthServersTable=rbAuthServersTable, rbAuthServerPort=rbAuthServerPort, rbNtpServerIp=rbNtpServerIp, rbEventConfigTable=rbEventConfigTable, classifierCosVidList=classifierCosVidList, rfDaysArqInLoss=rfDaysArqInLoss, radioBridgeEthernet=radioBridgeEthernet, rbFdbEvcQuotaId=rbFdbEvcQuotaId, rbQosEgressQueueTable=rbQosEgressQueueTable, fileSessionRowStatus=fileSessionRowStatus, rbMepId=rbMepId, rbMepAisSuppress=rbMepAisSuppress, trapCinrOutOfRange=trapCinrOutOfRange, classifierCosCos=classifierCosCos, rbSysVoltage=rbSysVoltage, refClockPrio=refClockPrio, rbClassifierCosEntry=rbClassifierCosEntry, rbDate=rbDate, rfDaysMaxRssi=rfDaysMaxRssi, rbRefClockTable=rbRefClockTable, classifierEvcVidList=classifierEvcVidList, rbRfStatisticsEntry=rbRfStatisticsEntry, rbWredMaxThreshold=rbWredMaxThreshold, rbWredNfactor=rbWredNfactor, qosIngressQueueEvcId=qosIngressQueueEvcId, trapLoopEnabled=trapLoopEnabled, rfDaysInGoodPkts=rfDaysInGoodPkts, refClockRowStatus=refClockRowStatus, rbAuthServerRowStatus=rbAuthServerRowStatus, rbCurrentAlarmTable=rbCurrentAlarmTable, rbRfTable=rbRfTable, classifierCosRowStatus=classifierCosRowStatus, qosIngressQueueCosId=qosIngressQueueCosId)
mibBuilder.exportSymbols("RADIO-BRIDGE-MIB", rfAlignmentStatus=rfAlignmentStatus, rfDaysMaxModulation=rfDaysMaxModulation, qosEgressQueueColorDrop=qosEgressQueueColorDrop, rfDaysOutOctets=rfDaysOutOctets, rfMaxRssi=rfMaxRssi, rbFdbQuotaId=rbFdbQuotaId, refClockQualityLevelConfig=refClockQualityLevelConfig, rbCurrentAlarmDesc=rbCurrentAlarmDesc, rbFdbQuotaSize=rbFdbQuotaSize, rbFdbQuotaUnusedEntries=rbFdbQuotaUnusedEntries, rbRefExtFdbQuotaId=rbRefExtFdbQuotaId, rbCurrentAlarmLastTrapType=rbCurrentAlarmLastTrapType, radioBridgeRoot=radioBridgeRoot, rbSyslogId=rbSyslogId, rbIpPrefixLen=rbIpPrefixLen, rbPeerMepTotalTxNearEnd=rbPeerMepTotalTxNearEnd, rbTime=rbTime, rfDaysOutPkts=rfDaysOutPkts, rbSystemAuthenticationMode=rbSystemAuthenticationMode, rbWredMinThresholdYellow=rbWredMinThresholdYellow, radioBridgeLldp=radioBridgeLldp, rfTxMuteTimeout=rfTxMuteTimeout, qosIngressQueueRowStatus=qosIngressQueueRowStatus, rbMeterRowStatus=rbMeterRowStatus, radioBridgeIp=radioBridgeIp, rbIpRowStatus=rbIpRowStatus, AlarmType=AlarmType, fileSessionProtocol=fileSessionProtocol, qosIngressQueueMarking=qosIngressQueueMarking, rbMeterId=rbMeterId, rbEthernetTable=rbEthernetTable, rbSysReset=rbSysReset, classifierCosPortList=classifierCosPortList, classifierEvcRowStatus=classifierEvcRowStatus, rbPeerMepFarEndLoss=rbPeerMepFarEndLoss, rfOutOctets=rfOutOctets, refClockSsmCvid=refClockSsmCvid, qosEgressQueueMode=qosEgressQueueMode, classifierEvcPcpList=classifierEvcPcpList, rfInIdleOctets=rfInIdleOctets, trapRssiInRange=trapRssiInRange, radioBridgeAuthentication=radioBridgeAuthentication, refClockQualityLevelActual=refClockQualityLevelActual, rbPcpWriteProfilePcp=rbPcpWriteProfilePcp, classifierCosPrecedence=classifierCosPrecedence, fileSessionCommand=fileSessionCommand, rfOperationalFrequency=rfOperationalFrequency, radioBridgeQuota=radioBridgeQuota, rbNtpTable=rbNtpTable, rbPeerMepFrameDelayVariation=rbPeerMepFrameDelayVariation, rbFdbExtensionTable=rbFdbExtensionTable, rfModulationType=rfModulationType, rfTxLinkId=rfTxLinkId, rbClassifierEvcEntry=rbClassifierEvcEntry, qosEgressQueueCosId=qosEgressQueueCosId, qosEgressDropMode=qosEgressDropMode, rbMeterEir=rbMeterEir, rbAlarmsCommon=rbAlarmsCommon, rfOperationalState=rfOperationalState, rbPcpWriteProfileId=rbPcpWriteProfileId, rfMinModulation=rfMinModulation, rfAverageCinr=rfAverageCinr, rbPcpWriteProfileTable=rbPcpWriteProfileTable, rbRfEntry=rbRfEntry, fileSessionPassword=fileSessionPassword, rfTxPower=rfTxPower, rfDaysInGoodOctets=rfDaysInGoodOctets, rfMinRssi=rfMinRssi, rbAgentReadCommunity=rbAgentReadCommunity, rbRefClockEntry=rbRefClockEntry, refClockQualityLevelMode=refClockQualityLevelMode, rbFdbEvcQuotaTable=rbFdbEvcQuotaTable, rbSystemCapabilities=rbSystemCapabilities, rbCurrentAlarmLastIndex=rbCurrentAlarmLastIndex, rbMeterEntry=rbMeterEntry, fileSessionLocalParams=fileSessionLocalParams, rbAuthServerIpAddress=rbAuthServerIpAddress, rbEthernetEntry=rbEthernetEntry, rfNumOfSubchannels=rfNumOfSubchannels, rfRxLinkId=rfRxLinkId, rfLowestModulationType=rfLowestModulationType, ethernetAlarmPropagation=ethernetAlarmPropagation, rbFdbQuotaUsedEntries=rbFdbQuotaUsedEntries, rfArqInLoss=rfArqInLoss, rbCurrentAlarmAction=rbCurrentAlarmAction, rfTxState=rfTxState, rbLldpPortDestAddressIndex=rbLldpPortDestAddressIndex, rbMeterTable=rbMeterTable, rbSysSaveConfiguration=rbSysSaveConfiguration, ethernetClassifierMode=ethernetClassifierMode, classifierEvcEvc=classifierEvcEvc)
| 101.500988
| 9,252
| 0.779642
|
5e20b491557fce30601a0fe806711a051bb9e47b
| 3,165
|
py
|
Python
|
lib/model/faster_rcnn/resnet.py
|
Arieszhang1994/faster-rcnn_pytorch
|
a25dcf2bb9aeb22b508e67edb5ba093a482a30ad
|
[
"MIT"
] | 4
|
2018-11-16T12:32:49.000Z
|
2019-05-09T10:03:21.000Z
|
lib/model/faster_rcnn/resnet.py
|
Arieszhang1994/faster-rcnn_pytorch
|
a25dcf2bb9aeb22b508e67edb5ba093a482a30ad
|
[
"MIT"
] | null | null | null |
lib/model/faster_rcnn/resnet.py
|
Arieszhang1994/faster-rcnn_pytorch
|
a25dcf2bb9aeb22b508e67edb5ba093a482a30ad
|
[
"MIT"
] | 1
|
2019-01-02T12:50:36.000Z
|
2019-01-02T12:50:36.000Z
|
# --------------------------------------------------------
# Pytorch Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Aries Zhang, based on code from Jiasen Lu, Jianwei Yang
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from model.utils.config import cfg
from model.faster_rcnn.faster_rcnn import _fasterRCNN
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from model.faster_rcnn.resnetparts import resnet101, resnet50, resnet152
import torch.utils.model_zoo as model_zoo
import pdb
class resnet(_fasterRCNN):
def __init__(self, classes, num_layers=101, pretrained=False, class_agnostic=False):
if num_layers == 101:
self.model_path = 'data/pretrained_model/resnet101_caffe.pth'
elif num_layers == 50:
self.model_path = 'data/pretrained_model/resnet50_caffe.pth'
elif num_layers == 152:
self.model_path = 'data/pretrained_model/resnet152_caffe.pth'
self.dout_base_model = 1024
self.pretrained = pretrained
self.class_agnostic = class_agnostic
self.num_layer = num_layers
_fasterRCNN.__init__(self, classes, class_agnostic, pretrained, 'resnet'+str(num_layers))
def _init_modules(self):
if self.num_layer == 101:
resnet = resnet101()
elif self.num_layer == 50:
resnet = resnet50()
elif self.num_layer == 152:
resnet = resnet152()
if self.pretrained:
print("Loading pretrained weights from %s" %(self.model_path))
state_dict = torch.load(self.model_path)
resnet.load_state_dict({k:v for k,v in state_dict.items() if k in resnet.state_dict()})
# Build resnet.
self.RCNN_base = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
resnet.maxpool, resnet.layer1, resnet.layer2, resnet.layer3)
# Fix blocks
for p in self.RCNN_base[0].parameters(): p.requires_grad=False
for p in self.RCNN_base[1].parameters(): p.requires_grad=False
assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
if cfg.RESNET.FIXED_BLOCKS >= 3:
for p in self.RCNN_base[6].parameters(): p.requires_grad=False
if cfg.RESNET.FIXED_BLOCKS >= 2:
for p in self.RCNN_base[5].parameters(): p.requires_grad=False
if cfg.RESNET.FIXED_BLOCKS >= 1:
for p in self.RCNN_base[4].parameters(): p.requires_grad=False
def set_bn_fix(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
for p in m.parameters(): p.requires_grad=False
self.RCNN_base.apply(set_bn_fix)
def train(self, mode=True):
# Override train so that the training mode is set as we want
nn.Module.train(self, mode)
if mode:
# Set fixed blocks to be in eval mode
self.RCNN_base.eval()
self.RCNN_base[5].train()
self.RCNN_base[6].train()
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.eval()
self.RCNN_base.apply(set_bn_eval)
self.detector.RCNN_top.apply(set_bn_eval)
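# A minimal construction sketch based on the __init__ signature above; the class tuple is
# hypothetical and create_architecture() is assumed to be provided by the _fasterRCNN base
# class, as in the upstream faster-rcnn code this file credits:
#
#   classes = ('__background__', 'person', 'car')
#   fasterRCNN = resnet(classes, num_layers=101, pretrained=True, class_agnostic=False)
#   fasterRCNN.create_architecture()   # expected to call the _init_modules() defined above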
| 34.78022
| 94
| 0.683728
|
38c80c8d85f8884e9a70dee615a77aa2949985da
| 5,642
|
py
|
Python
|
Dataset/Cross-Platform/saveGABModelAGNN.py
|
hate-alert/Hateful-users-detection
|
a71cd51df339c0c7e327e08961b18ceb5ced9255
|
[
"MIT"
] | 1
|
2021-08-08T02:55:58.000Z
|
2021-08-08T02:55:58.000Z
|
Dataset/Cross-Platform/saveGABModelAGNN.py
|
hate-alert/Hateful-users-detection
|
a71cd51df339c0c7e327e08961b18ceb5ced9255
|
[
"MIT"
] | null | null | null |
Dataset/Cross-Platform/saveGABModelAGNN.py
|
hate-alert/Hateful-users-detection
|
a71cd51df339c0c7e327e08961b18ceb5ced9255
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import os.path as osp
import torch
import torch.nn.functional as F
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
from torch_geometric.nn import SGConv, ARMAConv, SAGEConv, AGNNConv
import pickle
import numpy as np
import random
import time
import sys
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, confusion_matrix, recall_score, precision_score, f1_score, auc, accuracy_score
import json
import gzip
dataDirectory = '../Dataset/GabData/'
with open(dataDirectory+'GAB_Data/haters.json') as json_file:
haters = json.load(json_file)
with open(dataDirectory+'GAB_Data/nonhaters.json') as json_file:
non_haters = json.load(json_file)
print("User Info loading Done")
with open(dataDirectory+'GABDoc2vec100.p', 'rb') as handle:
doc_vectors=pickle.load(handle)
print("Doc_vec loading done")
with gzip.open(dataDirectory + 'gabEdges1_5degree.pklgz') as fp:
final_list= pickle.load(fp)
print("NetWork Loading Done")
graph={}
graph_dict={}
inv_graph_dict = {}
nodes=0
for i in final_list:
if i[0] not in graph:
graph[i[0]]=[]
graph_dict[i[0]]=nodes
inv_graph_dict[nodes] = i[0]
nodes+=1
if i[1] not in graph:
graph[i[1]]=[]
graph_dict[i[1]]=nodes
inv_graph_dict[nodes] = i[1]
nodes+=1
graph[i[0]].append(i[1])
print("Number of Users in the Network:", nodes)
_X= []
_y =[]
for i in range(0,nodes):
_X.append(doc_vectors[inv_graph_dict[i]])
if inv_graph_dict[i] in haters:
_y.append(1)
elif inv_graph_dict[i] in non_haters:
_y.append(0)
else:
_y.append(2)
featureVector = torch.FloatTensor(_X)
labels = torch.LongTensor(_y)
def getData():
return featureVector, labels
rows=[]
cols=[]
for elem in graph_dict:
neighbours= graph[elem]
r=graph_dict[elem]
for neighbour in neighbours:
c = graph_dict[neighbour]
rows.append(r)
cols.append(c)
edge_index = torch.LongTensor([rows, cols])
print("Edge Index Created")
print("Edges: ", len(rows))
def Diff(li1, li2):
return (list(set(li1) - set(li2)))
def ratio_split(train_haters, train_non_haters, test_haters, test_non_haters, nodes):
#Creating Training List
trainList = list(train_haters)
trainList.extend(train_non_haters)
#Creating Testing DataPoint
textList = list(test_haters)
textList.extend(test_non_haters)
train_mask = [0] * nodes
test_mask = [0] * nodes
for i in trainList:
train_mask[graph_dict[i]] = 1
for i in textList:
test_mask[graph_dict[i]] = 1
train_mask = torch.ByteTensor(train_mask)
test_mask = torch.ByteTensor(test_mask)
print("Splitting done")
return train_mask, test_mask
with open(dataDirectory+'GAB_Data/hateval1.json') as json_file:
test_haters = json.load(json_file)
with open(dataDirectory+'GAB_Data/nonhateval1.json') as json_file:
test_non_haters = json.load(json_file)
train_haters = Diff(haters, test_haters)
train_non_haters = Diff(non_haters, test_non_haters)
num_features = 100
num_classes = 2
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.lin1 = torch.nn.Linear(num_features, 32)
self.prop1 = AGNNConv(requires_grad=True)
self.lin2 = torch.nn.Linear(32, 2)
def forward(self):
x=X
x = F.dropout(x, training=self.training)
x = F.relu(self.lin1(x))
x = self.prop1(x, edge_index)
x = F.dropout(x, training=self.training)
x = self.lin2(x)
return F.log_softmax(x, dim=1)
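# Note: forward() intentionally takes no arguments; it reads the module-level globals X and
# edge_index (full-batch, transductive training), which is why train() and test() below
# simply call model().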
def train():
model.train()
optimizer.zero_grad()
F.nll_loss(model()[train_mask], y[train_mask]).backward()
optimizer.step()
def test():
model.eval()
logits = model()
accs = []
Mf1_score = []
for mask in [train_mask, test_mask]:
pred = logits[mask].max(1)[1]
acc = pred.eq(y[mask]).sum().item() / mask.sum().item()
mfc = f1_score(y[mask].detach().cpu(), pred.detach().cpu(), average='macro')
accs.append(acc)
Mf1_score.append(mfc)
return accs, Mf1_score
random.seed(101)
X, y = getData()
train_mask, test_mask = ratio_split(train_haters, train_non_haters, test_haters, test_non_haters, nodes)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = torch.device('cpu')
model = Net().to(device)
edge_index= edge_index.to(device)
y= y.to(device)
X= X.to(device)
train_mask= train_mask.to(device)
test_mask= test_mask.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
best_val_acc = best_MfScore = train_acc = train_mfscore = 0
for epoch in range(1, 50):
train()
Accuracy, F1Score= test()
if Accuracy[1] > best_val_acc:
best_val_acc = Accuracy[1]
train_acc = Accuracy[0]
best_MfScore = F1Score[1]
train_mfscore = F1Score[0]
#checkpoint = {'model': Net(), 'state_dict': model.state_dict(), 'optimizer' : optimizer.state_dict()}
#torch.save(checkpoint, 'GABcheckpoint_Final_.pth', pickle_protocol =3)
checkpoint = {'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}
torch.save(checkpoint, 'GABAGNNCheckpoint.pth')
test()
print(best_val_acc)
print(best_MfScore)
print('Training')
print(train_acc)
print(train_mfscore)
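# A minimal reload sketch for the checkpoint saved above (file name and dict keys are taken
# from this script; the rest is standard PyTorch):
#
#   model = Net().to(device)
#   state = torch.load('GABAGNNCheckpoint.pth', map_location=device)
#   model.load_state_dict(state['state_dict'])
#   optimizer.load_state_dict(state['optimizer'])
#   model.eval()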
| 24.008511
| 117
| 0.676356
|
9403bc6870c9a8153324a0acb53956769fe555ff
| 396
|
py
|
Python
|
pontoon/sync/templatetags/helpers.py
|
Tratty/pontoon
|
ecb903d72f9274f02137b16669cc3c5859f6329c
|
[
"BSD-3-Clause"
] | 3
|
2020-01-27T12:26:20.000Z
|
2022-02-03T09:56:02.000Z
|
pontoon/sync/templatetags/helpers.py
|
texnoman/pontoon-src
|
6b40ac229605e99966c3bdd1510b772c89d4de24
|
[
"BSD-3-Clause"
] | 9
|
2021-03-10T21:34:51.000Z
|
2022-02-19T03:30:06.000Z
|
pontoon/sync/templatetags/helpers.py
|
texnoman/pontoon-src
|
6b40ac229605e99966c3bdd1510b772c89d4de24
|
[
"BSD-3-Clause"
] | 4
|
2020-01-26T21:28:43.000Z
|
2021-06-10T15:25:19.000Z
|
from __future__ import absolute_import
from django_jinja import library
from pontoon.sync.models import ProjectSyncLog
PROJECT_SYNC_LOG_STATUS = {
ProjectSyncLog.IN_PROGRESS: "In-progress",
ProjectSyncLog.SKIPPED: "Skipped",
ProjectSyncLog.SYNCED: "Synced",
}
@library.global_function
def project_log_status_string(status):
return PROJECT_SYNC_LOG_STATUS.get(status, "---")
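# A minimal usage sketch: the function is registered as a Jinja global via @library.global_function,
# so a django_jinja template (the template fragment below is hypothetical) can call it directly:
#
#   {{ project_log_status_string(log.status) }}   {# renders "Synced" for ProjectSyncLog.SYNCED #}
#
# Any unknown status value falls back to "---".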
| 22
| 53
| 0.785354
|
9ed265aabc180fe94e7d8a09bcce79cf4aa9b46e
| 472
|
py
|
Python
|
lib/taniumpy/object_types/upload_file_list.py
|
c1rdan/pytan
|
5e537a6dcf4136e3b9c3905a39f073396e7f044f
|
[
"MIT"
] | 1
|
2019-01-29T21:22:06.000Z
|
2019-01-29T21:22:06.000Z
|
lib/taniumpy/object_types/upload_file_list.py
|
c1rdan/pytan
|
5e537a6dcf4136e3b9c3905a39f073396e7f044f
|
[
"MIT"
] | null | null | null |
lib/taniumpy/object_types/upload_file_list.py
|
c1rdan/pytan
|
5e537a6dcf4136e3b9c3905a39f073396e7f044f
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015 Tanium Inc
#
# Generated from console.wsdl version 0.0.1
#
#
from .base import BaseType
class UploadFileList(BaseType):
_soap_tag = 'file_parts'
def __init__(self):
BaseType.__init__(
self,
simple_properties={},
complex_properties={},
list_properties={'upload_file': UploadFile},
)
self.upload_file = []
from .upload_file import UploadFile
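# Note: the import above sits after the class on purpose; UploadFile is only dereferenced when
# __init__ builds list_properties at runtime, so the late import still resolves in time and
# presumably avoids a circular import between upload_file_list and upload_file.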
| 16.857143
| 56
| 0.586864
|
6a11c29c83517ed22203108d8921c682b43b144c
| 4,650
|
py
|
Python
|
tempest/tests/lib/services/compute/test_security_groups_client.py
|
Hybrid-Cloud/hybrid-tempest
|
319e90c6fa6e46925b495c93cd5258f088a30ec0
|
[
"Apache-2.0"
] | 3
|
2016-07-15T12:27:23.000Z
|
2021-04-23T04:41:10.000Z
|
tempest/tests/lib/services/compute/test_security_groups_client.py
|
LIS/lis-tempest
|
8e6403b2d6de81c5d18ed867b4977385c8278b75
|
[
"Apache-2.0"
] | null | null | null |
tempest/tests/lib/services/compute/test_security_groups_client.py
|
LIS/lis-tempest
|
8e6403b2d6de81c5d18ed867b4977385c8278b75
|
[
"Apache-2.0"
] | 12
|
2016-07-14T18:13:05.000Z
|
2017-07-08T18:45:42.000Z
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import mockpatch
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.compute import security_groups_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services.compute import base
class TestSecurityGroupsClient(base.BaseComputeServiceTest):
FAKE_SECURITY_GROUP_INFO = [{
"description": "default",
"id": "3fb26eb3-581b-4420-9963-b0879a026506",
"name": "default",
"rules": [],
"tenant_id": "openstack"
}]
def setUp(self):
super(TestSecurityGroupsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = security_groups_client.SecurityGroupsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_security_groups(self, bytes_body=False):
self.check_service_client_function(
self.client.list_security_groups,
'tempest.lib.common.rest_client.RestClient.get',
{"security_groups": self.FAKE_SECURITY_GROUP_INFO},
to_utf=bytes_body)
def test_list_security_groups_with_str_body(self):
self._test_list_security_groups()
def test_list_security_groups_with_bytes_body(self):
self._test_list_security_groups(bytes_body=True)
def _test_show_security_group(self, bytes_body=False):
self.check_service_client_function(
self.client.show_security_group,
'tempest.lib.common.rest_client.RestClient.get',
{"security_group": self.FAKE_SECURITY_GROUP_INFO[0]},
to_utf=bytes_body,
security_group_id='fake-id')
def test_show_security_group_with_str_body(self):
self._test_show_security_group()
def test_show_security_group_with_bytes_body(self):
self._test_show_security_group(bytes_body=True)
def _test_create_security_group(self, bytes_body=False):
post_body = {"name": "test", "description": "test_group"}
self.check_service_client_function(
self.client.create_security_group,
'tempest.lib.common.rest_client.RestClient.post',
{"security_group": self.FAKE_SECURITY_GROUP_INFO[0]},
to_utf=bytes_body,
kwargs=post_body)
def test_create_security_group_with_str_body(self):
self._test_create_security_group()
def test_create_security_group_with_bytes_body(self):
self._test_create_security_group(bytes_body=True)
def _test_update_security_group(self, bytes_body=False):
req_body = {"name": "test", "description": "test_group"}
self.check_service_client_function(
self.client.update_security_group,
'tempest.lib.common.rest_client.RestClient.put',
{"security_group": self.FAKE_SECURITY_GROUP_INFO[0]},
to_utf=bytes_body,
security_group_id='fake-id',
kwargs=req_body)
def test_update_security_group_with_str_body(self):
self._test_update_security_group()
def test_update_security_group_with_bytes_body(self):
self._test_update_security_group(bytes_body=True)
def test_delete_security_group(self):
self.check_service_client_function(
self.client.delete_security_group,
'tempest.lib.common.rest_client.RestClient.delete',
{}, status=202, security_group_id='fake-id')
def test_is_resource_deleted_true(self):
mod = ('tempest.lib.services.compute.security_groups_client.'
'SecurityGroupsClient.show_security_group')
self.useFixture(mockpatch.Patch(mod, side_effect=lib_exc.NotFound))
self.assertTrue(self.client.is_resource_deleted('fake-id'))
def test_is_resource_deleted_false(self):
mod = ('tempest.lib.services.compute.security_groups_client.'
'SecurityGroupsClient.show_security_group')
self.useFixture(mockpatch.Patch(mod, return_value='success'))
self.assertFalse(self.client.is_resource_deleted('fake-id'))
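# A minimal sketch of the behaviour exercised by the two tests above (client construction as in
# setUp(); the wait loop is illustrative, not part of this module):
#
#   client.delete_security_group('fake-id')
#   while not client.is_resource_deleted('fake-id'):
#       time.sleep(1)   # show_security_group raising NotFound makes is_resource_deleted return True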
| 40.789474
| 78
| 0.710323
|
7756b1b471946ba102aa052fc2cc68b41cc050e4
| 5,208
|
py
|
Python
|
utils/tlm_decode.py
|
mjmccaskey/cFS-EDS-GroundStation
|
02995f26c649f423a5b7e2c95447c951f197b880
|
[
"Apache-2.0"
] | 12
|
2022-02-06T18:55:56.000Z
|
2022-03-25T20:28:35.000Z
|
utils/tlm_decode.py
|
mjmccaskey/cFS-EDS-GroundStation
|
02995f26c649f423a5b7e2c95447c951f197b880
|
[
"Apache-2.0"
] | 7
|
2022-02-07T21:20:31.000Z
|
2022-03-24T10:31:08.000Z
|
utils/tlm_decode.py
|
mjmccaskey/cFS-EDS-GroundStation
|
02995f26c649f423a5b7e2c95447c951f197b880
|
[
"Apache-2.0"
] | 3
|
2020-10-27T15:59:47.000Z
|
2021-11-04T15:23:39.000Z
|
'''
LEW-20210-1, Python Ground Station for a Core Flight System with CCSDS Electronic Data Sheets Support
Copyright (c) 2020 United States Government as represented by
the Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
tlm_decode.py
This module listens for UDP messages along a user-specified port
Messages that come in are decoded into an EDS object where the object's
contents are printed to the screen as they come in real time
Command line use:
python3 tlm_decode.py -p <port_number=1235>
(the mission name is filled in from the @CFS_EDS_GS_MISSION_NAME@ template, not passed as a flag)
'''
import sys
import getopt
import socket
import time
import EdsLib
import CFE_MissionLib
def decode_message(mission, intf_db, raw_message):
'''
Decodes a raw input message into an EdsObject
Inputs:
mission - User specified mission name
intf_db - CFE_MissionLib Interface Database
raw_message - Packed Bytes message
Outputs:
eds_entry - The EdsDb function to create the EDS object associated with the input message
eds_object - The Unpacked EdsDb Object
'''
eds_id, topic_id = intf_db.DecodeEdsId(raw_message)
eds_entry = EdsLib.DatabaseEntry(mission, eds_id)
eds_object = eds_entry(EdsLib.PackedObject(raw_message))
return (eds_entry, eds_object)
def display_entries(eds_db, base_object, base_name):
'''
Recursive function that iterates over an EDS object and prints the contents of
the sub-entries to the screen
Inputs:
eds_db - EDS Database
base_object - The EDS object to iterate over
base_name - The base name for the sub-entities printed to the screen
'''
# Array display string
if (eds_db.IsArray(base_object)):
for i in range(len(base_object)):
display_entries(eds_db, base_object[i], f"{base_name}[{i}]")
# Container display string
elif (eds_db.IsContainer(base_object)):
for item in base_object:
display_entries(eds_db, item[1], f"{base_name}.{item[0]}")
# Everything else (number, enumeration, string, etc.)
else:
print('{:<60} = {}'.format(base_name, base_object))
def hex_string(string, bytes_per_line):
'''
Converts a hex representation of a bytes string to a more human readable format
Inputs:
string - hex representation of a bytes string
bytes_per_line - Number specifying the number of hex bytes per line
Output:
hex_str - string that can be printed to the screen
'''
hex_str = ''
count = 0
for i in range(0, len(string), 2):
hex_str += "0x{}{} ".format(string[i].upper(), string[i+1].upper())
count += 1
if count % bytes_per_line == 0:
hex_str += '\n'
return hex_str
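# A worked example of hex_string, traced from the code above:
#
#   hex_string("0899c0000001", 4)
#   -> "0x08 0x99 0xC0 0x00 \n0x00 0x01 "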
def main(argv):
"""
Gets the mission name and port number from command line arguments
Opens up the receive port and listens for telemetry messages
Each message is decoded into an EDS Object and the object's contents are printed to the screen
"""
try:
opts, args = getopt.getopt(argv, "hp:", ["port="])
except getopt.GetoptError:
print("tlm_decode.py -p <port number=1235>")
sys.exit(2)
udp_recv_port = 1235
mission = "@CFS_EDS_GS_MISSION_NAME@".lower()
for opt, arg in opts:
if opt == '-h':
print("tlm_decode.py -p <port number=1235>")
sys.exit()
elif opt in ('-p', '--port'):
udp_recv_port = int(arg)
try:
# Initialize databases
eds_db = EdsLib.Database(mission)
intf_db = CFE_MissionLib.Database(mission, eds_db)
except RuntimeError:
print("tlm_decode is not properly configured")
sys.exit(2)
print("Listening in on port {} for messages".format(udp_recv_port))
# Init udp socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', udp_recv_port))
# Wait for UDP messages
while True:
try:
# Receive message
datagram, host = sock.recvfrom(4096) # buffer size is 4096 bytes
# Ignore datagram if it is not long enough (i.e. it doesn't contain the tlm header)
if len(datagram) < 6:
continue
print(f"Telemetry Packet From: {host[0]}:UDP {host[1]}, {8*len(datagram)} bits :")
print(hex_string(datagram.hex(), 16))
eds_entry, eds_object = decode_message(mission, intf_db, datagram)
display_entries(eds_db, eds_object, eds_entry.Name)
print()
print()
# Handle errors
except socket.error:
print('Ignored socket error.')
time.sleep(1)
if __name__ == "__main__":
main(sys.argv[1:])
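# A minimal smoke-test sketch: any raw packed telemetry packet (packed_bytes is a hypothetical
# placeholder for bytes captured from the cFS telemetry output) can be pushed at the listener
# over plain UDP:
#
#   import socket
#   tx = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   tx.sendto(packed_bytes, ("127.0.0.1", 1235))   # 1235 matches the default port above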
| 31.563636
| 101
| 0.670699
|
4450898c935a0afe1412ebff94dab7e24476d13f
| 3,863
|
py
|
Python
|
PyWidget3/shape/ellipse.py
|
galaxyjim/PyWidget3
|
eb3d269e4e7d8a68ca957d32bc704e31eca20015
|
[
"BSD-3-Clause"
] | null | null | null |
PyWidget3/shape/ellipse.py
|
galaxyjim/PyWidget3
|
eb3d269e4e7d8a68ca957d32bc704e31eca20015
|
[
"BSD-3-Clause"
] | 23
|
2015-03-14T00:03:11.000Z
|
2015-04-10T23:24:21.000Z
|
PyWidget3/shape/ellipse.py
|
galaxyjim/PyWidget3
|
eb3d269e4e7d8a68ca957d32bc704e31eca20015
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Copyright (c) 2009 Nicolas Rougier
# Copyright (c) 2015 James Gaston
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
import math
import pyglet
from pyglet.gl import *
from .shape import Shape, arc_circle
class Ellipse(Shape):
''' Ellipse shape.
This is a class for rendering an ellipse.
'''
# _________________________________________________________________ __init__
def __init__(self, theta1=0, theta2=360, *args, **kwargs):
'''Create an ellipse '''
self._theta1 = theta1
self._theta2 = theta2
Shape.__init__(self, *args, **kwargs)
self._fill_mode = GL_TRIANGLES
self._line_mode = GL_LINE_LOOP
self._update_position()
self._update_shape()
# __________________________________________________________________ __str__
def __str__(self):
s = '<Ellipse %dx%d+%d+%d>' % (self._width,self._height,self._x,self._y)
return s
# ___________________________________________________________________ theta1
def _get_theta1(self):
return self._theta1
def _set_theta1(self, theta1):
self._theta1 = theta1
self._update_shape()
theta1 = property(_get_theta1, _set_theta1,
doc='''Starting angle in degrees
:type: float
''')
# ___________________________________________________________________ theta2
def _get_theta2(self):
return self._theta2
def _set_theta2(self, theta2):
self._theta2 = theta2
self._update_shape()
theta2 = property(_get_theta2, _set_theta2,
doc='''Ending angle in degrees
:type: float
''')
# ________________________________________________________ generate_vertices
def generate_vertices(self):
''' '''
x,y,w,h = 0,0,self._width-1,self._height-1
vertices = arc_circle (x+w/2,y+h/2,w/2,h/2,self._theta1,self._theta2,5)
v = []
for i in range(len(vertices)-1):
v += [vertices[i],]
v += [vertices[i+1],]
v += [(w/2,h/2),]
if math.fmod(self._theta1,360) != math.fmod(self._theta2,360):
vertices += [(w/2,h/2),]
return vertices, v
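# A minimal usage sketch; the keyword names x, y, width and height are assumptions inferred from
# the attributes used in this file and ultimately depend on the Shape base class:
#
#   arc = Ellipse(theta1=0, theta2=270, x=10, y=10, width=100, height=60)
#   print(arc)   # -> <Ellipse 100x60+10+10>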
| 37.872549
| 80
| 0.665286
|
9fafc0deb551ba4e10e2569b60c9a6826c2b71fd
| 20,852
|
py
|
Python
|
src/sentry/migrations/0069_auto__add_lostpasswordhash.py
|
NickPresta/sentry
|
ed3e6034ef560e18f392ba30071de860557b7b43
|
[
"BSD-3-Clause"
] | 2
|
2015-10-14T12:45:32.000Z
|
2016-01-27T03:24:43.000Z
|
src/sentry/migrations/0069_auto__add_lostpasswordhash.py
|
NickPresta/sentry
|
ed3e6034ef560e18f392ba30071de860557b7b43
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/migrations/0069_auto__add_lostpasswordhash.py
|
NickPresta/sentry
|
ed3e6034ef560e18f392ba30071de860557b7b43
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'LostPasswordHash'
db.create_table('sentry_lostpasswordhash', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)),
('hash', self.gf('django.db.models.fields.CharField')(max_length=32)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['LostPasswordHash'])
def backwards(self, orm):
# Deleting model 'LostPasswordHash'
db.delete_table('sentry_lostpasswordhash')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['auth.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'user_added': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['auth.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['auth.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| 80.2
| 182
| 0.55846
|
94a3fd07e2eca662089481ffb765beac7619a48d
| 2,383
|
py
|
Python
|
projects/eurostat/handlers/PackageFiles.py
|
oboforty/geogine
|
b2f865fa351f01608088325b3a43ce3d0a24df3b
|
[
"MIT"
] | null | null | null |
projects/eurostat/handlers/PackageFiles.py
|
oboforty/geogine
|
b2f865fa351f01608088325b3a43ce3d0a24df3b
|
[
"MIT"
] | null | null | null |
projects/eurostat/handlers/PackageFiles.py
|
oboforty/geogine
|
b2f865fa351f01608088325b3a43ce3d0a24df3b
|
[
"MIT"
] | null | null | null |
import os
import shutil
from geoprocessing.core import EmePiperApp as pipe
from geoprocessing.entities import GeogineObj, OriginReference, FileLabel
class PackageFiles(pipe.SyncDataProcesser):
requires = [
('dae file', FileLabel),
('fbx file', FileLabel)
]
provides = 'final product', str
# 'fbx file', str
def process_one(self, val, dtype):
pass
# @TODO: have a config for this as well: tmp folder location, dispose tmp folder? etc ...
def process_data_group(self, dae: FileLabel, fbx: FileLabel):
# TODO: ITT:
# todo: how to MERGE multiple collada into 1?
# todo: PREVIOUS step?
# todo: OFFSET_WORLD is renamed, and is fed to a new Processer
# todo: while LOCAL is fed into this processer
bundles = self.conf.conf.values()
bundles = filter(lambda bu: self._filter_bundle(bu, dae.geo), bundles)
for bundle in bundles:
self._copyfile(bundle['path'], dae.geo, dae.filepath)
#self._copyfile(bundle['path'], geo.iso, fbx_fn.label)
# delete tmp files
self.mark_produced('single country', 1)
yield 'zip_fn.zip'
def dispose(self):
dir_ = 'tmp'
#shutil.rmtree(dir_)
#os.makedirs(dir_)
def _filter_bundle(self, bundle: dict, obj: GeogineObj):
if not bundle['enabled']:
return False
# filter out
for _filtattr, _filtval in bundle.items():
if not _filtattr.startswith('filter_'):
continue
if _filtattr == 'filter_origin_reference':
_filtval = OriginReference(_filtval)
attr = _filtattr[7:]
value = getattr(obj, attr)
# check filter condition
if isinstance(_filtval, list):
filtered_out = value not in _filtval
else:
filtered_out = value != _filtval
if filtered_out:
return False
return True
def _copyfile(self, path, geo: GeogineObj, tmp_filename):
fn1 = path.format(**geo.view, filename=os.path.basename(tmp_filename))
dir1 = os.path.dirname(fn1)
if not os.path.exists(dir1):
os.makedirs(dir1)
shutil.copy(tmp_filename, fn1)
| 29.419753
| 94
| 0.579102
|
a549066fddd2ec9b0e3ea6ce292ad3e419489071
| 1,650
|
py
|
Python
|
setup.py
|
messari/messari-python-api-private
|
b027b691b570bc804e4b55a95cf1046ed1dbde86
|
[
"MIT"
] | 2
|
2022-03-16T06:28:57.000Z
|
2022-03-22T19:48:22.000Z
|
setup.py
|
messari/messari-python-api-private
|
b027b691b570bc804e4b55a95cf1046ed1dbde86
|
[
"MIT"
] | 2
|
2022-01-06T07:28:23.000Z
|
2022-01-31T03:55:20.000Z
|
setup.py
|
messari/messari-python-api-private
|
b027b691b570bc804e4b55a95cf1046ed1dbde86
|
[
"MIT"
] | 2
|
2022-03-16T06:29:00.000Z
|
2022-03-25T09:03:20.000Z
|
from setuptools import setup
# read the contents of your README file
from pathlib import Path
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
setup(
name='messari',
version='0.0.1',
packages=['messari',
'messari.defillama',
'messari.fred',
'messari.messari',
'messari.tokenterminal',
'messari.deepdao',
'messari.eventmonitor',
'messari.nfts',
'messari.nfts.upshot',
'messari.nfts.nftpricefloor',
'messari.nfts.nonfungible',
'messari.nfts.opensea',
'messari.blockexplorers',
'messari.blockexplorers.arbiscan',
'messari.blockexplorers.bscscan',
'messari.blockexplorers.etherscan',
'messari.blockexplorers.ftmscan',
'messari.blockexplorers.optimisticetherscan',
'messari.blockexplorers.polygonscan',
'messari.blockexplorers.snowtrace',
'messari.blockexplorers.solscan'],
url='',
long_description=long_description,
long_description_content_type='text/markdown',
package_data={'messari': ['mappings/messari_to_dl.json', 'mappings/messari_to_tt.json']},
    license='MIT',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
author='Roberto Talamas, Michael Kremer',
author_email='roberto.talamas@gmail.com, kremeremichael@gmail.com',
description='Messari API'
)
| 35.106383
| 93
| 0.610303
|
932788f2f342d9b5659b920dfff323e0a9d67c98
| 7,233
|
py
|
Python
|
src/ITN/srmg/common/util.py
|
Yulv-git/Awesome-Ultrasound-Standard-Plane-Detection
|
2e35afaa891badf5a235b5d995102e4dc8a4cf0d
|
[
"MIT"
] | 1
|
2022-03-24T06:54:36.000Z
|
2022-03-24T06:54:36.000Z
|
src/ITN/srmg/common/util.py
|
Yulv-git/Awesome-Ultrasound-Standard-Plane-Detection
|
2e35afaa891badf5a235b5d995102e4dc8a4cf0d
|
[
"MIT"
] | null | null | null |
src/ITN/srmg/common/util.py
|
Yulv-git/Awesome-Ultrasound-Standard-Plane-Detection
|
2e35afaa891badf5a235b5d995102e4dc8a4cf0d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
'''
Author: Shuangchi He / Yulv
Email: yulvchi@qq.com
Date: 2022-03-19 10:33:38
Motto: Entities should not be multiplied unnecessarily.
LastEditors: Shuangchi He
LastEditTime: 2022-03-23 01:03:25
FilePath: /Awesome-Ultrasound-Standard-Plane-Detection/src/ITN/srmg/common/util.py
Description: Modify here please
Init from https://github.com/yuanwei1989/plane-detection Author: Yuanwei Li (3 Oct 2018)
# Copyright (c) 2006-2017, Nina Milone, Bishesh Kanal, Benjamin Hou
# Copyright (c) 2006-2017, Imperial College of Science, Technology and Medicine
# Produced at Biomedical Image Analysis Group
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
Statistics on Riemannian Manifolds and Groups
---------------------------------------------
This is a set of scripts to compare the computation of the different types of means
on Lie groups. These scripts can be used to reproduce the experiments illustrated in the
video developed for the MICCAI Educational challenge 2014, available at:
url of the video.
:Authors:
`Nina Miolane <website>`
`Bishesh Khanal <website>`
:Organization:
Asclepios Team, INRIA Sophia Antipolis.
:Version:
2017.07.05
Requirements
------------
* `Numpy 1.11 <http://www.numpy.org>`_
Notes
-----
References
----------
(1) Defining a mean on Lie group.
Nina Miolane. Medical Imaging. 2013. <hal-00938320>
'''
import numpy
import math
EPS = 1e-5
def rotVect(R):
"""
Take a rotation matrix and convert it into a vector form.
Input:
R: Rotation matrix of size 3x3
Output:
r: vector with three elements. Norm of r is the rotation angle about an
axis which is the vector r itself.
"""
M = numpy.dot(R,R.T) - numpy.eye(3)
    if (numpy.trace(M) > 1e-20):  # re-project onto SO(3) if R has drifted from numerical orthogonality
R=getClosestRotMat(R)
c = (numpy.trace(R)-1.0)/2.0
if (c > 1):
c=1
if (c < -1):
c=-1
theta = numpy.arccos(c);
if (theta<EPS):
fact = 0.5 * (1.0 + theta**2 / 6.0)
Sr = fact * ( R - R.T)
r = numpy.array([Sr[2,1], Sr[0,2], Sr[1,0]]).T;
elif abs(theta-math.pi)<EPS:
        print('attention r')  # TODO: to be filled in - the theta == pi case is not handled here
else:
fact = 0.5 * theta / numpy.sin(theta)
Sr = fact * (R-R.T)
r=numpy.array([Sr[2,1], Sr[0,2], Sr[1,0]]).T;
return r
def getClosestRotMat(M):
"""
Computation of the closest rotation matrix R of a given matrix M
(avoids computational errors.)
Attributes:
M: rotation matrix
Return:
R: rotation matrix
"""
u , s , v = numpy.linalg.svd(M)
R = numpy.dot(u,v)
s = numpy.eye(3) * s
if (numpy.linalg.det(R)<0):
s[0,0] = 1
s[1,1] = 1
s[2,2] = -1
R=numpy.dot(numpy.dot(u,s),v)
return R
def rotMat(r):
"""
Converts rotation vector r to rotation matrix
Attributes:
r: rotation vector
Return:
R: rotation matrix
"""
r=regRot(r)
theta=numpy.linalg.norm(r)
Sr=skew(r)
if (theta<EPS): # if theta is small use Taylor expansion.
s = 1.0 - ((theta**2)/6.0) # to avoid numerical problems.
        k = 1.0 / 2.0 - (theta**2) / 24.0  # Taylor expansion of (1 - cos(theta)) / theta**2
        R = numpy.eye(3) + s * Sr + k * numpy.dot(Sr, Sr)
else:
R = numpy.eye(3) + (numpy.sin(theta)/theta)*Sr + ((1-numpy.cos(theta))/(theta**2))*(numpy.dot(Sr,Sr))
return R
def jRotL(r):
"""
Differentials of the left and right translations for SO(3) in the principal chart
Attributes:
        r: rotation vector (principal chart coordinates)
    Return:
        Jl: differential of the left translation at r
"""
r=regRot(r)
theta=numpy.linalg.norm(r)
if theta<EPS:
phi=1.0-(theta**2)/12.0
w=1.0/12.0+theta**2/720.0
elif (numpy.abs((theta-math.pi))<EPS):
phi=theta*(math.pi-theta)/4.0
w=(1.0-phi)/theta**2
else:
phi=(theta/2.0)/(numpy.tan(theta/2.0))
w=(1.0-phi)/theta**2
Jl=phi*numpy.eye(3) + (w*(numpy.outer(r,r))) + skew(r)/2.0
return Jl
def jRotR(r):
"""
Differentials of the left and right translations for SO(3) in the principal chart
Attributes:
        r: rotation vector (principal chart coordinates)
    Return:
        Jr: differential of the right translation at r
"""
r=regRot(r)
theta=numpy.linalg.norm(r)
if theta<EPS:
phi=1.0-(theta**2)/12.0
w=1.0/12.0+theta**2/720.0
elif (numpy.abs((theta-math.pi))<EPS):
phi=theta*(math.pi-theta)/4.0
w=(1.0-phi)/theta**2
else:
phi=(theta/2.0)/(numpy.tan(theta/2.0))
w=(1.0-phi)/theta**2
Jr=phi*numpy.eye(3) + (w*(numpy.outer(r,r))) - skew(r)/2.0
return Jr
def skew(r):
"""
Calculates the Skew matrix
Attributes:
r: vector
Return:
S: Skew symmetric matrix
"""
S = numpy.array( [ [0, -r[2], r[1] ],
[ r[2], 0, -r[0] ],
[ -r[1], r[0], 0] ])
return S
def regRot(r):
"""
    Regularizes a rotation vector so that its rotation angle lies in [-pi, pi).
Attributes:
r: a rotation vector
Return:
u: a normalized rotation vector
"""
phi = numpy.linalg.norm(r)
u = r
if (phi != 0):
k0=numpy.double(numpy.floor( (phi/(2.0*math.pi)) + (1.0/2.0)) )
u=(phi-2.0*math.pi*k0)*r/phi
return u
def unifRnd():
"""
    Generates a random normalized SE(3) vector: a regularized rotation part plus a translation drawn uniformly from [-1, 1].
Attributes:
None
Return:
f: a random normalized SE3 vector
"""
f = numpy.zeros(6)
f[0:3] = regRot( numpy.random.rand(3) * 2 - 1 ) # rotation
f[3:6] = numpy.random.rand(3) * 2 - 1 # translation
return f
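# Minimal round-trip sketch (added for illustration, not part of the original
# module): for a regularized rotation vector r, rotVect(rotMat(r)) should give
# back r up to numerical precision.
if __name__ == '__main__':
    r = regRot(numpy.array([0.3, -0.2, 0.5]))
    R = rotMat(r)
    print('original :', r)
    print('recovered:', rotVect(R))  # expected to match r to within ~EPS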
| 29.402439
| 109
| 0.597954
|
b325a6e6b06e03e5760616b75b1a6e3d90bb8b15
| 1,094
|
py
|
Python
|
common/src/stack/command/stack/commands/list/host/key/__init__.py
|
knutsonchris/stacki
|
33087dd5fa311984a66ccecfeee6f9c2c25f665d
|
[
"BSD-3-Clause"
] | 123
|
2015-05-12T23:36:45.000Z
|
2017-07-05T23:26:57.000Z
|
common/src/stack/command/stack/commands/list/host/key/__init__.py
|
knutsonchris/stacki
|
33087dd5fa311984a66ccecfeee6f9c2c25f665d
|
[
"BSD-3-Clause"
] | 177
|
2015-06-05T19:17:47.000Z
|
2017-07-07T17:57:24.000Z
|
common/src/stack/command/stack/commands/list/host/key/__init__.py
|
knutsonchris/stacki
|
33087dd5fa311984a66ccecfeee6f9c2c25f665d
|
[
"BSD-3-Clause"
] | 32
|
2015-06-07T02:25:03.000Z
|
2017-06-23T07:35:35.000Z
|
# @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
import stack.commands
class Command(stack.commands.list.host.command):
"""
List the public keys for hosts.
<arg optional='1' type='string' name='host' repeat='1'>
Zero, one or more host names. If no host names are supplied,
information for all hosts will be listed.
</arg>
"""
def run(self, params, args):
self.beginOutput()
for host in self.getHostnames(args):
rows = self.db.select("""
id, public_key from public_keys
where node = (select id from nodes where name = %s)
""", (host,)
)
for key_id, key in rows:
for line in key.split('\n'):
self.addOutput(host, (key_id, line))
self.endOutput(header=['host', 'id', 'public key'], trimOwner=False)
| 26.682927
| 71
| 0.6883
|
a873800ee3061742a9cee90d3b366e407b3357af
| 1,041
|
py
|
Python
|
bridge_evolver.py
|
ihasdapie/Evolutionary_Bridge_Designer
|
45d81af60d64e2a7cb29a278e6c21b062e14be94
|
[
"MIT"
] | null | null | null |
bridge_evolver.py
|
ihasdapie/Evolutionary_Bridge_Designer
|
45d81af60d64e2a7cb29a278e6c21b062e14be94
|
[
"MIT"
] | null | null | null |
bridge_evolver.py
|
ihasdapie/Evolutionary_Bridge_Designer
|
45d81af60d64e2a7cb29a278e6c21b062e14be94
|
[
"MIT"
] | 1
|
2020-12-06T05:44:37.000Z
|
2020-12-06T05:44:37.000Z
|
from bridgelib2 import *
import math as math
import random as random
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
mandatory_length = 1280
# b1 = Bridge(1.27, 100.54, mandatory_length, 105, 2, 2, 1, 55, 91.45)
b1 = Bridge(1.27, 20.54, mandatory_length, 105, 2, 2, 1, 55, 91.45)
list_of_bearing = []
num_alphas = 1 #TODO: Define what an alpha is
legend_titles = []
for i in range(num_alphas):
list_of_bearing.append([])
for i in range(num_alphas):
# b_in = evolve(b1, 10000, i*4)#starting bridge, number of generation, alpha value
b_in = evolve(b1, 100000, 20)#starting bridge, number of generation, alpha value
b_final = b_in[0];
list_of_bearing[i] = b_in[1];
# print(list_of_bearing)
for i in range(num_alphas):
plt.plot(list_of_bearing[i])
legend_titles.append('Iteration '+str(i));
#plt.legend(legend_titles)
plt.xlabel('Generation Number')
plt.ylabel('Load Bearing Ability (N)')
plt.title('Doing Some Evolution on Bridges (Asexual Reproduction)')
plt.grid(True)
plt.show();
b_final.report()
| 24.209302
| 83
| 0.730067
|
8e47ecf80450e454dc3b0ec2524dde3f024c0596
| 871
|
py
|
Python
|
synthetics/maths.py
|
kristianeschenburg/DeepImageRecon
|
5192a7e5e535778d248878c2fbd4476a951490d9
|
[
"Apache-2.0"
] | null | null | null |
synthetics/maths.py
|
kristianeschenburg/DeepImageRecon
|
5192a7e5e535778d248878c2fbd4476a951490d9
|
[
"Apache-2.0"
] | null | null | null |
synthetics/maths.py
|
kristianeschenburg/DeepImageRecon
|
5192a7e5e535778d248878c2fbd4476a951490d9
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import nibabel as nb
def add(original, in_images, weights, filename=None):
"""
    Method to linearly combine a set of images.
Parameters:
- - - - -
original: string
input image file to add corruption to
in_images: list, string
images to corrupt original image with
weights: array, float
weights to apply to each corrupting image
filename: string
output filename of fully corrupted image
Returns:
- - - -
corrupted: array
noisy, corrupted original image volume
"""
og = nb.load(original)
og_data = og.get_data()
corrupted = np.zeros((og_data.shape)) + og_data
for ig, weight in zip(in_images, weights):
ig_obj = nb.load(ig)
ig_data = ig_obj.get_data()
corrupted += (ig_data*weight)
return [og_data, corrupted]
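# Hypothetical usage sketch (file names below are placeholders, not part of the
# original module): corrupt a clean volume with two weighted noise volumes.
if __name__ == '__main__':
    clean, noisy = add('clean_image.nii.gz',
                       in_images=['noise_a.nii.gz', 'noise_b.nii.gz'],
                       weights=[0.3, 0.1])
    print(noisy.shape)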
| 23.540541
| 53
| 0.637199
|
feb53a77964e6060664f35701ff4d84f2ae796a1
| 1,170
|
py
|
Python
|
tests/nlp_reasoning/test_data_utils.py
|
edmundmills/nlp-reasoning
|
7b804534d53287eb08db85d5f454bc65afc6834b
|
[
"MIT"
] | null | null | null |
tests/nlp_reasoning/test_data_utils.py
|
edmundmills/nlp-reasoning
|
7b804534d53287eb08db85d5f454bc65afc6834b
|
[
"MIT"
] | null | null | null |
tests/nlp_reasoning/test_data_utils.py
|
edmundmills/nlp-reasoning
|
7b804534d53287eb08db85d5f454bc65afc6834b
|
[
"MIT"
] | null | null | null |
from nlp_reasoning.data_utils import *
def test_parse_data():
generator = parse_data('data/Sarcasm_Headlines_Dataset_v2.json')
assert(next(generator) is not None)
class TestCleanText:
def test_already_clean(self):
text = 'I like to go to the store'
assert(clean_text(text) == text)
def test_empty(self):
text = ''
assert(clean_text(text) == text)
def test_none(self):
text = None
assert(clean_text(text) == text)
def test_url(self):
text = 'Test Text: '
url = 'https://www.google.com'
assert(clean_text(text + url) == text)
class TestTrimTrailingSentence:
def test_empty_string(self):
text = ''
assert(trim_trailing_sentence(text) == text)
def test_no_trail(self):
text = 'A full sentence.'
assert(trim_trailing_sentence(text) == text)
def test_two_sentences(self):
text = 'A full sentence. Another.'
assert(trim_trailing_sentence(text) == text)
def test_trail(self):
text = 'A full sentence.'
trail = ' A trail'
assert(trim_trailing_sentence(text + trail) == text)
| 28.536585
| 68
| 0.622222
|
001726d8e04c5eebe5f249e7ac5fd57252e20cdd
| 499
|
py
|
Python
|
Python_Exercicios/Mundo1/Condições em Python (if..else)/python_032.py
|
jbauermanncode/Curso_Em_Video_Python
|
330c207d7bed4e663fe1b9ab433ab57a9828b7f1
|
[
"MIT"
] | null | null | null |
Python_Exercicios/Mundo1/Condições em Python (if..else)/python_032.py
|
jbauermanncode/Curso_Em_Video_Python
|
330c207d7bed4e663fe1b9ab433ab57a9828b7f1
|
[
"MIT"
] | null | null | null |
Python_Exercicios/Mundo1/Condições em Python (if..else)/python_032.py
|
jbauermanncode/Curso_Em_Video_Python
|
330c207d7bed4e663fe1b9ab433ab57a9828b7f1
|
[
"MIT"
] | null | null | null |
'''
Write a program that reads any year and reports whether it is a leap year.
'''
# Import the date class from the datetime module
from datetime import date
# Read the year
ano = int(input("Que ano você quer analisar? Coloque 0 para analisar: "))
# A value of 0 means: use the current year
if ano == 0:
    ano = date.today().year
# if/else conditional structure
if ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:
print('O ano {} é BISSEXTO.'.format(ano))
else:
print('O ano {} não é BISSEXTO.'.format(ano))
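# Illustration added here (not part of the original exercise): the same leap-year
# rule packaged as a tiny helper with a few sanity checks.
def is_leap(year):
    return year % 4 == 0 and year % 100 != 0 or year % 400 == 0

assert is_leap(2000)       # divisible by 400
assert not is_leap(1900)   # divisible by 100 but not by 400
assert is_leap(2024)       # divisible by 4 and not by 100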
| 26.263158
| 73
| 0.659319
|
b5c3db598dfe77996183e8bd93bd21fa5648c45e
| 2,871
|
py
|
Python
|
lib/python2.6/site-packages/Sphinx-1.1.3-py2.6.egg/sphinx/util/inspect.py
|
stenwt/mediagoblin-quickstart-openshift
|
4a728c4b3b988c59eb9a43ad1ae1ca5edf8bc3c2
|
[
"CC0-1.0"
] | 1
|
2016-02-10T18:22:42.000Z
|
2016-02-10T18:22:42.000Z
|
docs/build/sphinx/sphinx/util/inspect.py
|
mjtamlyn/django-braces
|
8adc9bc4f5139e3d032d4e38657bf86413388b78
|
[
"BSD-3-Clause"
] | 1
|
2016-04-19T13:03:17.000Z
|
2016-04-19T13:03:17.000Z
|
docs/build/sphinx/sphinx/util/inspect.py
|
mjtamlyn/django-braces
|
8adc9bc4f5139e3d032d4e38657bf86413388b78
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
sphinx.util.inspect
~~~~~~~~~~~~~~~~~~~
Helpers for inspecting Python modules.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
# this imports the standard library inspect module without resorting to
# relatively import this module
inspect = __import__('inspect')
from sphinx.util import force_decode
from sphinx.util.pycompat import bytes
if sys.version_info >= (2, 5):
from functools import partial
def getargspec(func):
"""Like inspect.getargspec but supports functools.partial as well."""
if inspect.ismethod(func):
func = func.im_func
parts = 0, ()
if type(func) is partial:
parts = len(func.args), func.keywords.keys()
func = func.func
if not inspect.isfunction(func):
raise TypeError('%r is not a Python function' % func)
args, varargs, varkw = inspect.getargs(func.func_code)
func_defaults = func.func_defaults
if func_defaults:
func_defaults = list(func_defaults)
if parts[0]:
args = args[parts[0]:]
if parts[1]:
for arg in parts[1]:
i = args.index(arg) - len(args)
del args[i]
try:
del func_defaults[i]
except IndexError:
pass
return inspect.ArgSpec(args, varargs, varkw, func_defaults)
else:
getargspec = inspect.getargspec
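# Illustration only (Python 2 era, matching the code above; not part of the
# original module): getargspec on a functools.partial drops the pre-bound
# positional and keyword arguments from the reported signature.
#
#   from functools import partial
#   def f(a, b, c=1, d=2):
#       pass
#   getargspec(partial(f, 10, d=5))
#   # -> ArgSpec(args=['b', 'c'], varargs=None, keywords=None, defaults=[1])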
def isdescriptor(x):
"""Check if the object is some kind of descriptor."""
for item in '__get__', '__set__', '__delete__':
if hasattr(safe_getattr(x, item, None), '__call__'):
return True
return False
def safe_getattr(obj, name, *defargs):
"""A getattr() that turns all exceptions into AttributeErrors."""
try:
return getattr(obj, name, *defargs)
except Exception:
# this is a catch-all for all the weird things that some modules do
# with attribute access
if defargs:
return defargs[0]
raise AttributeError(name)
def safe_getmembers(object, predicate=None):
"""A version of inspect.getmembers() that uses safe_getattr()."""
results = []
for key in dir(object):
try:
value = safe_getattr(object, key, None)
except AttributeError:
continue
if not predicate or predicate(value):
results.append((key, value))
results.sort()
return results
def safe_repr(object):
"""A repr() implementation that returns text safe to use in reST context."""
try:
s = repr(object)
except Exception:
raise ValueError
if isinstance(s, bytes):
return force_decode(s, None).replace('\n', ' ')
return s.replace('\n', ' ')
| 29.90625
| 80
| 0.603622
|
7466580fc173fa42b1d85f7ff630396e4d824a99
| 7,400
|
py
|
Python
|
geoevents/operations/views.py
|
mcenirm/geoevents
|
f78cc09b8bcc8c6e4eee0d1de14e5becad7d49e6
|
[
"MIT"
] | 25
|
2015-01-06T15:37:31.000Z
|
2020-12-10T19:05:22.000Z
|
geoevents/operations/views.py
|
mcenirm/geoevents
|
f78cc09b8bcc8c6e4eee0d1de14e5becad7d49e6
|
[
"MIT"
] | 2
|
2015-01-31T02:36:58.000Z
|
2015-02-01T00:11:15.000Z
|
geoevents/operations/views.py
|
mcenirm/geoevents
|
f78cc09b8bcc8c6e4eee0d1de14e5becad7d49e6
|
[
"MIT"
] | 5
|
2016-01-01T15:04:49.000Z
|
2019-05-30T23:34:30.000Z
|
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from django.contrib import messages
from django.core import serializers
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, DeleteView
from geoevents.common import menu, paginate
from geoevents.core.views import CreateViewWithMessages, PageHeaderMixin, UpdateViewWithMessages
from geoevents.core.models import Setting
from geoevents.maps.models import Map
from geoevents.operations.forms import EventForm, ServiceForm, LessonLearnedBasicForm
from geoevents.operations.models import Event, Deployment, Service, ServiceType, SitRep
from geoevents.timeline.forms import TimelineItemForm
class FormParametersFromGetParamsMixin(object):
"""
This mixin sets the initial value of the event field based on the event url parameter
"""
def set_initial_params(self):
n = {}
object_keys = None
if self.form_class:
object_keys = self.form_class.base_fields.keys()
elif self.model:
object_keys = [f.name for f in self.model._meta.fields]
for k, v in self.request.GET.items():
if k in object_keys:
n[k] = v
return n
def get(self, request, *args, **kwargs):
self.initial = self.set_initial_params()
self.object = None
return super(FormParametersFromGetParamsMixin, self).get(request, *args, **kwargs)
class EventPage(DetailView):
"""
View used for Event detail views.
"""
template_name = 'incident-detail.html'
model = Event
context_object_name = 'item'
def get_context_data(self, **kwargs):
cv = super(EventPage, self).get_context_data(**kwargs)
cv['lesson_learned_form'] = LessonLearnedBasicForm
cv['timeline_item_form'] = TimelineItemForm
return cv
class EventsDashboard(ListView):
context_object_name = 'items'
model = Event
paginate_by = 25
template_name = 'events-list-dashboard.html'
queryset = Event.active_objects.all().order_by('name')
try:
map = Map.objects.get(title='Dashboard')
except:
try:
map = Map.objects.get(title='Base Map')
except:
map = None
def get_context_data(self, **kwargs):
cv = super(EventsDashboard, self).get_context_data(**kwargs)
cv['map'] = self.map
cv['active_deployments'] = Deployment.active_objects.all()
cv['latest_sitreps'] = SitRep.objects.all().order_by('-created')[:5]
low_priority_events = ['exercise', 'special event']
page_categories = ['monitoring', 'low_priority_events', 'active']
for category in page_categories:
cv[category] = []
def categorize(i):
if i.posture.lower() == 'monitoring' and i.event_type.lower() not in low_priority_events:
cv['monitoring'].append(i)
elif i.event_type.lower() in low_priority_events:
cv['low_priority_events'].append(i)
else:
cv['active'].append(i)
map(categorize, self.object_list)
return cv
class SitRepCreateView(CreateViewWithMessages):
def form_valid(self, form):
form.instance.owner = self.request.user
return super(SitRepCreateView, self).form_valid(form)
#TODO: Write test cases for MustBeOwnerDeleteView
class MustBeOwnerDeleteView(DeleteView):
"""
Only allows the owner of an object, or a superuser to delete a record. The "owner" field can be passed in through owner_field variable.
"""
owner_field = 'owner'
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
if (request.user == self.object.__getattribute__(self.owner_field) or self.request.user.is_superuser):
self.object.delete()
return HttpResponseRedirect(self.get_success_url())
else:
raise PermissionDenied
class NewDeploymentFromIncident(PageHeaderMixin, FormParametersFromGetParamsMixin, CreateView):
"""
Sets the initial value of the event field based on the event url parameter.
"""
page_header = 'Create Deployment'
class ServiceLists(ListView):
paginate_by = 25
model = Service
context_object_name = 'items'
allow_empty = True
def get_queryset(self):
kwargs = {}
fields = [f.name for f in self.model._meta.fields]
exclude_fields = []
def filter_fields(f):
'''Returns True if the name of the field is in the GET parameter'''
return True if (f in self.request.GET and f not in exclude_fields) else False
if 'event' in self.request.GET:
kwargs['event__id__exact'] = self.request.GET.get('event')
object_keys = filter(filter_fields, fields)
for k in object_keys:
kwargs[k] = self.request.GET.get(k)
return Service.objects.filter(**kwargs)
class CreateService(CreateViewWithMessages):
form_class = ServiceForm
template_name = 'service-manage.html'
def form_valid(self, form):
self.object = form.save()
if self.kwargs.get('model') and self.kwargs.get('model_pk'):
e = Event.objects.get(id=self.kwargs.get('model_pk'))
e.services.add(self.object)
e.save()
return super(CreateService, self).form_valid(form)
def get_context_data(self, **kwargs):
cv = super(CreateService, self).get_context_data(**kwargs)
cv['model'] = self.kwargs['model']
return cv
class KMLReponse(DetailView):
def render_to_response(self, context, **response_kwargs):
return super(KMLReponse, self).render_to_response(
context,
mimetype='application/vnd.google-earth.kml+xml',
**response_kwargs)
class DeploymentView(DetailView):
try:
map = Map.objects.get(title='Base Map')
except:
map = None
def get_context_data(self, **kwargs):
cv = super(DeploymentView, self).get_context_data(**kwargs)
cv['map'] = self.map
return cv
def service_type(request, name):
    service_type = get_object_or_404(ServiceType, name__iexact=name)
    return HttpResponse(service_type.description)
def service_types(request):
x = serializers.serialize("json", ServiceType.objects.all())
return HttpResponse(x)
def view_service(request, pk):
service = get_object_or_404(Service, pk=pk)
return render_to_response('service-detail.html', {'menu_items': menu('Services'), 'item': service},
RequestContext(request))
def view_services(request):
services_list = Service.objects.all()
services = paginate(services_list, 50, request.GET.get('page'))
return render_to_response('service-list.html', {'menu_items': menu('Services'), 'items': services},
RequestContext(request))
| 33.183857
| 140
| 0.673784
|
cd48b64012d408a4cb5f4834cae8cd645e76d359
| 3,100
|
py
|
Python
|
Contest/LeetCode/WeeklyContest193/3.py
|
WatsonWangZh/CodingPractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | 11
|
2019-09-01T22:36:00.000Z
|
2021-11-08T08:57:20.000Z
|
Contest/LeetCode/WeeklyContest193/3.py
|
WatsonWangZh/LeetCodePractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | null | null | null |
Contest/LeetCode/WeeklyContest193/3.py
|
WatsonWangZh/LeetCodePractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | 2
|
2020-05-27T14:58:52.000Z
|
2020-05-27T15:04:17.000Z
|
# 5455. Minimum Number of Days to Make m Bouquets
# User Accepted:1394
# User Tried:3068
# Total Accepted:1442
# Total Submissions:6104
# Difficulty:Medium
# Given an integer array bloomDay, an integer m and an integer k.
# We need to make m bouquets. To make a bouquet, you need to use k adjacent flowers from the garden.
# The garden consists of n flowers, the ith flower will bloom in the bloomDay[i] and then can be used in exactly one bouquet.
# Return the minimum number of days you need to wait to be able to make m bouquets from the garden. If it is impossible to make m bouquets return -1.
# Example 1:
# Input: bloomDay = [1,10,3,10,2], m = 3, k = 1
# Output: 3
# Explanation: Let's see what happened in the first three days. x means flower bloomed and _ means flower didn't bloom in the garden.
# We need 3 bouquets each should contain 1 flower.
# After day 1: [x, _, _, _, _] // we can only make one bouquet.
# After day 2: [x, _, _, _, x] // we can only make two bouquets.
# After day 3: [x, _, x, _, x] // we can make 3 bouquets. The answer is 3.
# Example 2:
# Input: bloomDay = [1,10,3,10,2], m = 3, k = 2
# Output: -1
# Explanation: We need 3 bouquets each has 2 flowers, that means we need 6 flowers. We only have 5 flowers so it is impossible to get the needed bouquets and we return -1.
# Example 3:
# Input: bloomDay = [7,7,7,7,12,7,7], m = 2, k = 3
# Output: 12
# Explanation: We need 2 bouquets each should have 3 flowers.
# Here's the garden after the 7 and 12 days:
# After day 7: [x, x, x, x, _, x, x]
# We can make one bouquet of the first three flowers that bloomed. We cannot make another bouquet from the last three flowers that bloomed because they are not adjacent.
# After day 12: [x, x, x, x, x, x, x]
# It is obvious that we can make two bouquets in different ways.
# Example 4:
# Input: bloomDay = [1000000000,1000000000], m = 1, k = 1
# Output: 1000000000
# Explanation: You need to wait 1000000000 days to have a flower ready for a bouquet.
# Example 5:
# Input: bloomDay = [1,10,2,9,3,8,4,7,5,6], m = 4, k = 2
# Output: 9
# Constraints:
# bloomDay.length == n
# 1 <= n <= 10^5
# 1 <= bloomDay[i] <= 10^9
# 1 <= m <= 10^6
# 1 <= k <= n
from typing import List  # needed outside the LeetCode judge for the annotations below

class Solution:
def minDays(self, bloomDay: List[int], m: int, k: int) -> int:
# Got TLE.
if len(bloomDay) < m*k:
return -1
tmp = bloomDay[:]
        sortedbloomDay = sorted(bloomDay)
if k == 1:
return sortedbloomDay[m-1]
def check(lst, m, k, n):
cnt = 0
for i in range(len(lst)):
if lst[i] <= m:
cnt += 1
if cnt >= k:
n -= 1
cnt -= k
else:
cnt = 0
if n == 0:
return True
return False
        for ele in sorted(tmp):
            if check(bloomDay, ele, k, m):
                return ele
        return -1
# TODO
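# Alternative sketch (added for illustration, not from the original submission):
# binary-searching the answer day avoids the TLE noted above; the feasibility
# check is O(n) per probe, so the total cost is O(n log(max(bloomDay))).
class SolutionBinarySearch:
    def minDays(self, bloomDay, m, k):
        if m * k > len(bloomDay):
            return -1

        def feasible(day):
            # Count bouquets of k adjacent flowers that have bloomed by `day`.
            bouquets = run = 0
            for bloom in bloomDay:
                if bloom <= day:
                    run += 1
                    if run == k:
                        bouquets += 1
                        run = 0
                else:
                    run = 0
            return bouquets >= m

        lo, hi = min(bloomDay), max(bloomDay)
        while lo < hi:
            mid = (lo + hi) // 2
            if feasible(mid):
                hi = mid
            else:
                lo = mid + 1
        return lo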
| 35.227273
| 171
| 0.582581
|
2b6447d506661e648b4e777042d5d5acdcb7dc25
| 13,693
|
py
|
Python
|
mkdocs/commands/build.py
|
subbu588/Python-code-jvt1
|
5b9e3d836692d36756fd240b9599c58039b4b822
|
[
"BSD-2-Clause"
] | 9
|
2019-04-06T09:57:53.000Z
|
2022-03-17T09:48:50.000Z
|
mkdocs/commands/build.py
|
hufyhang/mkdocs
|
4c4ef7fa7224713e17d479742c2df1b2fc78edcb
|
[
"BSD-2-Clause"
] | null | null | null |
mkdocs/commands/build.py
|
hufyhang/mkdocs
|
4c4ef7fa7224713e17d479742c2df1b2fc78edcb
|
[
"BSD-2-Clause"
] | 9
|
2018-03-15T06:00:26.000Z
|
2022-03-10T14:54:36.000Z
|
# coding: utf-8
from __future__ import unicode_literals
from datetime import datetime
from calendar import timegm
import io
import logging
import os
from jinja2.exceptions import TemplateNotFound
import jinja2
import json
from mkdocs import nav, search, utils
from mkdocs.utils import filters
from mkdocs.relative_path_ext import RelativePathExtension
import mkdocs
class DuplicateFilter(object):
''' Avoid logging duplicate messages. '''
def __init__(self):
self.msgs = set()
def filter(self, record):
rv = record.msg not in self.msgs
self.msgs.add(record.msg)
return rv
log = logging.getLogger(__name__)
log.addFilter(DuplicateFilter())
def get_complete_paths(config, page):
"""
Return the complete input/output paths for the supplied page.
"""
input_path = os.path.join(config['docs_dir'], page.input_path)
output_path = os.path.join(config['site_dir'], page.output_path)
return input_path, output_path
def convert_markdown(markdown_source, config, site_navigation=None):
"""
Convert the Markdown source file to HTML as per the config and
site_navigation. Return a tuple of the HTML as a string, the parsed table
of contents, and a dictionary of any metadata that was specified in the
Markdown file.
"""
extensions = [
RelativePathExtension(site_navigation, config['strict'])
] + config['markdown_extensions']
return utils.convert_markdown(
markdown_source=markdown_source,
extensions=extensions,
extension_configs=config['mdx_configs']
)
def get_global_context(nav, config):
"""
Given the SiteNavigation and config, generate the context which is relevant
to app pages.
"""
extra_javascript = utils.create_media_urls(nav, config['extra_javascript'])
extra_css = utils.create_media_urls(nav, config['extra_css'])
# Support SOURCE_DATE_EPOCH environment variable for "reproducible" builds.
# See https://reproducible-builds.org/specs/source-date-epoch/
timestamp = int(os.environ.get('SOURCE_DATE_EPOCH', timegm(datetime.utcnow().utctimetuple())))
return {
'nav': nav,
# base_url should never end with a slash.
'base_url': nav.url_context.make_relative('/').rstrip('/'),
'extra_css': extra_css,
'extra_javascript': extra_javascript,
'mkdocs_version': mkdocs.__version__,
'build_date_utc': datetime.utcfromtimestamp(timestamp),
'config': config,
# TODO: remove the rest in 1.0 as they are deprecated
'site_name': config['site_name'],
'site_url': config['site_url'],
'site_author': config['site_author'],
'homepage_url': nav.homepage.url,
'page_description': config['site_description'],
'favicon': config['site_favicon'],
'repo_url': config['repo_url'],
'repo_name': config['repo_name'],
'include_nav': config['include_nav'],
'include_next_prev': config['include_next_prev'],
'copyright': config['copyright'],
'google_analytics': config['google_analytics']
}
def get_page_context(page, content, toc, meta, config):
"""
Generate the page context by extending the global context and adding page
specific variables.
"""
if config['site_url']:
page.set_canonical_url(config['site_url'])
if config['repo_url']:
page.set_edit_url(config['repo_url'], config['edit_uri'])
page.content = content
page.toc = toc
page.meta = meta
# TODO: remove the rest in version 1.0 as they are deprecated
if page.is_homepage or page.title is None:
page_title = None
else:
page_title = page.title
if page.is_homepage:
page_description = config['site_description']
else:
page_description = None
return {
'page': page,
# TODO: remove the rest in version 1.0 as they are deprecated
'page_title': page_title,
'page_description': page_description,
'content': content,
'toc': toc,
'meta': meta,
'canonical_url': page.canonical_url,
'current_page': page,
'previous_page': page.previous_page,
'next_page': page.next_page
}
def build_template(template_name, env, config, site_navigation=None):
log.debug("Building template: %s", template_name)
try:
template = env.get_template(template_name)
except TemplateNotFound:
return False
context = {'page': None}
if site_navigation is not None:
context.update(get_global_context(site_navigation, config))
output_content = template.render(context)
output_path = os.path.join(config['site_dir'], template_name)
utils.write_file(output_content.encode('utf-8'), output_path)
return True
def _build_page(page, config, site_navigation, env, dump_json, dirty=False):
# Get the input/output paths
input_path, output_path = get_complete_paths(config, page)
# Read the input file
try:
input_content = io.open(input_path, 'r', encoding='utf-8').read()
except IOError:
log.error('file not found: %s', input_path)
raise
# Process the markdown text
html_content, table_of_contents, meta = convert_markdown(
markdown_source=input_content,
config=config,
site_navigation=site_navigation
)
context = get_global_context(site_navigation, config)
context.update(get_page_context(
page, html_content, table_of_contents, meta, config
))
# Allow 'template:' override in md source files.
if 'template' in meta:
template = env.get_template(meta['template'][0])
else:
try:
template = env.get_template('main.html')
except jinja2.TemplateNotFound:
# TODO: Remove this in version 1.0
template = env.get_template('base.html')
log.warn(
"Your theme does not appear to contain a 'main.html' template. "
"The 'base.html' template was used instead, which is deprecated. "
"Update your theme so that the primary entry point is 'main.html'."
)
# Render the template.
output_content = template.render(context)
# Write the output file.
if dump_json:
json_context = {
'content': context['content'],
'title': context['current_page'].title,
'url': context['current_page'].abs_url,
'language': 'en',
}
json_output = json.dumps(json_context, indent=4).encode('utf-8')
utils.write_file(json_output, output_path.replace('.html', '.json'))
else:
utils.write_file(output_content.encode('utf-8'), output_path)
return html_content, table_of_contents, meta
def build_extra_templates(extra_templates, config, site_navigation=None):
log.debug("Building extra_templates page")
for extra_template in extra_templates:
input_path = os.path.join(config['docs_dir'], extra_template)
with io.open(input_path, 'r', encoding='utf-8') as template_file:
template = jinja2.Template(template_file.read())
context = {'page': None}
if site_navigation is not None:
context.update(get_global_context(site_navigation, config))
output_content = template.render(context)
output_path = os.path.join(config['site_dir'], extra_template)
utils.write_file(output_content.encode('utf-8'), output_path)
def build_pages(config, dump_json=False, dirty=False):
"""
Builds all the pages and writes them into the build directory.
"""
site_navigation = nav.SiteNavigation(config['pages'], config['use_directory_urls'])
loader = jinja2.FileSystemLoader(config['theme_dir'] + [config['mkdocs_templates'], ])
env = jinja2.Environment(loader=loader)
# TODO: remove DeprecationContext in v1.0 when all deprecated vars have been removed
from jinja2.runtime import Context
deprecated_vars = {
'page_title': 'page.title',
'content': 'page.content',
'toc': 'page.toc',
'meta': 'page.meta',
'canonical_url': 'page.canonical_url',
'previous_page': 'page.previous_page',
'next_page': 'page.next_page',
'current_page': 'page',
'include_nav': 'nav|length>1',
'include_next_prev': '(page.next_page or page.previous_page)',
'site_name': 'config.site_name',
'site_author': 'config.site_author',
'page_description': 'config.site_description',
'repo_url': 'config.repo_url',
'repo_name': 'config.repo_name',
'site_url': 'config.site_url',
'copyright': 'config.copyright',
'google_analytics': 'config.google_analytics',
'homepage_url': 'nav.homepage.url',
'favicon': '{{ base_url }}/img/favicon.ico',
}
class DeprecationContext(Context):
def resolve(self, key):
""" Log a warning when accessing any deprecated variable name. """
if key in deprecated_vars:
log.warn(
"Template variable warning: '{0}' is being deprecated "
"and will not be available in a future version. Use "
"'{1}' instead.".format(key, deprecated_vars[key])
)
return super(DeprecationContext, self).resolve(key)
env.context_class = DeprecationContext
# TODO: end remove DeprecationContext
env.filters['tojson'] = filters.tojson
search_index = search.SearchIndex()
# Force absolute URLs in the nav of error pages and account for the
    # possibility that the docs root might be different than the server root.
# See https://github.com/mkdocs/mkdocs/issues/77
site_navigation.url_context.force_abs_urls = True
default_base = site_navigation.url_context.base_path
site_navigation.url_context.base_path = utils.urlparse(config['site_url']).path
build_template('404.html', env, config, site_navigation)
# Reset nav behavior to the default
site_navigation.url_context.force_abs_urls = False
site_navigation.url_context.base_path = default_base
if not build_template('search.html', env, config, site_navigation):
log.debug("Search is enabled but the theme doesn't contain a "
"search.html file. Assuming the theme implements search "
"within a modal.")
build_template('sitemap.xml', env, config, site_navigation)
build_extra_templates(config['extra_templates'], config, site_navigation)
for page in site_navigation.walk_pages():
try:
# When --dirty is used, only build the page if the markdown has been modified since the
# previous build of the output.
input_path, output_path = get_complete_paths(config, page)
if dirty and (utils.modified_time(input_path) < utils.modified_time(output_path)):
continue
log.debug("Building page %s", page.input_path)
build_result = _build_page(page, config, site_navigation, env,
dump_json)
html_content, table_of_contents, _ = build_result
search_index.add_entry_from_context(
page, html_content, table_of_contents)
except Exception:
log.error("Error building page %s", page.input_path)
raise
search_index = search_index.generate_search_index()
json_output_path = os.path.join(config['site_dir'], 'mkdocs', 'search_index.json')
utils.write_file(search_index.encode('utf-8'), json_output_path)
def build(config, live_server=False, dump_json=False, dirty=False):
"""
Perform a full site build.
"""
if not dirty:
log.info("Cleaning site directory")
utils.clean_directory(config['site_dir'])
else:
# Warn user about problems that may occur with --dirty option
log.warning("A 'dirty' build is being performed, this will likely lead to inaccurate navigation and other"
" links within your site. This option is designed for site development purposes only.")
if not live_server:
log.info("Building documentation to directory: %s", config['site_dir'])
if dirty and site_directory_contains_stale_files(config['site_dir']):
log.info("The directory contains stale files. Use --clean to remove them.")
if dump_json:
build_pages(config, dump_json=True, dirty=dirty)
return
# Reversed as we want to take the media files from the builtin theme
# and then from the custom theme_dir so that the custom versions take
# precedence.
for theme_dir in reversed(config['theme_dir']):
log.debug("Copying static assets from theme: %s", theme_dir)
utils.copy_media_files(
theme_dir, config['site_dir'], exclude=['*.py', '*.pyc', '*.html'], dirty=dirty
)
log.debug("Copying static assets from the docs dir.")
utils.copy_media_files(config['docs_dir'], config['site_dir'], dirty=dirty)
log.debug("Building markdown pages.")
build_pages(config, dirty=dirty)
def site_directory_contains_stale_files(site_directory):
"""
Check if the site directory contains stale files from a previous build.
Right now the check returns true if the directory is not empty.
A more sophisticated approach should be found to trigger only if there are
files that won't be overwritten anyway.
"""
if os.path.exists(site_directory):
if os.listdir(site_directory):
return True
return False
| 34.753807
| 114
| 0.662601
|
a15a5f0f92b584357a4aeeafa0746b28c17bcd12
| 4,215
|
py
|
Python
|
examples/ReportService/ReportService_get.py
|
becomejapan/yahooads-python-lib
|
c2b00d271037291dfd00952685e48eda0e835df5
|
[
"Apache-2.0"
] | null | null | null |
examples/ReportService/ReportService_get.py
|
becomejapan/yahooads-python-lib
|
c2b00d271037291dfd00952685e48eda0e835df5
|
[
"Apache-2.0"
] | null | null | null |
examples/ReportService/ReportService_get.py
|
becomejapan/yahooads-python-lib
|
c2b00d271037291dfd00952685e48eda0e835df5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Become Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example code for
Service : ReportService
Operation: get
API Reference: https://github.com/yahoojp-marketing/sponsored-search-api-documents/blob/201901/docs/en/api_reference/services/ReportService.md
Generated by 'api_reference_example_generator.py' using code template 'examples/sample_template.py.template'
"""
import logging
import json
from yahooads import promotionalads
logging.basicConfig(level=logging.INFO)
# logging.getLogger('suds.client').setLevel(logging.DEBUG)
# logging.getLogger('suds.transport').setLevel(logging.DEBUG)
SERVICE = 'ReportService'
OPERATION = 'get'
OPERAND = {
"accountId": "SAMPLE-ACCOUNT-ID",
"reportIds": [
"1111",
"1112"
],
"reportTypes": [
"ACCOUNT",
"CAMPAIGN",
"ADGROUP",
"AD",
"KEYWORDS",
"SEARCH_QUERY",
"GEO",
"FEED_ITEM",
"GEO_TARGET",
"SCHEDULE_TARGET",
"BID_STRATEGY",
"AD_CUSTOMIZERS",
"TARGET_LIST",
"LANDING_PAGE_URL"
],
"reportJobStatuses": [
"WAIT",
"COMPLETED",
"IN_PROGRESS",
"FAILED"
],
"paging": {
"startIndex": "1",
"numberResults": "10"
}
}
"""
SAMPLE RESPONSE = {
"rval": {
"totalNumEntries": "4",
"Page.Type": "ReportPage",
"values": [
{
"operationSucceeded": "true",
"reportRecord": {
"accountId": "SAMPLE-ACCOUNT-ID",
"reportId": "1111",
"reportName": "Sample LANDING_PAGE_URL Report",
"reportJobStatus": "COMPLETED",
"requestTime": "2017/11/27 14:32:48",
"completeTime": "2017/11/27 14:33:20",
"reportDownloadURL": "https://ss.yahooapis.jp/report/V201901/download/3CRAGObSahcIylBoDZS5ftx7qS4VM5jSHqs77QZqmpBFnJFP2jvKe3Dy72UEX3InsUoShWXa3YcX3AmbtqxGco6B"
}
},
{
"operationSucceeded": "true",
"reportRecord": {
"accountId": "SAMPLE-ACCOUNT-ID",
"reportId": "1112",
"reportName": "Sample LANDING_PAGE_URL Report2",
"reportJobStatus": "FAILED",
"reportJobErrorDetail": "INTERNAL_ERROR",
"requestTime": "2017/11/27 14:32:48"
}
},
{
"operationSucceeded": "true",
"reportRecord": {
"accountId": "SAMPLE-ACCOUNT-ID",
"reportId": "1113",
"reportName": "Sample LANDING_PAGE_URL Report3",
"reportJobStatus": "IN_PROGRESS",
"requestTime": "2017/11/27 14:32:48"
}
},
{
"operationSucceeded": "true",
"reportRecord": {
"accountId": "SAMPLE-ACCOUNT-ID",
"reportId": "1114",
"reportName": "Sample LANDING_PAGE_URL Report3",
"reportJobStatus": "WAIT",
"requestTime": "2017/11/27 14:32:48"
}
}
]
}
}
"""
def main():
client = promotionalads.PromotionalAdsClient.LoadFromConfiguration()
service = client.GetService(SERVICE)
print("REQUEST : {}.{}\n{}".format(SERVICE, OPERATION, json.dumps(OPERAND, indent=2)))
try:
if OPERATION == "get":
response = service.get(OPERAND)
elif OPERATION.startswith("get"):
get_method = getattr(service, OPERATION)
response = get_method(OPERAND)
elif OPERATION.startswith("mutate"):
response = service.mutate(OPERAND)
else:
raise("Unknown Operation '{}'".format(OPERATION))
print("RESPONSE :\n{}".format(response))
except Exception as e:
print("Exception at '{}' operations \n{}".format(SERVICE, e))
raise e
if __name__ == '__main__':
main()
| 28.869863
| 169
| 0.613523
|
745ad1baf30549a1305bcd6031346b5c3655dbe5
| 143
|
py
|
Python
|
entrypoint2/examples/hello.py
|
ponty/entrypoint2
|
0db6aa14b38124639fb7d324145352f37caaaac8
|
[
"BSD-2-Clause"
] | 5
|
2015-03-04T02:52:52.000Z
|
2021-03-08T08:22:06.000Z
|
entrypoint2/examples/hello.py
|
ponty/entrypoint2
|
0db6aa14b38124639fb7d324145352f37caaaac8
|
[
"BSD-2-Clause"
] | 6
|
2019-12-10T09:10:32.000Z
|
2021-04-03T11:51:06.000Z
|
entrypoint2/examples/hello.py
|
ponty/entrypoint2
|
0db6aa14b38124639fb7d324145352f37caaaac8
|
[
"BSD-2-Clause"
] | 3
|
2020-02-20T22:32:52.000Z
|
2020-05-18T09:00:31.000Z
|
from entrypoint2 import entrypoint
@entrypoint
def hello(message):
# type of 'message' is not defined, default is str
print(message)
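# Usage sketch (an assumption about the CLI that entrypoint2 generates from the
# signature above; not part of the original example):
#   $ python hello.py "hello world"
#   hello world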
| 17.875
| 54
| 0.734266
|
8b7096483a3ea7383ab14f47e0177c4ccc116584
| 3,673
|
py
|
Python
|
mikaponics/alert/management/commands/send_production_alert_email.py
|
mikaponics/mikaponics-back
|
98e1ff8bab7dda3492e5ff637bf5aafd111c840c
|
[
"BSD-3-Clause"
] | 2
|
2019-04-30T23:51:41.000Z
|
2019-05-04T00:35:52.000Z
|
mikaponics/alert/management/commands/send_production_alert_email.py
|
mikaponics/mikaponics-back
|
98e1ff8bab7dda3492e5ff637bf5aafd111c840c
|
[
"BSD-3-Clause"
] | 27
|
2019-04-30T20:22:28.000Z
|
2022-02-10T08:10:32.000Z
|
mikaponics/alert/management/commands/send_production_alert_email.py
|
mikaponics/mikaponics-back
|
98e1ff8bab7dda3492e5ff637bf5aafd111c840c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from django.core.mail import EmailMultiAlternatives # EMAILER
from django.conf import settings
from django.contrib.auth.models import Group
from django.db.models import Q
from django.template.loader import render_to_string # HTML to TXT
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from foundation.constants import *
from foundation.models import AlertItem
from foundation.utils import reverse_with_full_domain
class Command(BaseCommand):
help = _('Command will send alert email to user about their production.')
def add_arguments(self, parser):
"""
Run manually in console:
python manage.py send_production_alert_email 1
"""
parser.add_argument('id', nargs='+', type=int)
def handle(self, *args, **options):
"""
        Send the production alert email for each alert matching the supplied `id` value(s).
"""
utc_today = timezone.now()
# For debugging purposes only.
self.stdout.write(
self.style.SUCCESS(_('%(dt)s | SPAE | Started running.') % {
'dt': str(timezone.now())
})
)
try:
for id in options['id']:
alert = AlertItem.objects.get(id=id)
self.begin_processing(alert)
except AlertItem.DoesNotExist:
# For debugging purposes only.
            raise CommandError(_('%(dt)s | SPAE | Alert with the given id does not exist.') % {
'dt': str(timezone.now())
})
# For debugging purposes only.
self.stdout.write(
self.style.SUCCESS(_('%(dt)s | SPAE | Finished running.') % {
'dt': str(timezone.now())
})
)
def begin_processing(self, alert):
me = alert.production.user
production = alert.production
# Generate the links.
url = settings.MIKAPONICS_FRONTEND_HTTP_PROTOCOL+settings.MIKAPONICS_FRONTEND_HTTP_DOMAIN+alert.production.get_absolute_url()
web_view_url = reverse_with_full_domain(
reverse_url_id='mikaponics_production_alert_items_email',
resolve_url_args=[alert.id]
)
subject = "Mikaponics: Alert Notification"
param = {
'alert': alert,
'url': url,
'web_view_url': web_view_url,
'me': me
}
# For debugging purposes only.
print("---------------------------------------------------------------")
print("URL", url)
print("WEB URL", web_view_url)
print("---------------------------------------------------------------")
# DEVELOPERS NOTE:
# https://templates.mailchimp.com/resources/inline-css/
# Plug-in the data into our templates and render the data.
text_content = render_to_string('alert/email/production_alert_email_view.txt', param)
html_content = render_to_string('alert/email/production_alert_email_view.html', param)
# Generate our address.
from_email = settings.DEFAULT_FROM_EMAIL
to = [me.email]
# Send the email.
msg = EmailMultiAlternatives(subject, text_content, from_email, to)
msg.attach_alternative(html_content, "text/html")
msg.send()
# For debugging purposes only.
self.stdout.write(
self.style.SUCCESS(_('%(dt)s | SPAE | Sent alert email to %(email)s.') % {
'dt': str(timezone.now()),
'email': me.email
})
)
| 34.980952
| 133
| 0.590798
|
48962588c756a231c5f7bec2dc4d68476aa17707
| 8,868
|
py
|
Python
|
grr/lib/rdfvalues/paths_test.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | 2
|
2019-06-02T13:11:16.000Z
|
2019-06-25T13:30:46.000Z
|
grr/lib/rdfvalues/paths_test.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | null | null | null |
grr/lib/rdfvalues/paths_test.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
"""These are tests for the PathSpec implementation."""
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import test_base
from grr.proto import jobs_pb2
class PathSpecTest(test_base.RDFProtoTestCase):
"""Test the PathSpec implementation."""
rdfvalue_class = rdf_paths.PathSpec
def CheckRDFValue(self, rdfproto, sample):
"""Check that the rdfproto is the same as the sample."""
super(PathSpecTest, self).CheckRDFValue(rdfproto, sample)
self.assertEqual(rdfproto.path, sample.path)
self.assertEqual(rdfproto.pathtype, sample.pathtype)
def GenerateSample(self, number=0):
"""Make a sample PathSpec instance."""
return rdf_paths.PathSpec(path="/%s/" % number, pathtype=number)
def testPop(self):
"""Test we can pop arbitrary elements from the pathspec."""
sample = rdf_paths.PathSpec(
path="/", pathtype=rdf_paths.PathSpec.PathType.OS)
for i in range(5):
sample.Append(path=str(i), pathtype=rdf_paths.PathSpec.PathType.OS)
self.assertEqual([x.path for x in sample], list("/01234"))
# Check we pop the right element.
popped = sample.Pop(2)
self.assertIsInstance(popped, rdf_paths.PathSpec)
self.assertEqual(popped.path, "1")
self.assertEqual([x.path for x in sample], list("/0234"))
# The first element needs special treatment.
self.assertEqual(sample.Pop(0).path, "/")
self.assertEqual([x.path for x in sample], list("0234"))
def testPathSpec(self):
"""Test that PathSpec works."""
# Make a template pathspec using a protobuf the hard way.
pathspec_pb = jobs_pb2.PathSpec(path="/", pathtype=1)
pathspec_pb.nested_path.path = "foo"
pathspec_pb.nested_path.pathtype = 2
reference_pathspec = rdf_paths.PathSpec.FromSerializedString(
pathspec_pb.SerializeToString())
# Create a new RDFPathspec from scratch.
pathspec = rdf_paths.PathSpec()
pathspec.path = "/"
pathspec.pathtype = 1
pathspec.Append(path="foo", pathtype=2)
self.assertRDFValuesEqual(pathspec, reference_pathspec)
# Create a new RDFPathspec from keywords.
pathspec = rdf_paths.PathSpec(path="/", pathtype=1)
pathspec.Append(path="foo", pathtype=2)
self.assertRDFValuesEqual(pathspec, reference_pathspec)
# Check that copies are ok
pathspec = pathspec.Copy()
self.assertRDFValuesEqual(pathspec, reference_pathspec)
# Accessors:
self.assertEqual(pathspec.path, "/")
self.assertEqual(pathspec.last.path, "foo")
pathspec.first.path = "test"
self.assertEqual(pathspec.last.path, "foo")
# Test Pathspec iterator.
self.assertEqual([x.path for x in pathspec], ["test", "foo"])
# Length.
self.assertEqual(len(pathspec), 2)
pathspec = rdf_paths.PathSpec(path="/foo", pathtype=1)
pathspec.Append(path="/", pathtype=0)
self.assertEqual(pathspec.Dirname().CollapsePath(), "/")
pathspec.Append(path="sdasda", pathtype=0)
self.assertEqual(pathspec.Dirname().CollapsePath(), "/foo")
pathspec = rdf_paths.PathSpec(path="/foo", pathtype=1)
pathspec_base = rdf_paths.PathSpec()
pathspec_base.Append(pathspec)
self.assertEqual(pathspec_base.CollapsePath(), "/foo")
pathspec_base = rdf_paths.PathSpec()
pathspec_base.Insert(0, path="/foo", pathtype=1)
self.assertEqual(pathspec_base.CollapsePath(), "/foo")
def testUnicodePaths(self):
"""Test that we can manipulate paths in unicode."""
sample = rdf_paths.PathSpec(pathtype=1, path=u"/dev/c/msn升级程序[1].exe")
# Ensure we can convert to a string.
str(sample)
unicode(sample)
def testCopy(self):
sample = rdf_paths.PathSpec(
path="/", pathtype=rdf_paths.PathSpec.PathType.OS)
sample.Append(path="foo", pathtype=rdf_paths.PathSpec.PathType.TSK)
# Make a copy of the original and change it.
sample_copy = sample.Copy()
sample_copy.last.path = "bar"
# This should not change the original.
self.assertEqual(sample.last.path, "foo")
class GlobExpressionTest(test_base.RDFValueTestCase):
rdfvalue_class = rdf_paths.GlobExpression
USER_ACCOUNT = dict(
username=u"user",
full_name=u"John Smith",
comment=u"This is a user",
last_logon=10000,
domain=u"Some domain name",
homedir=u"/home/user",
sid=u"some sid")
def GenerateSample(self, number=0):
return self.rdfvalue_class("/home/%%User.username%%/*" + str(number))
def testGroupingInterpolation(self):
glob_expression = rdf_paths.GlobExpression()
interpolated = glob_expression.InterpolateGrouping("/home/*.{sh,deb}")
self.assertItemsEqual(interpolated, [u"/home/*.deb", u"/home/*.sh"])
interpolated = glob_expression.InterpolateGrouping("/home/*.{sh, deb}")
self.assertItemsEqual(interpolated, [u"/home/*. deb", u"/home/*.sh"])
interpolated = glob_expression.InterpolateGrouping(
"HKEY_CLASSES_ROOT/CLSID/{16d12736-7a9e-4765-bec6-f301d679caaa}")
self.assertItemsEqual(
interpolated,
[u"HKEY_CLASSES_ROOT/CLSID/{16d12736-7a9e-4765-bec6-f301d679caaa}"])
def testValidation(self):
glob_expression = rdf_paths.GlobExpression(
"/home/%%Users.username%%/**/.mozilla/")
glob_expression.Validate()
glob_expression = rdf_paths.GlobExpression("/home/**/**")
self.assertRaises(ValueError, glob_expression.Validate)
def testRegExIsCorrectForGlobWithoutStars(self):
glob_expression = rdf_paths.GlobExpression("/foo/bar/blah.txt")
regex = glob_expression.AsRegEx()
self.assertTrue(regex.Match("/foo/bar/blah.txt"))
self.assertFalse(regex.Match("/foo/bar/blah2.txt"))
self.assertFalse(regex.Match("/some/foo/bar/blah2.txt"))
self.assertFalse(regex.Match("/some/foo/bar/blah2.txt/other"))
def testRegExIsCorrectForGlobWithQuestion(self):
glob_expression = rdf_paths.GlobExpression("/foo/bar/???.txt")
regex = glob_expression.AsRegEx()
self.assertTrue(regex.Match("/foo/bar/bla.txt"))
self.assertFalse(regex.Match("/foo/bar/blah.txt"))
def testRegExIsCorrectForGlobWithGrouping(self):
glob_expression = rdf_paths.GlobExpression("/foo/{bar,other}/*{.txt,.exe}")
regex = glob_expression.AsRegEx()
self.assertTrue(regex.Match("/foo/bar/blah.txt"))
self.assertTrue(regex.Match("/foo/other/blah2.txt"))
self.assertTrue(regex.Match("/foo/bar/blah.exe"))
self.assertTrue(regex.Match("/foo/other/blah2.exe"))
self.assertFalse(regex.Match("/foo/other2/blah.txt"))
self.assertFalse(regex.Match("/foo/bar/blah.com"))
def testRegExIsCorrectForGlobWithSingleStar(self):
glob_expression = rdf_paths.GlobExpression("/foo/bar/*.txt")
regex = glob_expression.AsRegEx()
self.assertTrue(regex.Match("/foo/bar/blah.txt"))
self.assertFalse(regex.Match("/foo/bar/blah.plist"))
self.assertFalse(regex.Match("/foo/bar/blah/blah.txt"))
self.assertFalse(regex.Match("/foo/blah1/blah2/bar/blah.txt"))
def testRegExIsCorrectForGlobWithTwoStars(self):
glob_expression = rdf_paths.GlobExpression("/foo/**/bar.txt")
regex = glob_expression.AsRegEx()
self.assertTrue(regex.Match("/foo/bar.txt"))
self.assertTrue(regex.Match("/foo/blah/bar.txt"))
self.assertTrue(regex.Match("/foo/blah1/blah2/bar.txt"))
self.assertFalse(regex.Match("/foo/bar.plist"))
self.assertFalse(regex.Match("/foo/blah/bar.txt/res"))
self.assertFalse(regex.Match("/foo/blah1/blah2/bar.txt2"))
def testRegExIsCorrectForComplexGlob(self):
glob_expression = rdf_paths.GlobExpression("/foo/**/bar?/*{.txt,.exe}")
regex = glob_expression.AsRegEx()
self.assertTrue(regex.Match("/foo/bar1/blah.txt"))
self.assertTrue(regex.Match("/foo/bar2/blah.exe"))
self.assertTrue(regex.Match("/foo/c1/c2/c3/bar1/blah.txt"))
self.assertTrue(regex.Match("/foo/c1/c2/c3/bar2/blah.exe"))
self.assertFalse(regex.Match("/foo/bar/blah.txt"))
self.assertFalse(regex.Match("/foo/bar2/blah.com"))
self.assertFalse(regex.Match("/foo/c1/c2/c3/bar1/blah.txt/res.txt"))
self.assertFalse(regex.Match("/foo/c1/c2/c3/bar2/blah.exe/res.exe"))
def testRegExIsCaseInsensitive(self):
glob_expression = rdf_paths.GlobExpression("/foo/**/bar?/*{.txt,.exe}")
regex = glob_expression.AsRegEx()
self.assertTrue(regex.Match("/foo/bar1/blah.txt"))
self.assertTrue(regex.Match("/foO/bAr1/blah.txt"))
self.assertTrue(regex.Match("/foo/bar1/blah.TXT"))
self.assertFalse(regex.Match("/foo/bar2/blah.com"))
self.assertFalse(regex.Match("/foO/bAr2/blah.com"))
self.assertFalse(regex.Match("/foo/bar2/blah.COM"))
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| 35.190476
| 79
| 0.704105
|
4581a612ed60d991f1570a04ef9d19334970c8cf
| 8,937
|
py
|
Python
|
tensorflow/python/data/experimental/ops/optimization.py
|
joshz123/tensorflow
|
7841ca029060ab78e221e757d4b1ee6e3e0ffaa4
|
[
"Apache-2.0"
] | 78
|
2020-08-04T12:36:25.000Z
|
2022-03-25T04:23:40.000Z
|
tensorflow/python/data/experimental/ops/optimization.py
|
sagol/tensorflow
|
04f2870814d2773e09dcfa00cbe76a66a2c4de88
|
[
"Apache-2.0"
] | 203
|
2019-06-14T23:53:10.000Z
|
2022-02-10T02:27:23.000Z
|
tensorflow/python/data/experimental/ops/optimization.py
|
sagol/tensorflow
|
04f2870814d2773e09dcfa00cbe76a66a2c4de88
|
[
"Apache-2.0"
] | 66
|
2020-05-15T10:05:12.000Z
|
2022-02-14T07:28:18.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for optimizing `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops
def model():
"""A transformation that models performance.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return dataset_ops._ModelDataset(dataset) # pylint: disable=protected-access
return _apply_fn
def optimize(optimizations=None):
"""A transformation that applies optimizations.
Args:
optimizations: (Optional.) A `tf.string` vector `tf.Tensor` identifying
optimizations to use. If not specified, the default set of optimizations
is applied.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
return dataset_ops._OptimizeDataset(dataset, optimizations) # pylint: disable=protected-access
return _apply_fn
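# Usage sketch (added for illustration, not part of the TensorFlow source): both
# helpers above return transformation functions intended for
# `tf.data.Dataset.apply`. The optimization name below is an assumption for the
# example only.
#   dataset = tf.data.Dataset.range(100)
#   dataset = dataset.apply(optimize(["map_and_batch_fusion"]))
#   dataset = dataset.apply(model())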
class _ChooseFastestDataset(dataset_ops.DatasetV2):
  """A `Dataset` that produces elements from the fastest of its input datasets."""
def __init__(self, datasets, num_experiments=10):
"""Chooses the fastest of some input datasets.
Given input datasets, produces elements as quickly as the fastest of the
inputs. Note that this dataset assumes that input datasets have the same
elements in the same order, though this is not enforced besides checking
that the input datasets have compatible output types, output shapes, and
cardinality at runtime. The resulting dataset produces elements that are
identical to the input elements, and in the same order.
Note that the time to first iteration is longer when this dataset is used
due to the overhead of dynamically picking the faster dataset. Namely,
for the first num_experiments iterations, this dataset will pull from all
of its inputs simultaneously in order to determine which input is the
fastest. For all subsequent iterations, that input will be used.
Args:
datasets: A list of `Datasets` that all have the same elements in the same
order.
num_experiments: The number of experiments to run before deciding which
dataset is fastest. In each "experiment" iteration, the dataset will
call from all its inputs simultaneously, and update its knowledge of
which input is the fastest.
Returns:
      A `Dataset` that has the same elements as the inputs.
"""
self._datasets = list(datasets)
self._element_spec = self._datasets[0].element_spec
variant_tensor = (
gen_experimental_dataset_ops.choose_fastest_dataset(
[dataset._variant_tensor for dataset in self._datasets], # pylint: disable=protected-access
num_experiments=num_experiments,
**self._flat_structure))
super(_ChooseFastestDataset, self).__init__(variant_tensor)
def _inputs(self):
return self._datasets
@property
def element_spec(self):
return self._element_spec
class _ChooseFastestBranchDataset(dataset_ops.UnaryDataset):
  """A `Dataset` that produces elements from the fastest of several branches."""
def __init__(self,
input_dataset,
functions,
ratio_numerator=1,
ratio_denominator=1,
num_elements_per_branch=None):
"""Chooses the fastest of some dataset functions.
Given dataset functions that take input_dataset as input and output
another dataset, produces elements as quickly as the fastest of these
output datasets. Note that datasets in the dataset functions are assumed
to be stateless, and the iterators created by the functions' output datasets
will, given the same input elements, all produce the same output elements.
Datasets in the functions are also expected to iterate over the input
dataset at most once. The violation of these conditions may lead to
undefined behavior.
For example:
```python
dataset = tf.data.Dataset.range(100)
dataset = _ChooseFastestDataset(
dataset,
[
lambda ds: ds.map(lambda x: tf.reshape(x, [1])).batch(10),
lambda ds: ds.batch(10).map(lambda x: tf.reshape(x, [10, 1]))
],
ratio=10,
num_elements_per_branch=10
)
```
The resulting dataset will produce elements equivalent to
`tf.data.Dataset.range(100).map(lambda x: tf.reshape(x, [1])).batch(10)`, or
`tf.data.Dataset.range(100).batch(10).map(lambda x: tf.reshape(x, [10, 1]))`
Note that the first `num_elements_per_branch` iterations may be slower due
to the
overhead of dynamically picking the fastest dataset. Namely, for these
iterations, the dataset will produce elements from any of branches to
determine which input is the fastest. For all subsequent iterations, that
input will be used.
Args:
input_dataset: A `Dataset` that can be used as input to `functions`.
functions: A list of callables, each of which takes a `Dataset` as input
and returns a `Dataset`.
ratio_numerator: The numerator in the ratio of input elements consumed to
output elements produced for each function. This should be the same for
all functions. For example, if the function is
`lambda ds: ds.batch(10)`, the ratio is 10:1, i.e. the input dataset
must produce 10 elements for every element of the output dataset. In
this case, ratio_numerator should be 10.
ratio_denominator: The denominator in the ratio of input elements consumed
to output elements produced for each function. This should be the same
for all functions. For example, if the function is
`lambda ds: ds.batch(10)`, the ratio is 10:1, i.e. the input dataset
must produce 10 elements for every element of the output dataset. In
this case, ratio_denominator should be 1.
num_elements_per_branch: The number of elements to get from each branch
before deciding which dataset is fastest. In the first len(functions) *
num_elements_per_branch iterations, the dataset will call from one of
the branches, and update its knowledge of which input is the fastest.
Note that (num_elements_per_branch * ratio) is expected to be an
integer.
Returns:
      A `Dataset` that has the same elements as the inputs.
"""
input_structure = dataset_ops.DatasetSpec(input_dataset.element_spec)
self._funcs = [
dataset_ops.StructuredFunctionWrapper(
f, "ChooseFastestV2", input_structure=input_structure)
for f in functions
]
self._element_spec = self._funcs[0].output_structure._element_spec # pylint: disable=protected-access
self._captured_arguments = []
for f in self._funcs:
self._captured_arguments.extend(f.function.captured_inputs)
self._capture_lengths = [
len(f.function.captured_inputs) for f in self._funcs
]
if ratio_numerator <= 0 or ratio_denominator <= 0:
raise ValueError("ratio must be positive.")
if num_elements_per_branch is None:
# Pick a sensible default based on `ratio_denominator`
num_elements_per_branch = 10 * ratio_denominator
variant_tensor = (
gen_experimental_dataset_ops.choose_fastest_branch_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
ratio_numerator=ratio_numerator,
ratio_denominator=ratio_denominator,
other_arguments=self._captured_arguments,
num_elements_per_branch=num_elements_per_branch,
branches=[f.function for f in self._funcs],
other_arguments_lengths=self._capture_lengths,
**self._flat_structure))
super(_ChooseFastestBranchDataset, self).__init__(input_dataset,
variant_tensor)
@property
def element_spec(self):
return self._element_spec
| 41.761682
| 106
| 0.706949
|
eda4b6e0658aa80c470bbd1ec741e1a8ee0e2bc1
| 689
|
py
|
Python
|
apps/slackbot/log.py
|
dlzou/csua-backend
|
a2a6642017b81c2fe2bcc497ecc772e9b7dfe210
|
[
"MIT"
] | null | null | null |
apps/slackbot/log.py
|
dlzou/csua-backend
|
a2a6642017b81c2fe2bcc497ecc772e9b7dfe210
|
[
"MIT"
] | null | null | null |
apps/slackbot/log.py
|
dlzou/csua-backend
|
a2a6642017b81c2fe2bcc497ecc772e9b7dfe210
|
[
"MIT"
] | null | null | null |
from logging import Formatter, StreamHandler
from .client import SLACK_BOT_USER_TOKEN, app
CSUA_PHILBOT_TESTING_CHANNEL_ID = "CCU09PNGL"
CSUA_WEBSITE_UPDATES_CHANNEL_ID = "CG49A3UF8"
class SlackMessageHandler(StreamHandler):
def __init__(self):
super().__init__()
def emit(self, record):
text = self.format(record)
app.client.chat_postMessage(channel=CSUA_WEBSITE_UPDATES_CHANNEL_ID, text=text)
def formatter():
return Formatter("*{levelname}* {name}.{funcName}:{lineno} {message}", style="{")
def enabled():
"""Used by apps.csua_backend.settings.LOGGING"""
def f(record):
return SLACK_BOT_USER_TOKEN is not None
return f
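# Wiring sketch (assumption, not part of this module): the handler, formatter
# factory and enabled() filter above could be combined in the Django LOGGING
# setting roughly as follows, using django.utils.log.CallbackFilter:
#   from apps.slackbot import log
#   LOGGING = {
#       "version": 1,
#       "filters": {
#           "slack_enabled": {"()": "django.utils.log.CallbackFilter",
#                             "callback": log.enabled()},
#       },
#       "formatters": {"slack": {"()": "apps.slackbot.log.formatter"}},
#       "handlers": {
#           "slack": {"class": "apps.slackbot.log.SlackMessageHandler",
#                     "formatter": "slack",
#                     "filters": ["slack_enabled"]},
#       },
#   }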
| 23.758621
| 87
| 0.719884
|
2e397a069bdf7f5e0fdf30c64532ca6f56bf823c
| 942
|
py
|
Python
|
setup.py
|
john-pierce/kms-client
|
97e93c6d5a5dff345305274fd7cf8f3c4a4d9bf6
|
[
"MIT"
] | null | null | null |
setup.py
|
john-pierce/kms-client
|
97e93c6d5a5dff345305274fd7cf8f3c4a4d9bf6
|
[
"MIT"
] | null | null | null |
setup.py
|
john-pierce/kms-client
|
97e93c6d5a5dff345305274fd7cf8f3c4a4d9bf6
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='kms-client',
version='0.0.2',
license='MIT',
description='Encrypt/decrypt data using keys stored in Amazon KMS',
url='https://github.com/john-pierce/kms-client',
author='John Pierce',
author_email='john@killterm.com',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Utilities',
],
packages=['kms_client'],
install_requires=[
'boto>=2.36.0',
'pycrypto'
],
entry_points = {
'console_scripts': [
'kms-client = kms_client:main',
],
}
)
# vi: set ts=2 sw=2 et ai:
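# Usage sketch (added for illustration): the console_scripts entry point above
# exposes a `kms-client` command that dispatches to kms_client:main, e.g.
#   pip install .
#   kms-client --help   # exact arguments depend on kms_client.main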
| 23.55
| 69
| 0.642251
|
82902de12ec01459c85e3968d3a0f846b21e5f25
| 4,386
|
py
|
Python
|
potnanny_api/apps/setting/api.py
|
jeffleary00/greenery
|
cb5b5d037b6fd297463633d2d3315c722851161f
|
[
"MIT"
] | null | null | null |
potnanny_api/apps/setting/api.py
|
jeffleary00/greenery
|
cb5b5d037b6fd297463633d2d3315c722851161f
|
[
"MIT"
] | null | null | null |
potnanny_api/apps/setting/api.py
|
jeffleary00/greenery
|
cb5b5d037b6fd297463633d2d3315c722851161f
|
[
"MIT"
] | 1
|
2018-02-25T17:29:37.000Z
|
2018-02-25T17:29:37.000Z
|
import json
from flask import Blueprint, request, current_app
from flask_restful import Api, Resource
from flask_jwt_extended import jwt_required
from potnanny_core.schemas.keychain import KeychainSchema
from potnanny_core.models.setting import (PollingInterval, TemperatureDisplay,
VesyncAccount, PrimitiveWirelessSetting, TimeDisplay)
from potnanny_core.schemas.setting import (PollingIntervalSchema,
TemperatureDisplaySchema, PrimitiveWirelessSettingSchema,
    VesyncAccountSchema, TimeDisplaySchema)
# NOTE: the following two imports were missing even though Keychain and
# db_session are used below; the module paths are assumed from the package layout.
from potnanny_core.models.keychain import Keychain
from potnanny_core.database import db_session
bp = Blueprint('settings_api', __name__, url_prefix='/api/1.0/settings')
api = Api(bp)
class SettingListApi(Resource):
# @jwt_required
def get(self):
data = []
possibles = ['polling_interval', 'temperature_display',
'primitive_wireless', 'vesync_account', 'time_display']
keys = Keychain.query.all()
if len(keys) < 1:
return {"msg": "no data"}, 404
for obj in keys:
if obj.name in possibles:
data.append(obj)
serialized, errors = KeychainSchema(many=True).dump(data)
if errors:
return errors, 400
else:
return serialized, 200
class SettingApi(Resource):
# @jwt_required
def get(self, name):
obj = None
serialized = None
errors = None
if name == 'polling_interval':
obj = PollingInterval.get()
if not obj:
return {"msg": "object not found"}, 404
serialized, errors = PollingIntervalSchema().load(json.loads(obj.data))
elif name == 'temperature_display':
obj = TemperatureDisplay.get()
if not obj:
return {"msg": "object not found"}, 404
serialized, errors = TemperatureDisplaySchema().load(json.loads(obj.data))
elif name == 'time_display':
obj = TimeDisplay.get()
if not obj:
return {"msg": "object not found"}, 404
serialized, errors = TimeDisplaySchema().load(json.loads(obj.data))
elif name == 'primitive_wireless':
obj = PrimitiveWirelessSetting.get()
if not obj:
return {"msg": "object not found"}, 404
serialized, errors = PrimitiveWirelessSettingSchema().load(json.loads(obj.data))
elif name == 'vesync_account':
obj = VesyncAccount.get()
if not obj:
return {"msg": "object not found"}, 404
serialized, errors = VesyncAccountSchema().load(json.loads(obj.data))
else:
return {"msg": "Unexpected setting type"}, 404
if errors:
return errors, 400
return serialized, 200
# @jwt_required
def put(self, name):
data = None
errors = None
if name == 'polling_interval':
data, errors = PollingIntervalSchema().load(request.get_json())
if errors:
return errors, 400
PollingInterval.set()
obj = PollingInterval.get()
if not obj:
return {"msg": "object not found"}, 404
serialized, errors = PollingIntervalSchema().dump(json.loads(obj.data))
elif name == 'temperature_display':
obj = TemperatureDisplay.get()
if not obj:
return {"msg": "object not found"}, 404
serialized, errors = TemperatureDisplaySchema().dump(json.loads(obj.data))
elif name == 'time_display':
obj = TimeDisplay.get()
if not obj:
return {"msg": "object not found"}, 404
serialized, errors = TimeDisplaySchema().dump(json.loads(obj.data))
elif name == 'primitive_wireless':
obj = PrimitiveWirelessSetting.get()
if not obj:
return {"msg": "object not found"}, 404
serialized, errors = PrimitiveWirelessSettingSchema().dump(json.loads(obj.data))
else:
            return {"msg": "Unexpected setting type"}, 404
        if errors:
            return errors, 400
        return serialized, 200
# @jwt_required
def delete(self, name):
obj = Keychain.query.filter_by(name=name).first()
if obj:
db_session.delete(obj)
db_session.commit()
return "", 204
api.add_resource(SettingListApi, '/settings')
api.add_resource(SettingApi, '/settings/<name>')
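# Registration sketch (assumption, not shown in this module): the blueprint is
# expected to be attached to the Flask application elsewhere, roughly:
#   from apps.setting.api import bp as settings_bp
#   app.register_blueprint(settings_bp)
# after which SettingApi.get serves GET /api/1.0/settings/settings/<name>
# (the url_prefix plus the resource path registered above).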
| 30.041096
| 92
| 0.592795
|
24a2b118b4f3465c44183bfc1fc9b687f0f44ff0
| 450
|
py
|
Python
|
plotly/validators/bar/_customdata.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/bar/_customdata.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/bar/_customdata.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class CustomdataValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name='customdata', parent_name='bar', **kwargs):
super(CustomdataValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'data'),
**kwargs
)
| 32.142857
| 78
| 0.657778
|
d96cdde322482111aad2b543ec0ee91edbd29734
| 1,484
|
py
|
Python
|
tests/daemon/unit/test_files.py
|
mayank-kr/jina
|
4667e9cdf682b4c53003bd24fa0bca1af6612907
|
[
"Apache-2.0"
] | 1
|
2021-06-29T15:53:39.000Z
|
2021-06-29T15:53:39.000Z
|
tests/daemon/unit/test_files.py
|
nandiniinj/jina
|
1abf8740878de3db6f0531875114aaf36e8e12fe
|
[
"Apache-2.0"
] | null | null | null |
tests/daemon/unit/test_files.py
|
nandiniinj/jina
|
1abf8740878de3db6f0531875114aaf36e8e12fe
|
[
"Apache-2.0"
] | null | null | null |
import os
import tempfile
from fastapi import UploadFile
from daemon import daemon_logger
from daemon.models import DaemonID
from daemon.files import DaemonFile
from daemon.files import workspace_files
import pytest
cur_dir = os.path.dirname(os.path.abspath(__file__))
cur_filename = os.path.basename(__file__)
@pytest.mark.parametrize(
'workdir, expected_response',
[
('good_ws', ('devel', '3.7', 'echo Hello', [12345, 12344])),
('good_ws_no_file', ('default', '3.8', '', [])),
('good_ws_emptyfile', ('default', '3.8', '', [])),
('good_ws_multiple_files', ('devel', '3.7', 'echo Hello', [12345, 123456])),
('good_ws_wrong_values', ('default', '3.8', '', [])),
],
)
def test_jinad_file_workspace(workdir, expected_response):
d = DaemonFile(workdir=f'{cur_dir}/models/{workdir}')
assert d.build == expected_response[0]
assert d.python == expected_response[1]
assert d.run == expected_response[2]
assert d.ports == expected_response[3]
def _test_workspace_files():
with tempfile.NamedTemporaryFile() as fp1, tempfile.NamedTemporaryFile() as fp2:
fp1.write(b'Hello world1!')
fp2.write(b'Hello world2!')
fp1.flush()
fp2.flush()
fp1.seek(0, 0)
fp2.seek(0, 0)
id = DaemonID('jworkspace')
print(fp1.read())
        files = [UploadFile(filename='a.txt', file=fp1), UploadFile(filename='b.txt', file=fp2)]
workspace_files(id, files, daemon_logger)
| 32.26087
| 86
| 0.65027
|
1c2e7fe6679d6cec764138b6a3d8661cf7766095
| 2,645
|
py
|
Python
|
UVa Online Judge/v3/397.py
|
mjenrungrot/algorithm
|
e0e8174eb133ba20931c2c7f5c67732e4cb2b703
|
[
"MIT"
] | 1
|
2021-12-08T08:58:43.000Z
|
2021-12-08T08:58:43.000Z
|
UVa Online Judge/v3/397.py
|
mjenrungrot/algorithm
|
e0e8174eb133ba20931c2c7f5c67732e4cb2b703
|
[
"MIT"
] | null | null | null |
UVa Online Judge/v3/397.py
|
mjenrungrot/algorithm
|
e0e8174eb133ba20931c2c7f5c67732e4cb2b703
|
[
"MIT"
] | null | null | null |
# =============================================================================
# Author: Teerapat Jenrungrot - https://github.com/mjenrungrot/
# FileName: 397.py
# Description: UVa Online Judge - 397
# =============================================================================
def parse_number(x, offset):
output = ""
curr = offset
while curr < len(x):
if x[curr] == " ":
curr += 1
continue
if len(output) == 0 and x[curr] in "-+":
output += x[curr]
curr += 1
elif x[curr].isdigit():
output += x[curr]
curr += 1
else:
break
return str(int(output)), curr
def parse_operand(x, offset):
output = ""
curr = offset
while curr < len(x):
if x[curr] == " ":
curr += 1
continue
output += x[curr]
curr += 1
break
return output, curr
newline = False
while True:
try:
line = input()
except EOFError:
break
if newline:
print("")
newline = True
eqs, var = line.split("=")
# Parsing
parsed_eqs = []
curr = 0
x, curr = parse_number(eqs, curr)
parsed_eqs.append(x)
while curr < len(eqs):
if eqs[curr] == " ":
curr += 1
continue
x, curr = parse_operand(eqs, curr)
parsed_eqs.append(x)
x, curr = parse_number(eqs, curr)
parsed_eqs.append(x)
# Run
while True:
print("{} = {}".format(" ".join(parsed_eqs), var))
if len(parsed_eqs) == 1:
break
# check * /
passed = False
for i, token in enumerate(parsed_eqs):
if token in "*/":
lhs = int(parsed_eqs[i - 1])
rhs = int(parsed_eqs[i + 1])
if token == "*":
val = str(lhs * rhs)
else:
val = str(lhs // rhs)
del parsed_eqs[i - 1 : i + 2]
parsed_eqs.insert(i - 1, val)
passed = True
break
if passed:
continue
# check + -
for i, token in enumerate(parsed_eqs):
if token in "+-":
lhs = int(parsed_eqs[i - 1])
rhs = int(parsed_eqs[i + 1])
if token == "+":
val = str(lhs + rhs)
else:
val = str(lhs - rhs)
del parsed_eqs[i - 1 : i + 2]
parsed_eqs.insert(i - 1, val)
passed = True
break
| 23.201754
| 79
| 0.404537
|
5f3895562e2285b6e3e365dd2ae1146f7a7ecefe
| 303
|
py
|
Python
|
ui_complements/Constants.py
|
abaron10/Pathfinding_visualizer
|
9de0695bbf05d5e9e0e492c2c50a41b2cc33c9a9
|
[
"Unlicense",
"MIT"
] | 1
|
2022-02-04T12:53:00.000Z
|
2022-02-04T12:53:00.000Z
|
ui_complements/Constants.py
|
abaron10/Pathfinding_visualizer
|
9de0695bbf05d5e9e0e492c2c50a41b2cc33c9a9
|
[
"Unlicense",
"MIT"
] | null | null | null |
ui_complements/Constants.py
|
abaron10/Pathfinding_visualizer
|
9de0695bbf05d5e9e0e492c2c50a41b2cc33c9a9
|
[
"Unlicense",
"MIT"
] | null | null | null |
from enum import Enum
class Colors(Enum):
NAVY = (0, 255, 255)
GREEN = (0, 255, 0)
    BLUE = (0, 0, 255)  # was (0, 255, 0), which duplicated GREEN; assumed to be a typo
YELLOW = (255, 255, 0)
WHITE = (255, 255, 255)
BLACK = (12,54,71,255)
RED = (255, 0, 0)
ORANGE = (255, 165 ,0)
GREY = (175, 216, 248,255)
FUCSIA = (255, 0, 255)
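# Usage sketch (added for illustration): enum members expose their colour tuples
# through .value, e.g.
#   from ui_complements.Constants import Colors
#   background = Colors.BLACK.value   # (12, 54, 71, 255)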
| 23.307692
| 30
| 0.508251
|
d2fec1e15d95df3b96a4bdcfcf3ac320674f02a8
| 49,805
|
py
|
Python
|
Tests/Marketplace/upload_packs.py
|
FFrozTT/content
|
af728a40f9b51130d75768942d7eadeb6a85f257
|
[
"MIT"
] | null | null | null |
Tests/Marketplace/upload_packs.py
|
FFrozTT/content
|
af728a40f9b51130d75768942d7eadeb6a85f257
|
[
"MIT"
] | null | null | null |
Tests/Marketplace/upload_packs.py
|
FFrozTT/content
|
af728a40f9b51130d75768942d7eadeb6a85f257
|
[
"MIT"
] | 2
|
2020-12-10T12:02:45.000Z
|
2020-12-15T09:20:01.000Z
|
import json
import os
import sys
import argparse
import shutil
import uuid
import prettytable
import glob
import git
import requests
import logging
from datetime import datetime
from zipfile import ZipFile
from typing import Any, Tuple, Union
from Tests.Marketplace.marketplace_services import init_storage_client, init_bigquery_client, Pack, PackStatus, \
GCPConfig, PACKS_FULL_PATH, IGNORED_FILES, PACKS_FOLDER, IGNORED_PATHS, Metadata, CONTENT_ROOT_PATH, \
get_packs_statistics_dataframe, PACKS_RESULTS_FILE
from demisto_sdk.commands.common.tools import run_command, str2bool
from Tests.scripts.utils.log_util import install_logging
def get_packs_names(target_packs: str, previous_commit_hash: str = "HEAD^") -> set:
"""Detects and returns packs names to upload.
    If `Modified` is passed as the target_packs input, checks the git difference between two commits,
    current and previous, and greps only paths with the Packs/ prefix.
By default this function will receive `All` as target_packs and will return all packs names from content repo.
Args:
target_packs (str): csv packs names or `All` for all available packs in content
or `Modified` for only modified packs (currently not in use).
previous_commit_hash (str): the previous commit to diff with.
Returns:
set: unique collection of packs names to upload.
"""
if target_packs.lower() == "all":
if os.path.exists(PACKS_FULL_PATH):
all_packs = {p for p in os.listdir(PACKS_FULL_PATH) if p not in IGNORED_FILES}
logging.info(f"Number of selected packs to upload is: {len(all_packs)}")
# return all available packs names
return all_packs
else:
logging.error(f"Folder {PACKS_FOLDER} was not found at the following path: {PACKS_FULL_PATH}")
sys.exit(1)
elif target_packs.lower() == "modified":
cmd = f"git diff --name-only HEAD..{previous_commit_hash} | grep 'Packs/'"
modified_packs_path = run_command(cmd).splitlines()
modified_packs = {p.split('/')[1] for p in modified_packs_path if p not in IGNORED_PATHS}
logging.info(f"Number of modified packs is: {len(modified_packs)}")
# return only modified packs between two commits
return modified_packs
elif target_packs and isinstance(target_packs, str):
modified_packs = {p.strip() for p in target_packs.split(',') if p not in IGNORED_FILES}
logging.info(f"Number of selected packs to upload is: {len(modified_packs)}")
# return only packs from csv list
return modified_packs
else:
logging.critical("Not correct usage of flag -p. Please check help section of upload packs script.")
sys.exit(1)
def extract_packs_artifacts(packs_artifacts_path: str, extract_destination_path: str):
"""Extracts all packs from content pack artifact zip.
Args:
packs_artifacts_path (str): full path to content artifacts zip file.
extract_destination_path (str): full path to directory where to extract the packs.
"""
with ZipFile(packs_artifacts_path) as packs_artifacts:
packs_artifacts.extractall(extract_destination_path)
logging.info("Finished extracting packs artifacts")
def download_and_extract_index(storage_bucket: Any, extract_destination_path: str) -> Tuple[str, Any, int]:
"""Downloads and extracts index zip from cloud storage.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where index.zip is stored.
extract_destination_path (str): the full path of extract folder.
Returns:
str: extracted index folder full path.
Blob: google cloud storage object that represents index.zip blob.
str: downloaded index generation.
"""
if storage_bucket.name == GCPConfig.PRODUCTION_PRIVATE_BUCKET:
index_storage_path = os.path.join(GCPConfig.PRIVATE_BASE_PATH, f"{GCPConfig.INDEX_NAME}.zip")
else:
index_storage_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, f"{GCPConfig.INDEX_NAME}.zip")
download_index_path = os.path.join(extract_destination_path, f"{GCPConfig.INDEX_NAME}.zip")
index_blob = storage_bucket.blob(index_storage_path)
index_folder_path = os.path.join(extract_destination_path, GCPConfig.INDEX_NAME)
index_generation = 0 # Setting to 0 makes the operation succeed only if there are no live versions of the blob
if not os.path.exists(extract_destination_path):
os.mkdir(extract_destination_path)
if not index_blob.exists():
os.mkdir(index_folder_path)
logging.error(f"{storage_bucket.name} index blob does not exists")
return index_folder_path, index_blob, index_generation
index_blob.reload()
index_generation = index_blob.generation
index_blob.download_to_filename(download_index_path, if_generation_match=index_generation)
if os.path.exists(download_index_path):
with ZipFile(download_index_path, 'r') as index_zip:
index_zip.extractall(extract_destination_path)
if not os.path.exists(index_folder_path):
logging.critical(f"Failed creating {GCPConfig.INDEX_NAME} folder with extracted data.")
sys.exit(1)
os.remove(download_index_path)
logging.success(f"Finished downloading and extracting {GCPConfig.INDEX_NAME} file to "
f"{extract_destination_path}")
return index_folder_path, index_blob, index_generation
else:
logging.critical(f"Failed to download {GCPConfig.INDEX_NAME}.zip file from cloud storage.")
sys.exit(1)
def update_index_folder(index_folder_path: str, pack_name: str, pack_path: str, pack_version: str = '',
hidden_pack: bool = False) -> bool:
"""
Copies pack folder into index folder.
Args:
index_folder_path (str): full path to index folder.
pack_name (str): pack folder name to copy.
pack_path (str): pack folder full path.
pack_version (str): pack latest version.
hidden_pack (bool): whether pack is hidden/internal or regular pack.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
try:
index_folder_subdirectories = [d for d in os.listdir(index_folder_path) if
os.path.isdir(os.path.join(index_folder_path, d))]
index_pack_path = os.path.join(index_folder_path, pack_name)
metadata_files_in_index = glob.glob(f"{index_pack_path}/metadata-*.json")
new_metadata_path = os.path.join(index_pack_path, f"metadata-{pack_version}.json")
if pack_version:
# Update the latest metadata
if new_metadata_path in metadata_files_in_index:
metadata_files_in_index.remove(new_metadata_path)
# Remove old files but keep metadata files
if pack_name in index_folder_subdirectories:
for d in os.scandir(index_pack_path):
if d.path not in metadata_files_in_index:
os.remove(d.path)
# skipping index update in case hidden is set to True
if hidden_pack:
if os.path.exists(index_pack_path):
shutil.rmtree(index_pack_path) # remove pack folder inside index in case that it exists
logging.warning(f"Skipping updating {pack_name} pack files to index")
return True
# Copy new files and add metadata for latest version
for d in os.scandir(pack_path):
if not os.path.exists(index_pack_path):
os.mkdir(index_pack_path)
logging.info(f"Created {pack_name} pack folder in {GCPConfig.INDEX_NAME}")
shutil.copy(d.path, index_pack_path)
if pack_version and Pack.METADATA == d.name:
shutil.copy(d.path, new_metadata_path)
task_status = True
except Exception:
logging.exception(f"Failed in updating index folder for {pack_name} pack.")
finally:
return task_status
def clean_non_existing_packs(index_folder_path: str, private_packs: list, storage_bucket: Any) -> bool:
""" Detects packs that are not part of content repo or from private packs bucket.
    If such packs are detected, the problematic pack is deleted from the index and from the content/packs/{target_pack} path.
Args:
index_folder_path (str): full path to downloaded index folder.
private_packs (list): priced packs from private bucket.
storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where index.zip is stored.
Returns:
bool: whether cleanup was skipped or not.
"""
if ('CI' not in os.environ) or (
os.environ.get('CIRCLE_BRANCH') != 'master' and storage_bucket.name == GCPConfig.PRODUCTION_BUCKET) or (
os.environ.get('CIRCLE_BRANCH') == 'master' and storage_bucket.name not in
(GCPConfig.PRODUCTION_BUCKET, GCPConfig.CI_BUILD_BUCKET)):
logging.info("Skipping cleanup of packs in gcs.") # skipping execution of cleanup in gcs bucket
return True
public_packs_names = {p for p in os.listdir(PACKS_FULL_PATH) if p not in IGNORED_FILES}
private_packs_names = {p.get('id', '') for p in private_packs}
valid_packs_names = public_packs_names.union(private_packs_names)
# search for invalid packs folder inside index
invalid_packs_names = {(entry.name, entry.path) for entry in os.scandir(index_folder_path) if
entry.name not in valid_packs_names and entry.is_dir()}
if invalid_packs_names:
try:
logging.warning(f"Detected {len(invalid_packs_names)} non existing pack inside index, starting cleanup.")
for invalid_pack in invalid_packs_names:
invalid_pack_name = invalid_pack[0]
invalid_pack_path = invalid_pack[1]
# remove pack from index
shutil.rmtree(invalid_pack_path)
logging.warning(f"Deleted {invalid_pack_name} pack from {GCPConfig.INDEX_NAME} folder")
# important to add trailing slash at the end of path in order to avoid packs with same prefix
invalid_pack_gcs_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, invalid_pack_name, "") # by design
for invalid_blob in [b for b in storage_bucket.list_blobs(prefix=invalid_pack_gcs_path)]:
logging.warning(f"Deleted invalid {invalid_pack_name} pack under url {invalid_blob.public_url}")
invalid_blob.delete() # delete invalid pack in gcs
except Exception:
logging.exception("Failed to cleanup non existing packs.")
else:
logging.info(f"No invalid packs detected inside {GCPConfig.INDEX_NAME} folder")
return False
def upload_index_to_storage(index_folder_path: str, extract_destination_path: str, index_blob: Any,
build_number: str, private_packs: list, current_commit_hash: str,
index_generation: int, is_private: bool = False, force_upload: bool = False,
previous_commit_hash: str = None):
"""
Upload updated index zip to cloud storage.
:param index_folder_path: index folder full path.
:param extract_destination_path: extract folder full path.
:param index_blob: google cloud storage object that represents index.zip blob.
:param build_number: circleCI build number, used as an index revision.
:param private_packs: List of private packs and their price.
:param current_commit_hash: last commit hash of head.
:param index_generation: downloaded index generation.
:param is_private: Indicates if upload is private.
:param force_upload: Indicates if force upload or not.
:param previous_commit_hash: The previous commit hash to diff with.
:returns None.
"""
if force_upload:
# If we force upload we don't want to update the commit in the index.json file,
# this is to be able to identify all changed packs in the next upload
commit = previous_commit_hash
else:
# Otherwise, update the index with the current commit hash (the commit of the upload)
commit = current_commit_hash
with open(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json"), "w+") as index_file:
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime(Metadata.DATE_FORMAT),
'packs': private_packs,
'commit': commit
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(index_folder_path)
index_zip_path = shutil.make_archive(base_name=index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
index_blob.reload()
current_index_generation = index_blob.generation
index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
if is_private or current_index_generation == index_generation:
index_blob.upload_from_filename(index_zip_path)
logging.success(f"Finished uploading {GCPConfig.INDEX_NAME}.zip to storage.")
else:
logging.critical(f"Failed in uploading {GCPConfig.INDEX_NAME}, mismatch in index file generation")
logging.critical(f"Downloaded index generation: {index_generation}")
logging.critical(f"Current index generation: {current_index_generation}")
sys.exit(0)
except Exception:
logging.exception(f"Failed in uploading {GCPConfig.INDEX_NAME}.")
sys.exit(1)
finally:
shutil.rmtree(index_folder_path)
def upload_core_packs_config(storage_bucket: Any, build_number: str, index_folder_path: str):
"""Uploads corepacks.json file configuration to bucket. Corepacks file includes core packs for server installation.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded.
build_number (str): circleCI build number.
index_folder_path (str): The index folder path.
"""
core_packs_public_urls = []
found_core_packs = set()
for pack in os.scandir(index_folder_path):
if pack.is_dir() and pack.name in GCPConfig.CORE_PACKS_LIST:
pack_metadata_path = os.path.join(index_folder_path, pack.name, Pack.METADATA)
if not os.path.exists(pack_metadata_path):
logging.critical(f"{pack.name} pack {Pack.METADATA} is missing in {GCPConfig.INDEX_NAME}")
sys.exit(1)
with open(pack_metadata_path, 'r') as metadata_file:
metadata = json.load(metadata_file)
pack_current_version = metadata.get('currentVersion', Pack.PACK_INITIAL_VERSION)
core_pack_relative_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, pack.name,
pack_current_version, f"{pack.name}.zip")
core_pack_public_url = os.path.join(GCPConfig.GCS_PUBLIC_URL, storage_bucket.name, core_pack_relative_path)
if not storage_bucket.blob(core_pack_relative_path).exists():
logging.critical(f"{pack.name} pack does not exist under {core_pack_relative_path} path")
sys.exit(1)
core_packs_public_urls.append(core_pack_public_url)
found_core_packs.add(pack.name)
if len(found_core_packs) != len(GCPConfig.CORE_PACKS_LIST):
missing_core_packs = set(GCPConfig.CORE_PACKS_LIST) ^ found_core_packs
logging.critical(f"Number of defined core packs are: {len(GCPConfig.CORE_PACKS_LIST)}")
logging.critical(f"Actual number of found core packs are: {len(found_core_packs)}")
logging.critical(f"Missing core packs are: {missing_core_packs}")
sys.exit(1)
# construct core pack data with public gcs urls
core_packs_data = {
'corePacks': core_packs_public_urls,
'buildNumber': build_number
}
# upload core pack json file to gcs
core_packs_config_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, GCPConfig.CORE_PACK_FILE_NAME)
blob = storage_bucket.blob(core_packs_config_path)
blob.upload_from_string(json.dumps(core_packs_data, indent=4))
logging.success(f"Finished uploading {GCPConfig.CORE_PACK_FILE_NAME} to storage.")
def upload_id_set(storage_bucket: Any, id_set_local_path: str = None):
"""
Uploads the id_set.json artifact to the bucket.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded.
id_set_local_path: path to the id_set.json file
"""
if not id_set_local_path:
logging.info("Skipping upload of id set to gcs.")
return
id_set_gcs_path = os.path.join(os.path.dirname(GCPConfig.STORAGE_BASE_PATH), 'id_set.json')
blob = storage_bucket.blob(id_set_gcs_path)
with open(id_set_local_path, mode='r') as f:
blob.upload_from_file(f)
logging.success("Finished uploading id_set.json to storage.")
def _build_summary_table(packs_input_list: list, include_pack_status: bool = False) -> Any:
"""Build summary table from pack list
Args:
packs_input_list (list): list of Packs
include_pack_status (bool): whether pack includes status
Returns:
PrettyTable: table with upload result of packs.
"""
table_fields = ["Index", "Pack ID", "Pack Display Name", "Latest Version", "Aggregated Pack Versions"]
if include_pack_status:
table_fields.append("Status")
table = prettytable.PrettyTable()
table.field_names = table_fields
for index, pack in enumerate(packs_input_list, start=1):
pack_status_message = PackStatus[pack.status].value
row = [index, pack.name, pack.display_name, pack.latest_version,
pack.aggregation_str if pack.aggregated and pack.aggregation_str else "False"]
if include_pack_status:
row.append(pack_status_message)
table.add_row(row)
return table
def build_summary_table_md(packs_input_list: list, include_pack_status: bool = False) -> str:
"""Build markdown summary table from pack list
Args:
packs_input_list (list): list of Packs
include_pack_status (bool): whether pack includes status
Returns:
Markdown table: table with upload result of packs.
"""
table_fields = ["Index", "Pack ID", "Pack Display Name", "Latest Version", "Status"] if include_pack_status \
else ["Index", "Pack ID", "Pack Display Name", "Latest Version"]
table = ['|', '|']
for key in table_fields:
table[0] = f'{table[0]} {key} |'
table[1] = f'{table[1]} :- |'
for index, pack in enumerate(packs_input_list):
pack_status_message = PackStatus[pack.status].value if include_pack_status else ''
row = [index, pack.name, pack.display_name, pack.latest_version, pack_status_message] if include_pack_status \
else [index, pack.name, pack.display_name, pack.latest_version]
row_hr = '|'
for _value in row:
row_hr = f'{row_hr} {_value}|'
table.append(row_hr)
return '\n'.join(table)
def load_json(file_path: str) -> dict:
""" Reads and loads json file.
Args:
file_path (str): full path to json file.
Returns:
dict: loaded json file.
"""
try:
if file_path:
with open(file_path, 'r') as json_file:
result = json.load(json_file)
else:
result = {}
return result
except json.decoder.JSONDecodeError:
return {}
def get_content_git_client(content_repo_path: str):
""" Initializes content repo client.
Args:
content_repo_path (str): content repo full path
Returns:
git.repo.base.Repo: content repo object.
"""
return git.Repo(content_repo_path)
def get_recent_commits_data(content_repo: Any, index_folder_path: str, is_bucket_upload_flow: bool,
is_private_build: bool = False, circle_branch: str = "master"):
""" Returns recent commits hashes (of head and remote master)
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path (str): the path to the local index folder
is_bucket_upload_flow (bool): indicates whether its a run of bucket upload flow or regular build
is_private_build (bool): indicates whether its a run of private build or not
circle_branch (str): CircleCi branch of current build
Returns:
str: last commit hash of head.
str: previous commit depending on the flow the script is running
"""
return content_repo.head.commit.hexsha, get_previous_commit(content_repo, index_folder_path, is_bucket_upload_flow,
is_private_build, circle_branch)
def update_index_with_priced_packs(private_storage_bucket: Any, extract_destination_path: str,
index_folder_path: str, pack_names: set) \
-> Tuple[Union[list, list], str, Any]:
""" Updates index with priced packs and returns list of priced packs data.
Args:
private_storage_bucket (google.cloud.storage.bucket.Bucket): google storage private bucket.
extract_destination_path (str): full path to extract directory.
index_folder_path (str): downloaded index folder directory path.
pack_names (set): Collection of pack names.
Returns:
        list: priced packs from the private bucket.
        str: path to the extracted private index folder.
        Blob: google cloud storage blob of the private index.
"""
private_index_path = ""
private_packs = []
try:
(private_index_path, private_index_blob, _) = \
download_and_extract_index(private_storage_bucket,
os.path.join(extract_destination_path,
'private'))
logging.info("get_private_packs")
private_packs = get_private_packs(private_index_path, pack_names,
extract_destination_path)
logging.info("add_private_packs_to_index")
add_private_packs_to_index(index_folder_path, private_index_path)
logging.info("Finished updating index with priced packs")
except Exception:
logging.exception('Could not add private packs to the index.')
finally:
shutil.rmtree(os.path.dirname(private_index_path), ignore_errors=True)
return private_packs, private_index_path, private_index_blob
def get_private_packs(private_index_path: str, pack_names: set = set(),
extract_destination_path: str = '') -> list:
"""
Gets a list of private packs.
:param private_index_path: Path to where the private index is located.
:param pack_names: Collection of pack names.
:param extract_destination_path: Path to where the files should be extracted to.
:return: List of dicts containing pack metadata information.
"""
try:
metadata_files = glob.glob(f"{private_index_path}/**/metadata.json")
except Exception:
logging.exception(f'Could not find metadata files in {private_index_path}.')
return []
if not metadata_files:
logging.warning(f'No metadata files found in [{private_index_path}]')
private_packs = []
for metadata_file_path in metadata_files:
try:
with open(metadata_file_path, "r") as metadata_file:
metadata = json.load(metadata_file)
pack_id = metadata.get('id')
is_changed_private_pack = pack_id in pack_names
if is_changed_private_pack: # Should take metadata from artifacts.
with open(os.path.join(extract_destination_path, pack_id, "pack_metadata.json"),
"r") as metadata_file:
metadata = json.load(metadata_file)
if metadata:
private_packs.append({
'id': metadata.get('id') if not is_changed_private_pack else metadata.get('name'),
'price': metadata.get('price'),
'vendorId': metadata.get('vendorId'),
'vendorName': metadata.get('vendorName'),
})
except ValueError:
logging.exception(f'Invalid JSON in the metadata file [{metadata_file_path}].')
return private_packs
def add_private_packs_to_index(index_folder_path: str, private_index_path: str):
""" Add the private packs to the index folder.
Args:
index_folder_path: The index folder path.
private_index_path: The path for the index of the private packs.
"""
for d in os.scandir(private_index_path):
if os.path.isdir(d.path):
update_index_folder(index_folder_path, d.name, d.path)
def check_if_index_is_updated(index_folder_path: str, content_repo: Any, current_commit_hash: str,
                              previous_commit_hash: str, storage_bucket: Any):
    """ Checks the commit hash stored in index.json and compares it to the current commit hash. If no packs folders were
    added/modified/deleted, all other steps are skipped.
Args:
index_folder_path (str): index folder full path.
content_repo (git.repo.base.Repo): content repo object.
current_commit_hash (str): last commit hash of head.
previous_commit_hash (str): the previous commit to diff with
storage_bucket: public storage bucket.
"""
skipping_build_task_message = "Skipping Upload Packs To Marketplace Storage Step."
try:
if storage_bucket.name not in (GCPConfig.CI_BUILD_BUCKET, GCPConfig.PRODUCTION_BUCKET):
logging.info("Skipping index update check in non production/build bucket")
return
if not os.path.exists(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json")):
# will happen only in init bucket run
logging.warning(f"{GCPConfig.INDEX_NAME}.json not found in {GCPConfig.INDEX_NAME} folder")
return
with open(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json")) as index_file:
index_json = json.load(index_file)
index_commit_hash = index_json.get('commit', previous_commit_hash)
try:
index_commit = content_repo.commit(index_commit_hash)
except Exception:
# not updated build will receive this exception because it is missing more updated commit
logging.exception(f"Index is already updated. {skipping_build_task_message}")
sys.exit()
current_commit = content_repo.commit(current_commit_hash)
if current_commit.committed_datetime <= index_commit.committed_datetime:
logging.warning(f"Current commit {current_commit.hexsha} committed time: {current_commit.committed_datetime}")
logging.warning(f"Index commit {index_commit.hexsha} committed time: {index_commit.committed_datetime}")
logging.warning("Index is already updated.")
logging.warning(skipping_build_task_message)
sys.exit()
for changed_file in current_commit.diff(index_commit):
if changed_file.a_path.startswith(PACKS_FOLDER):
logging.info(f"Found changed packs between index commit {index_commit.hexsha} and {current_commit.hexsha}")
break
else:
logging.warning(f"No changes found between index commit {index_commit.hexsha} and {current_commit.hexsha}")
logging.warning(skipping_build_task_message)
sys.exit()
except Exception:
logging.exception("Failed in checking status of index")
sys.exit(1)
def print_packs_summary(successful_packs: list, skipped_packs: list, failed_packs: list,
fail_build: bool = True):
"""Prints summary of packs uploaded to gcs.
Args:
successful_packs (list): list of packs that were successfully uploaded.
skipped_packs (list): list of packs that were skipped during upload.
failed_packs (list): list of packs that were failed during upload.
fail_build (bool): indicates whether to fail the build upon failing pack to upload or not
"""
logging.info(
f"""\n
------------------------------------------ Packs Upload Summary ------------------------------------------
Total number of packs: {len(successful_packs + skipped_packs + failed_packs)}
----------------------------------------------------------------------------------------------------------""")
if successful_packs:
successful_packs_table = _build_summary_table(successful_packs)
logging.success(f"Number of successful uploaded packs: {len(successful_packs)}")
logging.success(f"Uploaded packs:\n{successful_packs_table}")
with open('pack_list.txt', 'w') as f:
f.write(successful_packs_table.get_string())
if skipped_packs:
skipped_packs_table = _build_summary_table(skipped_packs, include_pack_status=True)
logging.warning(f"Number of skipped packs: {len(skipped_packs)}")
logging.warning(f"Skipped packs:\n{skipped_packs_table}")
if failed_packs:
failed_packs_table = _build_summary_table(failed_packs, include_pack_status=True)
logging.critical(f"Number of failed packs: {len(failed_packs)}")
logging.critical(f"Failed packs:\n{failed_packs_table}")
if fail_build:
            # The bucket upload flow passes fail_build=False so that a failed pack upload does not fail the Prepare Content step.
sys.exit(1)
    # for external pull requests - when there are no failed packs, add the build summary to the pull request
branch_name = os.environ.get('CIRCLE_BRANCH')
if branch_name and branch_name.startswith('pull/'):
successful_packs_table = build_summary_table_md(successful_packs)
build_num = os.environ['CIRCLE_BUILD_NUM']
bucket_path = f'https://console.cloud.google.com/storage/browser/' \
f'marketplace-ci-build/content/builds/{branch_name}/{build_num}'
pr_comment = f'Number of successful uploaded packs: {len(successful_packs)}\n' \
f'Uploaded packs:\n{successful_packs_table}\n\n' \
f'Browse to the build bucket with this address:\n{bucket_path}'
add_pr_comment(pr_comment)
def option_handler():
"""Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
# disable-secrets-detection-start
parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
parser.add_argument('-e', '--extract_path', help="Full path of folder to extract wanted packs", required=True)
parser.add_argument('-b', '--bucket_name', help="Storage bucket name", required=True)
parser.add_argument('-s', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
parser.add_argument('-i', '--id_set_path', help="The full path of id_set.json", required=False)
parser.add_argument('-d', '--pack_dependencies', help="Full path to pack dependencies json file.", required=False)
parser.add_argument('-p', '--pack_names',
help=("Target packs to upload to gcs. Optional values are: `All`, "
"`Modified` or csv list of packs "
"Default is set to `All`"),
required=False, default="All")
parser.add_argument('-n', '--ci_build_number',
help="CircleCi build number (will be used as hash revision at index file)", required=False)
parser.add_argument('-o', '--override_all_packs', help="Override all existing packs in cloud storage",
type=str2bool, default=False, required=True)
parser.add_argument('-k', '--key_string', help="Base64 encoded signature key used for signing packs.",
required=False)
parser.add_argument('-sb', '--storage_base_path', help="Storage base path of the directory to upload to.",
required=False)
parser.add_argument('-rt', '--remove_test_playbooks', type=str2bool,
help='Should remove test playbooks from content packs or not.', default=True)
parser.add_argument('-bu', '--bucket_upload', help='is bucket upload build?', type=str2bool, required=True)
parser.add_argument('-pb', '--private_bucket_name', help="Private storage bucket name", required=False)
parser.add_argument('-c', '--circle_branch', help="CircleCi branch of current build", required=True)
parser.add_argument('-f', '--force_upload', help="is force upload build?", type=str2bool, required=True)
# disable-secrets-detection-end
return parser.parse_args()
def add_pr_comment(comment: str):
"""Add comment to the pull request.
Args:
comment (string): The comment text.
"""
token = os.environ['CONTENT_GITHUB_TOKEN']
branch_name = os.environ['CIRCLE_BRANCH']
sha1 = os.environ['CIRCLE_SHA1']
query = f'?q={sha1}+repo:demisto/content+is:pr+is:open+head:{branch_name}+is:open'
url = 'https://api.github.com/search/issues'
headers = {'Authorization': 'Bearer ' + token}
try:
res = requests.get(url + query, headers=headers, verify=False)
res = handle_github_response(res)
if res and res.get('total_count', 0) == 1:
issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None
if issue_url:
res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
handle_github_response(res)
else:
logging.warning(
                f'Add pull request comment failed: There is not exactly one open pull request for branch {branch_name}.')
except Exception:
logging.exception('Add pull request comment failed.')
def handle_github_response(response: json) -> dict:
"""
Handles the response from the GitHub server after making a request.
:param response: Response from the server.
:return: The returned response.
"""
res_dict = response.json()
if not res_dict.get('ok'):
logging.warning(f'Add pull request comment failed: {res_dict.get("message")}')
return res_dict
def get_previous_commit(content_repo, index_folder_path, is_bucket_upload_flow, is_private_build, circle_branch):
""" If running in bucket upload workflow we want to get the commit in the index which is the index
We've last uploaded to production bucket. Otherwise, we are in a commit workflow and the diff should be from the
head of origin/master
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path (str): the path to the local index folder
        is_bucket_upload_flow (bool): indicates whether it's a run of the bucket upload flow or a regular build
        is_private_build (bool): indicates whether it's a run of a private build or not
circle_branch (str): CircleCi branch of current build
Returns:
str: previous commit depending on the flow the script is running
"""
if is_bucket_upload_flow:
return get_last_upload_commit_hash(content_repo, index_folder_path)
elif is_private_build:
previous_master_head_commit = content_repo.commit('origin/master~1').hexsha
logging.info(f"Using origin/master HEAD~1 commit hash {previous_master_head_commit} to diff with.")
return previous_master_head_commit
else:
if circle_branch == 'master':
head_str = "HEAD~1"
            # if circle branch is master then the current commit is origin/master HEAD, so we need to diff with HEAD~1
previous_master_head_commit = content_repo.commit('origin/master~1').hexsha
else:
head_str = "HEAD"
# else we are on a regular branch and the diff should be done with origin/master HEAD
previous_master_head_commit = content_repo.commit('origin/master').hexsha
logging.info(f"Using origin/master {head_str} commit hash {previous_master_head_commit} to diff with.")
return previous_master_head_commit
def get_last_upload_commit_hash(content_repo, index_folder_path):
"""
Returns the last origin/master commit hash that was uploaded to the bucket
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path: The path to the index folder
Returns:
The commit hash
"""
inner_index_json_path = os.path.join(index_folder_path, f'{GCPConfig.INDEX_NAME}.json')
if not os.path.exists(inner_index_json_path):
logging.critical(f"{GCPConfig.INDEX_NAME}.json not found in {GCPConfig.INDEX_NAME} folder")
sys.exit(1)
else:
inner_index_json_file = load_json(inner_index_json_path)
if 'commit' in inner_index_json_file:
last_upload_commit_hash = inner_index_json_file['commit']
logging.info(f"Retrieved the last commit that was uploaded to production: {last_upload_commit_hash}")
else:
logging.critical(f"No commit field in {GCPConfig.INDEX_NAME}.json, content: {str(inner_index_json_file)}")
sys.exit(1)
try:
last_upload_commit = content_repo.commit(last_upload_commit_hash).hexsha
logging.info(f"Using commit hash {last_upload_commit} from index.json to diff with.")
return last_upload_commit
except Exception as e:
logging.critical(f'Commit {last_upload_commit_hash} in {GCPConfig.INDEX_NAME}.json does not exist in content '
f'repo. Additional info:\n {e}')
sys.exit(1)
def get_packs_summary(packs_list):
""" Returns the packs list divided into 3 lists by their status
Args:
packs_list (list): The full packs list
Returns: 3 lists of packs - successful_packs, skipped_packs & failed_packs
"""
successful_packs = [pack for pack in packs_list if pack.status == PackStatus.SUCCESS.name]
skipped_packs = [pack for pack in packs_list if
pack.status == PackStatus.PACK_ALREADY_EXISTS.name
or pack.status == PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name]
failed_packs = [pack for pack in packs_list if pack not in successful_packs and pack not in skipped_packs]
return successful_packs, skipped_packs, failed_packs
def store_successful_and_failed_packs_in_ci_artifacts(circle_artifacts_path, successful_packs, failed_packs):
""" Saves successful and failed packs to circle ci env - to be used in Upload Packs To Marketplace job (Bucket Upload flow)
Args:
circle_artifacts_path (str): The path to the circle artifacts dir path
failed_packs: The list of all failed packs
successful_packs: The list of all successful packs
"""
packs_results = dict()
if failed_packs:
failed_packs_dict = {
"failed_packs": {
pack.name: {
"status": PackStatus[pack.status].value,
"aggregated": pack.aggregation_str if pack.aggregated and pack.aggregation_str else "False"
} for pack in failed_packs
}
}
packs_results.update(failed_packs_dict)
if successful_packs:
successful_packs_dict = {
"successful_packs": {
pack.name: {
"status": PackStatus[pack.status].value,
"aggregated": pack.aggregation_str if pack.aggregated and pack.aggregation_str else "False"
} for pack in successful_packs
}
}
packs_results.update(successful_packs_dict)
if packs_results:
with open(os.path.join(circle_artifacts_path, PACKS_RESULTS_FILE), "w") as f:
f.write(json.dumps(packs_results, indent=4))
def main():
install_logging('Prepare Content Packs For Testing.log')
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
id_set_path = option.id_set_path
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
circle_branch = option.circle_branch
force_upload = option.force_upload
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
if storage_base_path:
GCPConfig.STORAGE_BASE_PATH = storage_base_path
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, circle_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket)
# google cloud bigquery client initialized
bq_client = init_bigquery_client(service_account)
packs_statistic_df = get_packs_statistics_dataframe(bq_client)
if private_bucket_name: # Add private packs to the index
private_storage_bucket = storage_client.bucket(private_bucket_name)
private_packs, _, _ = update_index_with_priced_packs(private_storage_bucket,
extract_destination_path,
index_folder_path, pack_names)
else: # skipping private packs
logging.debug("Skipping index update of priced packs")
private_packs = []
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)
# starting iteration over packs
for pack in packs_list:
task_status, user_metadata = pack.load_user_metadata()
if not task_status:
            pack.status = PackStatus.FAILED_LOADING_USER_METADATA.name
pack.cleanup()
continue
task_status, pack_content_items = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status, integration_images = pack.upload_integration_images(storage_bucket)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status, author_image = pack.upload_author_image(storage_bucket)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status = pack.format_metadata(user_metadata=user_metadata, pack_content_items=pack_content_items,
integration_images=integration_images, author_image=author_image,
index_folder_path=index_folder_path,
packs_dependencies_mapping=packs_dependencies_mapping,
build_number=build_number, commit_hash=current_commit_hash,
packs_statistic_df=packs_statistic_df)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
            pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS.name
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
task_status, pack_was_modified = pack.detect_modified(content_repo, index_folder_path, current_commit_hash,
previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
(task_status, skipped_pack_uploading, full_pack_path) = \
pack.upload_to_storage(zip_pack_path, pack.latest_version,
storage_bucket, override_all_packs
or pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
        # if the pack already exists in the cloud storage path and in the index, skip the remaining steps
if skipped_pack_uploading and exists_in_index:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
upload_core_packs_config(storage_bucket, build_number, index_folder_path)
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash)
# upload id_set.json to bucket
upload_id_set(storage_bucket, id_set_path)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
store_successful_and_failed_packs_in_ci_artifacts(os.path.dirname(packs_artifacts_path), successful_packs,
failed_packs)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
if __name__ == '__main__':
main()
| 45.236149
| 127
| 0.671419
|
7155ce04d9055a84f25945caa5c668a267cb8c3d
| 2,542
|
py
|
Python
|
smartmemorizer/word/models.py
|
younseunghyun/smart-memorizer
|
28b12c03e04df4d683c7af0afadd6493a32daebe
|
[
"BSD-3-Clause"
] | null | null | null |
smartmemorizer/word/models.py
|
younseunghyun/smart-memorizer
|
28b12c03e04df4d683c7af0afadd6493a32daebe
|
[
"BSD-3-Clause"
] | null | null | null |
smartmemorizer/word/models.py
|
younseunghyun/smart-memorizer
|
28b12c03e04df4d683c7af0afadd6493a32daebe
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""word models."""
import datetime as dt
from smartmemorizer.database import Column, Model, SurrogatePK, db
from sqlalchemy import func
class Word(SurrogatePK, Model):
"""A word of the app."""
__tablename__ = 'words'
__table_args__ = {'sqlite_autoincrement': True}
index = Column(db.Integer, autoincrement=True)
username = Column(db.String(80), db.ForeignKey('users.username'), unique=False)
group = Column(db.String(80), unique=False, nullable=False)
word = Column(db.String(80), unique=False, nullable=False)
mean = Column(db.String(80), unique=False, nullable=False)
created_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
error_count = Column(db.Integer, nullable=True)
def __init__(self, username, group, word, mean):
"""Create instance."""
if db.session.query(func.count('*')).\
select_from(Word_book).\
filter(Word_book.group == group,
Word_book.username == username).\
distinct().scalar() == 0:
Word_book.create(username=username, group=group, description='')
index = db.session.query(Word).count() + 1
db.Model.__init__(self,index=index, username=username, group=group, word=word, mean=mean, error_count=0)
def increase_error_count(self):
if self.error_count is None:
self.error_count = 0
self.error_count += 1
self.save()
@property
def full_name(self):
"""Full user name."""
return '{0} {1}'.format(self.first_name, self.last_name)
def __repr__(self):
"""Represent instance as a unique string."""
        return '<Word({username!r})>'.format(username=self.username)
class Word_book(SurrogatePK, Model):
"""A word book of the app"""
__tablename__ = 'word_books'
username = Column(db.String(80), db.ForeignKey('users.username'), unique=False)
group = Column(db.String(80), unique=False, nullable=False)
description = Column(db.String(200), unique=False, nullable=True)
def __init__(self, username, group, description, **kwargs):
db.Model.__init__(self, username=username, group=group, description=description, **kwargs)
def __repr__(self):
"""Represent instance as a unique string."""
        return '<Word_book({username!r}, {group!r})>'.format(username=self.username, group=self.group)
def modify_description(self, description):
self.description = description
self.save()
| 35.802817
| 112
| 0.651456
|
032d9be304ecfe223d64836f9d70f7c1cf07ef55
| 1,140
|
py
|
Python
|
plantcv/plantcv/get_nir.py
|
Howzit123/plantcv
|
b4ff6ad765da36353f40827ce3816b33d1d3596a
|
[
"MIT"
] | 2
|
2021-08-20T14:56:48.000Z
|
2021-08-24T23:12:56.000Z
|
plantcv/plantcv/get_nir.py
|
Howzit123/plantcv
|
b4ff6ad765da36353f40827ce3816b33d1d3596a
|
[
"MIT"
] | null | null | null |
plantcv/plantcv/get_nir.py
|
Howzit123/plantcv
|
b4ff6ad765da36353f40827ce3816b33d1d3596a
|
[
"MIT"
] | 1
|
2021-06-15T15:01:46.000Z
|
2021-06-15T15:01:46.000Z
|
# Find NIR image
import os
import re
import numpy as np
def get_nir(path, filename):
"""Find a corresponding NIR image from the same snapshot as the VIS image.
Inputs:
path = path to vis image
filename = vis image file name
Returns:
nirpath = NIR image filename and path
:param path: str
:param filename: str
:return nirpath: str
"""
visname = filename.split("_")
allfiles = np.array(os.listdir(path))
nirfiles = []
cam = visname[1].upper()
if cam == "SV":
angle = visname[2]
for n in allfiles:
if re.search("NIR", n) is not None:
nirfiles.append(n)
if cam == "TV":
for n in nirfiles:
if re.search("TV", n) is not None:
nirpath = os.path.join(str(path), str(n))
if cam == "SV":
for n in nirfiles:
if re.search("SV", n) is not None:
nsplit = n.split("_")
exangle = '\\b' + str(angle) + '\\b'
if re.search(exangle, nsplit[2]) is not None:
nirpath = os.path.join(str(path), str(n))
return nirpath
| 22.8
| 78
| 0.539474
|
28c62e5988770b75878d6c44a3649136def5edcc
| 6,185
|
py
|
Python
|
tests/dot11/test_Dot11HierarchicalUpdate.py
|
kamnsv/impacket
|
83a581e4ba0cb3b7ba5dfa3018b87f9bf1a2cb58
|
[
"Apache-1.1"
] | 6,612
|
2018-10-10T22:45:11.000Z
|
2022-03-31T18:13:01.000Z
|
tests/dot11/test_Dot11HierarchicalUpdate.py
|
kamnsv/impacket
|
83a581e4ba0cb3b7ba5dfa3018b87f9bf1a2cb58
|
[
"Apache-1.1"
] | 703
|
2018-10-11T11:38:30.000Z
|
2022-03-31T14:59:22.000Z
|
tests/dot11/test_Dot11HierarchicalUpdate.py
|
kamnsv/impacket
|
83a581e4ba0cb3b7ba5dfa3018b87f9bf1a2cb58
|
[
"Apache-1.1"
] | 2,172
|
2018-10-11T10:51:26.000Z
|
2022-03-31T04:45:49.000Z
|
#!/usr/bin/env python
# Impacket - Collection of Python classes for working with network protocols.
#
# SECUREAUTH LABS. Copyright (C) 2021 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# sorry, this is very ugly, but I'm in python 2.5
import sys
sys.path.insert(0,"../..")
from impacket.dot11 import ProtocolPacket
import unittest
class TestPacket(ProtocolPacket):
def __init__(self, aBuffer = None):
header_size = 7
tail_size = 5
ProtocolPacket.__init__(self, header_size,tail_size)
if(aBuffer):
self.load_packet(aBuffer)
class TestDot11HierarchicalUpdate(unittest.TestCase):
def setUp(self):
self.rawpacket1 = b"" \
b"Header1"\
b"Body1"\
b"Tail1"
self.rawpacket2 = b"" \
b"Header2"+\
self.rawpacket1+ \
b"Tail2"
self.rawpacket3 = b"" \
b"Header3"+\
self.rawpacket2+ \
b"Tail3"
self.packet1=TestPacket(self.rawpacket1)
self.packet2=TestPacket(self.rawpacket2)
self.packet2.contains(self.packet1)
self.packet3=TestPacket(self.rawpacket3)
self.packet3.contains(self.packet2)
def test_01_StartupPacketsStringTest(self):
"ProtocolPacket - get_packet initial string test"
self.assertEqual(self.packet1.get_packet(), b"Header1Body1Tail1")
self.assertEqual(self.packet2.get_packet(), b"Header2Header1Body1Tail1Tail2")
self.assertEqual(self.packet3.get_packet(), b"Header3Header2Header1Body1Tail1Tail2Tail3")
def test_02_StartupPacketsSizeTest(self):
"ProtocolPacket - Initial size getters test"
self.assertEqual(self.packet1.get_size(), 7+5+5)
self.assertEqual(self.packet1.get_header_size(), 7)
self.assertEqual(self.packet1.get_body_size(), 5)
self.assertEqual(self.packet1.get_tail_size(), 5)
self.assertEqual(self.packet2.get_size(), 7+ (7+5+5) + 5)
self.assertEqual(self.packet2.get_header_size(), 7)
self.assertEqual(self.packet2.get_body_size(), 7+5+5)
self.assertEqual(self.packet2.get_tail_size(), 5)
self.assertEqual(self.packet3.get_size(), 7+ (7+ (7+5+5) +5) +5 )
self.assertEqual(self.packet3.get_header_size(), 7)
self.assertEqual(self.packet3.get_body_size(), 7+ 7+5+5 +5)
self.assertEqual(self.packet3.get_tail_size(), 5)
def test_03_ChildModificationTest(self):
"ProtocolPacket - get_packet hierarchical update test"
self.packet1.load_body(b"**NewBody**")
self.assertEqual(self.packet1.get_packet(), b"Header1**NewBody**Tail1")
self.assertEqual(self.packet2.get_packet(), b"Header2Header1**NewBody**Tail1Tail2")
self.assertEqual(self.packet3.get_packet(), b"Header3Header2Header1**NewBody**Tail1Tail2Tail3")
def test_04_ChildModificationTest(self):
"ProtocolPacket - size getters hierarchical update test"
self.packet1.load_body(b"**NewBody**")
#self.packet1 => "Header1**NewBody**Tail1"
#self.packet2 => "Header2Header1**NewBody**Tail1Tail2"
#self.packet3 => "Header3Header2Header1**NewBody**Tail1Tail2Tail3"
self.assertEqual(self.packet1.get_size(), 7+11+5 )
self.assertEqual(self.packet1.get_header_size(), 7)
self.assertEqual(self.packet1.get_body_size(), 11)
self.assertEqual(self.packet1.get_tail_size(), 5)
self.assertEqual(self.packet2.get_size(), 7+ (7+11+5) +5 )
self.assertEqual(self.packet2.get_header_size(), 7)
self.assertEqual(self.packet2.get_body_size(), 7+11+5)
self.assertEqual(self.packet2.get_tail_size(), 5)
self.assertEqual(self.packet3.get_size(), 7+ (7+ (7+11+5) +5) +5 )
self.assertEqual(self.packet3.get_header_size(), 7)
self.assertEqual(self.packet3.get_body_size(), 7+ (7+11+5) +5)
self.assertEqual(self.packet3.get_tail_size(), 5)
def test_05_ChildModificationTest(self):
"ProtocolPacket - body packet hierarchical update test"
self.packet1.load_body(b"**NewBody**")
self.assertEqual(self.packet1.body.get_buffer_as_string(), b"**NewBody**")
self.assertEqual(self.packet2.body.get_buffer_as_string(), b"Header1**NewBody**Tail1")
self.assertEqual(self.packet3.body.get_buffer_as_string(), b"Header2Header1**NewBody**Tail1Tail2")
def test_06_ChildModificationTest(self):
"ProtocolPacket - get_body_as_string packet hierarchical update test"
self.packet1.load_body(b"**NewBody**")
self.assertEqual(self.packet1.get_body_as_string(), b"**NewBody**")
self.assertEqual(self.packet2.get_body_as_string(), b"Header1**NewBody**Tail1")
self.assertEqual(self.packet3.get_body_as_string(), b"Header2Header1**NewBody**Tail1Tail2")
def test_07_ChildModificationTest(self):
"ProtocolPacket - load_body child hierarchy update test"
self.assertEqual(self.packet1.parent(), self.packet2)
self.assertEqual(self.packet2.parent(), self.packet3)
self.assertEqual(self.packet3.child(), self.packet2)
self.assertEqual(self.packet2.child(), self.packet1)
self.packet2.load_body(b"Header1**NewBody**Tail1")
self.assertEqual(self.packet1.parent(), None)
self.assertEqual(self.packet2.parent(), self.packet3)
self.assertEqual(self.packet3.child(), self.packet2)
self.assertEqual(self.packet2.child(), None)
self.assertEqual(self.packet1.body.get_buffer_as_string(), b"Body1")
self.assertEqual(self.packet2.body.get_buffer_as_string(), b"Header1**NewBody**Tail1")
self.assertEqual(self.packet3.body.get_buffer_as_string(), b"Header2Header1**NewBody**Tail1Tail2")
suite = unittest.TestLoader().loadTestsFromTestCase(TestDot11HierarchicalUpdate)
unittest.main(defaultTest='suite')
| 44.496403
| 106
| 0.671625
|
06f301dd737987eeacd50c9b9296d4455961ace6
| 2,867
|
py
|
Python
|
tests/test_positional.py
|
Americanwallace0/NMTGMinor-1
|
f932c15817425f0f113011d00e681091461fc4f7
|
[
"MIT"
] | null | null | null |
tests/test_positional.py
|
Americanwallace0/NMTGMinor-1
|
f932c15817425f0f113011d00e681091461fc4f7
|
[
"MIT"
] | null | null | null |
tests/test_positional.py
|
Americanwallace0/NMTGMinor-1
|
f932c15817425f0f113011d00e681091461fc4f7
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from nmtg.models.transformer.transformer import Transformer
from nmtg.modules.positional_encoding import SinusoidalPositionalEncoding
from onmt.modules.Transformer.Layers import PositionalEncoding
def make_mask_random(size, fill):
total = int(np.prod(size))
ones = int(fill * total)
mask = torch.cat([torch.ones(ones), torch.zeros(total - ones)]).byte()
return mask[torch.randperm(total)].view(*size)
def make_mask_seq(size, fill):
maxlen = size[1]
avg_len = int(fill * maxlen)
lens = torch.randint(avg_len - 1, avg_len + 2, (size[0],))
return sequence_mask(lens, maxlen)
def sequence_mask(sequence_length, max_len=None):
if max_len is None:
max_len = sequence_length.data.max()
batch_size = sequence_length.size(0)
seq_range = torch.arange(0, max_len).long()
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
if sequence_length.is_cuda:
seq_range_expand = seq_range_expand.cuda()
seq_length_expand = (sequence_length.unsqueeze(1)
.expand_as(seq_range_expand))
return seq_range_expand < seq_length_expand
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--cuda', action='store_true')
args = parser.parse_args()
cuda = args.cuda
quan_encoding = PositionalEncoding(512, len_max=512)
felix_encoding = SinusoidalPositionalEncoding(512, True, 512)
inputs = torch.zeros(50, 60, 512)
mask = make_mask_seq((50, 60), .9).eq_(0)
if cuda:
quan_encoding.cuda()
felix_encoding.cuda()
inputs = inputs.cuda()
mask = mask.cuda()
# correctness
quan_output = quan_encoding(inputs)
felix_output = felix_encoding(inputs)
if not torch.allclose(felix_output, quan_output):
print(felix_output[0, :5, :10])
print(quan_output[0, :5, :10])
else:
print("Tensors match")
# masked_indices = torch.nonzero(mask.view(-1)).squeeze(1)
# felix_output2 = felix_encoding(inputs, mask).view(-1).index_select(0, masked_indices)
# quan_output2 = quan_output.view(-1).index_select(0, masked_indices)
#
# if not torch.allclose(felix_output2, quan_output2):
# print(felix_output2[0, :5, :10])
# print(quan_output2[0, :5, :10])
# else:
# print("Tensors match")
# speed
repeats = (5, 10)
quan_command = 'quan_encoding(inputs)'
felix_command = 'felix_encoding(inputs)'
if cuda:
repeats = (10, 100)
torch.cuda.synchronize()
quan_command += '; torch.cuda.synchronize()'
felix_command += '; torch.cuda.synchronize()'
import timeit
time = min(timeit.Timer(quan_command, globals=globals()).repeat(*repeats))
print("Quan: {:.3f}ms".format(time * 1000 / repeats[1]))
time = min(timeit.Timer(felix_command, globals=globals()).repeat(*repeats))
print("Felix: {:.3f}ms".format(time * 1000 / repeats[1]))
| 31.163043
| 87
| 0.716777
|
c223f51a0afd073b8205d04565c71942b60f7fda
| 444
|
py
|
Python
|
support_files/scraping/entries/proj_1929/proj_1929/pipelines.py
|
miccaldas/new_rss
|
9580887ac44b5c3e4c4ed5045478f2c7fef36afe
|
[
"MIT"
] | null | null | null |
support_files/scraping/entries/proj_1929/proj_1929/pipelines.py
|
miccaldas/new_rss
|
9580887ac44b5c3e4c4ed5045478f2c7fef36afe
|
[
"MIT"
] | null | null | null |
support_files/scraping/entries/proj_1929/proj_1929/pipelines.py
|
miccaldas/new_rss
|
9580887ac44b5c3e4c4ed5045478f2c7fef36afe
|
[
"MIT"
] | null | null | null |
import scrapy
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class Proj1929Pipeline:
def process_item(self, item, spider):
image_urls = scrapy.Field()
images = scrapy.Field()
return item
| 26.117647
| 66
| 0.738739
|
c98f13084ddd426240081c1a51be227f8b889a7d
| 2,898
|
py
|
Python
|
scripts/data/create_prompts_dataset.py
|
allenai/real-toxicity-prompts
|
dd44ab77ed8bb36d6a62f8205768fda8382eb7ba
|
[
"Apache-2.0"
] | 30
|
2020-09-30T03:16:23.000Z
|
2022-03-16T17:16:40.000Z
|
realtoxicityprompts/scripts/data/create_prompts_dataset.py
|
ml-research/MoRT_NMI
|
98dc14f42714b1b794d685507c01b593cde5638c
|
[
"MIT"
] | 2
|
2021-05-06T03:41:00.000Z
|
2021-10-07T12:24:22.000Z
|
realtoxicityprompts/scripts/data/create_prompts_dataset.py
|
ml-research/MoRT_NMI
|
98dc14f42714b1b794d685507c01b593cde5638c
|
[
"MIT"
] | 5
|
2021-06-01T03:09:04.000Z
|
2021-11-10T13:33:23.000Z
|
from pathlib import Path
from typing import Union
import pandas as pd
import spacy
from spacy.tokens.doc import Doc
from sqlalchemy.sql.functions import random
from utils.constants import TEXTS_DIR, OPENWEBTEXT_DB
from utils.db import SpanScore, corpus_db_session
import click
# Span constants
MIN_SPAN_LEN = 64
MAX_SPAN_LEN = 1024
MAX_PROMPT_LEN = 128
def split_prompt(doc: Doc, n: Union[int, float]):
    if isinstance(n, float):
        # n was given as a fraction of the document length; convert it to a rounded token count
        n = round(n * len(doc))
# Split text into prompt and continuation
prompt = str(doc[:n])
continuation = str(doc)[len(prompt):] # Rather than taking remaining tokens, take the remainder of the string
if len(prompt) == 0 or len(continuation) == 0 or len(prompt) > MAX_PROMPT_LEN:
return None
return prompt, continuation
def load_span_example(row: pd.Series, n: Union[int, float], nlp):
# Load text from file
text_file = TEXTS_DIR / row.filename
try:
text = text_file.read_text(encoding='utf-8', errors='strict')
except UnicodeDecodeError:
return None
# Trim text
text = text[row.begin:row.end].strip()
if not (MIN_SPAN_LEN <= len(text) <= MAX_SPAN_LEN):
return None
# Tokenize text
doc = nlp(text)
return split_prompt(doc, n)
@click.command()
@click.option('--out_file', required=True, type=str)
@click.option('--n', required=True, type=float)
def create_prompts_dataset(out_file: str, n: float):
out_file = Path(out_file)
if out_file.exists():
raise FileExistsError("Output file already exists.")
if not OPENWEBTEXT_DB.exists():
raise FileNotFoundError("Perspective database was not found.")
session = corpus_db_session()
# TODO: do this for all four ranges of toxicity
query = (
session.query(SpanScore)
.filter(SpanScore.toxicity < 0.25)
.filter(SpanScore.end - SpanScore.begin >= MIN_SPAN_LEN)
.filter(SpanScore.end - SpanScore.begin <= MAX_SPAN_LEN)
.order_by(random())
.limit(30_000)
)
# Load dataframe from query and select relevant columns
print("Reading from database...")
df = pd.read_sql(query.statement, con=query.session.bind)
df = df[['filename', 'begin', 'end', 'toxicity']]
print(f"Returned {len(df)} rows")
# Get prompts and continuations
print("Loading text and tokenizing...")
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner', 'tagger'])
examples = df.apply(lambda row: load_span_example(row, n, nlp), axis=1)
# Add prompts and continuations to dataframe
df = df[examples.notna()]
df['prompt'], df['continuation'] = zip(*examples.dropna())
print(f'Limited to {len(df)} rows after preprocessing')
df = df.head(25_000)
df.to_pickle(out_file)
return df
if __name__ == '__main__':
create_prompts_dataset()
| 29.876289
| 114
| 0.671843
|
a68a396ae61a1be229daba28fd8b851ab05ae4a1
| 15,454
|
py
|
Python
|
gridpath/system/reserves/requirement/reserve_requirements.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | null | null | null |
gridpath/system/reserves/requirement/reserve_requirements.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | null | null | null |
gridpath/system/reserves/requirement/reserve_requirements.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os.path
from pyomo.environ import Param, Set, NonNegativeReals, PercentFraction, Expression
def generic_add_model_components(
m,
d,
reserve_zone_set,
reserve_requirement_tmp_param,
reserve_requirement_percent_param,
reserve_zone_load_zone_set,
ba_prj_req_contribution_set,
prj_power_param,
prj_capacity_param,
reserve_requirement_expression,
):
"""
:param m:
:param d:
:param reserve_zone_set:
:param reserve_requirement_tmp_param:
:param reserve_requirement_percent_param:
:param reserve_zone_load_zone_set:
:param ba_prj_req_contribution_set:
:param prj_power_param:
:param prj_capacity_param:
:param reserve_requirement_expression:
:return:
Generic treatment of reserves. This function creates model components
related to a particular reserve requirement, including
1) the reserve requirement by zone and timepoint, if any
2) the reserve requirement as a percent of load and map for which load
zones' load to consider
3) the contributions to the reserve requirement from projects: there are two
types of these contributions, those based on the power output in the timepoint
and those based on the project capacity.
"""
# Magnitude of the requirement by reserve zone and timepoint
# If not specified for a reserve zone - timepoint combination,
# will default to 0
setattr(
m,
reserve_requirement_tmp_param,
Param(getattr(m, reserve_zone_set), m.TMPS, within=NonNegativeReals, default=0),
)
# Requirement as percentage of load
setattr(
m,
reserve_requirement_percent_param,
Param(getattr(m, reserve_zone_set), within=PercentFraction, default=0),
)
# Load zones included in the reserve percentage requirement
setattr(
m,
reserve_zone_load_zone_set,
Set(dimen=2, within=getattr(m, reserve_zone_set) * m.LOAD_ZONES),
)
# Projects contributing to BA requirement based on power output in the timepoint
# and on capacity in the period
setattr(
m,
ba_prj_req_contribution_set,
Set(dimen=2, within=getattr(m, reserve_zone_set) * m.PROJECTS),
)
setattr(
m,
prj_power_param,
Param(
getattr(m, ba_prj_req_contribution_set), within=PercentFraction, default=0
),
)
setattr(
m,
prj_capacity_param,
Param(
getattr(m, ba_prj_req_contribution_set), within=PercentFraction, default=0
),
)
def reserve_requirement_rule(mod, reserve_zone, tmp):
# If we have a map of reserve zones to load zones, apply the percentage
# target; if no map provided, the percentage_target is 0
if getattr(mod, reserve_zone_load_zone_set):
percentage_target = sum(
getattr(mod, reserve_requirement_percent_param)[reserve_zone]
* mod.static_load_mw[lz, tmp]
for (_reserve_zone, lz) in getattr(mod, reserve_zone_load_zone_set)
if _reserve_zone == reserve_zone
)
else:
percentage_target = 0
# Project contributions, if any projects in the respective set
if getattr(mod, ba_prj_req_contribution_set):
# Project contributions to requirement based on power output
prj_pwr_contribution = sum(
getattr(mod, prj_power_param)[reserve_zone, prj]
* mod.Power_Provision_MW[prj, tmp]
for (_reserve_zone, prj) in getattr(mod, ba_prj_req_contribution_set)
if _reserve_zone == reserve_zone
if (prj, tmp) in mod.PRJ_OPR_TMPS
)
# Project contributions to requirement based on (available) capacity
# We are not holding the extra reserves when projects are unavailable
prj_cap_contribution = sum(
getattr(mod, prj_capacity_param)[reserve_zone, prj]
* mod.Capacity_MW[prj, mod.period[tmp]]
* mod.Availability_Derate[prj, tmp]
for (_reserve_zone, prj) in getattr(mod, ba_prj_req_contribution_set)
if _reserve_zone == reserve_zone
if (prj, tmp) in mod.PRJ_OPR_TMPS
)
else:
prj_pwr_contribution = 0
prj_cap_contribution = 0
return (
getattr(mod, reserve_requirement_tmp_param)[reserve_zone, tmp]
+ percentage_target
+ prj_pwr_contribution
+ prj_cap_contribution
)
setattr(
m,
reserve_requirement_expression,
Expression(
getattr(m, reserve_zone_set) * m.TMPS, rule=reserve_requirement_rule
),
)
def generic_load_model_data(
m,
d,
data_portal,
scenario_directory,
subproblem,
stage,
reserve_requirement_param,
reserve_zone_load_zone_set,
reserve_requirement_percent_param,
ba_prj_req_contribution_set,
prj_power_param,
prj_capacity_param,
reserve_type,
):
"""
:param m:
:param d:
:param data_portal:
:param scenario_directory:
:param subproblem:
:param stage:
:param reserve_requirement_param:
:param reserve_zone_load_zone_set:
:param reserve_requirement_percent_param
:param ba_prj_req_contribution_set
:param prj_power_param
:param prj_capacity_param
:param reserve_type:
:return:
"""
input_dir = os.path.join(scenario_directory, str(subproblem), str(stage), "inputs")
    # Load the by-timepoint requirement if the input file was written
by_tmp_req_filename = os.path.join(
input_dir, "{}_tmp_requirement.tab".format(reserve_type)
)
if os.path.exists(by_tmp_req_filename):
tmp_params_to_load = (
(
getattr(m, reserve_requirement_param),
m.frequency_response_requirement_partial_mw,
)
if reserve_type == "frequency_response"
else getattr(m, reserve_requirement_param)
)
data_portal.load(filename=by_tmp_req_filename, param=tmp_params_to_load)
    # If we have a reserve-zone-to-load-zone map input file, load it and the
    # percent requirement; otherwise, initialize the set as an empty list
    # (the param defaults to 0)
map_filename = os.path.join(input_dir, "{}_percent_map.tab".format(reserve_type))
if os.path.exists(map_filename):
data_portal.load(
filename=map_filename, set=getattr(m, reserve_zone_load_zone_set)
)
data_portal.load(
filename=os.path.join(
input_dir, "{}_percent_requirement.tab".format(reserve_type)
),
param=getattr(m, reserve_requirement_percent_param),
)
else:
data_portal.data()[reserve_zone_load_zone_set] = {None: []}
    # If we have a project contributions file, load it into the respective set and params
prj_contr_filename = os.path.join(
input_dir, "{}_requirement_project_contributions.tab".format(reserve_type)
)
if os.path.exists(prj_contr_filename):
data_portal.load(
filename=prj_contr_filename,
index=getattr(m, ba_prj_req_contribution_set),
param=(getattr(m, prj_power_param), getattr(m, prj_capacity_param)),
)
else:
data_portal.data()[ba_prj_req_contribution_set] = {None: []}
def generic_get_inputs_from_database(
scenario_id,
subscenarios,
subproblem,
stage,
conn,
reserve_type,
reserve_type_ba_subscenario_id,
reserve_type_req_subscenario_id,
):
"""
:param subscenarios:
:param subproblem:
:param stage:
:param conn:
:param reserve_type:
:param reserve_type_ba_subscenario_id:
:param reserve_type_req_subscenario_id:
:return:
"""
subproblem = 1 if subproblem == "" else subproblem
stage = 1 if stage == "" else stage
c = conn.cursor()
partial_freq_resp_extra_column = (
", frequency_response_partial_mw"
if reserve_type == "frequency_response"
else ""
)
tmp_req = c.execute(
"""SELECT {}_ba, timepoint, {}_mw{}
FROM inputs_system_{}
INNER JOIN
(SELECT timepoint
FROM inputs_temporal
WHERE temporal_scenario_id = {}
AND subproblem_id = {}
AND stage_id = {}) as relevant_timepoints
USING (timepoint)
INNER JOIN
(SELECT {}_ba
FROM inputs_geography_{}_bas
WHERE {}_ba_scenario_id = {}) as relevant_bas
USING ({}_ba)
WHERE {}_scenario_id = {}
AND stage_id = {}
""".format(
reserve_type,
reserve_type,
partial_freq_resp_extra_column,
reserve_type,
subscenarios.TEMPORAL_SCENARIO_ID,
subproblem,
stage,
reserve_type,
reserve_type,
reserve_type,
reserve_type_ba_subscenario_id,
reserve_type,
reserve_type,
reserve_type_req_subscenario_id,
stage,
)
)
c2 = conn.cursor()
# Get any percentage requirement
percentage_req = c2.execute(
"""
SELECT {}_ba, percent_load_req
FROM inputs_system_{}_percent
WHERE {}_scenario_id = {}
""".format(
reserve_type, reserve_type, reserve_type, reserve_type_req_subscenario_id
)
)
# Get any reserve zone to load zone mapping for the percent target
c3 = conn.cursor()
lz_mapping = c3.execute(
"""
SELECT {}_ba, load_zone
FROM inputs_system_{}_percent_lz_map
JOIN
(SELECT {}_ba
FROM inputs_geography_{}_bas
WHERE {}_ba_scenario_id = {}) as relevant_bas
USING ({}_ba)
WHERE {}_scenario_id = {}
""".format(
reserve_type,
reserve_type,
reserve_type,
reserve_type,
reserve_type,
reserve_type_ba_subscenario_id,
reserve_type,
reserve_type,
reserve_type_req_subscenario_id,
)
)
# Get any project contributions to the magnitude of the reserve requirement
c4 = conn.cursor()
project_contributions = c4.execute(
"""
SELECT {reserve_type}_ba, project, percent_power_req, percent_capacity_req
FROM inputs_system_{reserve_type}_project
JOIN (
SELECT {reserve_type}_ba
FROM inputs_geography_{reserve_type}_bas
WHERE {reserve_type}_ba_scenario_id = {reserve_type_ba_subscenario_id}
) as relevant_bas
USING ({reserve_type}_ba)
JOIN (
SELECT project
FROM inputs_project_portfolios
WHERE project_portfolio_scenario_id = (
SELECT project_portfolio_scenario_id
FROM scenarios
WHERE scenario_id = {scenario_id}
)
) as relevant_prj
USING (project)
WHERE {reserve_type}_scenario_id = {reserve_type_req_subscenario_id}
""".format(
reserve_type=reserve_type,
reserve_type_ba_subscenario_id=reserve_type_ba_subscenario_id,
scenario_id=scenario_id,
reserve_type_req_subscenario_id=reserve_type_req_subscenario_id,
)
)
return tmp_req, percentage_req, lz_mapping, project_contributions
def generic_write_model_inputs(
scenario_directory,
subproblem,
stage,
timepoint_req,
percent_req,
percent_map,
project_contributions,
reserve_type,
):
"""
    Get inputs from the database and write out the model input requirement
    .tab files for the given reserve type.
:param scenario_directory: string, the scenario directory
:param subproblem:
:param stage:
:param timepoint_req:
:param percent_req:
:param percent_map:
:param project_contributions:
:param reserve_type:
:return:
"""
inputs_dir = os.path.join(scenario_directory, str(subproblem), str(stage), "inputs")
# Write the by-timepoint requirement file if by-tmp requirement specified
timepoint_req = timepoint_req.fetchall()
if timepoint_req:
with open(
os.path.join(inputs_dir, "{}_tmp_requirement.tab".format(reserve_type)),
"w",
newline="",
) as tmp_req_file:
writer = csv.writer(tmp_req_file, delimiter="\t", lineterminator="\n")
# Write header
extra_column = (
["partial_requirement"] if reserve_type == "frequency_response" else []
)
writer.writerow(["ba", "timepoint", "requirement"] + extra_column)
for row in timepoint_req:
writer.writerow(row)
# Write the percent requirement files only if there's a mapping
ba_lz_map_list = [row for row in percent_map]
if ba_lz_map_list:
with open(
os.path.join(inputs_dir, "{}_percent_requirement.tab".format(reserve_type)),
"w",
newline="",
) as percent_req_file:
writer = csv.writer(percent_req_file, delimiter="\t", lineterminator="\n")
# Write header
writer.writerow(["ba", "percent_requirement"])
for row in percent_req:
writer.writerow(row)
with open(
os.path.join(inputs_dir, "{}_percent_map.tab".format(reserve_type)),
"w",
newline="",
) as percent_map_file:
writer = csv.writer(percent_map_file, delimiter="\t", lineterminator="\n")
# Write header
writer.writerow(["ba", "load_zone"])
for row in ba_lz_map_list:
writer.writerow(row)
else:
pass
# Project contributions to the magnitude requirement
project_contributions = project_contributions.fetchall()
prj_contributions = False
for (ba, prj, pwr, cap) in project_contributions:
if pwr is not None or cap is not None:
prj_contributions = True
if prj_contributions:
with open(
os.path.join(
inputs_dir,
"{}_requirement_project_contributions.tab".format(reserve_type),
),
"w",
newline="",
) as prj_file:
writer = csv.writer(prj_file, delimiter="\t", lineterminator="\n")
# Write header
writer.writerow(
["ba", "project", "percent_power_req", "percent_capacity_req"]
)
for (ba, prj, pwr, cap) in project_contributions:
if pwr is None:
pwr = "."
if cap is None:
cap = "."
writer.writerow([ba, prj, pwr, cap])
| 32.330544
| 88
| 0.630516
|
877dfd94f270c83d67d9918cf35ea602fb3b591d
| 182
|
py
|
Python
|
bossutils/__init__.py
|
aplbrain/boss-tools
|
1e6db46795fe2e31c601690da399728fd0138e95
|
[
"Apache-2.0"
] | null | null | null |
bossutils/__init__.py
|
aplbrain/boss-tools
|
1e6db46795fe2e31c601690da399728fd0138e95
|
[
"Apache-2.0"
] | null | null | null |
bossutils/__init__.py
|
aplbrain/boss-tools
|
1e6db46795fe2e31c601690da399728fd0138e95
|
[
"Apache-2.0"
] | 1
|
2018-05-31T16:46:26.000Z
|
2018-05-31T16:46:26.000Z
|
# force the automatic loading of utilities
from . import aws
from . import configuration
from . import utils
from . import vault
from . import logger
from . import migration_manager
| 22.75
| 42
| 0.791209
|
192f6532a148d39ca6b57a871eae0e3b50e773cd
| 39,917
|
py
|
Python
|
pandas/sparse/tests/test_frame.py
|
jackieleng/pandas
|
ccec504e31ce74f8016952ac75add1cc4bec7080
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
pandas/sparse/tests/test_frame.py
|
jackieleng/pandas
|
ccec504e31ce74f8016952ac75add1cc4bec7080
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
pandas/sparse/tests/test_frame.py
|
jackieleng/pandas
|
ccec504e31ce74f8016952ac75add1cc4bec7080
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | 1
|
2021-01-02T02:27:25.000Z
|
2021-01-02T02:27:25.000Z
|
# pylint: disable-msg=E1101,W0612
import operator
from numpy import nan
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, bdate_range, Panel
from pandas.tseries.index import DatetimeIndex
import pandas.core.datetools as datetools
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas import compat
import pandas.sparse.frame as spf
from pandas._sparse import BlockIndex, IntIndex
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparseArray
from pandas.tests.frame.test_misc_api import SharedWithSparse
class TestSparseDataFrame(tm.TestCase, SharedWithSparse):
klass = SparseDataFrame
_multiprocess_can_split_ = True
def setUp(self):
self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10, dtype=np.float64),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
self.dates = bdate_range('1/1/2011', periods=10)
self.orig = pd.DataFrame(self.data, index=self.dates)
self.iorig = pd.DataFrame(self.data, index=self.dates)
self.frame = SparseDataFrame(self.data, index=self.dates)
self.iframe = SparseDataFrame(self.data, index=self.dates,
default_kind='integer')
values = self.frame.values.copy()
values[np.isnan(values)] = 0
self.zorig = pd.DataFrame(values, columns=['A', 'B', 'C', 'D'],
index=self.dates)
self.zframe = SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=0, index=self.dates)
values = self.frame.values.copy()
values[np.isnan(values)] = 2
self.fill_orig = pd.DataFrame(values, columns=['A', 'B', 'C', 'D'],
index=self.dates)
self.fill_frame = SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=2,
index=self.dates)
self.empty = SparseDataFrame()
def test_fill_value_when_combine_const(self):
# GH12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype='float')
df = SparseDataFrame({'foo': dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_sp_frame_equal(res, exp)
def test_as_matrix(self):
empty = self.empty.as_matrix()
self.assertEqual(empty.shape, (0, 0))
no_cols = SparseDataFrame(index=np.arange(10))
mat = no_cols.as_matrix()
self.assertEqual(mat.shape, (10, 0))
no_index = SparseDataFrame(columns=np.arange(10))
mat = no_index.as_matrix()
self.assertEqual(mat.shape, (0, 10))
def test_copy(self):
cp = self.frame.copy()
tm.assertIsInstance(cp, SparseDataFrame)
tm.assert_sp_frame_equal(cp, self.frame)
        # as of v0.15.0
        # the copied index is identical() to the original, but not the same object
self.assertTrue(cp.index.identical(self.frame.index))
def test_constructor(self):
for col, series in compat.iteritems(self.frame):
tm.assertIsInstance(series, SparseSeries)
tm.assertIsInstance(self.iframe['A'].sp_index, IntIndex)
# constructed zframe from matrix above
self.assertEqual(self.zframe['A'].fill_value, 0)
tm.assert_numpy_array_equal(pd.SparseArray([1., 2., 3., 4., 5., 6.]),
self.zframe['A'].values)
tm.assert_numpy_array_equal(np.array([0., 0., 0., 0., 1., 2.,
3., 4., 5., 6.]),
self.zframe['A'].to_dense().values)
# construct no data
sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10))
for col, series in compat.iteritems(sdf):
tm.assertIsInstance(series, SparseSeries)
# construct from nested dict
data = {}
for c, s in compat.iteritems(self.frame):
data[c] = s.to_dict()
sdf = SparseDataFrame(data)
tm.assert_sp_frame_equal(sdf, self.frame)
# TODO: test data is copied from inputs
# init dict with different index
idx = self.frame.index[:5]
cons = SparseDataFrame(
self.frame, index=idx, columns=self.frame.columns,
default_fill_value=self.frame.default_fill_value,
default_kind=self.frame.default_kind, copy=True)
reindexed = self.frame.reindex(idx)
tm.assert_sp_frame_equal(cons, reindexed, exact_indices=False)
# assert level parameter breaks reindex
with tm.assertRaises(TypeError):
self.frame.reindex(idx, level=0)
repr(self.frame)
def test_constructor_ndarray(self):
# no index or columns
sp = SparseDataFrame(self.frame.values)
# 1d
sp = SparseDataFrame(self.data['A'], index=self.dates, columns=['A'])
tm.assert_sp_frame_equal(sp, self.frame.reindex(columns=['A']))
# raise on level argument
self.assertRaises(TypeError, self.frame.reindex, columns=['A'],
level=1)
# wrong length index / columns
with tm.assertRaisesRegexp(ValueError, "^Index length"):
SparseDataFrame(self.frame.values, index=self.frame.index[:-1])
with tm.assertRaisesRegexp(ValueError, "^Column length"):
SparseDataFrame(self.frame.values, columns=self.frame.columns[:-1])
# GH 9272
def test_constructor_empty(self):
sp = SparseDataFrame()
self.assertEqual(len(sp.index), 0)
self.assertEqual(len(sp.columns), 0)
def test_constructor_dataframe(self):
dense = self.frame.to_dense()
sp = SparseDataFrame(dense)
tm.assert_sp_frame_equal(sp, self.frame)
def test_constructor_convert_index_once(self):
arr = np.array([1.5, 2.5, 3.5])
sdf = SparseDataFrame(columns=lrange(4), index=arr)
self.assertTrue(sdf[0].index is sdf[1].index)
def test_constructor_from_series(self):
# GH 2873
x = Series(np.random.randn(10000), name='a')
x = x.to_sparse(fill_value=0)
tm.assertIsInstance(x, SparseSeries)
df = SparseDataFrame(x)
tm.assertIsInstance(df, SparseDataFrame)
x = Series(np.random.randn(10000), name='a')
y = Series(np.random.randn(10000), name='b')
x2 = x.astype(float)
x2.ix[:9998] = np.NaN
# TODO: x_sparse is unused...fix
x_sparse = x2.to_sparse(fill_value=np.NaN) # noqa
# Currently fails too with weird ufunc error
# df1 = SparseDataFrame([x_sparse, y])
y.ix[:9998] = 0
        # TODO: y_sparse is unused...fix
y_sparse = y.to_sparse(fill_value=0) # noqa
# without sparse value raises error
# df2 = SparseDataFrame([x2_sparse, y])
def test_constructor_preserve_attr(self):
# GH 13866
arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)
self.assertEqual(arr.dtype, np.int64)
self.assertEqual(arr.fill_value, 0)
df = pd.SparseDataFrame({'x': arr})
self.assertEqual(df['x'].dtype, np.int64)
self.assertEqual(df['x'].fill_value, 0)
s = pd.SparseSeries(arr, name='x')
self.assertEqual(s.dtype, np.int64)
self.assertEqual(s.fill_value, 0)
df = pd.SparseDataFrame(s)
self.assertEqual(df['x'].dtype, np.int64)
self.assertEqual(df['x'].fill_value, 0)
df = pd.SparseDataFrame({'x': s})
self.assertEqual(df['x'].dtype, np.int64)
self.assertEqual(df['x'].fill_value, 0)
def test_dtypes(self):
df = DataFrame(np.random.randn(10000, 4))
df.ix[:9998] = np.nan
sdf = df.to_sparse()
result = sdf.get_dtype_counts()
expected = Series({'float64': 4})
tm.assert_series_equal(result, expected)
def test_shape(self):
# GH 10452
self.assertEqual(self.frame.shape, (10, 4))
self.assertEqual(self.iframe.shape, (10, 4))
self.assertEqual(self.zframe.shape, (10, 4))
self.assertEqual(self.fill_frame.shape, (10, 4))
def test_str(self):
df = DataFrame(np.random.randn(10000, 4))
df.ix[:9998] = np.nan
sdf = df.to_sparse()
str(sdf)
def test_array_interface(self):
res = np.sqrt(self.frame)
dres = np.sqrt(self.frame.to_dense())
tm.assert_frame_equal(res.to_dense(), dres)
def test_pickle(self):
def _test_roundtrip(frame, orig):
result = self.round_trip_pickle(frame)
tm.assert_sp_frame_equal(frame, result)
tm.assert_frame_equal(result.to_dense(), orig, check_dtype=False)
_test_roundtrip(SparseDataFrame(), DataFrame())
self._check_all(_test_roundtrip)
def test_dense_to_sparse(self):
df = DataFrame({'A': [nan, nan, nan, 1, 2],
'B': [1, 2, nan, nan, nan]})
sdf = df.to_sparse()
tm.assertIsInstance(sdf, SparseDataFrame)
self.assertTrue(np.isnan(sdf.default_fill_value))
tm.assertIsInstance(sdf['A'].sp_index, BlockIndex)
tm.assert_frame_equal(sdf.to_dense(), df)
sdf = df.to_sparse(kind='integer')
tm.assertIsInstance(sdf['A'].sp_index, IntIndex)
df = DataFrame({'A': [0, 0, 0, 1, 2],
'B': [1, 2, 0, 0, 0]}, dtype=float)
sdf = df.to_sparse(fill_value=0)
self.assertEqual(sdf.default_fill_value, 0)
tm.assert_frame_equal(sdf.to_dense(), df)
def test_density(self):
df = SparseSeries([nan, nan, nan, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(df.density, 0.7)
df = SparseDataFrame({'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]})
self.assertEqual(df.density, 0.75)
def test_sparse_to_dense(self):
pass
def test_sparse_series_ops(self):
self._check_frame_ops(self.frame)
def test_sparse_series_ops_i(self):
self._check_frame_ops(self.iframe)
def test_sparse_series_ops_z(self):
self._check_frame_ops(self.zframe)
def test_sparse_series_ops_fill(self):
self._check_frame_ops(self.fill_frame)
def _check_frame_ops(self, frame):
def _compare_to_dense(a, b, da, db, op):
sparse_result = op(a, b)
dense_result = op(da, db)
fill = sparse_result.default_fill_value
dense_result = dense_result.to_sparse(fill_value=fill)
tm.assert_sp_frame_equal(sparse_result, dense_result,
exact_indices=False)
if isinstance(a, DataFrame) and isinstance(db, DataFrame):
mixed_result = op(a, db)
tm.assertIsInstance(mixed_result, SparseDataFrame)
tm.assert_sp_frame_equal(mixed_result, sparse_result,
exact_indices=False)
opnames = ['add', 'sub', 'mul', 'truediv', 'floordiv']
ops = [getattr(operator, name) for name in opnames]
fidx = frame.index
# time series operations
series = [frame['A'], frame['B'], frame['C'], frame['D'],
frame['A'].reindex(fidx[:7]), frame['A'].reindex(fidx[::2]),
SparseSeries(
[], index=[])]
for op in opnames:
_compare_to_dense(frame, frame[::2], frame.to_dense(),
frame[::2].to_dense(), getattr(operator, op))
# 2304, no auto-broadcasting
for i, s in enumerate(series):
f = lambda a, b: getattr(a, op)(b, axis='index')
_compare_to_dense(frame, s, frame.to_dense(), s.to_dense(), f)
# rops are not implemented
# _compare_to_dense(s, frame, s.to_dense(),
# frame.to_dense(), f)
# cross-sectional operations
series = [frame.xs(fidx[0]), frame.xs(fidx[3]), frame.xs(fidx[5]),
frame.xs(fidx[7]), frame.xs(fidx[5])[:2]]
for op in ops:
for s in series:
_compare_to_dense(frame, s, frame.to_dense(), s, op)
_compare_to_dense(s, frame, s, frame.to_dense(), op)
# it works!
result = self.frame + self.frame.ix[:, ['A', 'B']] # noqa
def test_op_corners(self):
empty = self.empty + self.empty
self.assertTrue(empty.empty)
foo = self.frame + self.empty
tm.assertIsInstance(foo.index, DatetimeIndex)
tm.assert_frame_equal(foo, self.frame * np.nan)
foo = self.empty + self.frame
tm.assert_frame_equal(foo, self.frame * np.nan)
def test_scalar_ops(self):
pass
def test_getitem(self):
# 1585 select multiple columns
sdf = SparseDataFrame(index=[0, 1, 2], columns=['a', 'b', 'c'])
result = sdf[['a', 'b']]
exp = sdf.reindex(columns=['a', 'b'])
tm.assert_sp_frame_equal(result, exp)
self.assertRaises(Exception, sdf.__getitem__, ['a', 'd'])
def test_icol(self):
# 10711 deprecated
# 2227
result = self.frame.iloc[:, 0]
self.assertTrue(isinstance(result, SparseSeries))
tm.assert_sp_series_equal(result, self.frame['A'])
# preserve sparse index type. #2251
data = {'A': [0, 1]}
iframe = SparseDataFrame(data, default_kind='integer')
self.assertEqual(type(iframe['A'].sp_index),
type(iframe.iloc[:, 0].sp_index))
def test_set_value(self):
# ok as the index gets converted to object
frame = self.frame.copy()
res = frame.set_value('foobar', 'B', 1.5)
self.assertEqual(res.index.dtype, 'object')
res = self.frame
res.index = res.index.astype(object)
res = self.frame.set_value('foobar', 'B', 1.5)
self.assertIsNot(res, self.frame)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res.get_value('foobar', 'B'), 1.5)
res2 = res.set_value('foobar', 'qux', 1.5)
self.assertIsNot(res2, res)
self.assert_index_equal(res2.columns,
pd.Index(list(self.frame.columns) + ['qux']))
self.assertEqual(res2.get_value('foobar', 'qux'), 1.5)
def test_fancy_index_misc(self):
# axis = 0
sliced = self.frame.ix[-2:, :]
expected = self.frame.reindex(index=self.frame.index[-2:])
tm.assert_sp_frame_equal(sliced, expected)
# axis = 1
sliced = self.frame.ix[:, -2:]
expected = self.frame.reindex(columns=self.frame.columns[-2:])
tm.assert_sp_frame_equal(sliced, expected)
def test_getitem_overload(self):
# slicing
sl = self.frame[:20]
tm.assert_sp_frame_equal(sl, self.frame.reindex(self.frame.index[:20]))
# boolean indexing
d = self.frame.index[5]
indexer = self.frame.index > d
subindex = self.frame.index[indexer]
subframe = self.frame[indexer]
self.assert_index_equal(subindex, subframe.index)
self.assertRaises(Exception, self.frame.__getitem__, indexer[:-1])
def test_setitem(self):
def _check_frame(frame, orig):
N = len(frame)
# insert SparseSeries
frame['E'] = frame['A']
tm.assertIsInstance(frame['E'], SparseSeries)
tm.assert_sp_series_equal(frame['E'], frame['A'],
check_names=False)
# insert SparseSeries differently-indexed
to_insert = frame['A'][::2]
frame['E'] = to_insert
expected = to_insert.to_dense().reindex(frame.index)
result = frame['E'].to_dense()
tm.assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, 'E')
# insert Series
frame['F'] = frame['A'].to_dense()
tm.assertIsInstance(frame['F'], SparseSeries)
tm.assert_sp_series_equal(frame['F'], frame['A'],
check_names=False)
# insert Series differently-indexed
to_insert = frame['A'].to_dense()[::2]
frame['G'] = to_insert
expected = to_insert.reindex(frame.index)
expected.name = 'G'
tm.assert_series_equal(frame['G'].to_dense(), expected)
# insert ndarray
frame['H'] = np.random.randn(N)
tm.assertIsInstance(frame['H'], SparseSeries)
to_sparsify = np.random.randn(N)
to_sparsify[N // 2:] = frame.default_fill_value
frame['I'] = to_sparsify
self.assertEqual(len(frame['I'].sp_values), N // 2)
# insert ndarray wrong size
self.assertRaises(Exception, frame.__setitem__, 'foo',
np.random.randn(N - 1))
# scalar value
frame['J'] = 5
self.assertEqual(len(frame['J'].sp_values), N)
self.assertTrue((frame['J'].sp_values == 5).all())
frame['K'] = frame.default_fill_value
self.assertEqual(len(frame['K'].sp_values), 0)
self._check_all(_check_frame)
def test_setitem_corner(self):
self.frame['a'] = self.frame['B']
tm.assert_sp_series_equal(self.frame['a'], self.frame['B'],
check_names=False)
def test_setitem_array(self):
arr = self.frame['B']
self.frame['E'] = arr
tm.assert_sp_series_equal(self.frame['E'], self.frame['B'],
check_names=False)
self.frame['F'] = arr[:-1]
index = self.frame.index[:-1]
tm.assert_sp_series_equal(self.frame['E'].reindex(index),
self.frame['F'].reindex(index),
check_names=False)
def test_delitem(self):
A = self.frame['A']
C = self.frame['C']
del self.frame['B']
self.assertNotIn('B', self.frame)
tm.assert_sp_series_equal(self.frame['A'], A)
tm.assert_sp_series_equal(self.frame['C'], C)
del self.frame['D']
self.assertNotIn('D', self.frame)
del self.frame['A']
self.assertNotIn('A', self.frame)
def test_set_columns(self):
self.frame.columns = self.frame.columns
self.assertRaises(Exception, setattr, self.frame, 'columns',
self.frame.columns[:-1])
def test_set_index(self):
self.frame.index = self.frame.index
self.assertRaises(Exception, setattr, self.frame, 'index',
self.frame.index[:-1])
def test_append(self):
a = self.frame[:5]
b = self.frame[5:]
appended = a.append(b)
tm.assert_sp_frame_equal(appended, self.frame, exact_indices=False)
a = self.frame.ix[:5, :3]
b = self.frame.ix[5:]
appended = a.append(b)
tm.assert_sp_frame_equal(appended.ix[:, :3], self.frame.ix[:, :3],
exact_indices=False)
def test_apply(self):
applied = self.frame.apply(np.sqrt)
tm.assertIsInstance(applied, SparseDataFrame)
tm.assert_almost_equal(applied.values, np.sqrt(self.frame.values))
applied = self.fill_frame.apply(np.sqrt)
self.assertEqual(applied['A'].fill_value, np.sqrt(2))
# agg / broadcast
broadcasted = self.frame.apply(np.sum, broadcast=True)
tm.assertIsInstance(broadcasted, SparseDataFrame)
exp = self.frame.to_dense().apply(np.sum, broadcast=True)
tm.assert_frame_equal(broadcasted.to_dense(), exp)
self.assertIs(self.empty.apply(np.sqrt), self.empty)
from pandas.core import nanops
applied = self.frame.apply(np.sum)
tm.assert_series_equal(applied,
self.frame.to_dense().apply(nanops.nansum))
def test_apply_nonuq(self):
orig = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=['a', 'a', 'c'])
sparse = orig.to_sparse()
res = sparse.apply(lambda s: s[0], axis=1)
exp = orig.apply(lambda s: s[0], axis=1)
# dtype must be kept
self.assertEqual(res.dtype, np.int64)
# ToDo: apply must return subclassed dtype
self.assertIsInstance(res, pd.Series)
tm.assert_series_equal(res.to_dense(), exp)
# df.T breaks
sparse = orig.T.to_sparse()
res = sparse.apply(lambda s: s[0], axis=0) # noqa
exp = orig.T.apply(lambda s: s[0], axis=0)
# TODO: no non-unique columns supported in sparse yet
# tm.assert_series_equal(res.to_dense(), exp)
def test_applymap(self):
# just test that it works
result = self.frame.applymap(lambda x: x * 2)
tm.assertIsInstance(result, SparseDataFrame)
def test_astype(self):
sparse = pd.SparseDataFrame({'A': SparseArray([1, 2, 3, 4],
dtype=np.int64),
'B': SparseArray([4, 5, 6, 7],
dtype=np.int64)})
self.assertEqual(sparse['A'].dtype, np.int64)
self.assertEqual(sparse['B'].dtype, np.int64)
res = sparse.astype(np.float64)
exp = pd.SparseDataFrame({'A': SparseArray([1., 2., 3., 4.],
fill_value=0.),
'B': SparseArray([4., 5., 6., 7.],
fill_value=0.)},
default_fill_value=np.nan)
tm.assert_sp_frame_equal(res, exp)
self.assertEqual(res['A'].dtype, np.float64)
self.assertEqual(res['B'].dtype, np.float64)
sparse = pd.SparseDataFrame({'A': SparseArray([0, 2, 0, 4],
dtype=np.int64),
'B': SparseArray([0, 5, 0, 7],
dtype=np.int64)},
default_fill_value=0)
self.assertEqual(sparse['A'].dtype, np.int64)
self.assertEqual(sparse['B'].dtype, np.int64)
res = sparse.astype(np.float64)
exp = pd.SparseDataFrame({'A': SparseArray([0., 2., 0., 4.],
fill_value=0.),
'B': SparseArray([0., 5., 0., 7.],
fill_value=0.)},
default_fill_value=0.)
tm.assert_sp_frame_equal(res, exp)
self.assertEqual(res['A'].dtype, np.float64)
self.assertEqual(res['B'].dtype, np.float64)
def test_astype_bool(self):
sparse = pd.SparseDataFrame({'A': SparseArray([0, 2, 0, 4],
fill_value=0,
dtype=np.int64),
'B': SparseArray([0, 5, 0, 7],
fill_value=0,
dtype=np.int64)},
default_fill_value=0)
self.assertEqual(sparse['A'].dtype, np.int64)
self.assertEqual(sparse['B'].dtype, np.int64)
res = sparse.astype(bool)
exp = pd.SparseDataFrame({'A': SparseArray([False, True, False, True],
dtype=np.bool,
fill_value=False),
'B': SparseArray([False, True, False, True],
dtype=np.bool,
fill_value=False)},
default_fill_value=False)
tm.assert_sp_frame_equal(res, exp)
self.assertEqual(res['A'].dtype, np.bool)
self.assertEqual(res['B'].dtype, np.bool)
def test_fillna(self):
df = self.zframe.reindex(lrange(5))
dense = self.zorig.reindex(lrange(5))
result = df.fillna(0)
expected = dense.fillna(0)
tm.assert_sp_frame_equal(result, expected.to_sparse(fill_value=0),
exact_indices=False)
tm.assert_frame_equal(result.to_dense(), expected)
result = df.copy()
result.fillna(0, inplace=True)
expected = dense.fillna(0)
tm.assert_sp_frame_equal(result, expected.to_sparse(fill_value=0),
exact_indices=False)
tm.assert_frame_equal(result.to_dense(), expected)
result = df.copy()
result = df['A']
result.fillna(0, inplace=True)
expected = dense['A'].fillna(0)
# this changes internal SparseArray repr
# tm.assert_sp_series_equal(result, expected.to_sparse(fill_value=0))
tm.assert_series_equal(result.to_dense(), expected)
def test_fillna_fill_value(self):
df = pd.DataFrame({'A': [1, 0, 0], 'B': [np.nan, np.nan, 4]})
sparse = pd.SparseDataFrame(df)
tm.assert_frame_equal(sparse.fillna(-1).to_dense(),
df.fillna(-1), check_dtype=False)
sparse = pd.SparseDataFrame(df, default_fill_value=0)
tm.assert_frame_equal(sparse.fillna(-1).to_dense(),
df.fillna(-1), check_dtype=False)
def test_rename(self):
# just check this works
renamed = self.frame.rename(index=str) # noqa
renamed = self.frame.rename(columns=lambda x: '%s%d' % (x, len(x))) # noqa
def test_corr(self):
res = self.frame.corr()
tm.assert_frame_equal(res, self.frame.to_dense().corr())
def test_describe(self):
self.frame['foo'] = np.nan
self.frame.get_dtype_counts()
str(self.frame)
desc = self.frame.describe() # noqa
def test_join(self):
left = self.frame.ix[:, ['A', 'B']]
right = self.frame.ix[:, ['C', 'D']]
joined = left.join(right)
tm.assert_sp_frame_equal(joined, self.frame, exact_indices=False)
right = self.frame.ix[:, ['B', 'D']]
self.assertRaises(Exception, left.join, right)
with tm.assertRaisesRegexp(ValueError,
'Other Series must have a name'):
self.frame.join(Series(
np.random.randn(len(self.frame)), index=self.frame.index))
def test_reindex(self):
def _check_frame(frame):
index = frame.index
sidx = index[::2]
sidx2 = index[:5] # noqa
sparse_result = frame.reindex(sidx)
dense_result = frame.to_dense().reindex(sidx)
tm.assert_frame_equal(sparse_result.to_dense(), dense_result)
tm.assert_frame_equal(frame.reindex(list(sidx)).to_dense(),
dense_result)
sparse_result2 = sparse_result.reindex(index)
dense_result2 = dense_result.reindex(index)
tm.assert_frame_equal(sparse_result2.to_dense(), dense_result2)
# propagate CORRECT fill value
tm.assert_almost_equal(sparse_result.default_fill_value,
frame.default_fill_value)
tm.assert_almost_equal(sparse_result['A'].fill_value,
frame['A'].fill_value)
# length zero
length_zero = frame.reindex([])
self.assertEqual(len(length_zero), 0)
self.assertEqual(len(length_zero.columns), len(frame.columns))
self.assertEqual(len(length_zero['A']), 0)
# frame being reindexed has length zero
length_n = length_zero.reindex(index)
self.assertEqual(len(length_n), len(frame))
self.assertEqual(len(length_n.columns), len(frame.columns))
self.assertEqual(len(length_n['A']), len(frame))
# reindex columns
reindexed = frame.reindex(columns=['A', 'B', 'Z'])
self.assertEqual(len(reindexed.columns), 3)
tm.assert_almost_equal(reindexed['Z'].fill_value,
frame.default_fill_value)
self.assertTrue(np.isnan(reindexed['Z'].sp_values).all())
_check_frame(self.frame)
_check_frame(self.iframe)
_check_frame(self.zframe)
_check_frame(self.fill_frame)
# with copy=False
reindexed = self.frame.reindex(self.frame.index, copy=False)
reindexed['F'] = reindexed['A']
self.assertIn('F', self.frame)
reindexed = self.frame.reindex(self.frame.index)
reindexed['G'] = reindexed['A']
self.assertNotIn('G', self.frame)
def test_reindex_fill_value(self):
rng = bdate_range('20110110', periods=20)
result = self.zframe.reindex(rng, fill_value=0)
exp = self.zorig.reindex(rng, fill_value=0)
exp = exp.to_sparse(self.zframe.default_fill_value)
tm.assert_sp_frame_equal(result, exp)
def test_take(self):
result = self.frame.take([1, 0, 2], axis=1)
expected = self.frame.reindex(columns=['B', 'A', 'C'])
tm.assert_sp_frame_equal(result, expected)
def test_to_dense(self):
def _check(frame, orig):
dense_dm = frame.to_dense()
tm.assert_frame_equal(frame, dense_dm)
tm.assert_frame_equal(dense_dm, orig, check_dtype=False)
self._check_all(_check)
def test_stack_sparse_frame(self):
def _check(frame):
dense_frame = frame.to_dense() # noqa
wp = Panel.from_dict({'foo': frame})
from_dense_lp = wp.to_frame()
from_sparse_lp = spf.stack_sparse_frame(frame)
self.assert_numpy_array_equal(from_dense_lp.values,
from_sparse_lp.values)
_check(self.frame)
_check(self.iframe)
# for now
self.assertRaises(Exception, _check, self.zframe)
self.assertRaises(Exception, _check, self.fill_frame)
def test_transpose(self):
def _check(frame, orig):
transposed = frame.T
untransposed = transposed.T
tm.assert_sp_frame_equal(frame, untransposed)
tm.assert_frame_equal(frame.T.to_dense(), orig.T)
tm.assert_frame_equal(frame.T.T.to_dense(), orig.T.T)
tm.assert_sp_frame_equal(frame, frame.T.T, exact_indices=False)
self._check_all(_check)
def test_shift(self):
def _check(frame, orig):
shifted = frame.shift(0)
exp = orig.shift(0)
tm.assert_frame_equal(shifted.to_dense(), exp)
shifted = frame.shift(1)
exp = orig.shift(1)
tm.assert_frame_equal(shifted, exp)
shifted = frame.shift(-2)
exp = orig.shift(-2)
tm.assert_frame_equal(shifted, exp)
shifted = frame.shift(2, freq='B')
exp = orig.shift(2, freq='B')
exp = exp.to_sparse(frame.default_fill_value)
tm.assert_frame_equal(shifted, exp)
shifted = frame.shift(2, freq=datetools.bday)
exp = orig.shift(2, freq=datetools.bday)
exp = exp.to_sparse(frame.default_fill_value)
tm.assert_frame_equal(shifted, exp)
self._check_all(_check)
def test_count(self):
dense_result = self.frame.to_dense().count()
result = self.frame.count()
tm.assert_series_equal(result, dense_result)
result = self.frame.count(axis=None)
tm.assert_series_equal(result, dense_result)
result = self.frame.count(axis=0)
tm.assert_series_equal(result, dense_result)
result = self.frame.count(axis=1)
dense_result = self.frame.to_dense().count(axis=1)
# on win32, don't check dtype
tm.assert_series_equal(result, dense_result, check_dtype=False)
def _check_all(self, check_func):
check_func(self.frame, self.orig)
check_func(self.iframe, self.iorig)
check_func(self.zframe, self.zorig)
check_func(self.fill_frame, self.fill_orig)
def test_numpy_transpose(self):
sdf = SparseDataFrame([1, 2, 3], index=[1, 2, 3], columns=['a'])
result = np.transpose(np.transpose(sdf))
tm.assert_sp_frame_equal(result, sdf)
msg = "the 'axes' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.transpose, sdf, axes=1)
def test_combine_first(self):
df = self.frame
result = df[::2].combine_first(df)
result2 = df[::2].combine_first(df.to_dense())
expected = df[::2].to_dense().combine_first(df.to_dense())
expected = expected.to_sparse(fill_value=df.default_fill_value)
tm.assert_sp_frame_equal(result, result2)
tm.assert_sp_frame_equal(result, expected)
def test_combine_add(self):
df = self.frame.to_dense()
df2 = df.copy()
df2['C'][:3] = np.nan
df['A'][:3] = 5.7
result = df.to_sparse().add(df2.to_sparse(), fill_value=0)
expected = df.add(df2, fill_value=0).to_sparse()
tm.assert_sp_frame_equal(result, expected)
def test_isin(self):
sparse_df = DataFrame({'flag': [1., 0., 1.]}).to_sparse(fill_value=0.)
xp = sparse_df[sparse_df.flag == 1.]
rs = sparse_df[sparse_df.flag.isin([1.])]
tm.assert_frame_equal(xp, rs)
def test_sparse_pow_issue(self):
# 2220
df = SparseDataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
# note : no error without nan
df = SparseDataFrame({'A': [nan, 0, 1]})
# note that 2 ** df works fine, also df ** 1
result = 1**df
r1 = result.take([0], 1)['A']
r2 = result['A']
self.assertEqual(len(r2.sp_values), len(r1.sp_values))
def test_as_blocks(self):
df = SparseDataFrame({'A': [1.1, 3.3], 'B': [nan, -3.9]},
dtype='float64')
df_blocks = df.blocks
self.assertEqual(list(df_blocks.keys()), ['float64'])
tm.assert_frame_equal(df_blocks['float64'], df)
def test_nan_columnname(self):
# GH 8822
nan_colname = DataFrame(Series(1.0, index=[0]), columns=[nan])
nan_colname_sparse = nan_colname.to_sparse()
self.assertTrue(np.isnan(nan_colname_sparse.columns[0]))
def test_isnull(self):
# GH 8276
df = pd.SparseDataFrame({'A': [np.nan, np.nan, 1, 2, np.nan],
'B': [0, np.nan, np.nan, 2, np.nan]})
res = df.isnull()
exp = pd.SparseDataFrame({'A': [True, True, False, False, True],
'B': [False, True, True, False, True]},
default_fill_value=True)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
df = pd.SparseDataFrame({'A': [0, 0, 1, 2, np.nan],
'B': [0, np.nan, 0, 2, np.nan]},
default_fill_value=0.)
res = df.isnull()
tm.assertIsInstance(res, pd.SparseDataFrame)
exp = pd.DataFrame({'A': [False, False, False, False, True],
'B': [False, True, False, False, True]})
tm.assert_frame_equal(res.to_dense(), exp)
def test_isnotnull(self):
# GH 8276
df = pd.SparseDataFrame({'A': [np.nan, np.nan, 1, 2, np.nan],
'B': [0, np.nan, np.nan, 2, np.nan]})
res = df.isnotnull()
exp = pd.SparseDataFrame({'A': [False, False, True, True, False],
'B': [True, False, False, True, False]},
default_fill_value=False)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
df = pd.SparseDataFrame({'A': [0, 0, 1, 2, np.nan],
'B': [0, np.nan, 0, 2, np.nan]},
default_fill_value=0.)
res = df.isnotnull()
tm.assertIsInstance(res, pd.SparseDataFrame)
exp = pd.DataFrame({'A': [True, True, True, True, False],
'B': [True, False, True, True, False]})
tm.assert_frame_equal(res.to_dense(), exp)
class TestSparseDataFrameArithmetic(tm.TestCase):
def test_numeric_op_scalar(self):
df = pd.DataFrame({'A': [nan, nan, 0, 1, ],
'B': [0, 1, 2, nan],
'C': [1., 2., 3., 4.],
'D': [nan, nan, nan, nan]})
sparse = df.to_sparse()
tm.assert_sp_frame_equal(sparse + 1, (df + 1).to_sparse())
def test_comparison_op_scalar(self):
# GH 13001
df = pd.DataFrame({'A': [nan, nan, 0, 1, ],
'B': [0, 1, 2, nan],
'C': [1., 2., 3., 4.],
'D': [nan, nan, nan, nan]})
sparse = df.to_sparse()
# comparison changes internal repr, compare with dense
res = sparse > 1
tm.assertIsInstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), df > 1)
res = sparse != 0
tm.assertIsInstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), df != 0)
class TestSparseDataFrameAnalytics(tm.TestCase):
def setUp(self):
self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10, dtype=float),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
self.dates = bdate_range('1/1/2011', periods=10)
self.frame = SparseDataFrame(self.data, index=self.dates)
def test_cumsum(self):
expected = SparseDataFrame(self.frame.to_dense().cumsum())
result = self.frame.cumsum()
tm.assert_sp_frame_equal(result, expected)
result = self.frame.cumsum(axis=None)
tm.assert_sp_frame_equal(result, expected)
result = self.frame.cumsum(axis=0)
tm.assert_sp_frame_equal(result, expected)
def test_numpy_cumsum(self):
result = np.cumsum(self.frame)
expected = SparseDataFrame(self.frame.to_dense().cumsum())
tm.assert_sp_frame_equal(result, expected)
msg = "the 'dtype' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.cumsum,
self.frame, dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.cumsum,
self.frame, out=result)
def test_numpy_func_call(self):
# no exception should be raised even though
# numpy passes in 'axis=None' or 'axis=-1'
funcs = ['sum', 'cumsum', 'var',
'mean', 'prod', 'cumprod',
'std', 'min', 'max']
for func in funcs:
getattr(np, func)(self.frame)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| 37.236007
| 83
| 0.557457
|
6f6b592656d225e6b9c873f5efd32263ea968fa8
| 27,896
|
py
|
Python
|
neutron/plugins/cisco/cfg_agent/service_helpers/routing_svc_helper.py
|
venkataanil/juno_neutron
|
2e62e150c264ccae2dd75fb78caae453eaa77e9f
|
[
"Apache-2.0"
] | 1
|
2021-02-19T05:54:04.000Z
|
2021-02-19T05:54:04.000Z
|
neutron/plugins/cisco/cfg_agent/service_helpers/routing_svc_helper.py
|
venkataanil/juno_neutron
|
2e62e150c264ccae2dd75fb78caae453eaa77e9f
|
[
"Apache-2.0"
] | null | null | null |
neutron/plugins/cisco/cfg_agent/service_helpers/routing_svc_helper.py
|
venkataanil/juno_neutron
|
2e62e150c264ccae2dd75fb78caae453eaa77e9f
|
[
"Apache-2.0"
] | 2
|
2016-11-29T11:22:58.000Z
|
2016-11-29T11:54:41.000Z
|
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import eventlet
import netaddr
from neutron.common import constants as l3_constants
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils as common_utils
from neutron import context as n_context
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.cfg_agent import cfg_exceptions
from neutron.plugins.cisco.cfg_agent.device_drivers import driver_mgr
from neutron.plugins.cisco.cfg_agent import device_status
from neutron.plugins.cisco.common import cisco_constants as c_constants
LOG = logging.getLogger(__name__)
N_ROUTER_PREFIX = 'nrouter-'
class RouterInfo(object):
"""Wrapper class around the (neutron) router dictionary.
Information about the neutron router is exchanged as a python dictionary
between plugin and config agent. RouterInfo is a wrapper around that dict,
with attributes for common parameters. These attributes keep the state
of the current router configuration, and are used for detecting router
state changes when an updated router dict is received.
This is a modified version of the RouterInfo class defined in the
(reference) l3-agent implementation, for use with cisco config agent.
"""
def __init__(self, router_id, router):
self.router_id = router_id
self.ex_gw_port = None
self._snat_enabled = None
self._snat_action = None
self.internal_ports = []
self.floating_ips = []
self._router = None
self.router = router
self.routes = []
self.ha_info = router.get('ha_info')
@property
def router(self):
return self._router
@property
def id(self):
return self.router_id
@property
def snat_enabled(self):
return self._snat_enabled
@router.setter
def router(self, value):
self._router = value
if not self._router:
return
# enable_snat by default if it wasn't specified by plugin
self._snat_enabled = self._router.get('enable_snat', True)
def router_name(self):
return N_ROUTER_PREFIX + self.router_id
class CiscoRoutingPluginApi(n_rpc.RpcProxy):
"""RoutingServiceHelper(Agent) side of the routing RPC API."""
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic, host):
super(CiscoRoutingPluginApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.host = host
def get_routers(self, context, router_ids=None, hd_ids=None):
"""Make a remote process call to retrieve the sync data for routers.
:param context: session context
:param router_ids: list of routers to fetch
:param hd_ids : hosting device ids, only routers assigned to these
hosting devices will be returned.
"""
return self.call(context,
self.make_msg('cfg_sync_routers',
host=self.host,
router_ids=router_ids,
hosting_device_ids=hd_ids),
topic=self.topic)
class RoutingServiceHelper():
def __init__(self, host, conf, cfg_agent):
self.conf = conf
self.cfg_agent = cfg_agent
self.context = n_context.get_admin_context_without_session()
self.plugin_rpc = CiscoRoutingPluginApi(topics.L3PLUGIN, host)
self._dev_status = device_status.DeviceStatus()
self._drivermgr = driver_mgr.DeviceDriverManager()
self.router_info = {}
self.updated_routers = set()
self.removed_routers = set()
self.sync_devices = set()
self.fullsync = True
self.topic = '%s.%s' % (c_constants.CFG_AGENT_L3_ROUTING, host)
self._setup_rpc()
def _setup_rpc(self):
self.conn = n_rpc.create_connection(new=True)
self.endpoints = [self]
self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
self.conn.consume_in_threads()
### Notifications from Plugin ###
def router_deleted(self, context, routers):
"""Deal with router deletion RPC message."""
LOG.debug('Got router deleted notification for %s', routers)
self.removed_routers.update(routers)
def routers_updated(self, context, routers):
"""Deal with routers modification and creation RPC message."""
LOG.debug('Got routers updated notification :%s', routers)
if routers:
# This is needed for backward compatibility
if isinstance(routers[0], dict):
routers = [router['id'] for router in routers]
self.updated_routers.update(routers)
def router_removed_from_agent(self, context, payload):
LOG.debug('Got router removed from agent :%r', payload)
self.removed_routers.add(payload['router_id'])
def router_added_to_agent(self, context, payload):
LOG.debug('Got router added to agent :%r', payload)
self.routers_updated(context, payload)
# Routing service helper public methods
def process_service(self, device_ids=None, removed_devices_info=None):
try:
LOG.debug("Routing service processing started")
resources = {}
routers = []
removed_routers = []
all_routers_flag = False
if self.fullsync:
LOG.debug("FullSync flag is on. Starting fullsync")
# Set the all_routers_flag and clear the global fullsync flag
all_routers_flag = True
self.fullsync = False
self.updated_routers.clear()
self.removed_routers.clear()
self.sync_devices.clear()
routers = self._fetch_router_info(all_routers=True)
else:
if self.updated_routers:
router_ids = list(self.updated_routers)
LOG.debug("Updated routers:%s", router_ids)
self.updated_routers.clear()
routers = self._fetch_router_info(router_ids=router_ids)
if device_ids:
LOG.debug("Adding new devices:%s", device_ids)
self.sync_devices = set(device_ids) | self.sync_devices
if self.sync_devices:
sync_devices_list = list(self.sync_devices)
LOG.debug("Fetching routers on:%s", sync_devices_list)
routers.extend(self._fetch_router_info(
device_ids=sync_devices_list))
self.sync_devices.clear()
if removed_devices_info:
if removed_devices_info.get('deconfigure'):
ids = self._get_router_ids_from_removed_devices_info(
removed_devices_info)
self.removed_routers = self.removed_routers | set(ids)
if self.removed_routers:
removed_routers_ids = list(self.removed_routers)
LOG.debug("Removed routers:%s", removed_routers_ids)
for r in removed_routers_ids:
if r in self.router_info:
removed_routers.append(self.router_info[r].router)
# Sort on hosting device
if routers:
resources['routers'] = routers
if removed_routers:
resources['removed_routers'] = removed_routers
hosting_devices = self._sort_resources_per_hosting_device(
resources)
# Dispatch _process_routers() for each hosting device
pool = eventlet.GreenPool()
for device_id, resources in hosting_devices.items():
routers = resources.get('routers')
removed_routers = resources.get('removed_routers')
pool.spawn_n(self._process_routers, routers, removed_routers,
device_id, all_routers=all_routers_flag)
pool.waitall()
if removed_devices_info:
for hd_id in removed_devices_info['hosting_data']:
self._drivermgr.remove_driver_for_hosting_device(hd_id)
LOG.debug("Routing service processing successfully completed")
except Exception:
LOG.exception(_("Failed processing routers"))
self.fullsync = True
def collect_state(self, configurations):
"""Collect state from this helper.
A set of attributes which summarizes the state of the routers and
configurations managed by this config agent.
:param configurations: dict of configuration values
:return dict of updated configuration values
"""
num_ex_gw_ports = 0
num_interfaces = 0
num_floating_ips = 0
router_infos = self.router_info.values()
num_routers = len(router_infos)
num_hd_routers = collections.defaultdict(int)
for ri in router_infos:
ex_gw_port = ri.router.get('gw_port')
if ex_gw_port:
num_ex_gw_ports += 1
num_interfaces += len(ri.router.get(
l3_constants.INTERFACE_KEY, []))
num_floating_ips += len(ri.router.get(
l3_constants.FLOATINGIP_KEY, []))
hd = ri.router['hosting_device']
if hd:
num_hd_routers[hd['id']] += 1
routers_per_hd = dict((hd_id, {'routers': num})
for hd_id, num in num_hd_routers.items())
non_responding = self._dev_status.get_backlogged_hosting_devices()
configurations['total routers'] = num_routers
configurations['total ex_gw_ports'] = num_ex_gw_ports
configurations['total interfaces'] = num_interfaces
configurations['total floating_ips'] = num_floating_ips
configurations['hosting_devices'] = routers_per_hd
configurations['non_responding_hosting_devices'] = non_responding
return configurations
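# Illustrative sketch (hypothetical numbers, not from a real deployment): with
# two routers hosted on a single hosting device 'hd1', one gateway port, two
# internal interfaces and one floating ip in total, the dict gains entries like
#   {'total routers': 2, 'total ex_gw_ports': 1, 'total interfaces': 2,
#    'total floating_ips': 1, 'hosting_devices': {'hd1': {'routers': 2}},
#    'non_responding_hosting_devices': <backlogged devices from device_status>}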
# Routing service helper internal methods
def _fetch_router_info(self, router_ids=None, device_ids=None,
all_routers=False):
"""Fetch router dict from the routing plugin.
:param router_ids: List of router_ids of routers to fetch
:param device_ids: List of device_ids whose routers to fetch
:param all_routers: If True fetch all the routers for this agent.
:return: List of router dicts of format:
[ {router_dict1}, {router_dict2},.....]
"""
try:
if all_routers:
return self.plugin_rpc.get_routers(self.context)
if router_ids:
return self.plugin_rpc.get_routers(self.context,
router_ids=router_ids)
if device_ids:
return self.plugin_rpc.get_routers(self.context,
hd_ids=device_ids)
except n_rpc.RPCException:
LOG.exception(_("RPC Error in fetching routers from plugin"))
self.fullsync = True
@staticmethod
def _get_router_ids_from_removed_devices_info(removed_devices_info):
"""Extract router_ids from the removed devices info dict.
:param removed_devices_info: Dict of removed devices and their
associated resources.
Format:
{
'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]},
'hd_id2': {'routers': [id3, id4, ...]},
...
},
'deconfigure': True/False
}
:return removed_router_ids: List of removed router ids
"""
removed_router_ids = []
for hd_id, resources in removed_devices_info['hosting_data'].items():
removed_router_ids += resources.get('routers', [])
return removed_router_ids
@staticmethod
def _sort_resources_per_hosting_device(resources):
"""This function will sort the resources on hosting device.
The sorting on hosting device is done by looking up the
`hosting_device` attribute of the resource, and its `id`.
:param resources: a dict with key of resource name
:return dict sorted on the hosting device of input resource. Format:
hosting_devices = {
'hd_id1' : {'routers':[routers],
'removed_routers':[routers], .... }
'hd_id2' : {'routers':[routers], .. }
.......
}
"""
hosting_devices = {}
for key in resources.keys():
for r in resources.get(key) or []:
hd_id = r['hosting_device']['id']
hosting_devices.setdefault(hd_id, {})
hosting_devices[hd_id].setdefault(key, []).append(r)
return hosting_devices
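# Illustrative sketch (hypothetical ids): given
#   resources = {'routers': [{'id': 'r1', 'hosting_device': {'id': 'hd1'}}],
#                'removed_routers': [{'id': 'r2', 'hosting_device': {'id': 'hd1'}}]}
# this returns
#   {'hd1': {'routers': [<r1 dict>], 'removed_routers': [<r2 dict>]}}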
def _process_routers(self, routers, removed_routers,
device_id=None, all_routers=False):
"""Process the set of routers.
Iterating on the set of routers received and comparing it with the
set of routers already in the routing service helper, new routers
which are added are identified. Before processing, check the
reachability (via ping) of the hosting device where the router is hosted.
If the device is not reachable, it is backlogged.
For routers which are only updated, call `_process_router()` on them.
When all_routers is set to True (because of a full sync),
this will result in the detection and deletion of routers which
have been removed.
Whether the router can only be assigned to a particular hosting device
is decided and enforced by the plugin. No checks are done here.
:param routers: The set of routers to be processed
:param removed_routers: The set of routers which were removed
:param device_id: Id of the hosting device
:param all_routers: True if routers is the full list of routers (full sync)
:return: None
"""
try:
if all_routers:
prev_router_ids = set(self.router_info)
else:
prev_router_ids = set(self.router_info) & set(
[router['id'] for router in routers])
cur_router_ids = set()
for r in routers:
try:
if not r['admin_state_up']:
continue
cur_router_ids.add(r['id'])
hd = r['hosting_device']
if not self._dev_status.is_hosting_device_reachable(hd):
LOG.info(_("Router: %(id)s is on an unreachable "
"hosting device. "), {'id': r['id']})
continue
if r['id'] not in self.router_info:
self._router_added(r['id'], r)
ri = self.router_info[r['id']]
ri.router = r
self._process_router(ri)
except KeyError as e:
LOG.exception(_("Key Error, missing key: %s"), e)
self.updated_routers.add(r['id'])
continue
except cfg_exceptions.DriverException as e:
LOG.exception(_("Driver Exception on router:%(id)s. "
"Error is %(e)s"), {'id': r['id'], 'e': e})
self.updated_routers.update(r['id'])
continue
# identify and remove routers that no longer exist
for router_id in prev_router_ids - cur_router_ids:
self._router_removed(router_id)
if removed_routers:
for router in removed_routers:
self._router_removed(router['id'])
except Exception:
LOG.exception(_("Exception in processing routers on device:%s"),
device_id)
self.sync_devices.add(device_id)
def _process_router(self, ri):
"""Process a router, apply latest configuration and update router_info.
Get the router dict from RouterInfo and proceed to detect changes
from the last known state. When new ports or deleted ports are
detected, `internal_network_added()` or `internal_networks_removed()`
are called accordingly. Similarly changes in ex_gw_port causes
`external_gateway_added()` or `external_gateway_removed()` calls.
Next, floating_ips and routes are processed. Also, latest state is
stored in ri.internal_ports and ri.ex_gw_port for future comparisons.
:param ri : RouterInfo object of the router being processed.
:return:None
:raises: neutron.plugins.cisco.cfg_agent.cfg_exceptions.DriverException
if the configuration operation fails.
"""
try:
ex_gw_port = ri.router.get('gw_port')
ri.ha_info = ri.router.get('ha_info', None)
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
existing_port_ids = set([p['id'] for p in ri.internal_ports])
current_port_ids = set([p['id'] for p in internal_ports
if p['admin_state_up']])
new_ports = [p for p in internal_ports
if
p['id'] in (current_port_ids - existing_port_ids)]
old_ports = [p for p in ri.internal_ports
if p['id'] not in current_port_ids]
for p in new_ports:
self._set_subnet_info(p)
self._internal_network_added(ri, p, ex_gw_port)
ri.internal_ports.append(p)
for p in old_ports:
self._internal_network_removed(ri, p, ri.ex_gw_port)
ri.internal_ports.remove(p)
if ex_gw_port and not ri.ex_gw_port:
self._set_subnet_info(ex_gw_port)
self._external_gateway_added(ri, ex_gw_port)
elif not ex_gw_port and ri.ex_gw_port:
self._external_gateway_removed(ri, ri.ex_gw_port)
if ex_gw_port:
self._process_router_floating_ips(ri, ex_gw_port)
ri.ex_gw_port = ex_gw_port
self._routes_updated(ri)
except cfg_exceptions.DriverException as e:
with excutils.save_and_reraise_exception():
self.updated_routers.update(ri.router_id)
LOG.error(e)
def _process_router_floating_ips(self, ri, ex_gw_port):
"""Process a router's floating ips.
Compare current floating ips (in ri.floating_ips) with the router's
updated floating ips (in ri.router.floating_ips) and detect
floating ips which were added or removed. Notify driver of
the change via `floating_ip_added()` or `floating_ip_removed()`.
:param ri: RouterInfo object of the router being processed.
:param ex_gw_port: Port dict of the external gateway port.
:return: None
:raises: neutron.plugins.cisco.cfg_agent.cfg_exceptions.DriverException
if the configuration operation fails.
"""
floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, [])
existing_floating_ip_ids = set(
[fip['id'] for fip in ri.floating_ips])
cur_floating_ip_ids = set([fip['id'] for fip in floating_ips])
id_to_fip_map = {}
for fip in floating_ips:
if fip['port_id']:
# store to see if floatingip was remapped
id_to_fip_map[fip['id']] = fip
if fip['id'] not in existing_floating_ip_ids:
ri.floating_ips.append(fip)
self._floating_ip_added(ri, ex_gw_port,
fip['floating_ip_address'],
fip['fixed_ip_address'])
floating_ip_ids_to_remove = (existing_floating_ip_ids -
cur_floating_ip_ids)
for fip in ri.floating_ips:
if fip['id'] in floating_ip_ids_to_remove:
ri.floating_ips.remove(fip)
self._floating_ip_removed(ri, ri.ex_gw_port,
fip['floating_ip_address'],
fip['fixed_ip_address'])
else:
# handle remapping of a floating IP
new_fip = id_to_fip_map[fip['id']]
new_fixed_ip = new_fip['fixed_ip_address']
existing_fixed_ip = fip['fixed_ip_address']
if (new_fixed_ip and existing_fixed_ip and
new_fixed_ip != existing_fixed_ip):
floating_ip = fip['floating_ip_address']
self._floating_ip_removed(ri, ri.ex_gw_port,
floating_ip,
existing_fixed_ip)
self._floating_ip_added(ri, ri.ex_gw_port,
floating_ip, new_fixed_ip)
ri.floating_ips.remove(fip)
ri.floating_ips.append(new_fip)
def _router_added(self, router_id, router):
"""Operations when a router is added.
Create a new RouterInfo object for this router and add it to the
service helpers router_info dictionary. Then `router_added()` is
called on the device driver.
:param router_id: id of the router
:param router: router dict
:return: None
"""
ri = RouterInfo(router_id, router)
driver = self._drivermgr.set_driver(router)
driver.router_added(ri)
self.router_info[router_id] = ri
def _router_removed(self, router_id, deconfigure=True):
"""Operations when a router is removed.
Get the RouterInfo object corresponding to the router in the service
helpers's router_info dict. If deconfigure is set to True,
remove this router's configuration from the hosting device.
:param router_id: id of the router
:param deconfigure: if True, the router's configuration is deleted from
the hosting device.
:return: None
"""
ri = self.router_info.get(router_id)
if ri is None:
LOG.warn(_("Info for router %s was not found. "
"Skipping router removal"), router_id)
return
ri.router['gw_port'] = None
ri.router[l3_constants.INTERFACE_KEY] = []
ri.router[l3_constants.FLOATINGIP_KEY] = []
try:
if deconfigure:
self._process_router(ri)
driver = self._drivermgr.get_driver(router_id)
driver.router_removed(ri, deconfigure)
self._drivermgr.remove_driver(router_id)
del self.router_info[router_id]
self.removed_routers.discard(router_id)
except cfg_exceptions.DriverException:
LOG.warn(_("Router remove for router_id: %s was incomplete. "
"Adding the router to removed_routers list"), router_id)
self.removed_routers.add(router_id)
# remove this router from updated_routers if it is there. It might
# end up there too if exception was thrown earlier inside
# `_process_router()`
self.updated_routers.discard(router_id)
def _internal_network_added(self, ri, port, ex_gw_port):
driver = self._drivermgr.get_driver(ri.id)
driver.internal_network_added(ri, port)
if ri.snat_enabled and ex_gw_port:
driver.enable_internal_network_NAT(ri, port, ex_gw_port)
def _internal_network_removed(self, ri, port, ex_gw_port):
driver = self._drivermgr.get_driver(ri.id)
driver.internal_network_removed(ri, port)
if ri.snat_enabled and ex_gw_port:
driver.disable_internal_network_NAT(ri, port, ex_gw_port)
def _external_gateway_added(self, ri, ex_gw_port):
driver = self._drivermgr.get_driver(ri.id)
driver.external_gateway_added(ri, ex_gw_port)
if ri.snat_enabled and ri.internal_ports:
for port in ri.internal_ports:
driver.enable_internal_network_NAT(ri, port, ex_gw_port)
def _external_gateway_removed(self, ri, ex_gw_port):
driver = self._drivermgr.get_driver(ri.id)
if ri.snat_enabled and ri.internal_ports:
for port in ri.internal_ports:
driver.disable_internal_network_NAT(ri, port, ex_gw_port)
driver.external_gateway_removed(ri, ex_gw_port)
def _floating_ip_added(self, ri, ex_gw_port, floating_ip, fixed_ip):
driver = self._drivermgr.get_driver(ri.id)
driver.floating_ip_added(ri, ex_gw_port, floating_ip, fixed_ip)
def _floating_ip_removed(self, ri, ex_gw_port, floating_ip, fixed_ip):
driver = self._drivermgr.get_driver(ri.id)
driver.floating_ip_removed(ri, ex_gw_port, floating_ip, fixed_ip)
def _routes_updated(self, ri):
"""Update the state of routes in the router.
Compares the current routes with the (configured) existing routes
and detect what was removed or added. Then configure the
logical router in the hosting device accordingly.
:param ri: RouterInfo corresponding to the router.
:return: None
:raises: neutron.plugins.cisco.cfg_agent.cfg_exceptions.DriverException
if the configuration operation fails.
"""
new_routes = ri.router['routes']
old_routes = ri.routes
adds, removes = common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug("Added route entry is '%s'", route)
# remove the replaced route from the list of removed routes
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
driver = self._drivermgr.get_driver(ri.id)
driver.routes_updated(ri, 'replace', route)
for route in removes:
LOG.debug("Removed route entry is '%s'", route)
driver = self._drivermgr.get_driver(ri.id)
driver.routes_updated(ri, 'delete', route)
ri.routes = new_routes
@staticmethod
def _set_subnet_info(port):
ips = port['fixed_ips']
if not ips:
raise Exception(_("Router port %s has no IP address") % port['id'])
if len(ips) > 1:
LOG.error(_("Ignoring multiple IPs on router port %s"), port['id'])
prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)
| 43.724138
| 79
| 0.603456
|
2c5c6e5f9e3235b0137cb34a02a201e3bbb85aab
| 15,566
|
py
|
Python
|
torchaudio/models/wav2vec2/model.py
|
z-a-f/audio
|
d64648b6ea4e5ec4e000d4eb26daa3f2499ef3ca
|
[
"BSD-2-Clause"
] | null | null | null |
torchaudio/models/wav2vec2/model.py
|
z-a-f/audio
|
d64648b6ea4e5ec4e000d4eb26daa3f2499ef3ca
|
[
"BSD-2-Clause"
] | null | null | null |
torchaudio/models/wav2vec2/model.py
|
z-a-f/audio
|
d64648b6ea4e5ec4e000d4eb26daa3f2499ef3ca
|
[
"BSD-2-Clause"
] | null | null | null |
from typing import Optional, Tuple, List
import torch
from torch import Tensor
from torch.nn import Module
from . import components
class Wav2Vec2Model(Module):
"""Encoder model used in *wav2vec 2.0* [:footcite:`baevski2020wav2vec`].
Note:
To build the model, please use one of the factory functions.
Args:
feature_extractor (torch.nn.Module):
Feature extractor that extracts feature vectors from raw audio Tensor.
encoder (torch.nn.Module):
Encoder that converts the audio features into the sequence of probability
distributions (in negative log-likelihood) over labels.
aux (torch.nn.Module or None, optional):
Auxiliary module. If provided, the output from encoder is passed to this module.
"""
def __init__(
self,
feature_extractor: Module,
encoder: Module,
aux: Optional[Module] = None,
):
super().__init__()
self.feature_extractor = feature_extractor
self.encoder = encoder
self.aux = aux
@torch.jit.export
def extract_features(
self,
waveforms: Tensor,
lengths: Optional[Tensor] = None,
num_layers: Optional[int] = None,
) -> Tuple[List[Tensor], Optional[Tensor]]:
"""Extract feature vectors from raw waveforms
This returns the list of outputs from the intermediate layers of
transformer block in encoder.
Args:
waveforms (Tensor): Audio tensor of shape ``(batch, frames)``.
lengths (Tensor or None, optional):
Indicates the valid length of each audio sample in the batch.
Shape: ``(batch, )``.
num_layers (int or None, optional):
If given, limit the number of intermediate layers to go through.
Providing `1` will stop the computation after going through one
intermediate layer. If not given, the outputs from all the
intermediate layers are returned.
Returns:
List of Tensors and an optional Tensor:
List of Tensors
Features from requested layers.
Each Tensor is of shape: ``(batch, frames, feature dimension)``
Tensor or None
If ``lengths`` argument was provided, a Tensor of shape ``(batch, )``
is returned. It indicates the valid length of each feature in the batch.
"""
x, lengths = self.feature_extractor(waveforms, lengths)
x = self.encoder.extract_features(x, lengths, num_layers)
return x, lengths
def forward(
self,
waveforms: Tensor,
lengths: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Compute the sequence of probability distribution over labels.
Args:
waveforms (Tensor): Audio tensor of shape ``(batch, frames)``.
lengths (Tensor or None, optional):
Indicates the valid length of each audio sample in the batch.
Shape: ``(batch, )``.
Returns:
Tensor and an optional Tensor:
Tensor
The sequences of probability distributions (in logit) over labels.
Shape: ``(batch, frames, num labels)``.
Tensor or None
If ``lengths`` argument was provided, a Tensor of shape ``(batch, )``
is returned. It indicates the valid length of each feature in the batch.
"""
x, lengths = self.feature_extractor(waveforms, lengths)
x = self.encoder(x, lengths)
if self.aux is not None:
x = self.aux(x)
return x, lengths
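# Usage sketch (illustrative only; the random waveform and the label count of
# 32 are assumptions, not part of this module):
#
#   model = wav2vec2_ft_base(num_out=32)
#   waveforms = torch.randn(1, 16000)           # (batch, frames), 1 s at 16 kHz
#   logits, _ = model(waveforms)                 # (batch, frames', 32)
#   features, _ = model.extract_features(waveforms, num_layers=4)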
def _get_model(
extractor_mode: str,
extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]],
extractor_conv_bias: bool,
encoder_embed_dim: int,
encoder_projection_dropout: float,
encoder_pos_conv_kernel: int,
encoder_pos_conv_groups: int,
encoder_num_layers: int,
encoder_num_heads: int,
encoder_attention_dropout: float,
encoder_ff_interm_features: int,
encoder_ff_interm_dropout: float,
encoder_dropout: float,
encoder_layer_norm_first: bool,
encoder_layer_drop: float,
aux_num_out: Optional[int],
) -> Wav2Vec2Model:
if extractor_conv_layer_config is None:
extractor_conv_layer_config = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2
feature_extractor = components._get_feature_extractor(
extractor_mode, extractor_conv_layer_config, extractor_conv_bias)
encoder = components._get_encoder(
in_features=extractor_conv_layer_config[-1][0],
embed_dim=encoder_embed_dim,
dropout_input=encoder_projection_dropout,
pos_conv_kernel=encoder_pos_conv_kernel,
pos_conv_groups=encoder_pos_conv_groups,
num_layers=encoder_num_layers,
num_heads=encoder_num_heads,
attention_dropout=encoder_attention_dropout,
ff_interm_features=encoder_ff_interm_features,
ff_interm_dropout=encoder_ff_interm_dropout,
dropout=encoder_dropout,
layer_norm_first=encoder_layer_norm_first,
layer_drop=encoder_layer_drop,
)
aux = None
if aux_num_out is not None:
aux = torch.nn.Linear(in_features=encoder_embed_dim, out_features=aux_num_out)
return Wav2Vec2Model(feature_extractor, encoder, aux)
def wav2vec2_base() -> Wav2Vec2Model:
"""Build wav2vec2 model with "base" configuration
This is one of the model architectures used in *wav2vec 2.0*
[:footcite:`baevski2020wav2vec`] for pretraining.
Returns:
Wav2Vec2Model:
"""
return _get_model(
extractor_mode="group_norm",
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=768,
encoder_projection_dropout=0.1,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=12,
encoder_num_heads=12,
encoder_attention_dropout=0.1,
encoder_ff_interm_features=3072,
encoder_ff_interm_dropout=0.1,
encoder_dropout=0.1,
encoder_layer_norm_first=False,
encoder_layer_drop=0.1,
aux_num_out=None,
)
def wav2vec2_ft_base(num_out: int) -> Wav2Vec2Model:
"""Build "base" wav2vec2 with an extra linear module
This is one of the model architectures used in *wav2vec 2.0*
[:footcite:`baevski2020wav2vec`] for fine-tuning for ASR task.
Args:
num_out: int
The number of output labels.
Returns:
Wav2Vec2Model:
"""
return _get_model(
extractor_mode="group_norm",
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=768,
encoder_projection_dropout=0.1,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=12,
encoder_num_heads=12,
encoder_attention_dropout=0.1,
encoder_ff_interm_features=3072,
encoder_ff_interm_dropout=0.1,
encoder_dropout=0.1,
encoder_layer_norm_first=False,
encoder_layer_drop=0.1,
aux_num_out=num_out,
)
def wav2vec2_large() -> Wav2Vec2Model:
"""Build wav2vec2 model with "large" configuration
This is one of the model architectures used in *wav2vec 2.0*
[:footcite:`baevski2020wav2vec`] for pretraining.
Returns:
Wav2Vec2Model:
"""
return _get_model(
extractor_mode="group_norm",
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=1024,
encoder_projection_dropout=0.1,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=24,
encoder_num_heads=16,
encoder_attention_dropout=0.1,
encoder_ff_interm_features=4096,
encoder_ff_interm_dropout=0.1,
encoder_dropout=0.1,
encoder_layer_norm_first=False,
encoder_layer_drop=0.1,
aux_num_out=None,
)
def wav2vec2_ft_large(num_out: int) -> Wav2Vec2Model:
"""Build "large" wav2vec2.0 model with an extra linear module
This is one of the model architectures used in *wav2vec 2.0*
[:footcite:`baevski2020wav2vec`] for fine-tuning for ASR task.
Args:
num_out: int
The number of output labels.
Returns:
Wav2Vec2Model:
"""
return _get_model(
extractor_mode="group_norm",
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=1024,
encoder_projection_dropout=0.1,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=24,
encoder_num_heads=16,
encoder_attention_dropout=0.1,
encoder_ff_interm_features=4096,
encoder_ff_interm_dropout=0.1,
encoder_dropout=0.1,
encoder_layer_norm_first=False,
encoder_layer_drop=0.1,
aux_num_out=num_out,
)
def wav2vec2_large_lv60k() -> Wav2Vec2Model:
"""Build wav2vec2.0 model with "Large LV-60k" configuration
This is one of the model architectures used in *wav2vec 2.0*
[:footcite:`baevski2020wav2vec`] for pretraining.
Returns:
Wav2Vec2Model: The resulting model.
"""
return _get_model(
extractor_mode="layer_norm",
extractor_conv_layer_config=None,
extractor_conv_bias=True,
encoder_embed_dim=1024,
encoder_projection_dropout=0.1,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=24,
encoder_num_heads=16,
encoder_attention_dropout=0.0,
encoder_ff_interm_features=4096,
encoder_ff_interm_dropout=0.1,
encoder_dropout=0.0,
encoder_layer_norm_first=True,
encoder_layer_drop=0.1,
aux_num_out=None,
)
def wav2vec2_ft_large_lv60k(num_out: int) -> Wav2Vec2Model:
"""Build "Large LV-60k" wav2vec2.0 with an extra linear module
This is one of the model architectures used in *wav2vec 2.0*
[:footcite:`baevski2020wav2vec`] for fine-tuning for ASR task.
Args:
num_out: int
The number of output labels.
Returns:
Wav2Vec2Model: The resulting model.
"""
return _get_model(
extractor_mode="layer_norm",
extractor_conv_layer_config=None,
extractor_conv_bias=True,
encoder_embed_dim=1024,
encoder_projection_dropout=0.1,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=24,
encoder_num_heads=16,
encoder_attention_dropout=0.0,
encoder_ff_interm_features=4096,
encoder_ff_interm_dropout=0.1,
encoder_dropout=0.0,
encoder_layer_norm_first=True,
encoder_layer_drop=0.1,
aux_num_out=num_out,
)
def hubert_base() -> Wav2Vec2Model:
"""Build HuBERT model with "Base" configuration
This is one of the model architectures used in *HuBERT*
[:footcite:`hsu2021hubert`] for pretraining.
Returns:
Wav2Vec2Model: The resulting model.
"""
return _get_model(
extractor_mode='group_norm',
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=768,
encoder_projection_dropout=0.1,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=12,
encoder_num_heads=12,
encoder_attention_dropout=0.1,
encoder_ff_interm_features=3072,
encoder_ff_interm_dropout=0.0,
encoder_dropout=0.1,
encoder_layer_norm_first=False,
encoder_layer_drop=0.05,
aux_num_out=None,
)
def hubert_large() -> Wav2Vec2Model:
"""Build HuBERT model with "Large" configuration
This is one of the model architectures used in *HuBERT*
[:footcite:`hsu2021hubert`] for pretraining.
Returns:
Wav2Vec2Model: The resulting model.
"""
return _get_model(
extractor_mode='layer_norm',
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=1024,
encoder_projection_dropout=0.0,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=24,
encoder_num_heads=16,
encoder_attention_dropout=0.0,
encoder_ff_interm_features=4096,
encoder_ff_interm_dropout=0.0,
encoder_dropout=0.0,
encoder_layer_norm_first=True,
encoder_layer_drop=0.0,
aux_num_out=None,
)
def hubert_ft_large(num_out) -> Wav2Vec2Model:
"""Build "Large" HuBERT model with an extra linear module
This is one of the model architectures used in *HuBERT*
[:footcite:`hsu2021hubert`] for fine-tuning for ASR task.
Args:
num_out: int
The number of output labels.
Returns:
Wav2Vec2Model:
"""
return _get_model(
extractor_mode='layer_norm',
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=1024,
encoder_projection_dropout=0.0,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=24,
encoder_num_heads=16,
encoder_attention_dropout=0.0,
encoder_ff_interm_features=4096,
encoder_ff_interm_dropout=0.1,
encoder_dropout=0.0,
encoder_layer_norm_first=True,
encoder_layer_drop=0.1,
aux_num_out=num_out,
)
def hubert_xlarge() -> Wav2Vec2Model:
"""Build HuBERT model with "extra large" configuration
This is one of the model architectures used in *HuBERT*
[:footcite:`hsu2021hubert`] for pretraining.
Returns:
Wav2Vec2Model: The resulting model.
"""
return _get_model(
extractor_mode='layer_norm',
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=1280,
encoder_projection_dropout=0.0,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=48,
encoder_num_heads=16,
encoder_attention_dropout=0.0,
encoder_ff_interm_features=5120,
encoder_ff_interm_dropout=0.0,
encoder_dropout=0.0,
encoder_layer_norm_first=True,
encoder_layer_drop=0.0,
aux_num_out=None,
)
def hubert_ft_xlarge(num_out) -> Wav2Vec2Model:
"""Build "extra large" HuBERT model with an extra linear module
This is one of the model architectures used in *HuBERT*
[:footcite:`hsu2021hubert`] for fine-tuning for ASR task.
Args:
num_out: int
The number of output labels.
Returns:
Wav2Vec2Model: The resulting model.
"""
return _get_model(
extractor_mode='layer_norm',
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=1280,
encoder_projection_dropout=0.0,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=48,
encoder_num_heads=16,
encoder_attention_dropout=0.0,
encoder_ff_interm_features=5120,
encoder_ff_interm_dropout=0.1,
encoder_dropout=0.0,
encoder_layer_norm_first=True,
encoder_layer_drop=0.1,
aux_num_out=num_out,
)
| 32.094845
| 92
| 0.655467
|
52b307a48adcd0ac5c618de57fd942d21049948c
| 1,100
|
py
|
Python
|
flight_plans/migrations/0008_checklistgroup_checklistitem.py
|
geoffreynyaga/ANGA-UTM
|
8371a51ad27c85d2479bb34d8c4e02ea28465941
|
[
"Apache-2.0"
] | 7
|
2020-01-18T16:53:41.000Z
|
2021-12-21T07:02:43.000Z
|
flight_plans/migrations/0008_checklistgroup_checklistitem.py
|
geoffreynyaga/ANGA-UTM
|
8371a51ad27c85d2479bb34d8c4e02ea28465941
|
[
"Apache-2.0"
] | 28
|
2020-01-06T18:36:54.000Z
|
2022-02-10T10:03:55.000Z
|
flight_plans/migrations/0008_checklistgroup_checklistitem.py
|
geoffreynyaga/ANGA-UTM
|
8371a51ad27c85d2479bb34d8c4e02ea28465941
|
[
"Apache-2.0"
] | 3
|
2020-01-18T16:53:54.000Z
|
2020-10-26T11:21:41.000Z
|
# Generated by Django 2.2.16 on 2021-02-13 11:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('flight_plans', '0007_remove_preflight_area_size'),
]
operations = [
migrations.CreateModel(
name='ChecklistItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item_title', models.CharField(max_length=40)),
('description', models.CharField(max_length=140)),
('picture', models.ImageField(blank=True, null=True, upload_to='images/checklists/profile_pic')),
],
),
migrations.CreateModel(
name='ChecklistGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('checklists', models.ManyToManyField(to='flight_plans.ChecklistItem')),
],
),
]
| 35.483871
| 114
| 0.591818
|
133ca0b4aeecab5464c0c98e9162d3cff51c0fe5
| 3,248
|
py
|
Python
|
08/vmFunctionCall.py
|
Bozar/Nand2Tetris
|
5f91805823b7572263bc31b0b4537aed14d6b4e7
|
[
"MIT"
] | 3
|
2019-12-04T12:17:32.000Z
|
2021-08-04T07:08:24.000Z
|
08/vmFunctionCall.py
|
Bozar/Nand2Tetris
|
5f91805823b7572263bc31b0b4537aed14d6b4e7
|
[
"MIT"
] | null | null | null |
08/vmFunctionCall.py
|
Bozar/Nand2Tetris
|
5f91805823b7572263bc31b0b4537aed14d6b4e7
|
[
"MIT"
] | 1
|
2021-07-31T16:01:22.000Z
|
2021-07-31T16:01:22.000Z
|
import asmPushPop
import vmPushPop
# Use the following trick to set breakpoints for debugging.
# http://nand2tetris-questions-and-answers-forum.32033.n3.nabble.com/Tip-debugging-trick-that-helped-me-td3662340.html
def writeCall(functionName, numArgs, index):
returnAddress = 'RETURN_ADDRESS$' + functionName + '$' + str(index)
pushRegD = asmPushPop.pushDtoStack()
# push return-address
part1 = [
'@' + returnAddress,
'D=A',
# pushRegD,
]
# push LCL
part2 = [
'@LCL',
'D=M',
# pushRegD,
]
# push ARG
part3 = [
'@ARG',
'D=M',
# pushRegD,
]
# push THIS
part4 = [
'@THIS',
'D=M',
# pushRegD,
]
# push THAT
part5 = [
'@THAT',
'D=M',
# pushRegD,
]
part6 = [
# ARG = SP-n-5
'@SP',
'D=M',
'@' + numArgs,
'D=D-A',
'@5',
'D=D-A',
'@ARG',
'M=D',
# LCL = SP
'@SP',
'D=M',
'@LCL',
'M=D',
# Set a breakpoint for debugging.
# '@1000',
# 'M=1',
# 'M=0',
# goto f
'@' + functionName,
'0;JMP',
# (return-address)
'(' + returnAddress + ')',
]
return\
part1 + pushRegD +\
part2 + pushRegD +\
part3 + pushRegD +\
part4 + pushRegD +\
part5 + pushRegD +\
part6
# Avoid using public registers: R13, R14 and R15. Other scripts might use them
# as well so that their data could be silently changed, which results in
# hard-to-track bugs.
def writeReturn():
part1 = [
# FRAME = LCL
'@LCL',
'D=M',
'@TEMP_SAVE_FRAME',
'M=D',
# RET = *(FRAME-5)
'@5',
'D=A',
'@TEMP_SAVE_FRAME',
'D=M-D',
'A=D',
'D=M',
'@TEMP_SAVE_RET_ADDRESS',
'M=D',
]
# *ARG = pop()
part2 = vmPushPop.pop('argument', '0', '')
part3 = [
# SP = ARG+1
'@ARG',
'D=M+1',
'@SP',
'M=D',
# THAT = *(FRAME-1)
'@TEMP_SAVE_FRAME',
'A=M-1',
'D=M',
'@THAT',
'M=D',
# THIS = *(FRAME-2)
'@2',
'D=A',
'@TEMP_SAVE_FRAME',
'A=M-D',
'D=M',
'@THIS',
'M=D',
# ARG = *(FRAME-3)
'@3',
'D=A',
'@TEMP_SAVE_FRAME',
'A=M-D',
'D=M',
'@ARG',
'M=D',
# LCL = *(FRAME-4)
'@4',
'D=A',
'@TEMP_SAVE_FRAME',
'A=M-D',
'D=M',
'@LCL',
'M=D',
# Set a breakpoint for debugging.
# '@1001',
# 'M=1',
# 'M=0',
# goto RET
'@TEMP_SAVE_RET_ADDRESS',
'A=M',
'0;JMP',
]
return part1 + part2 + part3
# http://nand2tetris-questions-and-answers-forum.32033.n3.nabble.com/Function-command-implementation-td4031726.html
def writeFunction(functionName, numLocals):
label = ['(' + functionName + ')']
repeat = []
for i in range(int(numLocals)):
repeat += vmPushPop.push('constant', '0', '')
return label + repeat
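# Hedged usage sketch (editor's addition): assuming the asmPushPop and
# vmPushPop helper modules imported above are available, the three writers can
# be combined to emit the assembly for a call site, a function definition and
# the shared return sequence. "Math.add" and the argument counts are
# illustrative values only.
if __name__ == "__main__":
    asm_lines = (
        writeCall("Math.add", "2", 0)     # caller side: save frame, jump, place return label
        + writeFunction("Math.add", "0")  # callee side: (Math.add) label, zero locals
        + writeReturn()                   # restore the caller's frame and jump back
    )
    print("\n".join(asm_lines))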
| 20.687898
| 118
| 0.430727
|
a62c23df3887a9b01f608e2ddcbd8dbfc8b9d5d5
| 1,012
|
py
|
Python
|
src/data/make_raw_price_paid_data.py
|
iaindillingham/property-prices
|
0109c3b130afc7da367306c5430dcfff3e4e0c8e
|
[
"MIT"
] | null | null | null |
src/data/make_raw_price_paid_data.py
|
iaindillingham/property-prices
|
0109c3b130afc7da367306c5430dcfff3e4e0c8e
|
[
"MIT"
] | null | null | null |
src/data/make_raw_price_paid_data.py
|
iaindillingham/property-prices
|
0109c3b130afc7da367306c5430dcfff3e4e0c8e
|
[
"MIT"
] | null | null | null |
"""Reads external and writes raw Land Registry Price Paid Data CSVs.
The external CSV does not have a header row.
The raw CSV has a header row.
"""
import csv
from src import EXTERNAL_DIR, RAW_DIR
EXTERNAL_PP_DIR = EXTERNAL_DIR / "price_paid_data"
RAW_PP_DIR = RAW_DIR / "price_paid_data"
headers = [
"id",
"price",
"date_of_transfer",
"postcode",
"property_type",
"age",
"duration",
"paon",
"saon",
"street",
"locality",
"town_city",
"district",
"county",
"ppd_category_type",
"record_status",
]
if __name__ == "__main__":
RAW_PP_DIR.mkdir()
with open(
EXTERNAL_PP_DIR / "pp-2019.csv",
"r",
encoding="iso-8859-1",
) as file_in, open(
RAW_PP_DIR / "pp-2019.csv",
"w",
encoding="utf-8",
newline="",
) as file_out:
reader = csv.reader(file_in)
writer = csv.writer(file_out, dialect="unix")
writer.writerow(headers)
writer.writerows(reader)
| 20.653061
| 68
| 0.597826
|
e8dafb6fc27d01500f58b77db93c02128d3eeae6
| 355
|
py
|
Python
|
routes/web.py
|
girardinsamuel/masonite-inertia
|
78d98e6e022296fdc78fb8f7267cc67d6d40462f
|
[
"MIT"
] | 15
|
2020-09-27T18:51:35.000Z
|
2022-02-13T11:12:42.000Z
|
routes/web.py
|
girardinsamuel/masonite-inertia
|
78d98e6e022296fdc78fb8f7267cc67d6d40462f
|
[
"MIT"
] | 90
|
2020-09-27T18:28:27.000Z
|
2022-03-21T12:25:26.000Z
|
routes/web.py
|
girardinsamuel/masonite-inertia
|
78d98e6e022296fdc78fb8f7267cc67d6d40462f
|
[
"MIT"
] | 2
|
2020-12-31T02:31:29.000Z
|
2021-04-22T17:13:20.000Z
|
""" Web Routes """
from masonite.routes import Get, Post
ROUTES = [
Get("/", "InertiaController@inertia").name("home"),
Post("/error", "InertiaController@inertia_with_error").name("home.error"),
Get("/external", "InertiaController@external").name("home.external"),
Get("/helloworld", "InertiaController@helloworld").name("helloworld"),
]
| 35.5
| 78
| 0.687324
|
17108d134c39d3bd09fa7cd6712f971df3934e8a
| 383
|
py
|
Python
|
iati/home/views.py
|
allthatilk/IATI-Standard-Website
|
21c726b11290575b1f353105f3632a98cc2fea74
|
[
"MIT"
] | null | null | null |
iati/home/views.py
|
allthatilk/IATI-Standard-Website
|
21c726b11290575b1f353105f3632a98cc2fea74
|
[
"MIT"
] | null | null | null |
iati/home/views.py
|
allthatilk/IATI-Standard-Website
|
21c726b11290575b1f353105f3632a98cc2fea74
|
[
"MIT"
] | null | null | null |
"""View definitions for the home app."""
from django.shortcuts import redirect
def reference_redirect(request):
"""Functional view that accepts any request starting with a reference namespace."""
base_url = "http://reference.iatistandard.org"
slug = request.get_full_path()
redirection_url = base_url + slug
return redirect(to=redirection_url, permanent=True)
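# Hedged usage sketch (editor's addition): one way a catch-all view like this
# is typically wired up in the project urls.py. The "guidance/" prefix and the
# import path are illustrative assumptions, not taken from this file.
#
# from django.urls import re_path
# from home.views import reference_redirect
#
# urlpatterns = [
#     re_path(r"^guidance/", reference_redirect),
# ]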
| 31.916667
| 87
| 0.746736
|
8cfc36c57398c3da149f5e5dc6ee3ed0d8a7cf7b
| 2,394
|
py
|
Python
|
csmpe/core_plugins/csm_check_config_filesystem/plugin.py
|
kstaniek/cmspe
|
16d9c1510a17b31c8de37ba05b3c689e4952b155
|
[
"BSD-2-Clause"
] | null | null | null |
csmpe/core_plugins/csm_check_config_filesystem/plugin.py
|
kstaniek/cmspe
|
16d9c1510a17b31c8de37ba05b3c689e4952b155
|
[
"BSD-2-Clause"
] | null | null | null |
csmpe/core_plugins/csm_check_config_filesystem/plugin.py
|
kstaniek/cmspe
|
16d9c1510a17b31c8de37ba05b3c689e4952b155
|
[
"BSD-2-Clause"
] | null | null | null |
# =============================================================================
# plugin
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# # Author: Klaudiusz Staniek
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from csmpe.plugins import CSMPlugin
class Plugin(CSMPlugin):
"""This plugin checks the configuration filesystem"""
name = "Config Filesystem Check Plugin"
platforms = {'ASR9K'}
phases = {'Pre-Upgrade', "Pre-Activate", "Pre-Deactivate"}
def run(self):
ok = 0
message = []
output = self.ctx.send("cfs check")
lines = output.split("\n", 50)
for line in lines:
if line != "":
message.append(line)
if 'OK' in line:
ok += 1
for line in message:
if ok < 3:
self.ctx.warning(line)
else:
self.ctx.info(line)
if ok < 3:
self.ctx.error("The configuration filesystem has inconsistencies")
else:
self.ctx.info("Configuration filesystem is consistent")
| 40.576271
| 79
| 0.64411
|
2e619e2727f5f51390a3edd441e5039f850dfc4e
| 1,643
|
py
|
Python
|
mysite/urls.py
|
WildfootTW/Django_Demo
|
e696d80b711e636a9dacbb0c1e04fb46dba39e65
|
[
"MIT"
] | null | null | null |
mysite/urls.py
|
WildfootTW/Django_Demo
|
e696d80b711e636a9dacbb0c1e04fb46dba39e65
|
[
"MIT"
] | null | null | null |
mysite/urls.py
|
WildfootTW/Django_Demo
|
e696d80b711e636a9dacbb0c1e04fb46dba39e65
|
[
"MIT"
] | null | null | null |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from mysite.views import here, math, meta, welcome, get_c, set_c, session_S, login, index, logout, register
#from views import here
# mysite here refers to the inner mysite package
from restaurants.views import menu, list_restaurants, foods, comment
#import restaurants.views
#restaurants.views.menu
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^here/$', here),
    # {1,2} in \d{1,2} means match 1 or 2 digits
    # Text captured in parentheses is passed to the view as positional arguments
    # (arguments 2 to n+1); the 1st argument is always the HttpRequest
url(r'^(\d{1,2})/plus/(\d{1,2})/$', math),
url(r'^menu/$', menu),
url(r'^meta/$', meta),
url(r'^welcome/$', welcome),
url(r'^restaurants_list/$', list_restaurants),
url(r'^foods/$', foods),
url(r'^comment/(\d{1,5})/$', comment),
url(r'^get_cookies/$', get_c),
url(r'^set_cookies/$', set_c),
url(r'^session/$', session_S),
url(r'^accounts/login/$', login),
url(r'^accounts/logout/$', logout),
url(r'^accounts/register/$', register),
url(r'^index/$', index),
]
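# Hedged illustration (editor's addition): Django passes each group captured by
# the patterns above to the view as a positional string argument. For example,
# a request to /3/plus/24/ is dispatched roughly as:
#
#     math(request, '3', '24')   # the view converts the strings itself
#
# The math view lives in mysite.views and is not shown in this file.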
| 36.511111
| 107
| 0.662203
|
73d5ce57cf9a876b7fc73886571c50ea03c18061
| 4,946
|
py
|
Python
|
sdk/python/pulumi_azure_native/logic/latest/__init__.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/logic/latest/__init__.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/logic/latest/__init__.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .get_integration_account import *
from .get_integration_account_agreement import *
from .get_integration_account_assembly import *
from .get_integration_account_batch_configuration import *
from .get_integration_account_certificate import *
from .get_integration_account_map import *
from .get_integration_account_partner import *
from .get_integration_account_schema import *
from .get_integration_account_session import *
from .get_integration_service_environment import *
from .get_integration_service_environment_managed_api import *
from .get_rosetta_net_process_configuration import *
from .get_workflow import *
from .integration_account import *
from .integration_account_agreement import *
from .integration_account_assembly import *
from .integration_account_batch_configuration import *
from .integration_account_certificate import *
from .integration_account_map import *
from .integration_account_partner import *
from .integration_account_schema import *
from .integration_account_session import *
from .integration_service_environment import *
from .integration_service_environment_managed_api import *
from .list_integration_account_agreement_content_callback_url import *
from .list_integration_account_assembly_content_callback_url import *
from .list_integration_account_callback_url import *
from .list_integration_account_key_vault_keys import *
from .list_integration_account_map_content_callback_url import *
from .list_integration_account_partner_content_callback_url import *
from .list_integration_account_schema_content_callback_url import *
from .list_workflow_callback_url import *
from .list_workflow_run_action_expression_traces import *
from .list_workflow_run_action_repetition_expression_traces import *
from .list_workflow_trigger_callback_url import *
from .list_workflow_version_trigger_callback_url import *
from .rosetta_net_process_configuration import *
from .workflow import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:logic/latest:IntegrationAccount":
return IntegrationAccount(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:logic/latest:IntegrationAccountAgreement":
return IntegrationAccountAgreement(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:logic/latest:IntegrationAccountAssembly":
return IntegrationAccountAssembly(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:logic/latest:IntegrationAccountBatchConfiguration":
return IntegrationAccountBatchConfiguration(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:logic/latest:IntegrationAccountCertificate":
return IntegrationAccountCertificate(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:logic/latest:IntegrationAccountMap":
return IntegrationAccountMap(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:logic/latest:IntegrationAccountPartner":
return IntegrationAccountPartner(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:logic/latest:IntegrationAccountSchema":
return IntegrationAccountSchema(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:logic/latest:IntegrationAccountSession":
return IntegrationAccountSession(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:logic/latest:IntegrationServiceEnvironment":
return IntegrationServiceEnvironment(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:logic/latest:IntegrationServiceEnvironmentManagedApi":
return IntegrationServiceEnvironmentManagedApi(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:logic/latest:RosettaNetProcessConfiguration":
return RosettaNetProcessConfiguration(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:logic/latest:Workflow":
return Workflow(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "logic/latest", _module_instance)
_register_module()
| 52.617021
| 101
| 0.765669
|
3149b20ed037d99bd0202ee62fcbf6153dbcc859
| 16,889
|
py
|
Python
|
tableau_rest_api/methods/schedule.py
|
Kamran-ov/tableau_tools
|
a373943fc8ea233ce00a2c2deaaf3f8d05716326
|
[
"MIT"
] | null | null | null |
tableau_rest_api/methods/schedule.py
|
Kamran-ov/tableau_tools
|
a373943fc8ea233ce00a2c2deaaf3f8d05716326
|
[
"MIT"
] | null | null | null |
tableau_rest_api/methods/schedule.py
|
Kamran-ov/tableau_tools
|
a373943fc8ea233ce00a2c2deaaf3f8d05716326
|
[
"MIT"
] | null | null | null |
from .rest_api_base import *
class ScheduleMethods():
def __init__(self, rest_api_base: TableauRestApiBase):
self.rest_api_base = rest_api_base
def __getattr__(self, attr):
return getattr(self.rest_api_base, attr)
def query_schedules(self) -> ET.Element:
self.start_log_block()
schedules = self.query_resource("schedules", server_level=True)
self.end_log_block()
return schedules
def query_schedules_json(self, page_number: Optional[int] = None)-> Dict:
self.start_log_block()
schedules = self.query_resource_json("schedules", server_level=True, page_number=page_number)
self.end_log_block()
return schedules
def query_extract_schedules(self) -> ET.Element:
self.start_log_block()
schedules = self.query_schedules()
extract_schedules = schedules.findall('.//t:schedule[@type="Extract"]', self.ns_map)
self.end_log_block()
return extract_schedules
def query_subscription_schedules(self) -> ET.Element:
self.start_log_block()
schedules = self.query_schedules()
subscription_schedules = schedules.findall('.//t:schedule[@type="Subscription"]', self.ns_map)
self.end_log_block()
return subscription_schedules
def query_schedule(self, schedule_name_or_luid: str) -> ET.Element:
self.start_log_block()
schedule = self.query_single_element_from_endpoint('schedule', schedule_name_or_luid, server_level=True)
self.end_log_block()
return schedule
def create_schedule(self, name: Optional[str] = None, extract_or_subscription: Optional[str] = None,
frequency: Optional[str] = None, parallel_or_serial: Optional[str] = None,
priority: Optional[int] = None, start_time: Optional[str] = None,
end_time: Optional[str] = None, interval_value_s: Optional[Union[List[str], str]] = None,
interval_hours_minutes: Optional[str] = None,
direct_xml_request: Optional[ET.Element] = None) -> str:
self.start_log_block()
if direct_xml_request is not None:
tsr = direct_xml_request
else:
if extract_or_subscription not in ['Extract', 'Subscription']:
raise InvalidOptionException("extract_or_subscription can only be 'Extract' or 'Subscription'")
if priority < 1 or priority > 100:
raise InvalidOptionException("priority must be an integer between 1 and 100")
if parallel_or_serial not in ['Parallel', 'Serial']:
raise InvalidOptionException("parallel_or_serial must be 'Parallel' or 'Serial'")
if frequency not in ['Hourly', 'Daily', 'Weekly', 'Monthly']:
raise InvalidOptionException("frequency must be 'Hourly', 'Daily', 'Weekly' or 'Monthly'")
tsr = ET.Element('tsRequest')
s = ET.Element('schedule')
s.set('name', name)
s.set('priority', str(priority))
s.set('type', extract_or_subscription)
s.set('frequency', frequency)
s.set('executionOrder', parallel_or_serial)
fd = ET.Element('frequencyDetails')
fd.set('start', start_time)
if end_time is not None:
fd.set('end', end_time)
intervals = ET.Element('intervals')
# Daily does not need an interval value
if interval_value_s is not None:
ivs = self.to_list(interval_value_s)
for i in ivs:
interval = ET.Element('interval')
if frequency == 'Hourly':
if interval_hours_minutes is None:
raise InvalidOptionException(
'Hourly must set interval_hours_minutes to "hours" or "minutes"')
interval.set(interval_hours_minutes, i)
if frequency == 'Weekly':
interval.set('weekDay', i)
if frequency == 'Monthly':
interval.set('monthDay', i)
intervals.append(interval)
fd.append(intervals)
s.append(fd)
tsr.append(s)
# Schedule requests happen at the server rather than site level, like a login
url = self.build_api_url("schedules", server_level=True)
try:
new_schedule = self.send_add_request(url, tsr)
new_schedule_luid = new_schedule.findall('.//t:schedule', self.ns_map)[0].get("id")
self.end_log_block()
return new_schedule_luid
except RecoverableHTTPException as e:
self.end_log_block()
if e.tableau_error_code == '409021':
raise AlreadyExistsException('Schedule With this Name Already exists on the server', None)
else:
raise e
def update_schedule(self, schedule_name_or_luid: str, new_name: Optional[str] = None,
frequency: Optional[str] = None, parallel_or_serial: Optional[str] = None,
priority: Optional[int] = None, start_time: Optional[str] = None,
end_time: Optional[str] = None, interval_value_s: Optional[Union[List[str], str]] = None,
interval_hours_minutes: Optional[str] = None,
direct_xml_request: Optional[ET.Element] = None) -> ET.Element:
self.start_log_block()
if self.is_luid(schedule_name_or_luid):
luid = schedule_name_or_luid
else:
luid = self.query_schedule_luid(schedule_name_or_luid)
if direct_xml_request is not None:
tsr = direct_xml_request
else:
tsr = ET.Element('tsRequest')
s = ET.Element('schedule')
if new_name is not None:
s.set('name', new_name)
if priority is not None:
if priority < 1 or priority > 100:
raise InvalidOptionException("priority must be an integer between 1 and 100")
s.set('priority', str(priority))
if frequency is not None:
s.set('frequency', frequency)
if parallel_or_serial is not None:
if parallel_or_serial not in ['Parallel', 'Serial']:
raise InvalidOptionException("parallel_or_serial must be 'Parallel' or 'Serial'")
s.set('executionOrder', parallel_or_serial)
if frequency is not None:
if frequency not in ['Hourly', 'Daily', 'Weekly', 'Monthly']:
raise InvalidOptionException("frequency must be 'Hourly', 'Daily', 'Weekly' or 'Monthly'")
fd = ET.Element('frequencyDetails')
fd.set('start', start_time)
if end_time is not None:
fd.set('end', end_time)
intervals = ET.Element('intervals')
# Daily does not need an interval value
if interval_value_s is not None:
ivs = self.to_list(interval_value_s)
for i in ivs:
interval = ET.Element('interval')
if frequency == 'Hourly':
if interval_hours_minutes is None:
raise InvalidOptionException(
'Hourly must set interval_hours_minutes to "hours" or "minutes"')
interval.set(interval_hours_minutes, i)
if frequency == 'Weekly':
interval.set('weekDay', i)
if frequency == 'Monthly':
interval.set('monthDay', i)
intervals.append(interval)
fd.append(intervals)
s.append(fd)
tsr.append(s)
# Schedule requests happen at the server rather than site level, like a login
url = self.build_api_url("schedules/{}".format(luid), server_level=True)
try:
response = self.send_update_request(url, tsr)
self.end_log_block()
return response
except RecoverableHTTPException as e:
self.end_log_block()
if e.tableau_error_code == '409021':
raise AlreadyExistsException('Schedule With this Name Already exists on the server', None)
else:
raise e
def disable_schedule(self, schedule_name_or_luid: str):
self.start_log_block()
luid = self.query_schedule_luid(schedule_name_or_luid)
tsr = ET.Element('tsRequest')
s = ET.Element('schedule')
s.set('state', 'Suspended')
tsr.append(s)
url = self.build_api_url("schedules/{}".format(luid), server_level=True)
self.send_update_request(url, tsr)
self.end_log_block()
def enable_schedule(self, schedule_name_or_luid: str):
self.start_log_block()
luid = self.query_schedule_luid(schedule_name_or_luid)
tsr = ET.Element('tsRequest')
s = ET.Element('schedule')
s.set('state', 'Active')
tsr.append(s)
url = self.build_api_url("schedules/{}".format(luid), server_level=True)
self.send_update_request(url, tsr)
self.end_log_block()
def create_daily_extract_schedule(self, name: str, start_time: str, priority: Optional[int] = 1,
parallel_or_serial: Optional[str] = 'Parallel') -> str:
self.start_log_block()
# Check the time format at some point
luid = self.create_schedule(name, 'Extract', 'Daily', parallel_or_serial, priority, start_time)
self.end_log_block()
return luid
def create_daily_subscription_schedule(self, name: str, start_time: str, priority: Optional[int] = 1,
parallel_or_serial: Optional[str] = 'Parallel') -> str:
self.start_log_block()
# Check the time format at some point
luid = self.create_schedule(name, 'Subscription', 'Daily', parallel_or_serial, priority, start_time)
self.end_log_block()
return luid
def create_weekly_extract_schedule(self, name: str, weekday_s: Union[List[str], str], start_time: str,
priority: Optional[int] = 1,
parallel_or_serial: Optional[str] = 'Parallel') -> str:
self.start_log_block()
luid = self.create_schedule(name, 'Extract', 'Weekly', parallel_or_serial, priority, start_time=start_time,
interval_value_s=weekday_s)
self.end_log_block()
return luid
def create_weekly_subscription_schedule(self, name: str, weekday_s: Union[List[str], str], start_time: str,
priority: Optional[int] = 1,
parallel_or_serial: Optional[str] = 'Parallel') -> str:
self.start_log_block()
luid = self.create_schedule(name, 'Subscription', 'Weekly', parallel_or_serial, priority,
start_time=start_time, interval_value_s=weekday_s)
self.end_log_block()
return luid
def create_monthly_extract_schedule(self, name: str, day_of_month: str, start_time: str,
priority: Optional[int] = 1,
parallel_or_serial: Optional[str] = 'Parallel') -> str:
self.start_log_block()
luid = self.create_schedule(name, 'Extract', 'Monthly', parallel_or_serial, priority, start_time=start_time,
interval_value_s=day_of_month)
self.end_log_block()
return luid
def create_monthly_subscription_schedule(self, name: str, day_of_month: str, start_time: str,
priority: Optional[int] = 1,
parallel_or_serial: Optional[str] = 'Parallel') -> str:
self.start_log_block()
luid = self.create_schedule(name, 'Subscription', 'Monthly', parallel_or_serial, priority,
start_time=start_time, interval_value_s=day_of_month)
self.end_log_block()
return luid
def create_hourly_extract_schedule(self, name: str, interval_hours_or_minutes: str, interval: str, start_time: str,
end_time: str, priority: Optional[int] = 1,
parallel_or_serial: Optional[str] = 'Parallel') -> str:
self.start_log_block()
luid = self.create_schedule(name, 'Extract', 'Hourly', parallel_or_serial, priority, start_time, end_time,
interval, interval_hours_or_minutes)
self.end_log_block()
return luid
def create_hourly_subscription_schedule(self, name: str, interval_hours_or_minutes: str, interval: str, start_time: str,
end_time: str, priority: Optional[int] = 1,
parallel_or_serial: Optional[str] = 'Parallel') -> str:
self.start_log_block()
luid = self.create_schedule(name, 'Subscription', 'Hourly', parallel_or_serial, priority, start_time, end_time,
interval, interval_hours_or_minutes)
self.end_log_block()
return luid
def delete_schedule(self, schedule_name_or_luid: str):
self.start_log_block()
schedule_luid = self.query_schedule_luid(schedule_name_or_luid)
url = self.build_api_url("schedules/{}".format(schedule_luid), server_level=True)
self.send_delete_request(url)
self.end_log_block()
class ScheduleMethods27(ScheduleMethods):
def __init__(self, rest_api_base: TableauRestApiBase27):
self.rest_api_base = rest_api_base
class ScheduleMethods28(ScheduleMethods27):
def __init__(self, rest_api_base: TableauRestApiBase28):
self.rest_api_base = rest_api_base
def add_workbook_to_schedule(self, wb_name_or_luid: str, schedule_name_or_luid: str,
proj_name_or_luid: Optional[str] = None) -> ET.Element:
self.start_log_block()
wb_luid = self.query_workbook_luid(wb_name_or_luid, proj_name_or_luid)
schedule_luid = self.query_schedule_luid(schedule_name_or_luid)
tsr = ET.Element('tsRequest')
t = ET.Element('task')
er = ET.Element('extractRefresh')
w = ET.Element('workbook')
w.set('id', wb_luid)
er.append(w)
t.append(er)
tsr.append(t)
url = self.build_api_url("schedules/{}/workbooks".format(schedule_luid))
response = self.send_update_request(url, tsr)
self.end_log_block()
return response
def add_datasource_to_schedule(self, ds_name_or_luid: str, schedule_name_or_luid: str,
proj_name_or_luid: Optional[str] = None) -> ET.Element:
self.start_log_block()
        ds_luid = self.query_datasource_luid(ds_name_or_luid, proj_name_or_luid)
schedule_luid = self.query_schedule_luid(schedule_name_or_luid)
tsr = ET.Element('tsRequest')
t = ET.Element('task')
er = ET.Element('extractRefresh')
d = ET.Element('datasource')
d.set('id', ds_luid)
er.append(d)
t.append(er)
tsr.append(t)
url = self.build_api_url("schedules/{}/datasources".format(schedule_luid))
response = self.send_update_request(url, tsr)
self.end_log_block()
return response
class ScheduleMethods30(ScheduleMethods28):
def __init__(self, rest_api_base: TableauRestApiBase30):
self.rest_api_base = rest_api_base
class ScheduleMethods31(ScheduleMethods30):
def __init__(self, rest_api_base: TableauRestApiBase31):
self.rest_api_base = rest_api_base
class ScheduleMethods32(ScheduleMethods31):
def __init__(self, rest_api_base: TableauRestApiBase32):
self.rest_api_base = rest_api_base
class ScheduleMethods33(ScheduleMethods32):
def __init__(self, rest_api_base: TableauRestApiBase33):
self.rest_api_base = rest_api_base
class ScheduleMethods34(ScheduleMethods33):
def __init__(self, rest_api_base: TableauRestApiBase34):
self.rest_api_base = rest_api_base
class ScheduleMethods35(ScheduleMethods34):
def __init__(self, rest_api_base: TableauRestApiBase35):
self.rest_api_base = rest_api_base
class ScheduleMethods36(ScheduleMethods35):
def __init__(self, rest_api_base: TableauRestApiBase36):
self.rest_api_base = rest_api_base
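# Hedged usage sketch (editor's addition): assuming `t` is an already signed-in
# TableauRestApiBase-derived connection from tableau_tools, the convenience
# wrappers above could be exercised like this. The schedule name and start
# time are illustrative values.
#
# sched = ScheduleMethods(t)
# luid = sched.create_daily_extract_schedule('Nightly Extracts', start_time='23:30:00')
# sched.update_schedule(luid, priority=50)
# sched.disable_schedule(luid)
# sched.delete_schedule(luid)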
| 45.769648
| 124
| 0.603943
|
6966dd32de6113249ce73140eaa2d3be4452d116
| 1,309
|
py
|
Python
|
flaskapp/resources/heretraffic.py
|
cs-510-explorations-car-dependence/backend
|
25fe32486890dd66a4c03f4a89fec25f412b4220
|
[
"MIT"
] | null | null | null |
flaskapp/resources/heretraffic.py
|
cs-510-explorations-car-dependence/backend
|
25fe32486890dd66a4c03f4a89fec25f412b4220
|
[
"MIT"
] | 1
|
2021-08-07T18:35:07.000Z
|
2021-08-07T20:23:59.000Z
|
flaskapp/resources/heretraffic.py
|
cs-510-explorations-car-dependence/backend
|
25fe32486890dd66a4c03f4a89fec25f412b4220
|
[
"MIT"
] | null | null | null |
import requests
from flaskapp.resources.utils import raise_if_invalid_coordinates
class HERETraffic:
""" A thin wrapper over the HERE Traffic Flow API. """
def __init__(self, apikey):
self.apikey = apikey
self.url_base = f"https://traffic.ls.hereapi.com/traffic/6.2/flow.json?apiKey={apikey}&responseattributes=sh,fc"
def get_flow_data(self, upperleftbb, lowerrightbb):
"""
Gets raw Road Shape and Road Class Filter response.
An example output is found here: https://developer.here.com/documentation/traffic/dev_guide/topics_v6.1/example-flow-sh-frc.html
Both arguments are a (float latitude, float longitude) pair representing either the upper left or the bottom
right coordinate of the bounding box to be searched.
Returns a (int status_code, dict json_response) pair. If status_code is not 200, then json_response will be empty.
"""
raise_if_invalid_coordinates(upperleftbb)
raise_if_invalid_coordinates(lowerrightbb)
request_url = f"{self.url_base}&bbox={upperleftbb[0]},{upperleftbb[1]};{lowerrightbb[0]},{lowerrightbb[1]}"
response = requests.get(request_url)
if response.status_code == 200:
return 200, response.json()
return response.status_code, {}
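# Hedged usage sketch (editor's addition): the API key and bounding box below
# are placeholders; a valid HERE API key is required to get a 200 response and
# a populated flow document.
if __name__ == "__main__":
    here = HERETraffic(apikey="YOUR_HERE_API_KEY")
    # upper-left and lower-right (latitude, longitude) corners of the search box
    status, flow = here.get_flow_data((45.54, -122.71), (45.49, -122.63))
    print(status, list(flow.keys()))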
| 52.36
| 136
| 0.705882
|
350b64879b3ef5131b14027e1c528087f8f49d3a
| 350
|
py
|
Python
|
setup.py
|
reddymeghraj/lodge
|
10f94c7197bf47581f101ac0db6194accf80038e
|
[
"MIT"
] | null | null | null |
setup.py
|
reddymeghraj/lodge
|
10f94c7197bf47581f101ac0db6194accf80038e
|
[
"MIT"
] | null | null | null |
setup.py
|
reddymeghraj/lodge
|
10f94c7197bf47581f101ac0db6194accf80038e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
version = '0.0.1'
setup(
name='lodge',
version=version,
description='App for lodge',
author='Wayzon',
author_email='info@wayzon.in',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=("frappe",),
)
| 19.444444
| 43
| 0.662857
|
b1cf8074377a8316691bd796bd3a1689deaec295
| 8,186
|
py
|
Python
|
configs/tabnet/icdar2019_tracka_modern:faster_rcnn_r50_fpn_2x.py
|
vansin/tabnet
|
2403c8134c23a704940522ace92a75b0fc6f5d99
|
[
"Apache-2.0"
] | 2
|
2021-10-18T02:52:18.000Z
|
2022-01-21T08:54:18.000Z
|
configs/tabnet/icdar2019_tracka_modern:faster_rcnn_r50_fpn_2x.py
|
vansin/tabnet
|
2403c8134c23a704940522ace92a75b0fc6f5d99
|
[
"Apache-2.0"
] | null | null | null |
configs/tabnet/icdar2019_tracka_modern:faster_rcnn_r50_fpn_2x.py
|
vansin/tabnet
|
2403c8134c23a704940522ace92a75b0fc6f5d99
|
[
"Apache-2.0"
] | null | null | null |
# _base_ = [
# '../_base_/models/faster_rcnn_r50_fpn.py',
# '../_base_/datasets/icdar2019_tracka_modern_detection.py',
# '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
# ]
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
dataset_type = 'TableDataset'
data_root = 'data/table/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type='TableDataset',
ann_file='data/icdar2019/modern_train.json',
img_prefix='data/icdar2019/training/TRACKA/ground_truth',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]),
val=dict(
type='TableDataset',
ann_file='data/icdar2019/modern_test.json',
img_prefix='data/icdar2019/test/TRACKA/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]),
test=dict(
type='TableDataset',
ann_file='data/icdar2019/modern_test.json',
img_prefix='data/icdar2019/test/TRACKA/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]))
evaluation = dict(interval=1, metric='bbox')
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
checkpoint_config = dict(interval=1)
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
work_dir = './work_dirs/icdar2019_tracka_modern:faster_rcnn_r50_fpn_2x'
gpu_ids = range(0, 1)
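# Hedged usage sketch (editor's addition): one common way to inspect or launch
# a dumped MMDetection config like this one; the config path matches the file
# name recorded above, and the exact entry point depends on the MMDetection
# version in use.
#
# from mmcv import Config
# cfg = Config.fromfile(
#     'configs/tabnet/icdar2019_tracka_modern:faster_rcnn_r50_fpn_2x.py')
# print(cfg.model.type, cfg.runner.max_epochs)   # -> FasterRCNN 24
#
# Training would then typically be started with:
#   python tools/train.py configs/tabnet/icdar2019_tracka_modern:faster_rcnn_r50_fpn_2x.py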
| 34.834043
| 79
| 0.519057
|
00e1ce0f99cc4dc19139ed3c63812961ac60d7fe
| 13,776
|
py
|
Python
|
tests/core/test_suite.py
|
gregoil/rotest
|
c443bc1b99e02f047adfcab9943966f0023f652c
|
[
"MIT"
] | 26
|
2017-06-11T18:21:17.000Z
|
2021-02-21T20:36:30.000Z
|
tests/core/test_suite.py
|
gregoil/rotest
|
c443bc1b99e02f047adfcab9943966f0023f652c
|
[
"MIT"
] | 143
|
2017-06-29T11:18:35.000Z
|
2021-06-10T17:23:46.000Z
|
tests/core/test_suite.py
|
gregoil/rotest
|
c443bc1b99e02f047adfcab9943966f0023f652c
|
[
"MIT"
] | 11
|
2017-06-12T09:16:14.000Z
|
2021-07-11T23:20:59.000Z
|
"""Test TestSuite behavior and common variables."""
# pylint: disable=too-many-locals,protected-access
# pylint: disable=too-many-public-methods,invalid-name
# pylint: disable=no-member,protected-access,no-init,too-few-public-methods
from __future__ import absolute_import
import os
from future.builtins import object
from rotest.core.suite import TestSuite
from rotest.common.config import ROTEST_WORK_DIR
from rotest.management.models.ut_models import DemoResourceData
from tests.core.utils import (MockSuite1, MockSuite2, MockTestSuite,
MockNestedTestSuite, SuccessCase, FailureCase,
PartialCase, MockFlow, MockFlow1, MockFlow2,
SuccessBlock, FailureBlock, BasicRotestUnitTest)
class TestTestSuite(BasicRotestUnitTest):
"""Test TestSuite behavior on successful & failed components."""
fixtures = ['case_ut.json']
def test_empty_suite(self):
"""Test empty component tuple raises AttributeError."""
MockTestSuite.components = ()
self.assertRaises(AttributeError, MockTestSuite)
def test_happy_flow(self):
"""Create test suite with success components & validate run success.
We test the suite result was success and that all the components run.
"""
MockSuite1.components = (SuccessCase, SuccessCase)
MockSuite2.components = (SuccessCase,)
MockTestSuite.components = (MockSuite1, MockSuite2)
test_suite = MockTestSuite()
self.run_test(test_suite)
self.assertTrue(self.result.wasSuccessful(),
'Suite failed when it should have succeeded')
self.assertEqual(self.result.testsRun, 3,
"Suite didn't run the correct number of tests")
# === Validate data object ===
self.assertTrue(test_suite.data.success,
'Suite data result should have been True')
self.assertEqual(len(list(test_suite)),
len(MockTestSuite.components),
                         'Data members number differs from number of tests')
def test_giant_suite(self):
"""See that a test suite with a large amount of tests doesn't crash."""
tests_amount = 1500
MockTestSuite.components = [SuccessCase] * tests_amount
test_suite = MockTestSuite()
self.run_test(test_suite)
self.assertTrue(self.result.wasSuccessful(),
'Suite failed when it should have succeeded')
self.assertEqual(self.result.testsRun, tests_amount,
"Suite didn't run the correct number of tests")
# === Validate data object ===
self.assertTrue(test_suite.data.success,
'Suite data result should have been True')
self.assertEqual(len(list(test_suite)),
len(MockTestSuite.components),
                         'Data members number differs from number of tests')
def test_skip_init(self):
"""Create a suite that should skip initialization and validate it."""
MockSuite1.components = (SuccessCase, SuccessCase)
MockSuite2.components = (SuccessCase,)
MockTestSuite.components = (MockSuite1, MockSuite2)
test_suite = MockTestSuite(skip_init=True)
self.run_test(test_suite)
self.assertTrue(self.result.wasSuccessful(),
'Suite failed when it should have succeeded')
self.assertEqual(self.result.testsRun, 3,
"Suite didn't run the correct number of tests")
# === Validate data object ===
self.assertTrue(test_suite.data.success,
'Suite data result should have been True')
for resource_request in SuccessCase.resources:
test_resource = DemoResourceData.objects.get(
ip_address=resource_request.kwargs['ip_address'])
self.assertFalse(test_resource.initialization_flag,
"Resource %r was initialized" % test_resource)
def test_suite_failure(self):
"""Create test suite with failed component & validate its behavior.
We test the suite result was failure and all the component run.
"""
MockSuite1.components = (SuccessCase, FailureCase)
MockSuite2.components = (SuccessCase,)
MockTestSuite.components = (MockSuite1, MockSuite2)
test_suite = MockTestSuite()
self.run_test(test_suite)
self.assertFalse(self.result.wasSuccessful(),
'Suite succeeded when it should have failed')
self.assertEqual(self.result.testsRun, 3,
"Suite didn't run the correct number of tests")
self.assertEqual(len(self.result.failures), 1,
"Suite didn't fail the correct number of tests")
# === Validate data object ===
self.assertFalse(test_suite.data.success,
'Suite data result should have been False')
self.assertEqual(len(list(test_suite)),
len(MockTestSuite.components),
                         'Number of components differs from the actual '
                         'number of tests')
def test_case_method_failure(self):
"""Create test suite with failed method & validate the suite behavior.
We test the suite result was failure and all the component run.
"""
MockSuite1.components = (SuccessCase,)
MockSuite2.components = (PartialCase,)
MockTestSuite.components = (MockSuite1, MockSuite2)
test_suite = MockTestSuite()
self.run_test(test_suite)
self.assertFalse(self.result.wasSuccessful(),
'Suite succeeded when it should have failed')
self.assertEqual(self.result.testsRun, 3,
"Suite didn't run the correct number of tests")
self.assertEqual(len(self.result.failures), 1,
"Suite didn't fail the correct number of tests")
def test_nested_suite_happy_flow(self):
"""Create nested test suite and validate the test run success.
We test the suite result was success and that all the components run.
"""
MockSuite1.components = (SuccessCase, SuccessCase)
MockSuite2.components = (SuccessCase,)
MockNestedTestSuite.components = (MockSuite1, MockSuite2)
MockTestSuite.components = (MockSuite1, MockNestedTestSuite)
test_suite = MockTestSuite()
self.run_test(test_suite)
self.assertTrue(self.result.wasSuccessful(),
'Suite failed when it should have succeeded')
self.assertEqual(self.result.testsRun, 5,
"Suite didn't run the correct number of tests")
# === Validate data object ===
self.assertTrue(test_suite.data.success,
'Suite data result should have been True')
self.assertEqual(
len(list(test_suite)),
len(MockTestSuite.components),
            'Data members number differs from number of tests')
def test_nested_suite_internal_fail(self):
"""Test nested test suite behavior on internal suite failure.
We test the suite result was failure and that all the components run.
"""
MockSuite1.components = (SuccessCase, FailureCase)
MockSuite2.components = (SuccessCase,)
MockNestedTestSuite.components = (MockSuite1, MockSuite2)
MockTestSuite.components = (MockSuite2,
MockNestedTestSuite,
MockSuite2)
test_suite = MockTestSuite()
self.run_test(test_suite)
self.assertFalse(self.result.wasSuccessful(),
'Suite succeeded when it should have failed')
self.assertEqual(self.result.testsRun, 5,
"Suite didn't run the correct number of tests")
self.assertEqual(len(self.result.failures), 1,
"Suite didn't fail the correct number of tests")
def test_nested_suite_external_fail(self):
"""Test nested test suite behavior on external test failure.
We test the suite result was failure and that all the components run.
"""
MockSuite1.components = (SuccessCase,)
MockSuite2.components = (FailureCase,)
MockNestedTestSuite.components = (MockSuite1, MockSuite2)
MockTestSuite.components = (MockSuite1,
MockSuite2,
MockNestedTestSuite,
MockSuite2)
test_suite = MockTestSuite()
self.run_test(test_suite)
self.assertFalse(self.result.wasSuccessful(),
'Suite succeeded when it should have failed')
self.assertEqual(self.result.testsRun, 5,
"Suite didn't run the correct number of tests")
self.assertEqual(len(self.result.failures), 3,
"Suite didn't fail the correct number of tests")
def test_invalid_type(self):
"""Test invalid component type raises TypeError."""
class BadTestType(object):
pass
MockSuite1.components = (SuccessCase,)
MockTestSuite.components = (MockSuite1, BadTestType, MockSuite1)
self.assertRaises(TypeError, MockTestSuite)
def validate_work_dirs(self, test):
"""Validate the test work directories recursively.
Validates that all tests working directories were created and that
each sub test work directory is contained by its containing test work
directory.
Args:
test (TestCase / TestSuite): test whose test data work dir
is being validated.
"""
self.assertTrue(os.path.exists(test.work_dir),
"Test %r work directory %r doesn't exists")
sub_test_iterator = ()
if isinstance(test, TestSuite):
sub_test_iterator = iter(test)
for sub_test in sub_test_iterator:
base_work_dir = os.path.dirname(sub_test.work_dir)
self.assertEqual(
test.work_dir,
base_work_dir,
"Test %r work directory %r is not contained in its "
"parent's %r work directory %r" %
(sub_test.data, sub_test.work_dir, test, test.work_dir))
def test_suite_with_flow(self):
"""Create nested test suite and with test-flow validate success.
We test the suite result was success and that all the components run.
"""
MockFlow.blocks = (SuccessBlock, SuccessBlock)
MockTestSuite.components = (SuccessCase, MockFlow, SuccessCase)
test_suite = MockTestSuite()
self.run_test(test_suite)
self.assertTrue(self.result.wasSuccessful(),
'Suite failed when it should have succeeded')
self.assertEqual(self.result.testsRun, 3,
"Suite didn't run the correct number of tests")
# === Validate data object ===
self.assertTrue(test_suite.data.success,
'Suite data result should have been True')
def test_complex_nested_suite_with_flows(self):
"""Create nested test suite with test-flows and validate run success.
We test the suite result was success and that all the components run.
"""
MockFlow1.blocks = (SuccessBlock, SuccessBlock)
MockFlow2.blocks = (FailureBlock, SuccessBlock)
MockSuite1.components = (MockFlow2, SuccessCase)
MockSuite2.components = (MockFlow1, MockFlow2)
MockTestSuite.components = (MockSuite1, MockSuite2)
test_suite = MockTestSuite()
self.run_test(test_suite)
self.assertFalse(self.result.wasSuccessful(),
'Suite succeeded when it should have failed')
self.assertEqual(self.result.testsRun, 4,
"Suite didn't run the correct number of tests")
self.assertEqual(len(self.result.failures), 2,
"Suite didn't fail the correct number of tests")
# === Validate data object ===
self.assertFalse(test_suite.data.success,
'Suite data result should have been False')
def test_working_dir(self):
"""Test the tests working directories creation and structure.
        Validates that all tests working directories were created and that
        each sub test work directory is contained by its containing test work
directory.
It tests it using the following scenario:
TestSuite
- Suite1
- Suite2
- TestSuite
-- Suite1
-- Suite2
- Suite2
"""
MockSuite1.components = (SuccessCase,)
MockSuite2.components = (FailureCase,)
MockNestedTestSuite.components = (MockSuite1, MockSuite2)
MockTestSuite.components = (MockSuite1,
MockSuite2,
MockNestedTestSuite,
MockSuite2)
test_suite = MockTestSuite()
self.assertEqual(
ROTEST_WORK_DIR.rstrip(os.path.sep),
os.path.dirname(test_suite.work_dir).rstrip(os.path.sep),
"Test %r work directory %r is not contained in the base work "
"directory %r" %
(test_suite.data, test_suite.work_dir, ROTEST_WORK_DIR))
self.validate_work_dirs(test_suite)
| 37.846154
| 79
| 0.612805
|