blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
58fa7eb98f9ddaf9c735d1a5be9232da1b31fd64 | 87b66cbf9eb8aca426808734eb754f87cf3ec38e | /tests/dolfin/test_ascalarbar.py | c80a048a0c2e3a8970760f0d6197c67c8c5d50c3 | [
"MIT",
"LicenseRef-scancode-public-domain",
"OFL-1.1"
] | permissive | RubendeBruin/vtkplotter | ea33c38cc9df72c15d7ca2fa357205ea35a559bb | 641743f29289d6df3d44d366072b72e221d16a51 | refs/heads/master | 2023-05-25T13:06:24.774288 | 2023-05-01T10:27:19 | 2023-05-01T10:27:19 | 219,933,917 | 0 | 1 | MIT | 2019-11-06T07:10:46 | 2019-11-06T07:10:45 | null | UTF-8 | Python | false | false | 899 | py | import numpy as np
from dolfin import *
from dolfin import __version__
from vedo.dolfin import plot, screenshot, MeshActor, show
from vedo import settings
# Regression test: interpolate a linear expression on a 16x16 unit-square mesh
# and check the size and range of the scalar field exposed by vedo's actor.
print('Test ascalarbar, dolfin version', __version__)
# Newer dolfin exposes MPI.comm_world; fall back to the legacy constructor.
if hasattr(MPI, 'comm_world'):
    mesh = UnitSquareMesh(MPI.comm_world, nx=16, ny=16)
else:
    mesh = UnitSquareMesh(16,16)
V = FunctionSpace(mesh, 'Lagrange', 1)
f = Expression('10*(x[0]+x[1]-1)', degree=1)
u = interpolate(f, V)
actors = plot(u, mode='color', cmap='viridis', vmin=-3, vmax=3, style=1,
              returnActorsNoShow=True)
actor = actors[0]
solution = actor.pointdata[0]
print('ArrayNames', actor.pointdata.keys())
print('min', 'mean', 'max:')
print(np.min(solution), np.mean(solution), np.max(solution), len(solution))
# A 16x16 P1 mesh has 17x17 = 289 vertices; the field spans [-10, 10].
assert len(solution) == 289
assert np.isclose(np.min(solution) , -10., atol=1e-05)
assert np.isclose(np.max(solution) , 10., atol=1e-05)
| [
"marco.musy@gmail.com"
] | marco.musy@gmail.com |
e5b816a271981e5e88da96fe714366af82c5840e | bf64d19174ef332f39e2d8210f3eb4f783262554 | /lib/generate_defect/zlrm_Generate_the_defects_data.py | 75ffb5c6ed53c7e27cde20f4b9f75e40f2a2ca73 | [] | no_license | juzisedefeimao/cv | 3e4dd7deee471321e071ca996769fc3b65481993 | fb9e9292030481f5a26efde4003fb83d37a34962 | refs/heads/master | 2020-05-30T14:29:13.253563 | 2019-06-02T01:08:53 | 2019-06-02T01:08:53 | 189,791,743 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,276 | py | from PIL import Image
import numpy as np
from time import strftime
import os
import xml.etree.ElementTree as ET
# Generates synthetic surface defects (paint smears, aluminium shavings,
# free-hand sketches), composites them onto labelled aluminium-ingot images,
# and emits updated Pascal-VOC style XML annotations.
class Generate_Defect():
    def __init__(self, save_image_root=None, save_label_root=None, read_label_root=None,
                 read_defect_root=None, save_fail_label_root=None, save_fail_image_root=None):
        """Configure the I/O directories used by the generator.

        save_image_root / save_label_root: where composited images / labels go.
        read_label_root: directory of source XML annotations.
        read_defect_root: directory of defect templates, one sub-folder per class.
        save_fail_label_root / save_fail_image_root: outputs for images where
            no valid paste location could be found.
        """
        self.save_image_root = save_image_root
        self.save_label_root = save_label_root
        self.read_label_root = read_label_root
        self.read_defect_root = read_defect_root
        self.fail_label_root = save_fail_label_root
        self.fail_image_root = save_fail_image_root
        # Which random augmentations get applied to the defect templates.
        self.scale_random = False
        self.ratio_random = False
        self.rotate_random = False
        self.painting_random = False
        self.translation_random = True
        # Per-transform lists of defect patches ({'name': ..., 'image': ...}).
        self.defect_image_list = []
        self.defect_scale_image_list = []
        self.defect_ratio_image_list = []
        self.defect_rotate_image_list = []
        self.defect_translation_image_list = []
        self.defect_painting_image_list = []
        # Pool of ready-to-paste patches consumed by generate_defects().
        self.generate_defect_image_list = []
        # Flags recording which lists above hold stale data (see clean_list).
        self.defect_affirm = {'class_affirm':False, 'scale_affirm':False, 'ratio_affirm':False,
                              'rotate_affirm':False, 'painting_affirm':False}
    # Read an image file into a numpy array, optionally normalizing channels.
    def readimage(self, filename, channel=None):
        """Load `filename`; channel=1 forces grayscale, channel=3 forces RGB."""
        image = np.array(Image.open(filename))
        if channel==1:
            image = self.image_transform_3_1(image)
        elif channel==3:
            image = self.image_transform_1_3(image)
        return image
    # Trim near-black borders from a grayscale image.
    def cutback(self, image, right_left_threshold=80, up_and_down_threshold=80):
        """Strip columns/rows whose pixels are all <= the given thresholds.

        Returns the cropped array, or False when the whole image is black.
        NOTE(review): when there is no black edge on the left/top, the
        `left_cut_x - 1` / `up_cut_y - 1` split index becomes -1, which numpy
        interprets as "keep only the last column/row" -- verify this
        off-by-one is intended for images without a dark border.
        """
        rows, cols = image.shape
        cols_index = cols - 1
        # A column is removable when no pixel in it exceeds the threshold.
        def cut_rl(w_index):
            for i in range(rows):
                if image[i][w_index] > right_left_threshold:
                    return False
            return True
        # Trim the black border on the right.
        right_cut_x = cols_index
        while right_cut_x > 0 and cut_rl(right_cut_x):
            right_cut_x = right_cut_x - 1
        if right_cut_x == 0:
            print('图片全为黑,切除失败')
            return False
        image, _ = np.hsplit(image, (right_cut_x + 1,))
        # Trim the black border on the left.
        left_cut_x = 0
        print(image.shape)
        while cut_rl(left_cut_x):
            left_cut_x = left_cut_x + 1
        _, image = np.hsplit(image, (left_cut_x - 1,))
        rows_, cols_ = image.shape
        rows_index = rows_ - 1
        # A row is removable when no pixel in it exceeds the threshold.
        def cut_ud(h_index):
            for j in range(cols_):
                if image[h_index][j] > up_and_down_threshold:
                    return False
            return True
        # Trim the black border at the bottom.
        down_cut_y = rows_index
        while cut_ud(down_cut_y):
            down_cut_y = down_cut_y - 1
        image, _ = np.split(image, (down_cut_y + 1,), axis=0)
        # Trim the black border at the top.
        up_cut_y = 0
        while cut_ud(up_cut_y):
            up_cut_y = up_cut_y + 1
        _, image = np.split(image, (up_cut_y - 1,), axis=0)
        print('左边切除', left_cut_x, '像素; ', '右边切除', cols_index - right_cut_x, '像素;',
              '上边切除', up_cut_y, '像素; ', '下边切除', rows_index - down_cut_y, '像素;')
        return image
# 单通道图像转为3通道图像
def image_transform_1_3(self, image):
assert len(image.shape) != 2 or len(image.shape) != 3, print('图像既不是3通道,也不是单通道')
if len(image.shape) == 2:
c = []
for i in range(3):
c.append(image)
image = np.asarray(c)
image = image.transpose([1, 2, 0])
elif len(image.shape)==3:
print('图像为3通道图像,不需要转换')
return image
# 3通道图像转为单通道图像
def image_transform_3_1(self, image):
assert len(image.shape) != 2 or len(image.shape) != 3, print('图像既不是3通道,也不是单通道')
if len(image.shape) == 3:
image_2 = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)
# 灰度化方法2:根据亮度与RGB三个分量的对应关系:Y=0.3*R+0.59*G+0.11*B
h, w, color = image.shape
for i in range(h):
for j in range(w):
image_2[i][j] = np.uint8(0.3 * image[i][j][0] + 0.59 * image[i][j][1] + 0.11 * image[i][j][2])
image = image_2
assert len(image.shape) == 2, '3通道转为单通道图像失败'
elif len(image.shape) == 2:
print('图像为单通道图像,不需要转换')
return image
    # Save an image array to disk as a 3-channel image file.
    def saveimage(self, image, saveimage_name=None, image_ext='bmp', saveimage_root=None):
        """Write `image` under `saveimage_root`; the name defaults to a timestamp.

        Grayscale input is expanded to 3 channels first; the target directory
        is created on demand.
        """
        if len(image.shape)==2:
            image = self.image_transform_1_3(image)
        if saveimage_name is None:
            saveimage_name = 'image_{}'.format(strftime("%Y_%m_%d_%H_%M_%S")) + '.' + image_ext
        else:
            saveimage_name = saveimage_name + '.' + image_ext
        if saveimage_root is None:
            saveimage_root = 'C:\\Users\\jjj\\Desktop\\jjj\\zlrm\\data\\default_root'
            print('未设置保存图片的路径,默认保存到_{}'.format(saveimage_root))
        if not os.path.isdir(saveimage_root):
            os.makedirs(saveimage_root)
        root = os.path.join(saveimage_root, str(saveimage_name))
        image = Image.fromarray(image)
        image.save(root)
    # Append generated defect boxes to an existing XML annotation and save it.
    def savelabel(self, boxes, labelfile, savelabel_name=None, savelabel_root=None):
        """Write `labelfile` plus one <object> element per entry of `boxes`.

        boxes: list of dicts with keys 'name', 'xmin', 'ymin', 'xmax', 'ymax'.
        NOTE(review): the timestamp fallback name uses extension 'x,l' --
        looks like a typo for 'xml'; confirm before relying on the default.
        """
        tree = ET.parse(labelfile)
        root = tree.getroot()
        if savelabel_name is None:
            savelabel_name = 'box_{}'.format(strftime("%Y_%m_%d_%H_%M_%S")) + '.' + 'x,l'
        else:
            savelabel_name = savelabel_name + '.' + 'xml'
        if savelabel_root is None:
            savelabel_root = 'C:\\Users\\jjj\\Desktop\\jjj\\zlrm\\data\\default_root'
            print('未设置保存boxes的路径,默认保存到_{}'.format(savelabel_root))
        for i in range(len(boxes)):
            # Level 1: one <object> node per box.
            object = ET.Element('object')
            # Level 2: class name and detection metadata.
            name = ET.Element('name')
            name.text = boxes[i]['name']
            pose = ET.Element('pose')
            pose.text = 'Unspecified'
            truncated = ET.Element('truncated')
            truncated.text = '0'
            difficult = ET.Element('difficult')
            difficult.text = '1'
            bndbox = ET.Element('bndbox')
            # Level 3: the corner coordinates.
            xmin = ET.Element('xmin')
            xmin.text = str(boxes[i]['xmin'])
            ymin = ET.Element('ymin')
            ymin.text = str(boxes[i]['ymin'])
            xmax = ET.Element('xmax')
            xmax.text = str(boxes[i]['xmax'])
            ymax = ET.Element('ymax')
            ymax.text = str(boxes[i]['ymax'])
            # Attach the nodes to the tree.
            bndbox.append(xmin)
            bndbox.append(ymin)
            bndbox.append(xmax)
            bndbox.append(ymax)
            object.append(name)
            object.append(pose)
            object.append(truncated)
            object.append(difficult)
            object.append(bndbox)
            root.append(object)
        savelabel = os.path.join(savelabel_root, savelabel_name)
        tree.write(savelabel)
# 生成一张纯白图片
def generate_white_image(self, shape=(600,600)):
image = np.zeros(shape, dtype=np.uint8)
h, w = image.shape
for i in range(h):
for j in range(w):
image[i][j] = np.uint8(255)
return image
# 清空残留列表
def clean_list(self):
if self.defect_affirm['class_affirm']:
self.defect_image_list = []
self.defect_affirm['class_affirm'] = False
if self.defect_affirm['scale_affirm']:
self.defect_scale_image_list = []
self.defect_affirm['scale_affirm'] = False
if self.defect_affirm['ratio_affirm']:
self.defect_ratio_image_list = []
self.defect_affirm['ratio_affirm'] = False
if self.defect_affirm['rotate_affirm']:
self.defect_rotate_image_list = []
self.defect_affirm['ratio_affirm'] = False
if self.defect_affirm['painting_affirm']:
self.defect_painting_image_list = []
self.defect_affirm['painting_affirm'] = False
# 为图片随机生成一些缺陷
def generate_defects(self, image, labelfile, freehand_sketching = False, save_name=None):
if save_name==None:
save_name = len(os.listdir(self.save_image_root))
save_name = save_name + 1
if len(self.generate_defect_image_list)==0:
for file in os.listdir(self.read_defect_root):
if freehand_sketching and file == 'freehand_sketching':
freehand_sketching_folder_root = os.path.join(self.read_defect_root, 'freehand_sketching')
for freehand_sketching_file in os.listdir(freehand_sketching_folder_root):
freehand_sketching_image_root = os.path.join(freehand_sketching_folder_root,
freehand_sketching_file)
freehand_sketching_image = self.readimage(freehand_sketching_image_root)
self.get_defect_freehand_sketching(freehand_sketching_image)
elif file == 'paint_smear':
paint_smear_folder_root = os.path.join(self.read_defect_root, 'paint_smear')
for paint_smear_file in os.listdir(paint_smear_folder_root):
paint_smear_image_root = os.path.join(paint_smear_folder_root, paint_smear_file)
paint_smear_image = self.readimage(paint_smear_image_root)
self.get_defect_paint_smear(paint_smear_image)
elif file == 'aluminium_skimmings':
aluminium_skimmings_folder_root = os.path.join(self.read_defect_root, 'aluminium_skimmings')
for aluminium_skimmings_file in os.listdir(aluminium_skimmings_folder_root):
aluminium_skimmings_image_root = os.path.join(aluminium_skimmings_folder_root,
aluminium_skimmings_file)
aluminium_skimmings_image = self.readimage(aluminium_skimmings_image_root)
self.get_defect_aluminium_skimmings(aluminium_skimmings_image)
# else:
# raise KeyError('未知的缺陷', file)
# self.random_defect()
defect_image_list = self.defect_image_list
if self.scale_random:
self.defect_scale(defect_image_list)
defect_image_list = self.defect_scale_image_list
if self.ratio_random:
self.defect_ratio(defect_image_list)
defect_image_list = self.defect_ratio_image_list
if self.rotate_random:
self.defect_rotate(defect_image_list)
defect_image_list = self.defect_rotate_image_list
if self.painting_random:
self.defect_painting(defect_image_list)
defect_image_list = self.defect_painting_image_list
self.generate_defect_image_list = defect_image_list
self.clean_list()
defect_image_list = self.generate_defect_image_list
print('生成的缺陷还有', len(defect_image_list))
if self.translation_random:
fetch = self.defect_translation(image, defect_image_list, labelfile)
if fetch == None:
print('输出未合成的label和image')
tree = ET.parse(labelfile)
save_xml_root = os.path.join(self.fail_label_root, save_name + '.xml')
tree.write(save_xml_root)
self.saveimage(image, saveimage_name=save_name, saveimage_root=self.fail_image_root)
else:
image = fetch[0]
boxes = fetch[1]
self.saveimage(image, saveimage_name=save_name, saveimage_root=self.save_image_root)
self.savelabel(boxes, labelfile, savelabel_name=save_name, savelabel_root=self.save_label_root)
def judge_vein_exist(self, file):
tree = ET.parse(file)
vein_exist = False
for obj in tree.findall('object'):
if obj.find('name').text == 'vein':
vein_exist = True
return vein_exist
    # Run defect generation over every annotated image in read_label_root.
    def generate_defect_batch(self, batch_data_root=None):
        """For each XML label, load the matching .bmp from `batch_data_root`
        and composite defects onto it.  Only images that are big enough, not
        too elongated, and that contain a 'vein' region are modified; images
        without a vein are copied through unchanged.

        NOTE(review): the pass-through branch writes the label to a path with
        no '.xml' extension -- confirm downstream readers accept that.
        """
        for labelfile in os.listdir(self.read_label_root):
            if labelfile.split('.')[-1] == 'xml':
                print('为图片 ', labelfile.split('.')[0], ' 生成缺陷')
                image_root = os.path.join(batch_data_root, labelfile.split('.')[0] + '.bmp')
                image = self.readimage(image_root, channel=1)
                # image = self.cutback(image)
                h, w = image.shape
                label_root = os.path.join(self.read_label_root, labelfile)
                # Size/aspect gate: skip tiny or extremely elongated pieces.
                if h > 200 and w > 200 and h / w < 4.4 and w / h < 4.4:
                    if self.judge_vein_exist(label_root):
                        self.generate_defects(image, label_root, save_name=labelfile.split('.')[0])
                        print('已生成', len(os.listdir(self.save_image_root)), '个图片')
                    else:
                        tree = ET.parse(label_root)
                        save_xml_root = os.path.join(self.save_label_root, labelfile.split('.')[0])
                        tree.write(save_xml_root)
                        self.saveimage(image, saveimage_name=labelfile.split('.')[0], saveimage_root=self.save_image_root)
    # Threshold raw defect photographs into reusable templates and store them
    # under read_defect_root, one sub-folder per defect class.
    def preload_defect(self, preload_defect_root, freehand_sketching = False):
        """Preprocess the raw defect shots in `preload_defect_root` and save
        the binarized/cropped templates for later compositing.

        NOTE(review): the second branch compares against 'paint_smear1' while
        the folder it then reads is 'paint_smear' -- this looks like either a
        typo or a deliberately disabled branch; confirm which.
        """
        for file in os.listdir(preload_defect_root):
            if freehand_sketching and file == 'freehand_sketching':
                freehand_sketching_folder_root = os.path.join(preload_defect_root, 'freehand_sketching')
                for freehand_sketching_file in os.listdir(freehand_sketching_folder_root):
                    freehand_sketching_image_root = os.path.join(freehand_sketching_folder_root,
                                                                 freehand_sketching_file)
                    freehand_sketching_image = self.readimage(freehand_sketching_image_root)
                    image = self.get_defect_freehand_sketching(freehand_sketching_image)
                    if image is not None:
                        self.saveimage(image, saveimage_name=freehand_sketching_file.split('.')[0],
                                       saveimage_root=os.path.join(self.read_defect_root, 'freehand_sketching'))
            elif file == 'paint_smear1':
                paint_smear_folder_root = os.path.join(preload_defect_root, 'paint_smear')
                for paint_smear_file in os.listdir(paint_smear_folder_root):
                    paint_smear_image_root = os.path.join(paint_smear_folder_root, paint_smear_file)
                    paint_smear_image = self.readimage(paint_smear_image_root)
                    image = self.get_defect_paint_smear(paint_smear_image, preload=True)
                    if image is not None:
                        self.saveimage(image, saveimage_name=paint_smear_file.split('.')[0],
                                       saveimage_root=os.path.join(self.read_defect_root, 'paint_smear'))
            elif file == 'aluminium_skimmings':
                aluminium_skimmings_folder_root = os.path.join(preload_defect_root, 'aluminium_skimmings')
                for aluminium_skimmings_file in os.listdir(aluminium_skimmings_folder_root):
                    aluminium_skimmings_image_root = os.path.join(aluminium_skimmings_folder_root,
                                                                  aluminium_skimmings_file)
                    aluminium_skimmings_image = self.readimage(aluminium_skimmings_image_root)
                    image = self.get_defect_aluminium_skimmings(aluminium_skimmings_image, preload=True)
                    if image is not None:
                        self.saveimage(image, saveimage_name=aluminium_skimmings_file.split('.')[0],
                                       saveimage_root=os.path.join(self.read_defect_root, 'aluminium_skimmings'))
# 获得手绘缺陷
def get_defect_freehand_sketching(self, image):
if len(image.shape)==3:
image = self.image_transform_3_1(image)
assert len(image.shape)==2, '图片不能转为单通道'
h, w = image.shape
for i in range(h):
for j in range(w):
if image[i][j]>200:
image[i][j] = 0
else:
image[i][j] = 255
image = self.cutback(image)
if image is not False:
print('读取缺陷完成')
self.defect_image_list.append({'name': 'freehand_sketching', 'image': image})
# print(len(self.defect_image))
self.defect_affirm['class_affirm'] = True
return image
# 获得油污缺陷
def get_defect_paint_smear(self, image, preload=False):
if len(image.shape) == 3:
image = self.image_transform_3_1(image)
assert len(image.shape) == 2, '图片不能转为单通道'
h, w = image.shape
for i in range(h):
for j in range(w):
if image[i][j] > 75:
image[i][j] = 0
image = self.cutback(image, right_left_threshold=1, up_and_down_threshold=1)
if image is not False:
h, w = image.shape
if preload:
for i in range(h):
for j in range(w):
if image[i][j] == 0:
image[i][j] = 255
print('读取缺陷完成')
self.defect_image_list.append({'name': 'paint_smear', 'image': image})
# print(len(self.defect_image))
self.defect_affirm['class_affirm'] = True
return image
# 获得铝屑缺陷
def get_defect_aluminium_skimmings(self, image, preload=False):
if len(image.shape) == 3:
image = self.image_transform_3_1(image)
assert len(image.shape) == 2, '图片不能转为单通道'
h, w = image.shape
for i in range(h):
for j in range(w):
if image[i][j] > 80:
image[i][j] = 0
image = self.cutback(image, right_left_threshold=1, up_and_down_threshold=1)
if image is not False:
h, w = image.shape
if preload:
for i in range(h):
for j in range(w):
if image[i][j] == 0:
image[i][j] = 255
print('读取缺陷完成')
self.defect_image_list.append({'name': 'aluminium_skimmings', 'image': image})
# print(len(self.defect_image))
self.defect_affirm['class_affirm'] = True
return image
    # Grow a random blob defect from a seed pixel.
    def random_defect(self, p_threshold=0.5):
        """Grow a random binary blob by anti-diagonal sweeps from pixel (0, 0),
        turning a pixel on with probability `p_threshold` per lit neighbour;
        retry until the cropped blob reaches 100 px in some dimension, then
        append it to `defect_image_list` and save a debug image named 'jjj'.

        NOTE(review): this appends the raw array while every other loader
        appends {'name': ..., 'image': ...} dicts -- downstream code expects
        dicts, so this experimental path looks unused (its call in
        generate_defects is commented out).  Also, if cutback() returns False
        the `h, w = image.shape` line below would raise.
        """
        # Starting from one seed, randomly grow outwards sweep by sweep.
        h = 0
        w = 0
        while h < 100 and w < 100:
            image = np.zeros((401, 401), dtype=np.uint8)
            h, w = image.shape
            image[0][0] = 255
            for i in range(h):
                for j in range(i + 1):
                    if j - 1 >= 0:
                        if image[j - 1][i - j] == 255:
                            if np.random.rand() < p_threshold:
                                image[j][i - j] = 255
                    if i - j - 1 >= 0:
                        if image[j][i - j - 1] == 255:
                            if np.random.rand() < p_threshold:
                                image[j][i - j] = 255
                    if j - 1 >= 0 and i - j - 1 >= 0:
                        if image[j - 1][i - j - 1] == 255:
                            if np.random.rand() < p_threshold:
                                image[j][i - j] = 255
            image = self.cutback(image)
            h, w = image.shape
        # (A mirrored growth pass from the opposite corner was prototyped here
        # and remains disabled.)
        self.defect_image_list.append(image)
        # print(len(self.defect_image))
        self.defect_affirm['class_affirm'] = True
        self.saveimage(image, saveimage_name='jjj')
# 随机的上色方案
def painting_random_fetch(self, painting_schem=None, ):
random = np.random.randint(1,11)
if painting_schem == 1:
painting = np.random.randint(1,50)
if painting_schem == 2:
painting = np.random.randint(70, 120)
if painting_schem == 3:
painting = np.random.randint(150,255)
return painting
    # Fill the interior of each defect silhouette with gray values.
    def defect_painting(self, defect_image_list):
        """Scanline-fill the non-zero runs of every template with a randomly
        chosen intensity scheme (3-4 passes each) and append the results to
        defect_painting_image_list.

        NOTE(review): when painting_schem == 4 the randomly drawn per-pixel
        value is immediately overwritten by `painting` (== 1), so scheme 4
        effectively paints a constant 1 -- confirm whether that is intended.
        """
        defect_data = defect_image_list
        for n in range(len(defect_data)):
            image = defect_data[n]['image']
            h, w = image.shape
            for p in range(np.random.randint(3,5)):
                # painting_schem picks one of the colouring schemes: 1-3 are
                # fixed intensity ranges, 4 was meant to be per-pixel noise.
                painting_schem = np.random.randint(1, 5)
                painting = 1
                if painting_schem < 4:
                    painting = self.painting_random_fetch(painting_schem=painting_schem)
                for i in range(h):
                    left_ = 0
                    left_2 = 0
                    right_ = 0
                    switch = 0
                    j = 0
                    while j < w:
                        # Scan one zero run followed by one non-zero run.
                        left_2 = j
                        while j < w and image[i][j] == 0:
                            j = j + 1
                        left_ = j
                        while j < w and image[i][j] != 0:
                            j = j + 1
                        right_ = j
                        if left_ != right_:
                            # Alternate between filling only the solid run and
                            # extending the fill over the preceding gap.
                            if switch == 0:
                                switch = 1
                            switch = (-1)*switch
                            if switch == 1:
                                left_ = left_2
                            for k in range(left_, right_):
                                if painting_schem == 4:
                                    image[i][k] = np.random.randint(1,255)
                                image[i][k] = painting
            self.defect_painting_image_list.append({'name':defect_data[n]['name'], 'image':image})
        self.defect_affirm['painting_affirm'] = True
# 对缺陷进行旋转
def defect_rotate(self, defect_image_list):
defect_data = defect_image_list
for n in range(len(defect_data)):
image = defect_data[n]['image']
for s in range(np.random.randint(3, 5)):
rotation_angle = np.random.randint(0, 360)
image = Image.fromarray(image.astype(np.uint8))
image = image.rotate(rotation_angle)
image = np.array(image)
self.defect_rotate_image_list.append({'name':defect_data[n]['name'], 'image':image})
self.defect_affirm['rotate_affirm'] = True
# 从xml文件里得到对应铝锭表面图片的缺陷框,分为缺陷和纹理
def get_defectbox_from_xml(self, xlm_filename):
tree = ET.parse(xlm_filename)
obj_box = []
vein_box = []
for obj in tree.findall('object'):
if obj.find('name').text == 'vein':
bbox = obj.find('bndbox')
box = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
vein_box.append(box)
else:
bbox = obj.find('bndbox')
box = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
obj_box.append(box)
return obj_box, vein_box
    # Pick a random paste location inside a vein region.
    def select_defect_loacte(self, obj_box, vein_box, defect_size):
        """Return [row, col] of the top-left corner for a defect of
        `defect_size` (h, w): drawn uniformly inside one randomly chosen vein
        box, redrawn while it intersects any box in `obj_box`; gives up and
        returns None after 300 attempts."""
        # Number of placement attempts so far.
        find_num = 0
        vein = vein_box[np.random.randint(0, len(vein_box))]
        locate = []
        locate.append(np.random.randint(vein[1] + 1, vein[3] - defect_size[0]))#h
        locate.append(np.random.randint(vein[0] + 1, vein[2] - defect_size[1]))#w
        while self.judge_inter(obj_box, locate, defect_size) and find_num<300:
            locate[0] = np.random.randint(vein[1] + 1, vein[3] - defect_size[0])
            locate[1] = np.random.randint(vein[0] + 1, vein[2] - defect_size[1])
            find_num = find_num + 1
        if find_num < 300:
            return locate
        else:
            print('获取位置失败')
            return None
    # Test whether the candidate defect box intersects any existing box.
    def judge_inter(self, obj_box, locate, defect_size):
        """Return True when the candidate box at `locate` overlaps at least
        one box in `obj_box` (area > 0), False otherwise (including when
        `obj_box` is empty).

        NOTE(review): `defect_box` is built as
        [row, col, row + defect_w, col + defect_h] but compared against
        obj_box entries stored as [xmin, ymin, xmax, ymax] -- the row/column
        axes look swapped (and the extents crossed); confirm against how
        `locate` is produced in select_defect_loacte.
        """
        defect_box = [locate[0], locate[1], locate[0] + defect_size[1], locate[1] + defect_size[0]]
        defect_box = np.array(defect_box)
        obj_box = np.array(obj_box)
        if len(obj_box) == 0:
            inters = 0
        elif len(obj_box) == 1:
            ixmin = np.maximum(obj_box[0, 0], defect_box[0])
            iymin = np.maximum(obj_box[0, 1], defect_box[1])
            ixmax = np.minimum(obj_box[0, 2], defect_box[2])
            iymax = np.minimum(obj_box[0, 3], defect_box[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih
        else:
            ixmin = np.maximum(obj_box[:, 0], defect_box[0])
            iymin = np.maximum(obj_box[:, 1], defect_box[1])
            ixmax = np.minimum(obj_box[:, 2], defect_box[2])
            iymax = np.minimum(obj_box[:, 3], defect_box[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih
        print('inters', inters, np.sum(np.array(inters) <= 0), (np.array(inters)).size)
        # No overlap only when every pairwise intersection area is <= 0.
        if np.sum(np.array(inters) <= 0) == (np.array(inters)).size:
            return False
        else:
            return True
    # Paste a random subset of templates into the vein regions of `image`.
    def defect_translation(self, image, defect_image_list, filename):
        """Place 1-3 randomly chosen templates at non-overlapping locations
        inside the vein boxes of `filename`'s annotation, drawing only the
        non-zero template pixels onto `image`.

        Returns (image, boxes) where `boxes` are dicts ready for savelabel,
        or None when no valid location could be found for some template.
        Consumed templates are removed from `defect_image_list` in place.

        NOTE(review): the obj_box entry appended after each paste uses
        defect_size[1] for both extents (its last element should presumably
        use defect_size[0], as the `box` dict does); also `locate == None`
        would read clearer as `locate is None`.
        """
        # Existing defect boxes and vein (paste-allowed) boxes from the XML.
        obj_box, vein_box = self.get_defectbox_from_xml(filename)
        h, w = image.shape
        # print(len(defect_image))
        assert len(defect_image_list)>0, '未生成缺陷,不能与样本合成有缺陷的样本'
        boxes = []
        # Draw a random multiset of template indices, dedupe, and sort
        # descending so pop() below does not shift later indices.
        high = min(len(defect_image_list), 4)
        low = 1
        if len(defect_image_list)>=2:
            low = 2
        defect_image_fetch = np.random.randint(low=0, high=len(defect_image_list), size=np.random.randint(low, high+1))
        defect_image_fetch = list(defect_image_fetch)
        defect_image_fetch = list(set(defect_image_fetch))
        defect_image_fetch.sort(reverse=True)
        for n in defect_image_fetch:
            defect_image_ = defect_image_list[n]['image']
            defect_size = defect_image_.shape
            # print(defect_image_.shape)
            locate = self.select_defect_loacte(obj_box, vein_box, defect_size)#h,w
            if locate == None :
                return None
            else:
                # Copy only foreground (non-zero) template pixels.
                for i in range(defect_size[0]):
                    for j in range(defect_size[1]):
                        if defect_image_[i][j] != 0:
                            image[i + locate[0]][j + locate[1]] = defect_image_[i][j]
                box = {'name':defect_image_list[n]['name'], 'xmin': locate[1] - 1, 'ymin': locate[0] - 1,
                       'xmax': locate[1] + defect_size[1] + 1, 'ymax': locate[0] + defect_size[0] + 1}
                print(locate)
                print('defectsize',defect_size)
                print('box',box)
                boxes.append(box)
                # Register the new defect so later pastes avoid it.
                defect_box = [locate[1] - 1, locate[0] - 1, locate[1] + defect_size[1] + 1,
                              locate[0] + defect_size[1] + 1]
                obj_box.append(defect_box)
        # Remove consumed templates (indices are sorted descending).
        for i in range(len(defect_image_fetch)):
            defect_image_list.pop(defect_image_fetch[i])
        return image, boxes
# 按一定分布得到一随机数,以此作为缺陷图片的大小
def scale_random_fetch(self):
p = np.random.randint(0,10)
if p < 2:
size = np.random.randint(8,20)
elif p < 4:
size = np.random.randint(20,40)
elif p < 6:
size = np.random.randint(40,60)
elif p < 8:
size = np.random.randint(60,80)
else:
size = np.random.randint(80,100)
return size
# 对缺陷进行大小变换
def defect_scale(self, defect_image_list):
defect_data = defect_image_list
for n in range(len(defect_data)):
image = defect_data[n]['image']
for s in range(np.random.randint(3, 5)):
size = self.scale_random_fetch()
image = Image.fromarray(image.astype(np.uint8))
image = image.resize((size, size), Image.ANTIALIAS)
image = np.array(image)
self.defect_scale_image_list.append({'name':defect_data[n]['name'], 'image':image})
self.defect_affirm['scale_affirm'] = True
    # Apply random, roughly area-preserving aspect-ratio changes to templates.
    def defect_ratio(self, defect_image_list):
        """Resize each template 3-4 times so its area stays close to h*w while
        the aspect ratio becomes a random h_:w_, appending every intermediate
        to defect_ratio_image_list.

        NOTE(review): PIL's Image.resize takes (width, height) but this call
        passes (size_h, size_w) -- the axes may be swapped.  Also np.int was
        removed in NumPy 1.24 (use int) and Image.ANTIALIAS was removed in
        Pillow 10 (use Image.LANCZOS).
        """
        defect_data = defect_image_list
        for n in range(len(defect_data)):
            image = defect_data[n]['image']
            h, w = image.shape
            for s in range(np.random.randint(3, 5)):
                h_, w_ = np.random.randint(1,11,size=2)
                size_h = np.int(np.sqrt((h * w) / (h_ * w_)) * h_) + 1
                size_w = np.int(np.sqrt((h * w) / (h_ * w_)) * w_) + 1
                image = Image.fromarray(image.astype(np.uint8))
                image = image.resize((size_h, size_w), Image.ANTIALIAS)
                image = np.array(image)
                self.defect_ratio_image_list.append({'name':defect_data[n]['name'], 'image':image})
        self.defect_affirm['ratio_affirm'] = True
# Ad-hoc driver: binarize every paint-smear photo in `datadir` and save the
# processed template next to the original with a 'k' suffix.
# NOTE(review): if get_defect_paint_smear fails to crop (returns None),
# saveimage below would raise -- confirm the inputs always crop cleanly.
if __name__ == '__main__':
    # k = []
    datadir = 'H:\\defect\\paint_smear'
    ga = Generate_Defect()
    for imagefile in os.listdir(datadir):
        imageroot = os.path.join(datadir, imagefile)
        image = ga.readimage(imageroot, channel=3)
        # print(image)
        image = ga.get_defect_paint_smear(image)
        name = imagefile + 'k'
        ga.saveimage(image,saveimage_name=name, saveimage_root=datadir)
| [
"17696272096@163.com"
] | 17696272096@163.com |
9a2213ec3ee95a7ecf2b52d08704ba5983875ca3 | 7f4306057991622329ed3ab43c8e338ebdfb6d74 | /pilgram/css/blending/tests/test_nonseparable.py | 9e606fbb615e0ff3891a7809437768cffa36a290 | [
"Apache-2.0"
] | permissive | akiomik/pilgram | d958312c98a5418d176ad884c50303e9f2731825 | 410252928e50e954472ff5ffcd45446aa8be32f7 | refs/heads/main | 2023-07-24T06:54:00.285954 | 2023-07-18T00:26:42 | 2023-07-18T00:26:42 | 169,348,812 | 90 | 19 | Apache-2.0 | 2023-09-12T12:31:08 | 2019-02-06T03:10:44 | Jupyter Notebook | UTF-8 | Python | false | false | 4,985 | py | # Copyright 2019 Akiomi Kamakura
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import floor
import pytest
from PIL import Image, ImageMath
from PIL.ImageMath import imagemath_convert as _convert
from pilgram import util
from pilgram.css.blending.nonseparable import (
_clip_color,
_max3,
_min3,
lum,
lum_im,
sat,
set_lum,
set_sat,
)
def test_min3():
    """_min3 picks the per-pixel minimum of the three bands (here 0)."""
    im = util.fill((1, 1), [0, 128, 255])
    r, g, b = im.split()
    im_min = ImageMath.eval('convert(min3((r, g, b)), "L")', min3=_min3, r=r, g=g, b=b)
    assert list(im_min.getdata()) == [0]


def test_max3():
    """_max3 picks the per-pixel maximum of the three bands (here 255)."""
    im = util.fill((1, 1), [0, 128, 255])
    r, g, b = im.split()
    im_max = ImageMath.eval('convert(max3((r, g, b)), "L")', max3=_max3, r=r, g=g, b=b)
    assert list(im_max.getdata()) == [255]
def test_clip_color():
    """_clip_color clamps out-of-range channels while preserving luminosity."""
    im = util.fill((1, 1), [0, 128, 255])
    r, g, b = im.split()
    bands = ImageMath.eval(
        "clip_color((float(r - 64), float(g), float(b + 64)))",
        clip_color=_clip_color,
        r=r,
        g=g,
        b=b,
    )
    expected = [
        [pytest.approx(25.70517158047366, 1e-6)],
        [pytest.approx(106.8796587856024, 1e-6)],
        [pytest.approx(187.63136220320442, 1e-6)],
    ]
    assert [list(band.im.getdata()) for band in bands] == expected


def test_lum():
    """lum computes Rec.-601-style luminosity; converting to 'L' floors it."""
    im = util.fill((1, 1), [0, 128, 255])
    r, g, b = im.split()
    im_f = ImageMath.eval("lum((float(r), float(g), float(b)))", lum=lum, r=r, g=g, b=b)
    im_l = im_f.convert("L")
    assert list(im_f.getdata()) == [pytest.approx(103.57, 1e-6)]
    assert list(im_l.getdata()) == [floor(103.57)]


def test_lum_im():
    """lum_im is the image-level variant; it rounds instead of flooring."""
    im = util.fill((1, 1), [0, 128, 255])
    im_lum = lum_im(im)
    assert list(im_lum.getdata()) == [round(103.57)]
def test_set_lum():
    """set_lum re-targets im1's colors to im2's (mid-gray) luminosity."""
    im1 = util.fill((1, 1), [0, 128, 255])
    im2 = util.fill((1, 1), [128, 128, 128])
    r1, g1, b1 = im1.split()
    r2, g2, b2 = im2.split()
    c1 = "(float(r1), float(g1), float(b1))"
    c2 = "(float(r2), float(g2), float(b2))"
    bands = ImageMath.eval(
        "set_lum({}, lum({}))".format(c1, c2),
        set_lum=set_lum,
        lum=lum,
        r1=r1,
        g1=g1,
        b1=b1,
        r2=r2,
        b2=b2,
        g2=g2,
    )
    expected1 = [
        [pytest.approx(41.13881001122631, 1e-6)],
        [pytest.approx(148.48874067225782, 1e-6)],
        [255],
    ]
    assert [list(band.im.getdata()) for band in bands] == expected1
    # Merging through 'L' conversion floors the float bands to ints.
    im_set_lum = Image.merge("RGB", [_convert(band, "L").im for band in bands])
    expected2 = [(floor(41.13881001122631), floor(148.48874067225782), 255)]
    assert list(im_set_lum.getdata()) == expected2
def test_sat():
    """sat returns max - min of the channels (200 - 80 = 120)."""
    im = util.fill((1, 1), [80, 128, 200])
    r, g, b = im.split()
    im_sat = ImageMath.eval('convert(sat((r, g, b)), "L")', sat=sat, r=r, g=g, b=b)
    assert list(im_sat.getdata()) == [120]


def test_set_sat_cmax_gt_cmin():
    """set_sat rescales im1's channel spread to im2's saturation (64)."""
    im1 = util.fill((1, 1), [0, 128, 255])
    im2 = util.fill((1, 1), [64, 96, 128])  # sat = 64
    r1, g1, b1 = im1.split()
    r2, g2, b2 = im2.split()
    bands = ImageMath.eval(
        "set_sat((r1, g1, b1), sat((r2, g2, b2)))",
        set_sat=set_sat,
        sat=sat,
        r1=r1,
        g1=g1,
        b1=b1,
        r2=r2,
        g2=g2,
        b2=b2,
    )
    expected = [
        [0],
        [pytest.approx(32.12549019607843, abs=1)],
        [64],
    ]
    assert [list(band.im.getdata()) for band in bands] == expected
def test_set_sat_cmax_eq_cmid_gt_cmin():
    """Degenerate case: the two largest channels are equal; both map to sat."""
    im1 = util.fill((1, 1), [0, 128, 128])
    im2 = util.fill((1, 1), [64, 96, 128])  # sat = 64
    r1, g1, b1 = im1.split()
    r2, g2, b2 = im2.split()
    bands = ImageMath.eval(
        "set_sat((r1, g1, b1), sat((r2, g2, b2)))",
        set_sat=set_sat,
        sat=sat,
        r1=r1,
        g1=g1,
        b1=b1,
        r2=r2,
        g2=g2,
        b2=b2,
    )
    expected = [[0], [64], [64]]
    assert [list(band.im.getdata()) for band in bands] == expected


def test_set_sat_cmax_eq_cmin():
    """Fully gray input has zero spread, so every channel collapses to 0."""
    im1 = util.fill((1, 1), [128, 128, 128])
    im2 = util.fill((1, 1), [64, 96, 128])  # sat = 64
    r1, g1, b1 = im1.split()
    r2, g2, b2 = im2.split()
    bands = ImageMath.eval(
        "set_sat((r1, g1, b1), sat((r2, g2, b2)))",
        set_sat=set_sat,
        sat=sat,
        r1=r1,
        g1=g1,
        b1=b1,
        r2=r2,
        g2=g2,
        b2=b2,
    )
    expected = [[0], [0], [0]]
    assert [list(band.im.getdata()) for band in bands] == expected
| [
"akiomik@gmail.com"
] | akiomik@gmail.com |
bdfad6b54ae00a6d7c9e0d9cd948635cf824be8a | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/fibrou.py | 300e431fb55e7fb34927daf703980349d1a387f7 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 357 | py | ii = [('RogePAV2.py', 4), ('CoolWHM2.py', 1), ('RogePAV.py', 20), ('RennJIT.py', 2), ('ProuWCM.py', 2), ('PettTHE.py', 2), ('AdamWEP.py', 2), ('WilkJMC2.py', 2), ('KiddJAE.py', 2), ('BuckWGM.py', 6), ('WestJIT2.py', 1), ('MedwTAI.py', 1), ('KirbWPW2.py', 1), ('MedwTAI2.py', 1), ('BuckWGM2.py', 4), ('FitzRNS4.py', 2), ('BellCHM.py', 2), ('AinsWRR2.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
6172f4ceb50b246a9a22c31b5e7ca149bb778ad7 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/M/malo/blablaabla.py | 6bbd29c6f5b4f88d800a03fb2216d159be561e10 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | import scraperwiki
html = scraperwiki.scrape("http://www.wipo.int/amc/en/domains/search/text.jsp?case=D2013-0001")
print html
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
44f89ac374c146890f0b8ea11344cfeacb3cc065 | 04a643a77927bc56ab58c7df91d4733321e61e51 | /p19_plots/fig_hair_mosh.py | 2bd63d253824490409c2a69e2466befa213887e2 | [] | no_license | dcollins4096/p19_newscripts | d2fae1807170a4d70cf4c87222a6258211f993ff | 23c780dd15b60944ed354406706de85282d0bee6 | refs/heads/master | 2023-07-21T11:53:55.188383 | 2023-07-18T17:38:21 | 2023-07-18T17:38:21 | 215,159,839 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,028 | py |
from starter2 import *
from collections import defaultdict
import scipy
import colors
import hair_dryer
reload(hair_dryer)
import three_loopers_u500 as TL
def simple_hair(this_looper,core_list=None):
    """Make a three-panel "hair" plot for each core in *core_list*.

    Panels: particle positions (full view and zoomed box), and mean particle
    speeds (total, tangential, |radial|) with per-particle density on a twin
    axis.  One PNG per core is written under plots_to_sort/.

    :param this_looper: looper object exposing a ``.tr`` track
        (presumably from three_loopers_u500 -- confirm against caller).
    :param core_list: iterable of core ids; defaults to every core in the track.
    """
    if core_list is None:
        core_list = np.unique(this_looper.tr.core_ids)
    thtr=this_looper.tr
    # Boolean mask selecting the quantized subset of frames/times to plot.
    mask = movie_frames.quantized_mask(this_looper).flatten()
    all_times=thtr.times
    all_frames=thtr.frames
    times=thtr.times[mask]+0 #the zero makes a copy
    # Column vector so it broadcasts against the (time, particle) arrays below.
    times.shape=times.size,1
    # Express times in units of the free-fall time.
    times=times/colors.tff
    for core_id in core_list:
        fig,axes=plt.subplots(3,1, figsize=(6,10))
        # ax: full position view, ax3: zoomed view, ax1: velocity/density panel.
        ax=axes[0];ax1=axes[2]; ax3=axes[1]
        ms = trackage.mini_scrubber(thtr,core_id, do_velocity=True)
        ms.particle_pos(core_id)
        # Line of sight chooses which two of (x, y, z) are plotted.
        LOS = 0
        x = [1,0,1][LOS] # Using [1,0,1] and [2,2,0]
        y = [2,2,0][LOS] # unfolds nicely.
        if False:
            # Debug branch: every particle, heavier alpha.
            sl=slice(None)
            c=[0.5]*4
        else:
            # Production branch: every 30th particle, faint RGBA gray.
            sl = slice(None,None,30)
            #c=[0,0,0,0.1]
            c=[0.1]*4
        Linewidth1=Linewidth2=0.2
        print(sl)
        print(c)
        # Per-particle quantities, transposed to (time, particle) and masked in time.
        rho = ms.density[sl].transpose()#*colors.density_units
        rho = rho[mask,:]
        dv = ms.cell_volume[sl].transpose()[mask,:]
        vv = dv.sum(axis=1)
        vx = ms.rel_vx[sl].transpose()[mask,:]
        vy = ms.rel_vy[sl].transpose()[mask,:]
        vz = ms.rel_vz[sl].transpose()[mask,:]
        # Speed statistics use ALL particles ([:]), not the thinned slice.
        v22_all = ms.rel_vmag[:].transpose()[mask,:]
        vr_all = ms.vr_rel[:].transpose()[mask,:]
        vt_all = (ms.vt2_rel[:].transpose()[mask,:])**0.5
        # Mean over particles at each time.
        vrm=vr_all.mean(axis=1)
        v2 = v22_all.mean(axis=1)
        vtm=vt_all.mean(axis=1)
        # Density shares the time axis with the velocity panel via a twin y-axis.
        rho_plot=ax1.twinx()
        print(rho.shape,c)
        rho_plot.plot(times, rho*colors.density_units, c=c, linewidth=Linewidth1)
        rho_plot.set(yscale='log',ylabel=r'$\rho_{\rm{particle}} [cm^{-3}]$')
        # black: mean speed, cyan: tangential, red: |radial|.
        ax1.plot(times, v2, c='k')
        ax1.plot(times, vtm, c='c')
        ax1.plot(times, np.abs(vrm), c='r')
        ax1.set(ylabel=r'$v_{\rm{particles}}/c_s$', xlabel=r'$t/t_{\rm{ff}}$')
        p = [ms.particle_x[sl].transpose(),ms.particle_y[sl].transpose(),ms.particle_z[sl].transpose()]
        for aaa in [ax,ax3]:
            # black dots: first snapshot, red dots: last, faint lines: trajectories.
            aaa.scatter( p[x][0,:].flatten(),p[y][0,:].flatten(),c='k',s=0.1)
            aaa.scatter( p[x][-1,:].flatten(),p[y][-1,:].flatten(),c='r',s=0.1)
            aaa.plot( p[x], p[y], c=c, linewidth=0.3)
            # NOTE(review): 'xyz [code length]'[x] indexes the STRING, yielding a
            # single character ('x', 'y' or 'z'); the '[code length]' suffix is
            # never displayed -- confirm this label is intended.
            aaa.set(xlabel='xyz [code length]'[x], ylabel='xyz [code length]'[y])
        # Zoom box drawn on the full view and used as limits for the middle panel.
        x0,x1=[0.090,0.175]
        y0,y1=[0.15,0.25]
        ax.plot([x0,x1,x1,x0,x0],[y0,y0,y1,y1,y0],c='r',linewidth=Linewidth1)
        ax3.set(xlim=[x0,x1],ylim=[y0,y1])
        outname='plots_to_sort/%s_mosh_%s_c%04d.png'%(this_looper.sim_name,'xyz'[LOS],core_id)
        fig.tight_layout()
        fig.savefig(outname)
        print(outname)
# Driver: plot core 9 of simulation 'u502' (loopers loaded by three_loopers_u500).
sims=[ 'u502']
for sim in sims:
    core_list=[9]
    print('word')
    simple_hair(TL.loops[sim],core_list=core_list)
| [
"dccollins@fsu.edu"
] | dccollins@fsu.edu |
e0cb24cdf4bc78dca043fbefd148f25a1bede4f9 | 2a66fdf4ddcfb475f80a61a8d8c31b3a320c5bae | /code/hprog59.py | 3db8c1e47c0056cf681abeea658da9f87b3d3ba1 | [] | no_license | sujasriman/guvi | 12143757bee6e0679ca44f44a6624d34a6dd2cb4 | 74b4caf2a9c58da5e72eabe0b05adfe77310f71b | refs/heads/master | 2020-05-31T07:24:37.295062 | 2019-08-12T18:24:22 | 2019-08-12T18:24:22 | 190,163,562 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | n=int(input())
# Read the two rows of whitespace-separated integers (n was read above).
# Replaces the original's dead per-iteration `a = 0` and two-step print loop;
# the printed output is identical: n column sums, space-separated, one line.
rows = [input().split() for _ in range(2)]
# Column-wise sums of the first n columns of the two rows.
sums = [int(rows[0][i]) + int(rows[1][i]) for i in range(n)]
print(' '.join(str(s) for s in sums))
| [
"noreply@github.com"
] | sujasriman.noreply@github.com |
8c3abcbe5c6d057c305c7384d01b769a48f5b9c5 | 425db5a849281d333e68c26a26678e7c8ce11b66 | /LeetCodeSolutions/LeetCode_0170.py | 2e5beb6f321161d1499a11d942cd419feca374df | [
"MIT"
] | permissive | lih627/python-algorithm-templates | e8092b327a02506086414df41bbfb2af5d6b06dc | a61fd583e33a769b44ab758990625d3381793768 | refs/heads/master | 2021-07-23T17:10:43.814639 | 2021-01-21T17:14:55 | 2021-01-21T17:14:55 | 238,456,498 | 29 | 8 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | class TwoSum:
def __init__(self):
"""
Initialize your data structure here.
"""
import collections
self.nums = collections.defaultdict(int)
def add(self, number: int) -> None:
"""
Add the number to an internal data structure..
"""
self.nums[number] += 1
def find(self, value: int) -> bool:
"""
Find if there exists any pair of numbers which sum is equal to the value.
"""
for key in self.nums:
if value - key in self.nums:
if value - key == key:
if self.nums[key] > 1:
return True
else:
return True
return False
# Your TwoSum object will be instantiated and called as such:
# obj = TwoSum()
# obj.add(number)
# param_2 = obj.find(value)
| [
"lih627@outlook.com"
] | lih627@outlook.com |
b0eaf2f6d26344d045fa9fdb82857f26f776ef18 | 00f1e3047afe17a4c213b7e5655a5a4d99dc94c6 | /mainapp/ajax.py | 0a395dae7a9fe960e7352907e86f2a6731622865 | [] | no_license | zhyfruit/DietCat | 3e52391cce3b3245c9ae940bea35e9ab5cfd2d74 | 15a66f5b9de3d8cbe7cce2fa4458de85dd383c76 | refs/heads/master | 2020-03-27T04:42:01.497212 | 2018-08-23T10:47:59 | 2018-08-23T10:47:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | from django.shortcuts import render
from django.shortcuts import HttpResponse
# 下载文件要用
from django.http import FileResponse
# Handle the user-feedback submission (posted via ajax).
# (Original author's note: ajax isn't strictly necessary on this page since
# there is no other interactive element, but the page uses it anyway.)
def subProp(request):
    """Accept a feedback message POSTed in the 'prop' form field.

    Returns HttpResponse('1') after a POST and HttpResponse('3') otherwise;
    the client-side script interprets these status strings.
    """
    if request.method == 'POST':
        # Pull the submitted feedback text out of the form data.
        msg = request.POST.get('prop')
        print(msg)
        # TODO: persist msg to the database.
        return HttpResponse('1')
    return HttpResponse('3')
| [
"java233@foxmail.com"
] | java233@foxmail.com |
6409b6315841b7310d574b1298147aeae7cbc055 | e557b8b67412a281b95161c3eb878bbfaa3ab436 | /testEnvironmentHeader.py | 851cfe69171b96c1a3ffa440ced182351ce31643 | [] | no_license | BARarch/bash-pipe | a995a27b40877d0da8aa05befba9df155fa46a65 | 9afdc6535ee61a4bbc02c31c708372f7b802f336 | refs/heads/master | 2023-01-22T14:02:48.634824 | 2023-01-16T15:58:57 | 2023-01-16T15:58:57 | 181,198,216 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | import math
import os
import random
import re
import sys
import qtimer
# Complete the function below.
@qtimer.timeit | [
"anthonyquivers@gmail.com"
] | anthonyquivers@gmail.com |
0a352ed1e90f79878495e8ff29d3a10d4aeb72eb | 55b57d64ec547869835334318f3059fbb507558c | /Fred2/Data/pssms/tepitopepan/mat/DRB1_1384_9.py | 2a7524c2956d25ebb184a415e1533a49f23753b5 | [
"BSD-3-Clause"
] | permissive | FRED-2/Fred2 | 9845f6678d4011cb746c7a5a6f283eea68077a02 | b3e54c8c4ed12b780b61f74672e9667245a7bb78 | refs/heads/master | 2021-07-12T05:05:54.515427 | 2020-05-25T06:56:25 | 2020-05-25T06:56:25 | 16,275,425 | 42 | 35 | null | 2021-07-07T12:05:11 | 2014-01-27T10:08:11 | Python | UTF-8 | Python | false | false | 2,166 | py | DRB1_1384_9 = {0: {'A': -999.0, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': -0.98558, 'I': -0.014418, 'H': -999.0, 'K': -999.0, 'M': -0.014418, 'L': -0.014418, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': -0.98558, 'V': -0.014418, 'Y': -0.98558}, 1: {'A': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'E': -1.2516, 'D': -1.4689, 'G': -1.5137, 'F': 0.69584, 'I': -0.25302, 'H': 1.0176, 'K': 0.51692, 'M': 0.84257, 'L': 0.52778, 'N': 0.0663, 'Q': 0.34672, 'P': -1.5309, 'S': -0.63039, 'R': 0.044313, 'T': -0.97765, 'W': 0.53815, 'V': -0.60455, 'Y': 0.16337}, 4: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'E': -1.4135, 'D': -2.3788, 'G': -0.70956, 'F': -1.3942, 'I': 0.68776, 'H': -0.11894, 'K': 1.2454, 'M': -0.90138, 'L': 0.18164, 'N': -0.57129, 'Q': -0.31767, 'P': 0.49193, 'S': -0.083453, 'R': 0.95198, 'T': 0.81432, 'W': -1.3929, 'V': 1.1936, 'Y': -1.3988}, 6: {'A': 0.0, 'E': -1.1817, 'D': -1.7837, 'G': -1.4422, 'F': 0.33363, 'I': -0.12763, 'H': 0.1599, 'K': -0.14075, 'M': 0.31712, 'L': 0.55475, 'N': 0.047175, 'Q': -0.072232, 'P': -0.67377, 'S': -0.76102, 'R': 0.94339, 'T': -0.7475, 'W': 
0.40934, 'V': -0.13174, 'Y': 0.0062252}, 7: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'E': -0.90493, 'D': -1.0724, 'G': -0.2193, 'F': 0.057698, 'I': 0.22715, 'H': -0.2517, 'K': -0.30896, 'M': 0.44032, 'L': -0.40372, 'N': -0.9256, 'Q': 0.057376, 'P': -0.75109, 'S': 0.87213, 'R': -0.19283, 'T': -0.76762, 'W': -0.99887, 'V': -0.12038, 'Y': -0.27794}} | [
"schubert@informatik.uni-tuebingen.de"
] | schubert@informatik.uni-tuebingen.de |
113d8826277c464d78f5df2901a3616ed0be649c | 307089d509d2b72ac036b7fcc5bd60f5759cca6f | /opencv/timelapse-usb.py | 5796e4b04d7171718fe2ddbaca9b0b4efb04bce1 | [] | no_license | bluemooninc/campi | 45a7bf480d6c507a20f132c64ed8315776ccacbb | 7614e2847e12442c1900281662b7bac587a9ee46 | refs/heads/master | 2020-04-06T13:12:41.184245 | 2016-09-06T14:40:03 | 2016-09-06T14:40:03 | 52,285,836 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,127 | py | import cv2
# Standard library (Python 2)
import ConfigParser
import datetime
import glob
import logging
import os
import os.path
import re
import socket
import sys
import time

# Third-party
import numpy as np
import scp

# Local (project) modules
import lcd
##
## config
##
# All tunables live in /home/pi/camlaps.ini (Python 2 ConfigParser).
inifile = ConfigParser.SafeConfigParser()
inifile.read("/home/pi/camlaps.ini")
serialno = inifile.get("user","serialno")
frameWidth = inifile.getint("camera","frameWidth")
frameHeight = inifile.getint("camera","frameHeight")
delay = inifile.getint("camera","delay")
shottime = inifile.getint("camera","shottime")
## get ip address
# Connecting a UDP socket toward the default gateway reveals the local
# address without actually sending any packets.
gw = os.popen("ip -4 route show default").read().split()
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((gw[2], 0))
ipaddr = s.getsockname()[0]
LOG_FILENAME = '/var/log/timelapse.log'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG)
logging.debug(cv2.__version__)
logging.debug('timelapse start...')
# initialize the camera and grab a reference to the raw camera capture
print frameWidth
print frameHeight
# On-frame timestamp style for cv2.putText (color is BGR).
location = (0,30)
fontscale = 2.0
fontface = cv2.FONT_HERSHEY_PLAIN
color = (255,190,0)
dt = datetime.datetime.today()
seekfile = '/home/pi/picture/img%02d-*.jpg' % dt.hour
newestCount = 0
##
## capture start
##
# capture frames from the camera
count = 0
cap = cv2.VideoCapture(0)
# Property ids 3/4 correspond to frame width / frame height.
cap.set(3,frameWidth)
cap.set(4,frameHeight)
if not cap:
    print "Could not open camera"
    sys.exit()
time.sleep(1)
while(cap.isOpened()):
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    ret, img = cap.read()
    print count
    now = datetime.datetime.now()
    msg = now.strftime("%Y/%m/%d %H:%M:%S")
    cv2.putText(img,msg,location,fontface,fontscale,color,4)
    # One file per (hour, shot number); remove any leftover from a previous run.
    fname = "img%02d-%04d.jpg" % (dt.hour,count,)
    fpath = "/home/pi/picture/" + fname
    #logging.debug("debug:"+fname)
    if os.path.exists(fpath):
        os.remove(fpath)
    print fname + msg
    cv2.imwrite(fpath, img)
    lcd.printLcd("Shot:%04d/%04d, IP:%s" % (count,shottime,ipaddr))
    # Keep shooting, 'delay' seconds apart, until 'shottime' frames are captured.
    if count < newestCount+shottime:
        time.sleep(delay)
        count+=1
    else:
        break
##
## finish
##
lcd.printIP()
| [
"root@raspberrypi.(none)"
] | root@raspberrypi.(none) |
9cc2627bf5272352630d9aa278a96054cea3167f | 1f1b62a23e9267fba41a5f9dc757d2c107d3d7c1 | /www.scxsls.com/scxsls.py | 9446e44147732c749163f9f9de14280849a508bf | [
"Apache-2.0"
] | permissive | zzh-python/all-project | 0e45e2e542b9e63f6ed080ad47725d71f1590772 | 915a47fb42d63ff3a36814992283c2f4ed8703a3 | refs/heads/master | 2020-07-24T01:47:19.924959 | 2019-09-11T08:48:17 | 2019-09-11T08:48:17 | 207,765,744 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,362 | py |
from bs4 import BeautifulSoup
import requests
import time
import datetime
import json
import re
import xlsxwriter
import os
from docx import Document
from docx.shared import Inches
# Session cookie captured from a browser session; the site may expire it.
cook='UM_distinctid=16822772df50-0cca678828aea6-b353461-100200-16822772e8fd5; CNZZDATA5626149=cnzz_eid%3D569227167-1546763274-%26ntime%3D1546779612'
# Request headers used by get_Obj() so requests look like a normal browser
# visit to m.my478.com.
header={
'Cookie':cook,
'Host':'m.my478.com',
'Referer':'http://m.my478.com/jishu/list/',
# 'Upgrade-Insecure-Requests':'1',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36',
}
# (stale comment, originally 种植技术 "planting techniques"; the function
# below actually scrapes scxsls.com case-list pages)
def get_scxsls_link():
    """Walk all 648 pages of the scxsls.com case index and record one
    repr()'d [href, title] pair per line in scxsls_link.txt.

    Network I/O only; overwrites any previous link file.
    """
    # 'with' guarantees the file is closed (and flushed) even if a request
    # fails part-way through the crawl; the original leaked the handle then.
    with open('scxsls_link.txt', 'w+', encoding='utf-8') as f:
        for i in range(1, 649):
            print('第'+str(i)+'页')
            # Page 1 has no numeric suffix in its filename.
            if i == 1:
                url = 'http://www.scxsls.com/anli/index.html'
            else:
                url = 'http://www.scxsls.com/anli/index_'+str(i)+'.html'
            req = requests.get(url)
            # Pages are GB2312-encoded.
            req.encoding = 'GB2312'
            bsObj = BeautifulSoup(req.text, 'html.parser')
            jshu = bsObj.find_all('div', class_='cld_list')
            print(len(jshu))
            for div in jshu:
                a = div.find('h3').find('a')
                f.write(str([a.attrs['href'], a.get_text()])+'\n')
def get_Obj(url):
    """Fetch *url* (up to 5 attempts) and return the <li> elements of its
    'bot_list' <ul>, or None if every attempt fails.

    Uses the module-level 'header' dict so requests look like a browser.
    """
    for attempt in range(5):
        try:
            req = requests.get(url, headers=header)
            bsObj = BeautifulSoup(req.text, 'html.parser')
            div = bsObj.find('ul', class_='bot_list')
            return div.find_all('li')
        except Exception:
            # Narrowed from the original bare 'except:' so KeyboardInterrupt /
            # SystemExit still propagate; network errors and missing-element
            # AttributeErrors simply trigger another attempt.
            pass
    print(url+'失效')
    return None
def get_url(type, f):
    """Crawl the paginated vegnet.com.cn listing under *type* and append one
    repr()'d [href, title] line per entry to the open file *f*.

    Stops at the first short page (< 12 entries), after page 999, or when
    get_Obj() gives up on a page.
    """
    for i in range(1, 1000):
        print(str(i)+'页')
        url = 'http://www.vegnet.com.cn'+str(type)+'_p'+str(i)+'.html'
        p_list = get_Obj(url)
        if p_list is None:
            # get_Obj exhausted its retries; the original crashed here with
            # TypeError on len(None).
            break
        print(len(p_list))
        for p in p_list:
            a = p.find('a')
            f.write(str([a.attrs['href'], a.attrs['title']])+'\n')
        if len(p_list) < 12:
            break
def veg_get_all_link():
    """Crawl the listing pages for every link recorded by get_scxsls_link()
    and append the per-page results to scxsls_line_two.txt."""
    import ast
    with open('scxsls_line_two.txt', 'a+', encoding='utf-8') as f:
        for line in open('scxsls_link.txt', 'r', encoding='utf-8'):
            # The link file holds plain list literals, so literal_eval parses
            # them without eval()'s arbitrary-code-execution risk.
            record = ast.literal_eval(line)
            get_url(record[0].replace('.html', ''), f)
def d_load(src, imgrootname):
    """Download the image at *src* into ./<imgrootname>/ (keeping its URL
    basename) unless it is already there, and return that basename.

    Any failure (network error, missing directory, ...) is reported on stdout
    and swallowed, so the caller always gets the file name back.
    """
    # Compute the basename once (the original split the URL twice and also
    # built an unused 'root' path).
    filename = src.split('/')[-1]
    path = './' + imgrootname + '/' + filename
    try:
        if not os.path.exists(path):
            r = requests.get(src)
            r.raise_for_status()
            # 'with' closes the file even if the write fails.
            with open(path, "wb") as f:
                f.write(r.content)
            print("爬取完成")
        else:
            print("文件已存在")
    except Exception as e:
        print("爬取失败:"+str(e))
    return filename
def remove_control_characters(html):
    """Decode numeric HTML character references below U+10000 and strip ASCII
    control characters from *html*.

    Handles decimal (&#65;) and hexadecimal (&#x41;) references; references to
    astral code points (>= 0x10000) are left untouched.  Tab, LF and CR are
    deliberately excluded from the control-character strip.
    """
    def str_to_int(s, default, base=10):
        # Bug fix: the original called Python-2-only unichr(), which raises
        # NameError on Python 3; chr() is the py3 equivalent.
        code = int(s, base)
        if code < 0x10000:
            return chr(code)
        return default
    # Decimal references, e.g. &#65; -> 'A' (trailing semicolon optional).
    html = re.sub(r'&#(\d+);?', lambda c: str_to_int(c.group(1), c.group(0)), html)
    # Hexadecimal references, e.g. &#x41; -> 'A'.
    html = re.sub(r"&#[xX]([0-9a-fA-F]+);?", lambda c: str_to_int(c.group(1), c.group(0), base=16), html)
    # Drop ASCII control characters except \t, \n, \x0c and \r.
    html = re.sub(u"[\x00-\x08\x0b\x0e-\x1f\x7f]", "", html)
    return html
def get_detail():
    """For every link recorded in scxsls_link.txt, download the case page and
    save its title, source line and body text as a .docx under 刑事案例word/.

    Pages that fail are appended to scxsls_error.txt; already-saved cases are
    skipped, so an interrupted run can be resumed.
    """
    f_error=open('scxsls_error.txt','a+',encoding='utf-8')
    f_final=open('scxsls_final.txt','a+',encoding='utf-8')
    wordrootname='刑事案例word'
    if not os.path.exists(wordrootname):
        os.mkdir(wordrootname)
    r=0
    for line in open('scxsls_link.txt','r',encoding='utf-8'):
        # NOTE(review): eval() on file contents executes arbitrary code if the
        # link file is ever tampered with; ast.literal_eval would be safer.
        line=eval(line)
        if 'http' in line[0]:
            url=line[0]
        else:
            url='http://www.scxsls.com'+line[0]
        id=url.split('/')[-1].replace('.html','')
        print(url)
        # Strip every character Windows forbids in file names from the title.
        fname=line[1].strip().replace('?','').replace('|','').replace('"','').replace('>','').replace('<','').replace('*','').replace('*','').replace('\\','').replace(':','').replace('/','').replace('\t','').replace('\r','').replace('\n','')
        path1='./'+wordrootname+'/'+fname+'_'+str(id)
        # print(path1)
        # Skip cases that were already written in a previous run.
        if os.path.exists(path1+'.docx'):
            print('已存在')
            continue
        # print(id)
        doc = Document()
        # Up to three fetch attempts per page; pages are GB2312-encoded.
        for i in range(3):
            try:
                req=requests.get(url,timeout=15)
                req.encoding='GB2312'
                bsObj=BeautifulSoup(req.text,'lxml')
                #
                content=bsObj.find('div',{'id':'news_content'})
                origin_text=bsObj.find('div',{'id':'news_meta_left'}).get_text()
                # (write the title)
                break
            except:
                pass
        # print(origin_text)
        # print(content)
        # break
        if content==None or '来源' not in origin_text :
            print(url+'未取到数据')
            f_error.write(str(line)+'\n')
            continue
        # NOTE(review): this replace() is a no-op ('</p' -> '</p'); it looks
        # like a leftover from an earlier edit.
        content_word=str(content).replace('</p','</p')
        print(fname)
        doc.add_heading(line[1],level=0)
        # Keep only the ideographic-space-separated chunk containing the
        # '来源' (source) marker.
        origin_text=origin_text.split('\u3000')
        for ori in origin_text:
            if '来源' in ori:
                origin_text=ori
                break
        try:
            origin_text=re.search(r'来源:(.+?)\| 作者',origin_text).group(1)
        except:
            pass
        # Ensure the paragraph is prefixed with the source marker.
        if '来源' in origin_text:
            pass
        else:
            origin_text='来源:'+origin_text
        doc.add_paragraph(u'%s'%(origin_text))
        print(origin_text)
        # print(origin_text)
        # (write the body text)
        # content_word=remove_control_characters(BeautifulSoup(content_word,'html.parser').get_text())
        content_word=BeautifulSoup(content_word,'html.parser').get_text()
        doc.add_paragraph(u'%s'%( content_word))
        doc.save(path1+'.docx')
        # r=r+1
        # if r==5:
        # break
        # break
    f_error.close()
    f_final.close()
# Entry point: build one .docx per case recorded in scxsls_link.txt.
get_detail()
| [
"379935132@qq.com"
] | 379935132@qq.com |
23ba4376c2e5ab6b6503f003f02cea08b743baa8 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2813/60586/315241.py | 9b5ccf47b777567c25738279b88e15c540c046da | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | x=int(input())
# Read the x follow-up lines of the test case (x was read above).
z = [input() for _ in range(x)]
# Hard-coded answers keyed on the test case that was just read.
if x == 3:
    if (z[0], z[1]) in (("andrew 3", "andrew 2"), ("mike 3", "andrew 5")):
        print("andrew")
    else:
        print(x)
        print(z)
elif x in (15, 10):
    print("aawtvezfntstrcpgbzjbf")
elif x == 17:
    print("ivhgbxiv")
else:
    # Unknown case: echo what was read.
    print(x)
    print(z)
"1069583789@qq.com"
] | 1069583789@qq.com |
b61ec36b99c34c698da5c5d18d23ec69ebfac857 | 1a114943c92a5db40034470ff31a79bcf8ddfc37 | /stdlib_exam/stringio-example-3.py | b2b6aa2c19f7aec67b8f9b3e01fc7bc8b6628997 | [] | no_license | renwl/mylinux | 1924918599efd6766c266231d66b2a7ed6f6cdd1 | 0602fc6d2b0d254a8503e57310f848fc3e1a73b4 | refs/heads/master | 2020-07-10T22:12:03.259349 | 2017-01-02T12:32:04 | 2017-01-02T12:32:04 | 66,467,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | import io
import string, sys
stdout = sys.stdout
sys.stdout = file = io.StringIO()
print("""
According to Gbaya folktales, trickery and guile
are the best ways to defeat the python, king of
snakes, which was hatched from a dragon at the
world's start. -- National Geographic, May 1997
""")
sys.stdout = stdout
#print(string.upper(file.getvalue()))
print((file.getvalue()).upper())
## ACCORDING TO GBAYA FOLKTALES, TRICKERY AND GUILE
## ARE THE BEST WAYS TO DEFEAT THE PYTHON, KING OF
## SNAKES, WHICH WAS HATCHED FROM A DRAGON AT THE
## WORLD'S START. -- NATIONAL GEOGRAPHIC, MAY 1997
| [
"wenliang.ren@quanray.com"
] | wenliang.ren@quanray.com |
18f7e635724110a5baed431c364fc897b6b7cbd2 | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations_async/_ddos_protection_plans_operations_async.py | 317e4c9bdaa59ccbd8ccbf2b4a8fc26187651570 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 25,359 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
# Type of the optional 'cls' callback every operation accepts: it receives the
# raw pipeline response, the deserialized result, and a dict (passed as {} by
# the generated operations).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DdosProtectionPlansOperations:
"""DdosProtectionPlansOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        ddos_protection_plan_name: str,
        **kwargs
    ) -> None:
        """Send the initial DELETE request of the long-running delete.

        Polling to completion is handled by :meth:`begin_delete`; this helper
        only issues the request and validates the immediate status code.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Map 404/409 to specific azure-core exceptions; callers may extend or
        # override the mapping via the 'error_map' keyword.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Anything other than the documented success codes is raised as an ARM error.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        ddos_protection_plan_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes the specified DDoS protection plan.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_protection_plan_name: The name of the DDoS protection plan.
        :type ddos_protection_plan_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh operation: fire the initial DELETE; the identity 'cls'
            # keeps the raw pipeline response so the poller can read its headers.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                ddos_protection_plan_name=ddos_protection_plan_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Remove request-only kwargs before the remainder is forwarded to the
        # polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete has no body to deserialize; only honor a custom 'cls'.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
    async def get(
        self,
        resource_group_name: str,
        ddos_protection_plan_name: str,
        **kwargs
    ) -> "models.DdosProtectionPlan":
        """Gets information about the specified DDoS protection plan.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_protection_plan_name: The name of the DDoS protection plan.
        :type ddos_protection_plan_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DdosProtectionPlan, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_03_01.models.DdosProtectionPlan
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.DdosProtectionPlan"]
        # Map 404/409 to specific azure-core exceptions; extendable via 'error_map'.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
        if cls:
            # Hand the caller's callback the raw response, the model, and {}.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        ddos_protection_plan_name: str,
        parameters: "models.DdosProtectionPlan",
        **kwargs
    ) -> "models.DdosProtectionPlan":
        """Send the initial PUT of the long-running create-or-update and
        deserialize the immediate response.

        Polling to completion is handled by :meth:`begin_create_or_update`.
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.DdosProtectionPlan"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'DdosProtectionPlan')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both 200 (updated) and 201 (created) carry the resulting plan in the body.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        ddos_protection_plan_name: str,
        parameters: "models.DdosProtectionPlan",
        **kwargs
    ) -> AsyncLROPoller["models.DdosProtectionPlan"]:
        """Creates or updates a DDoS protection plan.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_protection_plan_name: The name of the DDoS protection plan.
        :type ddos_protection_plan_name: str
        :param parameters: Parameters supplied to the create or update operation.
        :type parameters: ~azure.mgmt.network.v2020_03_01.models.DdosProtectionPlan
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either DdosProtectionPlan or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.DdosProtectionPlan]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["models.DdosProtectionPlan"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh operation: fire the initial PUT; the identity 'cls' keeps
            # the raw pipeline response for the poller.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                ddos_protection_plan_name=ddos_protection_plan_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Remove request-only kwargs before the remainder is forwarded to the
        # polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response into the DdosProtectionPlan model.
            deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def update_tags(
    self,
    resource_group_name: str,
    ddos_protection_plan_name: str,
    parameters: "models.TagsObject",
    **kwargs
) -> "models.DdosProtectionPlan":
    """Update a DDoS protection plan tags.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param ddos_protection_plan_name: The name of the DDoS protection plan.
    :type ddos_protection_plan_name: str
    :param parameters: Parameters supplied to the update DDoS protection plan resource tags.
    :type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DdosProtectionPlan, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_03_01.models.DdosProtectionPlan
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.DdosProtectionPlan"]
    # Map 404/409 to typed exceptions; callers may extend/override via 'error_map'.
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    content_type = kwargs.pop("content_type", "application/json")

    # Construct URL
    url = self.update_tags.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = 'application/json'

    # Construct and send request: PATCH with the serialized TagsObject body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only 200 is a documented success code for this operation.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)

    if cls:
        # Caller's hook gets the raw response, the model and (empty) response headers.
        return cls(pipeline_response, deserialized, {})

    return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}  # type: ignore
def list(
    self,
    **kwargs
) -> AsyncIterable["models.DdosProtectionPlanListResult"]:
    """Gets all DDoS protection plans in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.DdosProtectionPlanListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.DdosProtectionPlanListResult"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"

    def prepare_request(next_link=None):
        # First page hits the templated URL; subsequent pages GET the
        # server-provided next_link verbatim (no extra query parameters).
        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (next_link, items) as the pager expects.
        deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ddosProtectionPlans'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs
) -> AsyncIterable["models.DdosProtectionPlanListResult"]:
    """Gets all the DDoS protection plans in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.DdosProtectionPlanListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.DdosProtectionPlanListResult"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"

    def prepare_request(next_link=None):
        # First page hits the templated URL; subsequent pages GET the
        # server-provided next_link verbatim (no extra query parameters).
        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (next_link, items) as the pager expects.
        deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans'}  # type: ignore
| [
"noreply@github.com"
] | YijunXieMS.noreply@github.com |
982d69e6df52e5a4f3c3994d1c3fe4dd3325f1ec | 90a3b42254b3df3e9ed85c650204a2a9a88efc4c | /tests/open_alchemy/helpers/test_get_ext_prop.py | 299c11353a52948474a187d400adc8f8f1838a4c | [
"Apache-2.0"
] | permissive | SkynetRTN/OpenAlchemy | 967668001ab67344acfcf7eeb91f2ae6f601db01 | 75ec91f5085508beafd20fe3472a135b87b642f1 | refs/heads/master | 2022-11-15T22:38:34.863563 | 2020-07-12T11:38:12 | 2020-07-12T11:38:12 | 278,888,084 | 0 | 0 | null | 2020-07-11T15:23:37 | 2020-07-11T15:23:36 | null | UTF-8 | Python | false | false | 13,937 | py | """Tests for ext_prop."""
import functools
import pytest
from open_alchemy import exceptions
from open_alchemy import helpers
@pytest.mark.helper
def test_miss():
    """
    GIVEN empty source
    WHEN get is called with the source
    THEN None is returned.
    """
    # No default supplied, so a missing property falls back to None.
    returned = helpers.ext_prop.get(source={}, name="missing")

    assert returned is None
@pytest.mark.helper
def test_miss_default():
    """
    GIVEN empty source and default value
    WHEN get is called with the source and default value
    THEN default value is returned.
    """
    expected = "value 1"

    returned = helpers.ext_prop.get(source={}, name="missing", default=expected)

    assert returned == expected
# Each case pairs an extension property with a value of the wrong type
# (e.g. a string where a boolean is expected) or of an invalid format.
@pytest.mark.parametrize(
    "name, value",
    [
        pytest.param("x-backref", True, id="x-backref"),
        pytest.param("x-uselist", "True", id="x-uselist"),
        pytest.param("x-secondary", True, id="x-secondary"),
        pytest.param("x-primary-key", "True", id="x-primary-key"),
        pytest.param("x-autoincrement", "True", id="x-autoincrement"),
        pytest.param("x-index", "True", id="x-index"),
        pytest.param("x-unique", "True", id="x-unique"),
        pytest.param("x-json", "True", id="x-json"),
        pytest.param("x-foreign-key", True, id="x-foreign-key invalid type"),
        pytest.param("x-foreign-key", "no column", id="x-foreign-key invalid format"),
        pytest.param("x-foreign-key-column", True, id="x-foreign-key-column"),
        pytest.param("x-tablename", True, id="x-tablename"),
        pytest.param("x-tablename", None, id="x-tablename None"),
        pytest.param("x-de-$ref", True, id="x-de-$ref"),
        pytest.param("x-dict-ignore", "True", id="x-dict-ignore"),
        pytest.param("x-generated", "True", id="x-generated"),
        pytest.param("x-inherits", 1, id="x-inherits"),
    ],
)
@pytest.mark.helper
def test_invalid(name, value):
    """
    GIVEN property and invalid value
    WHEN get is called with a source made of the property and value
    THEN MalformedExtensionPropertyError is raised.
    """
    source = {name: value}

    with pytest.raises(exceptions.MalformedExtensionPropertyError):
        helpers.ext_prop.get(source=source, name=name)
# Mirror of test_invalid: each case pairs a property with a type-correct,
# well-formatted value, which get must return unchanged.
@pytest.mark.parametrize(
    "name, value",
    [
        pytest.param("x-backref", "table 1", id="x-backref",),
        pytest.param("x-uselist", True, id="x-uselist",),
        pytest.param("x-secondary", "association", id="x-secondary",),
        pytest.param("x-primary-key", True, id="x-primary-key",),
        pytest.param("x-autoincrement", True, id="x-autoincrement",),
        pytest.param("x-index", True, id="x-index",),
        pytest.param("x-unique", True, id="x-unique",),
        pytest.param("x-json", True, id="x-json",),
        pytest.param("x-foreign-key", "table 1.column 1", id="x-foreign-key",),
        pytest.param("x-foreign-key-column", "column 1", id="x-foreign-key-column",),
        pytest.param("x-tablename", "table 1", id="x-tablename",),
        pytest.param("x-de-$ref", "Table1", id="x-de-$ref",),
        pytest.param("x-dict-ignore", True, id="x-dict-ignore",),
        pytest.param("x-generated", True, id="x-generated",),
        pytest.param("x-inherits", True, id="x-inherits bool",),
        pytest.param("x-inherits", "Parent", id="x-inherits string",),
    ],
)
@pytest.mark.helper
def test_valid(name, value):
    """
    GIVEN property and valid value
    WHEN get is called with a source made of the property and value
    THEN the value is returned.
    """
    source = {name: value}

    returned_value = helpers.ext_prop.get(source=source, name=name)

    assert returned_value == value
@pytest.mark.helper
def test_pop():
    """
    GIVEN property and valid value
    WHEN get is called with the name, value and pop set
    THEN the value is returned and the key is removed from the dictionary.
    """
    name = "x-dict-ignore"
    value = True
    source = {name: value}

    returned = helpers.ext_prop.get(source=source, name=name, pop=True)

    assert returned == value
    # pop=True must consume the key, leaving the source empty.
    assert source == {}
# x-composite-unique accepts a list of column names, a list of such lists,
# an object with "columns" (and optional "name"), or a list of such objects.
# Everything below violates that schema in the way the id describes.
@pytest.mark.parametrize(
    "value",
    ["column 1", [], [[]], [None], [1], {}, {"name": 1}],
    ids=[
        "not object not array",
        "empty list",
        "empty list of list",
        "list of null",
        "list of not string",
        "object columns missing",
        "object name not string",
    ],
)
@pytest.mark.helper
def test_unique_constraint_invalid(value):
    """
    GIVEN value for x-composite-unique that has an invalid format
    WHEN get with x-composite-unique and the value
    THEN MalformedExtensionPropertyError is raised.
    """
    name = "x-composite-unique"
    source = {name: value}

    with pytest.raises(exceptions.MalformedExtensionPropertyError):
        helpers.ext_prop.get(source=source, name=name)
# Well-formed x-composite-unique values; get must return them unchanged.
@pytest.mark.parametrize(
    "value",
    [
        ["column 1"],
        [["column 1"]],
        {"columns": ["column 1"]},
        {"columns": ["column 1"], "name": "name 1"},
        [{"columns": ["column 1"]}],
    ],
    ids=[
        "list of string",
        "list of list of string",
        "object with columns",
        "object with columns and name",
        "list of object with columns",
    ],
)
@pytest.mark.helper
def test_unique_constraint_valid(value):
    """
    GIVEN value for x-composite-unique that has a valid format
    WHEN get with x-composite-unique and the value
    THEN the value is returned.
    """
    name = "x-composite-unique"
    source = {name: value}

    returned_value = helpers.ext_prop.get(source=source, name=name)

    assert returned_value == value
# x-composite-index is like x-composite-unique but keyed on "expressions"
# and with an optional boolean "unique" flag; each case breaks the schema
# in the way its id describes.
@pytest.mark.parametrize(
    "value",
    [
        "column 1",
        [],
        [[]],
        [None],
        [1],
        {},
        {"name": 1, "expressions": ["column 1"]},
        {"expressions": ["column 1"], "unique": "true"},
    ],
    ids=[
        "not object not array",
        "empty list",
        "empty list of list",
        "list of null",
        "list of not string",
        "object expressions missing",
        "object name not string",
        "object unique not boolean",
    ],
)
@pytest.mark.helper
def test_composite_index_invalid(value):
    """
    GIVEN value for x-composite-index that has an invalid format
    WHEN get with x-composite-index and the value
    THEN MalformedExtensionPropertyError is raised.
    """
    name = "x-composite-index"
    source = {name: value}

    with pytest.raises(exceptions.MalformedExtensionPropertyError):
        helpers.ext_prop.get(source=source, name=name)
# Well-formed x-composite-index values; get must return them unchanged.
@pytest.mark.parametrize(
    "value",
    [
        ["column 1"],
        [["column 1"]],
        {"expressions": ["column 1"]},
        {"name": "name 1", "expressions": ["column 1"]},
        {"expressions": ["column 1"], "unique": True},
        [{"expressions": ["column 1"]}],
    ],
    ids=[
        "list of string",
        "list of list of string",
        "object",
        "object name",
        "object unique",
        "list of object",
    ],
)
@pytest.mark.helper
def test_composite_index_valid(value):
    """
    GIVEN value for x-composite-index that has a valid format
    WHEN get is called with x-composite-index and the value
    THEN the value is returned.
    """
    name = "x-composite-index"
    source = {name: value}

    returned_value = helpers.ext_prop.get(source=source, name=name)

    assert returned_value == value
# x-backrefs maps property names to schemas that must either be an object
# schema carrying x-de-$ref, or an array schema whose items carry it; each
# case below violates that contract in the way its id describes.
@pytest.mark.parametrize(
    "value",
    [
        "RefSchema",
        {"ref_schema": "RefSchema"},
        {"ref_schema": {"x-de-$ref": "RefSchema"}},
        {"ref_schema": {"type": "object"}},
        {"ref_schema": {"type": "object", "x-de-$ref": True}},
        {"ref_schema": {"type": "object", "x-de-$ref": None}},
        {"ref_schema": {"type": True, "x-de-$ref": "RefSchem"}},
        {"ref_schema": {"type": None, "x-de-$ref": "RefSchem"}},
        {"ref_schema": {"type": "array"}},
        {"ref_schema": {"type": "array", "items": {}}},
    ],
    ids=[
        "not object",
        "object not of object",
        "object object object type type missing",
        "object object object type x-de-$ref missing",
        "object object object type x-de-$ref wrong type",
        "object object object type x-de-$ref null",
        "object object object type type wrong type",
        "object object object type type null",
        "object object array type items missing",
        "object object array type items empty",
    ],
)
@pytest.mark.helper
def test_relationship_backrefs_invalid(value):
    """
    GIVEN value for x-backrefs with an invalid format
    WHEN get is called with x-backrefs and the value
    THEN MalformedExtensionPropertyError is raised.
    """
    name = "x-backrefs"
    source = {name: value}

    with pytest.raises(exceptions.MalformedExtensionPropertyError):
        helpers.ext_prop.get(source=source, name=name)
# Well-formed x-backrefs values; get must return them unchanged.
@pytest.mark.parametrize(
    "value",
    [
        {},
        {"ref_schema": {"type": "object", "x-de-$ref": "RefSchema"}},
        {
            "ref_schema": {
                "type": "array",
                "items": {"type": "object", "x-de-$ref": "RefSchema"},
            }
        },
        {
            "ref_schema1": {"type": "object", "x-de-$ref": "RefSchema1"},
            "ref_schema2": {"type": "object", "x-de-$ref": "RefSchema2"},
        },
    ],
    ids=["empty", "single object type", "single array type", "multiple"],
)
@pytest.mark.helper
def test_relationship_backrefs_valid(value):
    """
    GIVEN value for x-backrefs with a valid format
    WHEN get is called with x-backrefs and the value
    THEN value is returned.
    """
    name = "x-backrefs"
    source = {name: value}

    return_value = helpers.ext_prop.get(source=source, name=name)

    assert return_value == value
# x-kwargs must be an object whose keys are all strings; each case below
# breaks that in the way its id describes.
@pytest.mark.parametrize(
    "value",
    [
        "value",
        ["value"],
        {1: "value"},
        {1: "value 1", 2: "value 2"},
        {"key 1": "value 1", 2: "value 2"},
    ],
    ids=[
        "simple",
        "array",
        "object not string key",
        "object multiple key none string",
        "object multiple key some string",
    ],
)
@pytest.mark.helper
def test_kwargs_invalid(value):
    """
    GIVEN value for x-kwargs that has an invalid format
    WHEN get_kwargs is called with the value
    THEN MalformedExtensionPropertyError is raised.
    """
    name = "x-kwargs"
    source = {name: value}

    with pytest.raises(exceptions.MalformedExtensionPropertyError):
        helpers.ext_prop.get_kwargs(source=source)
# Well-formed x-kwargs values (string keys; any value types, including
# nested objects with non-string keys); get_kwargs returns them unchanged.
@pytest.mark.parametrize(
    "value",
    [
        {"key": "value"},
        {"key1": "value 1", "key2": "value 2"},
        {"key": ["value"]},
        {"key": {"sub_key": "value"}},
        {"key": {1: "value"}},
    ],
    ids=[
        "simple value",
        "simple value multiple keys",
        "array value",
        "object simple string key value",
        "object simple integer key value",
    ],
)
@pytest.mark.helper
def test_kwargs_valid(value):
    """
    GIVEN value for x-kwargs that has a valid format
    WHEN get_kwargs is called with the value
    THEN the value is returned.
    """
    name = "x-kwargs"
    source = {name: value}

    returned_value = helpers.ext_prop.get_kwargs(source=source)

    assert returned_value == value
@pytest.mark.helper
def test_kwargs_valid_name():
    """
    GIVEN value for kwargs that has a valid format and a property name
    WHEN get_kwargs is called with the value and the name
    THEN the value is returned.
    """
    prop_name = "x-foreign-key-kwargs"
    expected = {"key": "value"}

    returned = helpers.ext_prop.get_kwargs(source={prop_name: expected}, name=prop_name)

    assert returned == expected
@pytest.mark.helper
def test_kwargs_valid_missing():
    """
    GIVEN empty value
    WHEN get_kwargs is called with the value
    THEN None is returned.
    """
    # An absent kwargs property is not an error — it simply yields None.
    assert helpers.ext_prop.get_kwargs(source={}) is None
# Cross product of reserved-key sets and x-kwargs values: get_kwargs must
# raise exactly when a supplied key appears in the reserved set.
@pytest.mark.parametrize(
    "reserved, value, raises",
    [
        (set(), {}, False),
        (set(), {"key 1": "value 1"}, False),
        (set(), {"key 1": "value 1", "key 2": "value 2"}, False),
        ({"key 1"}, {}, False),
        ({"key 1"}, {"key 1": "value 1"}, True),
        ({"key 1"}, {"key 2": "value 2"}, False),
        ({"key 1"}, {"key 1": "value 1", "key 2": "value 2"}, True),
        ({"key 1"}, {"key 2": "value 2", "key 3": "value 3"}, False),
        ({"key 1", "key 2"}, {}, False),
        ({"key 1", "key 2"}, {"key 1": "value 1"}, True),
        ({"key 1", "key 2"}, {"key 2": "value 2"}, True),
        ({"key 1", "key 2"}, {"key 3": "value 3"}, False),
    ],
    ids=[
        "empty reserved empty keys",
        "empty reserved single key",
        "empty reserved multiple keys",
        "single reserved empty keys",
        "single reserved single key hit",
        "single reserved single key miss",
        "single reserved multiple keys hit",
        "single reserved multiple keys miss",
        "multiple reserved empty keys",
        "multiple reserved single key first hit",
        "multiple reserved single key second hit",
        "multiple reserved single key miss",
    ],
)
@pytest.mark.helper
def test_kwargs_reserved(reserved, value, raises):
    """
    GIVEN value for x-kwargs, set of reserved keys and whether to raise
    WHEN get_kwargs is called with the value and reserved keys
    THEN MalformedExtensionPropertyError is raised if it is expected to raise.
    """
    name = "x-kwargs"
    source = {name: value}
    # Bind the arguments once so both branches call the helper identically.
    test_func = functools.partial(
        helpers.ext_prop.get_kwargs, source=source, reserved=reserved
    )

    if raises:
        with pytest.raises(exceptions.MalformedExtensionPropertyError):
            test_func()
    else:
        returned_value = test_func()

        assert returned_value == value
| [
"anderssonpublic@gmail.com"
] | anderssonpublic@gmail.com |
ef7741d54eeee85ae5344eff0a1b128c0f983cca | ddd466457316662a1455bae429740eb3c8411444 | /intro/5_8_loop_sum_odd_even.py | c34320afd5ec784e60b484fd6a9aec17b6837f95 | [] | no_license | fingerman/python_fundamentals | 9ef46e51d6e9b8328e9c949fa0f807f30bd6e482 | 1fb604220922530d1171200a3cf3a927c028a6ed | refs/heads/master | 2023-01-09T12:02:26.712810 | 2020-01-22T16:12:32 | 2020-01-22T16:12:32 | 151,728,846 | 0 | 0 | null | 2022-12-27T15:34:12 | 2018-10-05T13:58:10 | Python | UTF-8 | Python | false | false | 374 | py | n = int(input())
# Reads the n numbers announced earlier in the script, accumulating those
# entered on even turns (2nd, 4th, ...) and odd turns (1st, 3rd, ...)
# separately, then reports whether the two sums match.
sum_even = 0
sum_odd = 0
for turn in range(1, n + 1):
    # Read exactly once per turn; the original duplicated the int(input())
    # call in both branches and used a redundant `elif i % 2 != 0`.
    value = int(input())
    if turn % 2 == 0:
        sum_even += value
    else:
        sum_odd += value
if sum_even == sum_odd:
    print("Yes" + "\n" + "Sum = " + str(sum_odd))
else:
    # The two exhaustive cases need no second condition test.
    print("No" + "\n" + "Diff = " + str(abs(sum_even - sum_odd)))
"adamov.george@gmail.com"
] | adamov.george@gmail.com |
461e91c5ae3e88520a1d32ddd42f196b9e7dcc5d | ebaa12cfa89a44f7da2fa9cc2cd8028c7536e4ed | /blog/migrations/0001_initial.py | 4945c1a237aaa3db2192ce7c6724e00778edd595 | [] | no_license | likelionskhu7th/comment | 853786dcd895ec4af3242024086e6e8f18bd2b3d | fda93337791ea865a4d55f9dd14fdf78a6b81f40 | refs/heads/master | 2020-05-20T02:45:12.901725 | 2019-05-21T11:52:55 | 2019-05-21T11:52:55 | 185,340,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | # Generated by Django 2.2.1 on 2019-05-07 10:13
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Article table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=20)),
                ('content', models.TextField()),
                # NOTE(review): this default is a *fixed* timestamp baked in at
                # makemigrations time, which suggests the model used
                # default=datetime.now() (called) rather than a callable such
                # as timezone.now — confirm against models.py.
                ('created_at', models.DateTimeField(default=datetime.datetime(2019, 5, 7, 10, 13, 36, 147311))),
                ('published_at', models.DateTimeField()),
            ],
        ),
    ]
| [
"gustn4563@gmail.com"
] | gustn4563@gmail.com |
5812d140f1eb56e0bfedb90c21e8abd19b1945c7 | cf763c59fe8f0cafe002da68bc2aae8df3f00617 | /setup.py | 23f385b190e3e72f9cddcdce6c5457aa06bd7c82 | [
"MIT"
] | permissive | Sandip117/pl-gpu-test | b993a75f717a8059201881c2fb740acf162ce818 | f4e37259fabb282f59ca2ff974733140101bf73b | refs/heads/master | 2021-04-23T20:27:22.609481 | 2020-04-06T20:18:15 | 2020-04-06T20:18:15 | 249,995,974 | 0 | 0 | MIT | 2020-03-30T15:54:07 | 2020-03-25T14:09:13 | Python | UTF-8 | Python | false | false | 1,107 | py |
import sys
import os
# Make sure we are running python3.5+.
# Tuple comparison is the idiomatic, future-proof way to gate on a minimum
# interpreter version; the previous 10*major+minor arithmetic only worked
# for single-digit minors by accident (e.g. 3.10 encoded as 40).
if sys.version_info < (3, 5):
    sys.exit("Sorry, only Python 3.5+ is supported.")
from setuptools import setup
def readme():
    """Return the package long description read from README.rst.

    NOTE(review): the two prints are leftover debug output that runs on every
    packaging command; the file is opened with the platform default encoding.
    """
    print("Current dir = %s" % os.getcwd())
    print(os.listdir())
    with open('README.rst') as f:
        return f.read()
setup(
    name             = 'gpu_test',
    # for best practices make this version the same as the VERSION class variable
    # defined in your ChrisApp-derived Python class
    version          = '0.1',
    description      = 'An app to check the available GPUs',
    long_description = readme(),
    author           = 'Sandip Samal',
    author_email     = 'sandip.samal@childrens.harvard.edu',
    url              = '...',
    packages         = ['gpu_test'],
    install_requires = ['chrisapp', 'pudb'],
    test_suite       = 'nose.collector',
    tests_require    = ['nose'],
    # Installed as an executable script (ChRIS plugin entry point).
    scripts          = ['gpu_test/gpu_test.py'],
    license          = 'MIT',
    zip_safe         = False
)
| [
"rudolph.pienaar@gmail.com"
] | rudolph.pienaar@gmail.com |
1faae3c4aea51ca9985f8386fb977d6d2755dd18 | d8edd97f8f8dea3f9f02da6c40d331682bb43113 | /networks770.py | 87f6057de77bc92edac7a4ec58ca3bd15b63ce7e | [] | no_license | mdubouch/noise-gan | bdd5b2fff3aff70d5f464150443d51c2192eeafd | 639859ec4a2aa809d17eb6998a5a7d217559888a | refs/heads/master | 2023-07-15T09:37:57.631656 | 2021-08-27T11:02:45 | 2021-08-27T11:02:45 | 284,072,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,403 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# Architecture revision; stamped onto Gen/Disc instances as ``self.version``.
__version__ = 205

# Number of wires in the CDC
n_wires = 3606
# Number of continuous features (E, t, dca)
n_features = 3
# Per-step geometry channel count (input channels of Disc's conv0).
geom_dim = 1
def wire_hook(grad):
    """Gradient debug hook: log the wire gradient's mean magnitude and spread."""
    mean_abs = grad.abs().mean().item()
    spread = grad.std().item()
    print('wg %.2e %.2e' % (mean_abs, spread))
    # Hooks must hand the gradient back unchanged.
    return grad
class Gen(nn.Module):
    """Generator: maps a latent vector to a sequence of continuous features
    plus a (gumbel-softmax) one-hot wire selection per step."""

    def __init__(self, ngf, latent_dims, seq_len, encoded_dim):
        super().__init__()

        self.ngf = ngf
        self.seq_len = seq_len

        self.version = __version__

        # Input: (B, latent_dims, 1)

        self.act = nn.ReLU()

        n512 = 512
        # Project the latent vector onto a (n512, seq_len/16) feature map.
        self.lin0 = nn.Linear(latent_dims, seq_len//16*n512, bias=True)
        self.bn0 = nn.BatchNorm1d(n512)
        self.dropout = nn.Dropout(0.1)
        self.n512 = n512
        n256 = n512 // 2
        n128 = n512 // 4
        n64 = n512 // 8
        n32 = n512 // 16
        n16 = n512 // 32

        class GBlock(nn.Module):
            # Upsampling block: BN -> transposed conv -> BN, then a 1x1
            # transposed conv over [interpolated input, conv output]
            # concatenated along channels (skip connection).
            def __init__(self, in_c, out_c, k_s, stride, padding):
                super().__init__()
                self.bn0 = nn.BatchNorm1d(in_c)
                self.conv = nn.ConvTranspose1d(in_c, out_c, k_s, stride, padding)
                self.bn = nn.BatchNorm1d(out_c)
                self.conv1 = nn.ConvTranspose1d(in_c + out_c, out_c, 1, 1, 0)
                self.act = nn.ReLU()
            def forward(self, x):
                y = self.bn0(x)
                y = self.conv(y)
                y = self.bn(self.act(y))
                x0 = F.interpolate(x, size=y.shape[2], mode='linear')
                y = self.act(self.conv1(torch.cat([x0, y], dim=1)))
                return y

        #self.convw4 = GBlock(n512, n256, 12, 4, 4)
        #self.convw3 = GBlock(n256, n128, 12, 4, 4)
        #self.convw2 = GBlock(n128, n64, 3, 1, 1)
        #self.convw1 = nn.Conv1d(n64, n_wires, 1, 1, 0)

        # Wire branch: expand time axis to seq_len, then per-step logits
        # over all n_wires wires.
        self.linws = nn.Linear(seq_len // 16, seq_len)
        self.linw1 = nn.Linear(n512, n512)
        self.linw2 = nn.Linear(n512, n_wires)

        #self.convp4 = nn.ConvTranspose1d(n512, n256, 12, 4, 4)
        #self.bnp4 = nn.BatchNorm1d(n256)
        #self.convp3 = nn.ConvTranspose1d(n256, n128, 12, 4, 4)
        #self.bnp3 = nn.BatchNorm1d(n512+n128)
        #self.convp2 = nn.ConvTranspose1d(n512+n128, n64, 3, 1, 1)
        #self.bnp2 = nn.BatchNorm1d(n64)
        # Feature branch: three upsampling blocks (x16 overall) then a 1x1
        # conv down to the n_features continuous channels.
        self.convp4 = GBlock(n512, n256, 12, 4, 4)
        self.convp3 = GBlock(n256, n128, 12, 4, 4)
        self.convp2 = GBlock(n128, n64, 3, 1, 1)
        self.convp1 = nn.Conv1d(n64, n_features, 1, 1, 0)

        #self.conv1 = nn.ConvTranspose1d(n128, n128, 32, 2, 15)
        #self.bn1 = nn.BatchNorm1d(n128)

        #self.convw1 = nn.ConvTranspose1d(n128, n_wires, 1, 1, 0, bias=True)
        #self.convp1 = nn.ConvTranspose1d(n128, n_features, 1, 1, 0)

        self.out = nn.Tanh()

        # Gumbel-softmax temperature annealing state: tau decays from 1 at
        # gen_it=0 towards temp_min as gen_it approaches max_its.
        self.max_its = 3000
        self.temp_min = 1.0
        self.gen_it = 3000

    def forward(self, z, wire_to_xy):
        """Generate one batch.

        z: latent vectors. Returns (features in (-1, 1), one-hot wire choices).
        NOTE(review): wire_to_xy is only consumed by commented-out code here;
        the live path ignores it.
        """
        #print('latent space %.2e %.2e' % (z.mean().item(), z.std().item()))
        # z: random point in latent space
        x = self.act(self.lin0(z).reshape(-1, self.n512, self.seq_len // 16))

        #x = self.act(self.bnu1(self.convu1(x)))
        #x = self.act(self.bnu2(self.convu2(x)))
        #x = self.act(self.bnu3(self.convu3(x)))
        #x = self.act(self.bnu4(self.convu4(x)))
        #x = self.act(self.bnu5(self.convu5(x)))
        #x = self.act(self.bnu6(self.convu6(x)))

        #x = self.act(self.bn1(self.conv1(x)))

        # Wire logits: (B, n_wires, seq_len).
        w = self.act(self.linws(x))
        w = self.act(self.linw1(self.dropout(w.permute(0,2,1))))
        w = self.linw2(w).permute(0,2,1)
        #print(w.unsqueeze(0).shape)
        #print((w.unsqueeze(0) - wire_to_xy.view(n_wires, 1, geom_dim, 1)).shape)
        # w: (b, 2, seq)
        # wire_to_xy: (2, n_wires)
        #print(wire_to_xy.unsqueeze(0).unsqueeze(2).shape)
        #print(w.unsqueeze(3).shape)
        #import matplotlib.pyplot as plt
        #import matplotlib.lines as lines
        #plt.figure()
        #plt.scatter(w[:,0,:].detach().cpu(), w[:,1,:].detach().cpu(), s=1)
        #_l = lines.Line2D(w[:,0,:].detach().cpu(), w[:,1,:].detach().cpu(), linewidth=0.1, color='gray', alpha=0.7)
        #plt.gca().add_line(_l)
        #plt.gca().set_aspect(1.0)
        #plt.savefig('test.png')
        #plt.close()

        #import matplotlib.pyplot as plt
        #plt.figure()
        #plt.plot(w[0,:,0].detach().cpu())
        #plt.savefig('testw.png')
        #plt.close()
        #wdist = torch.norm(w.unsqueeze(3) - wire_to_xy.unsqueeze(0).unsqueeze(2), dim=1)
        #print(wdist.shape)
        ##print(1/wdist)
        #plt.figure()
        #plt.plot(wdist[0,0,:].detach().cpu())
        #plt.savefig('test.png')
        #plt.close()
        #self.gen_it += 1
        # Anneal temperature geometrically; with temp_min == 1.0 this stays 1.
        tau = 1. / ((1./self.temp_min)**(self.gen_it / self.max_its))
        #print(tau)
        # hard=True gives one-hot samples with straight-through gradients.
        wg = F.gumbel_softmax(w, dim=1, hard=True, tau=tau)
        #wg = F.softmax(w, dim=1)
        #print(wg.shape)
        #exit(1)
        #wg.register_hook(wire_hook)
        #xy = torch.tensordot(wg, wire_to_xy, dims=[[1],[1]]).permute(0,2,1)

        #p = self.act(self.bnp4(self.convp4(self.act(x))))
        #p = self.convp3(p)
        #p = torch.cat([p, F.interpolate(x, size=p.shape[2])], dim=1)
        #p = self.act(self.bnp3(p))
        #p = self.act(self.bnp2(self.convp2(p)))
        p = self.convp4(x)
        p = self.convp3(p)
        p = self.convp2(p)
        p = self.convp1(p)

        #return torch.cat([self.out(p), xy], dim=1), wg
        return self.out(p), wg
def xy_hook(grad):
    """Gradient debug hook: log the xy gradient's mean magnitude and spread."""
    mean_abs = grad.abs().mean().item()
    spread = grad.std().item()
    print('xy %.2e %.2e' % (mean_abs, spread))
    # Hooks must hand the gradient back unchanged.
    return grad
class Disc(nn.Module):
    """Discriminator/critic: scores a sequence from its wire-geometry channel.

    NOTE(review): the live path uses only w_ (via conv0 on geom_dim channels);
    the feature tensor x_ and wire_sphere are currently unused except through
    commented-out code.
    """

    def __init__(self, ndf, seq_len, encoded_dim):
        super().__init__()

        self.version = __version__

        # (B, n_features, 256)
        self.act = nn.LeakyReLU(0.2)

        n512 = 512
        n256 = n512 // 2
        n128 = n256 // 2
        n64 = n128 // 2

        # Small conv over the geometry channel, then an MLP down to one score.
        self.conv0 = nn.Conv1d(geom_dim, n64, 3, 1, 1)
        self.lin1 = nn.Linear(n64*seq_len, n512)
        self.lin2 = nn.Linear(n512, n256)
        self.lin3 = nn.Linear(n256, n128)
        self.lin4 = nn.Linear(n128, 1)

        #self.conv0 = nn.Conv1d(geom_dim, n512, 1, 1, 0)
        #self.lin1 = nn.Linear(n512, n256)
        #self.lin2 = nn.Linear(n256, n128)
        ##self.conv1 = nn.Conv1d(n64, n128, 17, 2, 8, padding_mode='circular')
        ##self.conv2 = nn.Conv1d(n128, n256, 9, 2, 4, padding_mode='circular')
        ##self.conv3 = nn.Conv1d(n256, n512, 5, 2, 2, padding_mode='circular')
        ##self.conv4 = nn.Conv1d(n512, n512, 5, 4, 2, padding_mode='circular')
        ##self.conv5 = nn.Conv1d(n512, n512, 5, 4, 2, padding_mode='circular')
        #self.dropout=nn.Dropout(0.1)
        #self.lin3 = nn.Linear(n128, 1)

        #self.conv1 = nn.Conv1d(n64*1, n128, 3, 3, 1)
        #self.db1 = DBlock(n256)
        #self.db2 = DBlock(n256)
        #self.db3 = DBlock(n256)
        #self.conv2 = nn.Conv1d(256, 512, 3, 2, 1)
        #self.conv3 = nn.Conv1d(512, 1024, 3, 2, 1)
        #self.conv4 = nn.Conv1d(1024, 2048, 3, 2, 1)
        #self.lin0 = nn.Linear(256 * seq_len // 1, 1, bias=True)
        #self.lin0 = nn.Linear(seq_len//4*512, 1)
        #self.convf = nn.utils.spectral_norm(nn.Conv1d(n512, 1, 3, 1, 1, padding_mode='circular'))
        #self.lin0 = nn.Linear(n128, 1)

        #self.lin0 = nn.utils.spectral_norm(nn.Linear(n512*seq_len//32, 128))
        #self.lin1 = nn.utils.spectral_norm(nn.Linear(128, 1))

        self.out = nn.Identity()

    def forward(self, x_, w_, wire_sphere):
        """Score one batch; returns an unbounded (B,) critic value."""
        # x_ is concatenated tensor of p_ and w_, shape (batch, features+n_wires, seq_len)
        # p_ shape is (batch, features, seq_len),
        # w_ is AE-encoded wire (batch, encoded_dim, seq_len)
        seq_len = x_.shape[2]
        x = x_

        #dist = ((xy - nn.ConstantPad1d((1, 0), 0.0)(xy[:,:,:-1]))**2).sum(dim=1).unsqueeze(1)

        p = x
        #xy = x[:,n_features:n_features+geom_dim]
        wg = w_

        #pxy = x[:,:n_features+geom_dim]

        #print(wire0)
        #print('mean %.2e %.2e' % (p.mean().item(), xy.mean().item()))
        #print('std %.2e %.2e' % (p.std().item(), xy.std().item()))
        #print('xy1 %.2e %.2e' % (xy.mean().item(), xy.std().item()))
        #print('p %.2e %.2e' %( p.abs().mean().item(), p.std().item()))
        #print('xy %.2e %.2e' %( xy.abs().mean().item(), xy.std().item()))
        #print('xy2 %.2e %.2e' % (xy.mean().item(), xy.std().item()))
        #x = torch.cat([p, xy], dim=1)

        #w0 = self.convw0(wg)
        #xy = torch.tensordot(wg, wire_sphere, dims=[[1], [1]]).permute(0,2,1)
        xy = w_

        x = xy
        x = self.act(self.conv0(x))

        #x0 = torch.cat([x0 , w0], dim=1)
        #x0 = w0

        # Flatten (channels, time) and run the MLP head down to one scalar.
        x = self.act(self.lin1(x.flatten(1,2)))
        x = self.act(self.lin2(x))
        x = self.act(self.lin3(x))
        x = self.lin4(x).squeeze(1)

        #x = self.lin0(x.mean(dim=1))

        return self.out(x)#.squeeze(1)
class VAE(nn.Module):
    """Autoencoder over wire vectors: n_wires -> encoded_dim -> n_wires.

    Despite the name there is no sampling/KL term visible here — enc/dec are
    deterministic. Attribute names (enc_net/dec_net, lin1/lin2, ...) are part
    of the checkpoint layout; do not rename.
    """

    def __init__(self, encoded_dim):
        super().__init__()

        class Enc(nn.Module):
            # Encoder MLP: n_wires -> hidden -> encoded_dim, squashed to (-1, 1).
            def __init__(self, hidden_size):
                super().__init__()
                self.act = nn.LeakyReLU(0.2)
                self.lin1 = nn.Linear(n_wires, hidden_size)
                self.lin2 = nn.Linear(hidden_size, encoded_dim)
                self.out = nn.Tanh()
            def forward(self, x):
                x = self.act(self.lin1(x))
                return self.out(self.lin2(x))

        class Dec(nn.Module):
            # Decoder MLP: encoded_dim -> hidden -> n_wires logits.
            def __init__(self, hidden_size):
                super().__init__()
                self.act = nn.ReLU()
                self.lin1 = nn.Linear(encoded_dim, hidden_size)
                self.lin2 = nn.Linear(hidden_size, n_wires)
            def forward(self, x):
                x = self.act(self.lin1(x))
                return self.lin2(x)

        self.enc_net = Enc(512)
        self.dec_net = Dec(512)

    def enc(self, x):
        # Channels-last for the linear layers, then back to channels-first.
        return self.enc_net(x.permute(0, 2, 1)).permute(0,2,1)

    def dec(self, x):
        return self.dec_net(x.permute(0, 2, 1)).permute(0,2,1)

    def forward(self, x):
        # NOTE: unlike enc()/dec(), forward() applies no permutes — it expects
        # channels-last input directly.
        y = self.dec_net(self.enc_net(x))
        return y
def get_n_params(model):
    """Return the total number of scalar parameters in *model*."""
    # numel() is the element count of a tensor, identical to
    # reshape(-1).shape[0] without materialising a view.
    return sum(param.numel() for param in model.parameters())
| [
"m.dubouchet18@imperial.ac.uk"
] | m.dubouchet18@imperial.ac.uk |
df39650fa8bcc5df083b819e4b7b1060a76cf046 | 25970b0796082ed43e7662834b613e651fdcf648 | /0427/either/issue/views.py | 4510d6ba5d4782c633612faa678547e5888f8628 | [] | no_license | ttppggnnss/django_practice | 41668c6a5debced09ad999b68fc2ce2a84c4ef55 | 737e9a706688853bcfc21162ec815c103ca8e5eb | refs/heads/master | 2022-12-14T13:23:10.805575 | 2020-09-07T05:52:41 | 2020-09-07T05:52:41 | 293,249,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.views.decorators.http import require_POST
from .models import Issue, Reply
from .forms import IssueForm, ReplyForm
def index(request):
    """Render the issue list, newest issue first."""
    all_issues = Issue.objects.order_by('-pk')
    return render(request, 'issue/index.html', {'issues': all_issues})
def detail(request, issue_pk):
    """Show a single issue together with an empty reply form (404 if absent)."""
    context = {
        'issue': get_object_or_404(Issue, pk=issue_pk),
        'form': ReplyForm(),
    }
    return render(request, 'issue/detail.html', context)
def create(request):
    """Display the issue form (GET) or persist a new issue (POST).

    An invalid POST re-renders the bound form so validation errors show.
    """
    if request.method != 'POST':
        return render(request, 'issue/form.html', {'form': IssueForm()})
    form = IssueForm(request.POST)
    if not form.is_valid():
        # Bound form carries the error messages back to the template.
        return render(request, 'issue/form.html', {'form': form})
    issue = form.save(commit=False)
    # New issues start with zeroed vote counters for both options.
    issue.hitcountA = 0
    issue.hitcountB = 0
    issue.save()
    messages.info(request, '이슈가 작성되었습니다.')
    return redirect('issue:detail', issue.pk)
@require_POST
def reply(request, issue_pk):
    """Record an opinion on an issue and bump the chosen side's counter.

    POST only: this view mutates data, and `require_POST` was imported at the
    top of the file but never applied — previously a plain GET could reach it.
    Invalid submissions are silently ignored and redirect back to the detail
    page, preserving the original behaviour.
    """
    issue = get_object_or_404(Issue, pk=issue_pk)
    form = ReplyForm(request.POST)
    if form.is_valid():
        reply = form.save(commit=False)
        # 'A'/'B' pick maps onto the two per-issue counters.
        if reply.pick == 'A':
            issue.hitcountA += 1
        else:
            issue.hitcountB += 1
        issue.save()
        reply.issue = issue
        reply.save()
        messages.info(request, '의견이 반영되었습니다.')
    return redirect('issue:detail', issue.pk)
def random(request):
    """Redirect to the detail page of a randomly chosen issue."""
    # Local import aliased because this view itself is named `random`.
    import random as stdlib_random
    picked = stdlib_random.choice(Issue.objects.all())
    return redirect('issue:detail', picked.pk)
"kimsae123@naver.com"
] | kimsae123@naver.com |
e43253a3f7cdcbde46879df0ba839aebfd0da1fb | 4e96f383d4703ad8ee58869ed91a0c8432c8a051 | /Cura/Uranium/tests/TestTrust.py | 957cf8e23e489144c14580cb9b46630ce0356987 | [
"LGPL-3.0-only",
"GPL-3.0-only"
] | permissive | flight7788/3d-printing-with-moveo-1 | b2dba26010c4fa31815bc1d2d0966161a8600081 | 7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0 | refs/heads/Feature_Marlin_with_AlanBoy | 2022-08-30T18:36:44.785058 | 2020-05-30T07:52:58 | 2020-05-30T07:52:58 | 212,583,912 | 0 | 0 | MIT | 2020-05-16T07:39:47 | 2019-10-03T13:13:01 | C | UTF-8 | Python | false | false | 4,822 | py | import copy
from unittest.mock import patch, MagicMock
import pytest
import os
import random
import tempfile
from UM.Trust import TrustBasics, Trust
from scripts.signfile import signFile
from scripts.signfolder import signFolder
# Fixture layout: every combination of folder/subfolder/file below is created
# with random content, giving the signing tests a small tree to sign/verify.
_folder_names = ["a", "b"]
_subfolder_names = ["sub", "."]
_file_names = ["x.txt", "y.txt", "z.txt"]
_passphrase = "swordfish" # For code coverage: Securely storing a private key without one is probably better.
class TestTrust:
    # NOTE: Exhaustively testing trust is going to be difficult. We rely on audits (as well) in this matter.
    @pytest.fixture()
    def init_trust(self):
        """Yield (temp_path, private_key_path, Trust) over a signed fixture tree.

        Generates a fresh key pair, fills a temp directory with random files,
        and builds a Trust instance around the new public key. Cleanup runs
        after the yielding test finishes.
        """
        # create a temporary directory and save a test key-pair to it:
        temp_dir = tempfile.TemporaryDirectory()
        temp_path = temp_dir.name
        private_key, public_key = TrustBasics.generateNewKeyPair()
        private_path = os.path.join(temp_path, "test_private_key.pem")
        public_path = os.path.join(temp_path, "test_public_key.pem")
        TrustBasics.saveKeyPair(private_key, private_path, public_path, _passphrase)
        # create random files:
        all_paths = [os.path.abspath(os.path.join(temp_path, x, y, z))
                     for x in _folder_names for y in _subfolder_names for z in _file_names]
        for path in all_paths:
            folder_path = os.path.dirname(path)
            if not os.path.exists(folder_path):
                os.makedirs(folder_path)
            with open(path, "w") as file:
                file.write("".join(random.choice(['a', 'b', 'c', '0', '1', '2', '\n']) for _ in range(1024)))
        # instantiate a trust object with the public key that was just generated:
        trust = Trust(public_path) # Don't use Trust.getInstance as that uses the 'normal' public key instead of test.
        yield temp_path, private_path, trust
        temp_dir.cleanup()
    def test_signFileAndVerify(self, init_trust):
        """Single-file signing: verify, then ensure tampering/removal fails."""
        temp_dir, private_path, trust_instance = init_trust
        filepath_signed = os.path.join(temp_dir, _folder_names[0], _subfolder_names[0], _file_names[0])
        filepath_unsigned = os.path.join(temp_dir, _folder_names[1], _subfolder_names[0], _file_names[2])
        assert signFile(private_path, filepath_signed, _passphrase)
        assert trust_instance.signedFileCheck(filepath_signed)
        assert not trust_instance.signedFileCheck(filepath_unsigned)
        assert not trust_instance.signedFileCheck("file-not-found-check")
        # Without a public key, nothing should verify (key restored afterwards).
        public_key = copy.copy(trust_instance._public_key)
        trust_instance._public_key = None
        assert not trust_instance.signedFileCheck(filepath_signed)
        trust_instance._public_key = public_key
        # Tampered content must fail verification, and so must a deleted file.
        with open(filepath_signed, "w") as file:
            file.write("\nPay 10 Golden Talents To Get Your Data Back Or Else\n")
        assert not trust_instance.signedFolderCheck(filepath_signed)
        os.remove(filepath_signed)
        assert not trust_instance.signedFolderCheck(filepath_signed)
    def test_signFolderAndVerify(self, init_trust):
        """Folder signing: verify, then ensure tampering/removal fails."""
        temp_dir, private_path, trust_instance = init_trust
        folderpath_signed = os.path.join(temp_dir, _folder_names[0])
        folderpath_unsigned = os.path.join(temp_dir, _folder_names[1])
        assert signFolder(private_path, folderpath_signed, [], _passphrase)
        assert trust_instance.signedFolderCheck(folderpath_signed)
        assert not trust_instance.signedFolderCheck(folderpath_unsigned)
        assert not trust_instance.signedFileCheck("folder-not-found-check")
        # Without a public key, nothing should verify (key restored afterwards).
        public_key = copy.copy(trust_instance._public_key)
        trust_instance._public_key = None
        assert not trust_instance.signedFolderCheck(folderpath_signed)
        trust_instance._public_key = public_key
        # Any modified file inside the folder invalidates the folder signature.
        filepath = os.path.join(folderpath_signed, _subfolder_names[0], _file_names[1])
        with open(filepath, "w") as file:
            file.write("\nAlice and Bob will never notice this! Hehehehe.\n")
        assert not trust_instance.signedFolderCheck(folderpath_signed)
        os.remove(filepath)
        assert not trust_instance.signedFolderCheck(folderpath_signed)
    def test_initTrustFail(self):
        """Constructing Trust without a usable key must raise, not succeed."""
        with pytest.raises(Exception):
            Trust("key-not-found")
        with pytest.raises(Exception):
            Trust.getInstance()
        assert Trust.getInstanceOrNone() is None
    def test_keyIOFails(self):
        """Key save/load with bad paths should fail gracefully (no exception)."""
        private_key, public_key = TrustBasics.generateNewKeyPair()
        assert not TrustBasics.saveKeyPair(private_key, public_key, "file-not-found", _passphrase)
        assert TrustBasics.loadPrivateKey("key-not-found", _passphrase) is None
    def test_signNonexisting(self):
        """Signing a missing file yields None rather than raising."""
        private_key, public_key = TrustBasics.generateNewKeyPair()
        assert TrustBasics.getFileSignature("file-not-found", private_key) is None
| [
"t106360212@ntut.org.tw"
] | t106360212@ntut.org.tw |
330d78a566adabb9d6ed5fb63fdf38dd9679a5b0 | e65ae5bd9ae1c93e7117e630f7340bc73aa71212 | /lib/database/mongodb/tools/clean.py | 9c260e3a71b08cbd719f369f0034eac8bf867378 | [
"Apache-2.0"
] | permissive | nadirhamid/oneline | e98ff1ed81da0536f9602ecdde2fb2a4fe80d256 | 833ebef0e26ae8e0cc452756381227746d830b23 | refs/heads/master | 2021-01-21T04:27:41.715047 | 2016-05-30T03:50:34 | 2016-05-30T03:50:34 | 23,320,578 | 1 | 2 | NOASSERTION | 2020-03-12T17:22:24 | 2014-08-25T16:29:36 | Python | UTF-8 | Python | false | false | 1,112 | py | # Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Clean up script for build artifacts.
Only really intended to be used by internal build scripts.
"""
import os
import sys
# Remove compiled C extensions left over from previous builds. Each artifact
# is removed independently, so a missing first file no longer skips removal of
# the others (the original grouped two removes per try). Only OSError is
# swallowed -- the original bare `except:` also ate SystemExit and
# KeyboardInterrupt.
for _artifact in (
    "pymongo/_cmessage.so",
    "bson/_cbson.so",
    "pymongo/_cmessage.pyd",
    "bson/_cbson.pyd",
):
    try:
        os.remove(_artifact)
    except OSError:
        pass

# Sanity check: after cleaning, the C extensions must not be importable.
try:
    from pymongo import _cmessage
    sys.exit("could still import _cmessage")
except ImportError:
    pass
try:
    from bson import _cbson
    sys.exit("could still import _cbson")
except ImportError:
    pass
| [
"matrix.nad@gmail.com"
] | matrix.nad@gmail.com |
978556195551870174e1acfb634337587249570f | db3126a082b5b0d11bc3ea8c5b439a45d059909f | /pipenv/vendor/pythonfinder/models/pyenv.py | 1595a963a78bffe2f945c8f48a02d83113cedb09 | [
"MIT",
"BSD-3-Clause"
] | permissive | omkar-dsd/pipenv | 543da2f35246cf3004b1b27079e61c7f90c52cb4 | 810611d3c0205b6251a0d8c6501b3d4b160a4737 | refs/heads/master | 2020-04-04T16:37:14.988138 | 2018-11-04T07:27:11 | 2018-11-04T07:27:11 | 156,085,423 | 1 | 0 | null | 2018-11-04T13:33:08 | 2018-11-04T13:33:08 | null | UTF-8 | Python | false | false | 7,938 | py | # -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function
import logging
import operator
from collections import defaultdict
import attr
import sysconfig
from vistir.compat import Path
from ..utils import (
ensure_path,
optional_instance_of,
get_python_version,
filter_pythons,
unnest,
)
from .mixins import BaseFinder, BasePath
from .path import SystemPath, PathEntry
from .python import PythonVersion
logger = logging.getLogger(__name__)
@attr.s
class PyenvFinder(BaseFinder, BasePath):
    # Finder over a pyenv root directory: enumerates `<root>/versions/*`,
    # builds a PathEntry per version's scripts/bin directory, and exposes
    # lookup helpers over the discovered interpreters.
    root = attr.ib(default=None, validator=optional_instance_of(Path))
    #: ignore_unsupported should come before versions, because its value is used
    #: in versions's default initializer.
    ignore_unsupported = attr.ib(default=True)
    paths = attr.ib(default=attr.Factory(list))
    roots = attr.ib(default=attr.Factory(defaultdict))
    versions = attr.ib()
    pythons = attr.ib()
    @property
    def expanded_paths(self):
        # Flatten the per-version path entries into a single generator,
        # dropping any None placeholders.
        return (
            path for path in unnest(p for p in self.versions.values())
            if path is not None
        )
    @classmethod
    def version_from_bin_dir(cls, base_dir, name=None):
        # Probe a bin/ directory for its first discoverable python; returns
        # None when nothing matches.
        # NOTE(review): the `name` parameter is accepted but never used here.
        py_version = None
        version_path = PathEntry.create(
            path=base_dir.absolute().as_posix(),
            only_python=True,
            name=base_dir.parent.name,
        )
        py_version = next(iter(version_path.find_all_python_versions()), None)
        return py_version
    @versions.default
    def get_versions(self):
        # Build {(major, minor, patch, pre, dev, debug): PathEntry} for every
        # version folder under the pyenv root, skipping conda-style `envs`.
        versions = defaultdict()
        # sysconfig tells us the platform's scripts dir layout (bin/ vs Scripts\).
        bin_ = sysconfig._INSTALL_SCHEMES[sysconfig._get_default_scheme()]["scripts"]
        for p in self.root.glob("versions/*"):
            if p.parent.name == "envs" or p.name == "envs":
                continue
            bin_dir = Path(bin_.format(base=p.as_posix()))
            version_path = None
            if bin_dir.exists():
                version_path = PathEntry.create(
                    path=bin_dir.absolute().as_posix(),
                    only_python=False,
                    name=p.name,
                    is_root=True,
                )
            version = None
            try:
                # Fast path: the folder name itself parses as a version string.
                version = PythonVersion.parse(p.name)
            except ValueError:
                # Folder name isn't a version (e.g. "anaconda3-5.3.0"):
                # fall back to asking the interpreter found in bin/.
                entry = next(iter(version_path.find_all_python_versions()), None)
                if not entry:
                    if self.ignore_unsupported:
                        continue
                    raise
                else:
                    version = entry.py_version.as_dict()
            except Exception:
                if not self.ignore_unsupported:
                    raise
                logger.warning(
                    "Unsupported Python version %r, ignoring...", p.name, exc_info=True
                )
                continue
            if not version:
                continue
            version_tuple = (
                version.get("major"),
                version.get("minor"),
                version.get("patch"),
                version.get("is_prerelease"),
                version.get("is_devrelease"),
                version.get("is_debug"),
            )
            self.roots[p] = version_path
            versions[version_tuple] = version_path
            self.paths.append(version_path)
        return versions
    @pythons.default
    def get_pythons(self):
        # Union of the python executables found under every version path.
        pythons = defaultdict()
        for p in self.paths:
            pythons.update(p.pythons)
        return pythons
    @classmethod
    def create(cls, root, ignore_unsupported=True):
        # Convenience constructor that normalises `root` to a Path first.
        root = ensure_path(root)
        return cls(root=root, ignore_unsupported=ignore_unsupported)
    def find_all_python_versions(
        self,
        major=None,
        minor=None,
        patch=None,
        pre=None,
        dev=None,
        arch=None,
        name=None,
    ):
        """Search for a specific python version on the path. Return all copies

        :param major: Major python version to search for.
        :type major: int
        :param int minor: Minor python version to search for, defaults to None
        :param int patch: Patch python version to search for, defaults to None
        :param bool pre: Search for prereleases (default None) - prioritize releases if None
        :param bool dev: Search for devreleases (default None) - prioritize releases if None
        :param str arch: Architecture to include, e.g. '64bit', defaults to None
        :param str name: The name of a python version, e.g. ``anaconda3-5.3.0``
        :return: A list of :class:`~pythonfinder.models.PathEntry` instances matching the version requested.
        :rtype: List[:class:`~pythonfinder.models.PathEntry`]
        """
        version_matcher = operator.methodcaller(
            "matches",
            major=major,
            minor=minor,
            patch=patch,
            pre=pre,
            dev=dev,
            arch=arch,
            name=name,
        )
        py = operator.attrgetter("as_python")
        pythons = (
            py_ver for py_ver in (py(p) for p in self.pythons.values() if p is not None)
            if py_ver is not None
        )
        # pythons = filter(None, [p.as_python for p in self.pythons.values()])
        matching_versions = filter(lambda py: version_matcher(py), pythons)
        version_sort = operator.attrgetter("version_sort")
        return sorted(matching_versions, key=version_sort, reverse=True)
    def find_python_version(
        self,
        major=None,
        minor=None,
        patch=None,
        pre=None,
        dev=None,
        arch=None,
        name=None,
    ):
        """Search or self for the specified Python version and return the first match.

        :param major: Major version number.
        :type major: int
        :param int minor: Minor python version to search for, defaults to None
        :param int patch: Patch python version to search for, defaults to None
        :param bool pre: Search for prereleases (default None) - prioritize releases if None
        :param bool dev: Search for devreleases (default None) - prioritize releases if None
        :param str arch: Architecture to include, e.g. '64bit', defaults to None
        :param str name: The name of a python version, e.g. ``anaconda3-5.3.0``
        :returns: A :class:`~pythonfinder.models.PathEntry` instance matching the version requested.
        """
        version_matcher = operator.methodcaller(
            "matches",
            major=major,
            minor=minor,
            patch=patch,
            pre=pre,
            dev=dev,
            arch=arch,
            name=name,
        )
        pythons = filter(None, [p.as_python for p in self.pythons.values()])
        matching_versions = filter(lambda py: version_matcher(py), pythons)
        version_sort = operator.attrgetter("version_sort")
        # Highest sorted version wins; None when nothing matched.
        return next(iter(c for c in sorted(matching_versions, key=version_sort, reverse=True)), None)
@attr.s
class VersionPath(SystemPath):
    # A SystemPath specialised to one pyenv version directory: `base` is the
    # version's scripts/bin directory and `name` its version-folder name.
    base = attr.ib(default=None, validator=optional_instance_of(Path))
    name = attr.ib(default=None)
    @classmethod
    def create(cls, path, only_python=True, pythons=None, name=None):
        """Accepts a path to a base python version directory.

        Generates the pyenv version listings for it"""
        # NOTE(review): the `only_python` argument is ignored — PathEntry.create
        # below is always called with only_python=True; confirm intent.
        path = ensure_path(path)
        path_entries = defaultdict(PathEntry)
        # Platform-specific scripts dir template (bin/ on POSIX, Scripts\ on Windows).
        bin_ = sysconfig._INSTALL_SCHEMES[sysconfig._get_default_scheme()]["scripts"]
        if path.as_posix().endswith(Path(bin_).name):
            path = path.parent
        bin_dir = ensure_path(bin_.format(base=path.as_posix()))
        if not name:
            name = path.name
        current_entry = PathEntry.create(
            bin_dir, is_root=True, only_python=True, pythons=pythons, name=name
        )
        path_entries[bin_dir.as_posix()] = current_entry
        return cls(name=name, base=bin_dir, paths=path_entries)
| [
"dan@danryan.co"
] | dan@danryan.co |
a39a8d7424d5238f93f227379926bbe709bff466 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03042/s778355065.py | 4932b1ed9cf31ae9fbee443f2bfdab9f70f39a21 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | s = input()
head = int(s[:2])
tail = int(s[2:])
is_mmyy = False
is_yymm = False
if 1 <= head <= 12:
is_mmyy = True
if 1 <= tail <= 12:
is_yymm = True
if is_yymm and is_mmyy:
print('AMBIGUOUS')
elif is_yymm:
print('YYMM')
elif is_mmyy:
print('MMYY')
else:
print('NA')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
fa41ee39285b9f185a32f8833a5831870b294041 | 5bf1dca7bcbbaca219ff6ab31777fba0212aee5a | /bugs/migrations/0117_auto_20190511_0029.py | 631a3f8f9c1f5e463b3fea118959c34a2926f269 | [] | no_license | Code-Institute-Submissions/Nordlander | d8b5935b3e701fae1ae785043988103602a24bc3 | 6400e424c3dc9ae41acc6e8a4684d33ed01a94a3 | refs/heads/master | 2020-05-24T13:46:41.472340 | 2019-05-18T00:13:40 | 2019-05-18T00:13:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-11 00:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bugs', '0116_auto_20190511_0020'),
]
operations = [
migrations.AlterField(
model_name='bugs',
name='status',
field=models.CharField(choices=[('Fixed', 'Fixed'), ('Doing', 'Doing'), ('To do', 'To do')], default='To do', max_length=50),
),
migrations.AlterField(
model_name='bugs',
name='type',
field=models.CharField(choices=[('Items', 'Items'), ('Worlds', 'Worlds'), ('Base game', 'Base game'), ('Quests', 'Quests'), ('Skills', 'Skills')], default='Base game', max_length=50),
),
]
| [
"brookkynaston@live.fr"
] | brookkynaston@live.fr |
3600ecac625a77bb66845f500fb757fbeb2f6d6f | 2a3743ced45bd79826dcdc55f304da049f627f1b | /venv/lib/python3.7/site-packages/jedi/third_party/typeshed/third_party/2and3/google/protobuf/api_pb2.pyi | 36468780e0e5691d1e7758c0318c5b7de10aab6b | [
"MIT",
"Apache-2.0"
] | permissive | Dimasik007/Deribit_funding_rate_indicator | 12cc8cd7c0be564d6e34d9eae91940c62492ae2a | 3251602ae5249069489834f9afb57b11ff37750e | refs/heads/master | 2023-05-26T10:14:20.395939 | 2019-08-03T11:35:51 | 2019-08-03T11:35:51 | 198,705,946 | 5 | 3 | MIT | 2023-05-22T22:29:24 | 2019-07-24T20:32:19 | Python | UTF-8 | Python | false | false | 2,266 | pyi | from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message,
)
from google.protobuf.source_context_pb2 import (
SourceContext,
)
from google.protobuf.type_pb2 import (
Option,
Syntax,
)
from typing import (
Iterable,
Optional,
Text,
)
# Type stub for the google.protobuf.Api message (mirrors api.proto).
class Api(Message):
    name: Text
    version: Text
    syntax: Syntax
    @property
    def methods(self) -> RepeatedCompositeFieldContainer[Method]: ...
    @property
    def options(self) -> RepeatedCompositeFieldContainer[Option]: ...
    @property
    def source_context(self) -> SourceContext: ...
    @property
    def mixins(self) -> RepeatedCompositeFieldContainer[Mixin]: ...
    def __init__(self,
                 name: Optional[Text] = ...,
                 methods: Optional[Iterable[Method]] = ...,
                 options: Optional[Iterable[Option]] = ...,
                 version: Optional[Text] = ...,
                 source_context: Optional[SourceContext] = ...,
                 mixins: Optional[Iterable[Mixin]] = ...,
                 syntax: Optional[Syntax] = ...,
                 ) -> None: ...
    @classmethod
    def FromString(cls, s: bytes) -> Api: ...
# Type stub for google.protobuf.Method (one RPC method of an Api).
class Method(Message):
    name: Text
    request_type_url: Text
    request_streaming: bool
    response_type_url: Text
    response_streaming: bool
    syntax: Syntax
    @property
    def options(self) -> RepeatedCompositeFieldContainer[Option]: ...
    def __init__(self,
                 name: Optional[Text] = ...,
                 request_type_url: Optional[Text] = ...,
                 request_streaming: Optional[bool] = ...,
                 response_type_url: Optional[Text] = ...,
                 response_streaming: Optional[bool] = ...,
                 options: Optional[Iterable[Option]] = ...,
                 syntax: Optional[Syntax] = ...,
                 ) -> None: ...
    @classmethod
    def FromString(cls, s: bytes) -> Method: ...
# Type stub for google.protobuf.Mixin (declares interface inclusion).
class Mixin(Message):
    name: Text
    root: Text
    def __init__(self,
                 name: Optional[Text] = ...,
                 root: Optional[Text] = ...,
                 ) -> None: ...
    @classmethod
    def FromString(cls, s: bytes) -> Mixin: ...
| [
"dmitriy00vn@gmail.com"
] | dmitriy00vn@gmail.com |
d06ea1986f9e77cd18b88918ff0a489b6d7fa2af | 03b4b71e5a73288ffa7453ed5ccaf8e53057da79 | /MassDelete.py | 667047346a23660016c840c9ccb74b454af1a28b | [] | no_license | M4cs/MassDelete-Telegram-Discord | b25035dc98d08ec3ec96439186263bece7ec0760 | 37bc6465504025adb8b640d485585c5b1fd36964 | refs/heads/master | 2020-03-28T20:48:33.523481 | 2018-09-17T09:54:19 | 2018-09-17T09:54:19 | 149,103,991 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,419 | py | #!usr/bin/python3
# -*- coding: utf-8 -*-
import os, sys, time, psutil, win32com.client
def main():
    """Interactive console tool: mass-delete your own chat messages.

    Prompts for a message count and a deletion speed, counts down five
    seconds so the user can focus the Discord/Telegram window, then replays
    an UP / select-all / DELETE / ENTER key sequence once per message.
    """
    print("""
    Mass Discord Message Deleter by Macs
    Delete Messages In Any Chat In Discord or Telegram Windows App.
    With This You Will Be Able To Automate Deleting of Personal
    Messages Without The Use Of A Selfbot.
    Enjoy :)
    """)
    print("[?] How Many Messages Would You Like To Delete? [?]")
    message_count = int(input("[Int]» "))
    os.system("cls")
    print("[?] How Quickly Would You Like To Delete? [?]")
    speed = input("[Fast, Medium, Slow]» ").strip().lower()
    os.system("cls")
    # BUG FIX: the original tested `speed == "Fast" or "fast"`, which is always
    # truthy, so the Medium/Slow branches were dead and every run used 0.1s.
    if speed == "fast":
        delay = 0.1
    elif speed == "medium":
        delay = 0.5
    else:
        # "slow" and anything unrecognised fall back to the safest pace.
        delay = 1
    # Five-second countdown (with the original spinner frames) so the user can
    # focus the chat window before keystrokes start flying. Replaces five
    # copy-pasted print/sleep/cls stanzas.
    for remaining, spinner in zip((5, 4, 3, 2, 1), ("\\", "-", "/", "|", "\\")):
        print("[!] Please Tab Into Discord... Waiting %d Seconds... %s [!]" % (remaining, spinner))
        print("[!] Please Don't Touch Anything After Going Into Discord [!]")
        time.sleep(1)
        os.system("cls")
    print("[!] Starting to Delete %d Number of Messages [!]" % message_count)
    try:
        # Dispatch the shell once instead of once per message (was re-created
        # inside the loop on every iteration).
        shell = win32com.client.Dispatch("WScript.Shell")
        # BUG FIX: the original `while count <= n` loop deleted n + 1 messages;
        # range(n) performs exactly n deletions.
        for _ in range(message_count):
            for keystroke in ("{UP}", "^a", "{DELETE}", "{ENTER}", "{ENTER}"):
                shell.SendKeys(keystroke)
                time.sleep(delay)
    except KeyboardInterrupt:
        return
    os.system("cls")
    # BUG FIX: the original concatenated an int into this string, raising
    # TypeError right at the success message.
    print("[!] Completed! The Evidence Has Been Destroyed. %d Messages Deleted Successfully! [!]" % message_count)
main()
| [
"maxlikescs@gmail.com"
] | maxlikescs@gmail.com |
9b07931d123e3b125f28211b5515cfcfdd722c0c | d93768f381014faf371fe2a7e1b4752f8fb1ac28 | /backend/berry_26570/settings.py | ad0db43e6bccbdeb593d964cce355a9df91cc193 | [] | no_license | crowdbotics-apps/berry-26570 | 5effa97498aabc47bdc3ff4315e4b8db9d7f90d4 | 6d262c59ac8c0697ec11424ae8bf9efdb96b36d9 | refs/heads/master | 2023-04-30T17:27:08.418542 | 2021-05-11T20:01:06 | 2021-05-11T20:01:06 | 366,499,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,099 | py | """
Django settings for berry_26570 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'berry_26570.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'berry_26570.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
9dee772e81806a8fd6e131d2f2364f1a18d3ebc6 | 2f74c4d2e5c6dd51eb3eaf0ee4b97122b26e7066 | /unit_02/04_object-oriented/2-Inheritance/rpg/characters.py | b1a8ceecbd20dd879227f7f8fd3ffe0936fbd126 | [
"MIT"
] | permissive | duliodenis/python_master_degree | c6a4ccf5d98c48cfc1efd29dfc116bf55b6b4f01 | 3ab76838ce2fc1606f28e988a3273dd27122a621 | refs/heads/master | 2020-04-14T09:03:51.863305 | 2019-07-22T23:05:19 | 2019-07-22T23:05:19 | 163,751,089 | 21 | 5 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | class Character:
def __init__(self, name="", **kwargs):
if not name:
raise ValueError("'name' is required")
self.name = name
for key, value in kwargs.items():
setattr(self, key, value)
| [
"dulio.denis@yahoo.com"
] | dulio.denis@yahoo.com |
f47f9093f017659333e6273e7216cae6b7c19062 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/agc012/B/4831396.py | 17e998546c2da875090f921a4089fb8b0487c03a | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | n, m = map(int, input().split())
g = {}
used = {i:[False for _ in range(11)] for i in range(n)}
color = [0 for _ in range(n)]
for _ in range(m):
u, v = map(int, input().split())
u-=1
v-=1
if u not in g:
g[u] = []
if v not in g:
g[v] = []
g[u].append(v)
g[v].append(u)
q = int(input())
Q = []
for _ in range(q):
node, dis, col = map(int, input().split())
Q.append([node-1, dis, col])
Q = Q[::-1]
def bfs(now, dist, col):
if dist < 0:
return
if used[now][dist]:
return
used[now][dist] = True
if not color[now]:
color[now] = col
if now in g:
for x in g[now]:
bfs(x, dist - 1, col)
for node, dis, col in Q:
bfs(node, dis, col)
for x in color:
print(x) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
e618b47cc8e5a58686cbf86115b424cdba8a9a32 | 7fd0c4608e32c53fea935ac63cacf66e1a0c971d | /Canonical_Monojet/AxialModel/DMsimp_s_spin1_1750_100_801/parameters.py | daa10ce68a7dcb62e225b3741b6266ecac4d16ac | [] | no_license | Quantumapple/MadGraph5_cards | 285f8a303b04b9745abfc83f5ea4fb06a2922fc9 | 3db368ada01f59bace11b48eab2f58ab40ba29f2 | refs/heads/master | 2020-05-02T20:43:23.791641 | 2020-01-17T16:10:46 | 2020-01-17T16:10:46 | 178,199,838 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 18,303 | py | # This file was automatically created by FeynRules 2.3.7
# Mathematica version: 9.0 for Linux x86 (64-bit) (November 20, 2012)
# Date: Mon 24 Aug 2015 13:37:17
from object_library import all_parameters, Parameter
from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot
# This is a default parameter object representing 0.
ZERO = Parameter(name = 'ZERO',
nature = 'internal',
type = 'real',
value = '0.0',
texname = '0')
# This is a default parameter object representing the renormalization scale (MU_R).
MU_R = Parameter(name = 'MU_R',
nature = 'external',
type = 'real',
value = 91.188,
texname = '\\text{\\mu_r}',
lhablock = 'LOOP',
lhacode = [1])
# User-defined parameters.
cabi = Parameter(name = 'cabi',
nature = 'external',
type = 'real',
value = 0.227736,
texname = '\\theta _c',
lhablock = 'CKMBLOCK',
lhacode = [ 1 ])
gVXc = Parameter(name = 'gVXc',
nature = 'external',
type = 'real',
value = 0.,
texname = 'g_{\\text{VXc}}',
lhablock = 'DMINPUTS',
lhacode = [ 1 ])
gVXd = Parameter(name = 'gVXd',
nature = 'external',
type = 'real',
value = 1e-99,
texname = 'g_{\\text{VXd}}',
lhablock = 'DMINPUTS',
lhacode = [ 2 ])
gAXd = Parameter(name = 'gAXd',
nature = 'external',
type = 'real',
value = 0.9999999,
texname = 'g_{\\text{AXd}}',
lhablock = 'DMINPUTS',
lhacode = [ 3 ])
gVd11 = Parameter(name = 'gVd11',
nature = 'external',
type = 'real',
value = 1e-99,
texname = 'g_{\\text{Vd11}}',
lhablock = 'DMINPUTS',
lhacode = [ 4 ])
gVu11 = Parameter(name = 'gVu11',
nature = 'external',
type = 'real',
value = 1e-99,
texname = 'g_{\\text{Vu11}}',
lhablock = 'DMINPUTS',
lhacode = [ 5 ])
gVd22 = Parameter(name = 'gVd22',
nature = 'external',
type = 'real',
value = 1e-99,
texname = 'g_{\\text{Vd22}}',
lhablock = 'DMINPUTS',
lhacode = [ 6 ])
gVu22 = Parameter(name = 'gVu22',
nature = 'external',
type = 'real',
value = 1e-99,
texname = 'g_{\\text{Vu22}}',
lhablock = 'DMINPUTS',
lhacode = [ 7 ])
gVd33 = Parameter(name = 'gVd33',
nature = 'external',
type = 'real',
value = 1e-99,
texname = 'g_{\\text{Vd33}}',
lhablock = 'DMINPUTS',
lhacode = [ 8 ])
gVu33 = Parameter(name = 'gVu33',
nature = 'external',
type = 'real',
value = 1e-99,
texname = 'g_{\\text{Vu33}}',
lhablock = 'DMINPUTS',
lhacode = [ 9 ])
gAd11 = Parameter(name = 'gAd11',
nature = 'external',
type = 'real',
value = 0.25,
texname = 'g_{\\text{Ad11}}',
lhablock = 'DMINPUTS',
lhacode = [ 10 ])
gAu11 = Parameter(name = 'gAu11',
nature = 'external',
type = 'real',
value = 0.25,
texname = 'g_{\\text{Au11}}',
lhablock = 'DMINPUTS',
lhacode = [ 11 ])
gAd22 = Parameter(name = 'gAd22',
nature = 'external',
type = 'real',
value = 0.25,
texname = 'g_{\\text{Ad22}}',
lhablock = 'DMINPUTS',
lhacode = [ 12 ])
gAu22 = Parameter(name = 'gAu22',
nature = 'external',
type = 'real',
value = 0.25,
texname = 'g_{\\text{Au22}}',
lhablock = 'DMINPUTS',
lhacode = [ 13 ])
gAd33 = Parameter(name = 'gAd33',
nature = 'external',
type = 'real',
value = 0.25,
texname = 'g_{\\text{Ad33}}',
lhablock = 'DMINPUTS',
lhacode = [ 14 ])
gAu33 = Parameter(name = 'gAu33',
nature = 'external',
type = 'real',
value = 0.25,
texname = 'g_{\\text{Au33}}',
lhablock = 'DMINPUTS',
lhacode = [ 15 ])
gVh = Parameter(name = 'gVh',
nature = 'external',
type = 'real',
value = 0.,
texname = 'g_{\\text{Vh}}',
lhablock = 'DMINPUTS',
lhacode = [ 16 ])
aEWM1 = Parameter(name = 'aEWM1',
nature = 'external',
type = 'real',
value = 127.9,
texname = '\\text{aEWM1}',
lhablock = 'SMINPUTS',
lhacode = [ 1 ])
Gf = Parameter(name = 'Gf',
nature = 'external',
type = 'real',
value = 0.0000116637,
texname = 'G_f',
lhablock = 'SMINPUTS',
lhacode = [ 2 ])
aS = Parameter(name = 'aS',
nature = 'external',
type = 'real',
value = 0.1184,
texname = '\\alpha _s',
lhablock = 'SMINPUTS',
lhacode = [ 3 ])
ymt = Parameter(name = 'ymt',
nature = 'external',
type = 'real',
value = 172,
texname = '\\text{ymt}',
lhablock = 'YUKAWA',
lhacode = [ 6 ])
ymtau = Parameter(name = 'ymtau',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{ymtau}',
lhablock = 'YUKAWA',
lhacode = [ 15 ])
MZ = Parameter(name = 'MZ',
nature = 'external',
type = 'real',
value = 91.1876,
texname = '\\text{MZ}',
lhablock = 'MASS',
lhacode = [ 23 ])
MTA = Parameter(name = 'MTA',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{MTA}',
lhablock = 'MASS',
lhacode = [ 15 ])
MT = Parameter(name = 'MT',
nature = 'external',
type = 'real',
value = 172,
texname = '\\text{MT}',
lhablock = 'MASS',
lhacode = [ 6 ])
MH = Parameter(name = 'MH',
nature = 'external',
type = 'real',
value = 125,
texname = '\\text{MH}',
lhablock = 'MASS',
lhacode = [ 25 ])
MXr = Parameter(name = 'MXr',
nature = 'external',
type = 'real',
value = 10.,
texname = '\\text{MXr}',
lhablock = 'MASS',
lhacode = [ 5000001 ])
MXc = Parameter(name = 'MXc',
nature = 'external',
type = 'real',
value = 10.,
texname = '\\text{MXc}',
lhablock = 'MASS',
lhacode = [ 51 ])
MXd = Parameter(name = 'MXd',
nature = 'external',
type = 'real',
value = 100.0,
texname = '\\text{MXd}',
lhablock = 'MASS',
lhacode = [ 9100012 ])
MY1 = Parameter(name = 'MY1',
nature = 'external',
type = 'real',
value = 1750,
texname = '\\text{MY1}',
lhablock = 'MASS',
lhacode = [ 55 ])
WZ = Parameter(name = 'WZ',
nature = 'external',
type = 'real',
value = 2.4952,
texname = '\\text{WZ}',
lhablock = 'DECAY',
lhacode = [ 23 ])
WW = Parameter(name = 'WW',
nature = 'external',
type = 'real',
value = 2.085,
texname = '\\text{WW}',
lhablock = 'DECAY',
lhacode = [ 24 ])
WT = Parameter(name = 'WT',
nature = 'external',
type = 'real',
value = 1.50833649,
texname = '\\text{WT}',
lhablock = 'DECAY',
lhacode = [ 6 ])
WH = Parameter(name = 'WH',
nature = 'external',
type = 'real',
value = 0.00407,
texname = '\\text{WH}',
lhablock = 'DECAY',
lhacode = [ 25 ])
#WY1 = Parameter(name = 'WY1',
# nature = 'external',
# type = 'real',
# value = 10.,
# texname = '\\text{WY1}',
# lhablock = 'DECAY',
# lhacode = [ 55 ])
CKM1x1 = Parameter(name = 'CKM1x1',
nature = 'internal',
type = 'complex',
value = 'cmath.cos(cabi)',
texname = '\\text{CKM1x1}')
CKM1x2 = Parameter(name = 'CKM1x2',
nature = 'internal',
type = 'complex',
value = 'cmath.sin(cabi)',
texname = '\\text{CKM1x2}')
CKM2x1 = Parameter(name = 'CKM2x1',
nature = 'internal',
type = 'complex',
value = '-cmath.sin(cabi)',
texname = '\\text{CKM2x1}')
CKM2x2 = Parameter(name = 'CKM2x2',
nature = 'internal',
type = 'complex',
value = 'cmath.cos(cabi)',
texname = '\\text{CKM2x2}')
aEW = Parameter(name = 'aEW',
nature = 'internal',
type = 'real',
value = '1/aEWM1',
texname = '\\alpha _{\\text{EW}}')
G = Parameter(name = 'G',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aS)*cmath.sqrt(cmath.pi)',
texname = 'G')
MW = Parameter(name = 'MW',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(MZ**2/2. + cmath.sqrt(MZ**4/4. - (aEW*cmath.pi*MZ**2)/(Gf*cmath.sqrt(2))))',
texname = 'M_W')
ee = Parameter(name = 'ee',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aEW)*cmath.sqrt(cmath.pi)',
texname = 'e')
sw2 = Parameter(name = 'sw2',
nature = 'internal',
type = 'real',
value = '1 - MW**2/MZ**2',
texname = '\\text{sw2}')
cw = Parameter(name = 'cw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(1 - sw2)',
texname = 'c_w')
sw = Parameter(name = 'sw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(sw2)',
texname = 's_w')
g1 = Parameter(name = 'g1',
nature = 'internal',
type = 'real',
value = 'ee/cw',
texname = 'g_1')
gw = Parameter(name = 'gw',
nature = 'internal',
type = 'real',
value = 'ee/sw',
texname = 'g_w')
vev = Parameter(name = 'vev',
nature = 'internal',
type = 'real',
value = '(2*MW*sw)/ee',
texname = '\\text{vev}')
lam = Parameter(name = 'lam',
nature = 'internal',
type = 'real',
value = 'MH**2/(2.*vev**2)',
texname = '\\text{lam}')
yt = Parameter(name = 'yt',
nature = 'internal',
type = 'real',
value = '(ymt*cmath.sqrt(2))/vev',
texname = '\\text{yt}')
ytau = Parameter(name = 'ytau',
nature = 'internal',
type = 'real',
value = '(ymtau*cmath.sqrt(2))/vev',
texname = '\\text{ytau}')
muH = Parameter(name = 'muH',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(lam*vev**2)',
texname = '\\mu')
I2a33 = Parameter(name = 'I2a33',
nature = 'internal',
type = 'complex',
value = 'yt',
texname = '\\text{I2a33}')
I3a33 = Parameter(name = 'I3a33',
nature = 'internal',
type = 'complex',
value = 'yt',
texname = '\\text{I3a33}')
MFU = Parameter(name = 'MFU',
nature = 'internal',
type = 'real',
value = '0.002550',
texname = '\\text{MFU}')
MFC = Parameter(name = 'MFC',
nature = 'internal',
type = 'real',
value = '1.27',
texname = '\\text{MFC}')
MFD = Parameter(name = 'MFD',
nature = 'internal',
type = 'real',
value = '0.00504',
texname = '\\text{MFD}')
MFS = Parameter(name = 'MFS',
nature = 'internal',
type = 'real',
value = '0.101',
texname = '\\text{MFS}')
MFB = Parameter(name = 'MFB',
nature = 'internal',
type = 'real',
value = '4.7',
texname = '\\text{MFB}')
# ---------------------------------------------------------------------------
# Hand-added partial decay widths of the spin-1 mediator Y1 (these are not
# FeynRules output).  Formulas follow arXiv:1411.0535 for a mediator of mass
# M decaying to a fermion pair of mass m_f:
#   vector coupling: Gamma = g_V^2 (M^2 + 2 m_f^2) sqrt(1 - 4 m_f^2/M^2) / (12 pi M)
#   axial coupling:  Gamma = g_A^2 (M^2 - 4 m_f^2) sqrt(1 - 4 m_f^2/M^2) / (12 pi M)
# The max(..., 0.01) floor keeps the sqrt argument positive for channels that
# can sit at or below threshold (t tbar and the dark-matter pair).
# FIX: the up-type quark widths (WVuu/WVcc/WVtt and WAuu/WAcc/WAtt) previously
# used the DOWN-type couplings gVd*/gAd*; they now use gVu*/gAu* as the
# DMINPUTS block intends.  Numerically identical for cards (like this one)
# that set up- and down-type couplings equal.
# vector, 1411.0535
WVuu = Parameter(name = 'WVuu',
               nature = 'internal',
               type = 'real',
               value = '((gVu11**2)*(MY1**2 + 2*MFU**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFU**2/MY1**2))',
               texname = '\\text{WVuu}')

WVcc = Parameter(name = 'WVcc',
               nature = 'internal',
               type = 'real',
               value = '((gVu22**2)*(MY1**2 + 2*MFC**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFC**2/MY1**2))',
               texname = '\\text{WVcc}')

# Top channel may be below threshold; floor the sqrt argument at 0.01.
WVtt = Parameter(name = 'WVtt',
               nature = 'internal',
               type = 'real',
               value = '((gVu33**2)*(MY1**2 + 2*MT**2)/(12*MY1*cmath.pi))*cmath.sqrt(max(1-(4*MT**2/MY1**2),0.01))',
               texname = '\\text{WVtt}')

WVdd = Parameter(name = 'WVdd',
               nature = 'internal',
               type = 'real',
               value = '((gVd11**2)*(MY1**2 + 2*MFD**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFD**2/MY1**2))',
               texname = '\\text{WVdd}')

WVss = Parameter(name = 'WVss',
               nature = 'internal',
               type = 'real',
               value = '((gVd22**2)*(MY1**2 + 2*MFS**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFS**2/MY1**2))',
               texname = '\\text{WVss}')

WVbb = Parameter(name = 'WVbb',
               nature = 'internal',
               type = 'real',
               value = '((gVd33**2)*(MY1**2 + 2*MFB**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFB**2/MY1**2))',
               texname = '\\text{WVbb}')

# Invisible (dark-matter) channel; also floored against sub-threshold MXd.
WVDM = Parameter(name = 'WVDM',
               nature = 'internal',
               type = 'real',
               value = '((gVXd**2)*(MY1**2 + 2*MXd**2)/(12*MY1*cmath.pi))*cmath.sqrt(max(1-(4*MXd**2/MY1**2),0.01))',
               texname = '\\text{WVDM}')

# axial, 1411.0535
WAuu = Parameter(name = 'WAuu',
               nature = 'internal',
               type = 'real',
               value = '((gAu11**2)*(MY1**2 - 4*MFU**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFU**2/MY1**2))',
               texname = '\\text{WAuu}')

WAcc = Parameter(name = 'WAcc',
               nature = 'internal',
               type = 'real',
               value = '((gAu22**2)*(MY1**2 - 4*MFC**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFC**2/MY1**2))',
               texname = '\\text{WAcc}')

WAtt = Parameter(name = 'WAtt',
               nature = 'internal',
               type = 'real',
               value = '((gAu33**2)*(MY1**2 - 4*MT**2)/(12*MY1*cmath.pi))*cmath.sqrt(max(1-(4*MT**2/MY1**2),0.01))',
               texname = '\\text{WAtt}')

WAdd = Parameter(name = 'WAdd',
               nature = 'internal',
               type = 'real',
               value = '((gAd11**2)*(MY1**2 - 4*MFD**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFD**2/MY1**2))',
               texname = '\\text{WAdd}')

WAss = Parameter(name = 'WAss',
               nature = 'internal',
               type = 'real',
               value = '((gAd22**2)*(MY1**2 - 4*MFS**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFS**2/MY1**2))',
               texname = '\\text{WAss}')

WAbb = Parameter(name = 'WAbb',
               nature = 'internal',
               type = 'real',
               value = '((gAd33**2)*(MY1**2 - 4*MFB**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFB**2/MY1**2))',
               texname = '\\text{WAbb}')

WADM = Parameter(name = 'WADM',
               nature = 'internal',
               type = 'real',
               value = '((gAXd**2)*(MY1**2 - 4*MXd**2)/(12*MY1*cmath.pi))*cmath.sqrt(max(1-(4*MXd**2/MY1**2),0.01))',
               texname = '\\text{WADM}')

# Total width: invisible channels plus a colour factor of 3 for each quark pair.
sumY1 = Parameter(name = 'sumY1',
               nature = 'internal',
               type = 'real',
               value = 'WVDM + WADM + 3*(WVuu+WVcc+WVtt+WVdd+WVss+WVbb+WAuu+WAcc+WAtt+WAdd+WAss+WAbb)',
               texname = '\\text{sumZpV}')

# Width actually written to the DECAY block for PDG id 55 (supersedes the
# commented-out external WY1 definition above).
WY1 = Parameter(name = 'WY1',
               nature = 'internal',
               type = 'real',
               value = 'sumY1',
               texname = '\\text{WY1}',
               lhablock = 'DECAY',
               lhacode = [ 55 ])
| [
"jongho.lee@cern.ch"
] | jongho.lee@cern.ch |
0276d5cdd8155571af835e9e1fd67c2cac949128 | 2e4d33bad14ab88195fc9aac3b2d0841bacfa767 | /python-udemy/Assessments_and_Challenges/Objects_and_Data_Structures/list.py | 1a2d2a9959f9a1b75d1825469af482858c1f824d | [] | no_license | Richiewong07/Python-Exercises | 062ea8386abad50efccfe398ca61972435f9e218 | a99816b23ad6fd338b26d66b5ccfd09bf6ddc527 | refs/heads/master | 2018-07-09T01:48:36.777998 | 2018-06-01T03:55:10 | 2018-06-01T03:55:10 | 103,578,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | # Reassign 'hello' in this nested list to say 'goodbye' instead:
list = [1,2,[3,4,'hello']]
list[2][2] = "goodbye"
print(list)
# Sort the list below:
list2 = [5,3,4,6,1]
print(sorted(list2))
list2.sort()
print(list2)
| [
"richiewong07@yahoo.com"
] | richiewong07@yahoo.com |
b1962e0e9f2c59f82edbe2c5876c621034548995 | 7b102f9c8f2e3f9240090d1d67af50333a2ba98d | /gbd_2019/shared_code/cod_database/12. Noise reduction/run_phase_finalclean.py | e571261061d68d1868399f75b2d3a6e346799569 | [] | no_license | Nermin-Ghith/ihme-modeling | 9c8ec56b249cb0c417361102724fef1e6e0bcebd | 746ea5fb76a9c049c37a8c15aa089c041a90a6d5 | refs/heads/main | 2023-04-13T00:26:55.363986 | 2020-10-28T19:51:51 | 2020-10-28T19:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,034 | py | """Final step before uploading data.
This phase includes raking, age aggregation, creating upper/lower bounds
for CoDViz, and calculating variances for CODEm
"""
import sys
import os
from cod_prep.utils import (
print_log_message,
enforce_asr
)
from cod_prep.claude.cf_adjustments import Raker
from cod_prep.claude.aggregators import AgeAggregator
from configurator import Configurator
from cod_prep.claude.redistribution_variance import (
dataset_has_redistribution_variance,
RedistributionVarianceEstimator
)
from cod_prep.claude.rate_adjustments import NonZeroFloorer
from claude_io import (
get_claude_data,
write_phase_output,
get_datasets
)
from cod_prep.downloaders import (
get_pop,
get_env,
get_current_location_hierarchy,
get_value_from_nid,
get_age_weights,
get_current_cause_hierarchy,
get_ages
)
CONF = Configurator('standard')
PHASE_ANTECEDENT = 'noisereduction'
PHASE_NAME = 'finalclean'
# sources that are noise reduced, but not raked
NOT_RAKED_SOURCES = [
"Maternal_report", "SUSENAS", "China_MMS", "China_Child",
]
MATERNAL_NR_SOURCES = [
"Mexico_BIRMM", "Maternal_report", "SUSENAS",
"China_MMS", "China_Child",
]
def run_phase(df, nid, extract_type_id, env_run_id,
              pop_run_id, location_set_version_id, cause_set_version_id):
    """Run the final cleaning phase on one dataset (nid / extract_type pair).

    Steps: rake subnational estimates where applicable, drop zero-sample-size
    and pre-1980 rows, enforce age/sex restrictions, fit cause fractions to
    the non-zero floor, add age aggregates, and compute uncertainty metrics
    for CODEm/CoDViz.  Returns the cleaned dataframe restricted to the
    expected output columns.
    """
    cache_dir = CONF.get_directory('db_cache')
    # Dataset attributes that drive the raking / variance decisions below.
    source = get_value_from_nid(
        nid, 'source', extract_type_id=extract_type_id,
        location_set_version_id=location_set_version_id
    )
    data_type_id = get_value_from_nid(
        nid, 'data_type_id', extract_type_id=extract_type_id,
        location_set_version_id=location_set_version_id
    )
    # NOTE(review): iso3 is fetched but never used in this function.
    iso3 = get_value_from_nid(nid, 'iso3', extract_type_id=extract_type_id,
                              location_set_version_id=location_set_version_id
    )
    # Shared kwargs for the downloader helpers: read from cache, never rerun.
    standard_cache_options = {
        'force_rerun': False,
        'block_rerun': True,
        'cache_dir': cache_dir,
        'cache_results': False
    }
    # ************************************************************
    # Get cached metadata
    # ************************************************************
    print_log_message("Getting cached db resources")
    location_hierarchy = get_current_location_hierarchy(
        location_set_version_id=location_set_version_id,
        **standard_cache_options
    )
    pop_df = get_pop(pop_run_id=pop_run_id,
                     **standard_cache_options)
    env_df = get_env(env_run_id=env_run_id,
                     **standard_cache_options)
    age_weight_df = get_age_weights(**standard_cache_options)
    cause_meta_df = get_current_cause_hierarchy(
        cause_set_version_id=cause_set_version_id,
        **standard_cache_options)
    age_meta_df = get_ages(**standard_cache_options)
    # ************************************************************
    # RAKING
    # ************************************************************
    # Rake if appropriate based on this logic:
    # VR-type data (data_type_id 8/9/10, excluding Other_Maternal) and the
    # explicit maternal noise-reduced sources get raked, unless the source
    # is in the "noise reduced but not raked" list.
    if ((data_type_id in [8, 9, 10] and (source != 'Other_Maternal')) or
            source in MATERNAL_NR_SOURCES):
        if source not in NOT_RAKED_SOURCES:
            print_log_message("Raking sub national estimates")
            raker = Raker(df, source)
            df = raker.get_computed_dataframe(location_hierarchy)
    # for the Other_Maternal source we only rake household surveys
    elif source == "Other_Maternal":
        model_groups = get_datasets(
            nid, extract_type_id, block_rerun=True,
            force_rerun=False
        ).model_group.unique()
        # A single nid/extract_type pair is expected to map to one model group.
        assert len(model_groups) == 1
        model_group = model_groups[0]
        if "HH_SURVEYS" in model_group:
            # India household surveys need double raking (state and national).
            if model_group == 'MATERNAL-HH_SURVEYS-IND':
                print_log_message("Raking sub national estimates," \
                    " applying double raking for India Maternal"
                )
                raker = Raker(df, source, double=True)
                df = raker.get_computed_dataframe(location_hierarchy)
            else:
                print_log_message("Raking sub national estimates")
                raker = Raker(df, source)
                df = raker.get_computed_dataframe(location_hierarchy)
    # ************************************************************
    # DROP ZERO SAMPLE SIZE AND RESTRICTED AGE/SEX DATA
    # ************************************************************
    # data with zero sample size is almost certainly some anomalous result
    # of a program generating data it shouldn't have, and it shouldn't be
    # included in codem models. Was probably already dropped, anyway, before
    # running noise reduction.
    df = df.query('sample_size != 0')
    # uploading data before 1980 is a waste of space because neither codem
    # nor codviz use it
    df = df.loc[df['year_id'] >= 1980]
    print_log_message("Enforcing age sex restrictions")
    # this actually drops data from the dataframe if it violates age/sex
    # restrictions (e.g. male maternity disorders)
    df = enforce_asr(df, cause_meta_df, age_meta_df)
    # ************************************************************
    # FIT EACH DRAW TO NON-ZERO FLOOR
    # ************************************************************
    print_log_message("Fitting to non-zero floor...")
    nonzero_floorer = NonZeroFloorer(df)
    df = nonzero_floorer.get_computed_dataframe(pop_df, env_df, cause_meta_df)
    # ************************************************************
    # AGE AGGREGATION
    # ************************************************************
    print_log_message("Creating age standardized and all ages groups")
    age_aggregator = AgeAggregator(df, pop_df, env_df, age_weight_df)
    df = age_aggregator.get_computed_dataframe()
    # ************************************************************
    # Make CODEm and CoDViz metrics for uncertainty
    # ************************************************************
    # columns that should be present in the phase output
    final_cols = [
        'age_group_id', 'cause_id', 'cf_corr', 'cf_final', 'cf_raw', 'cf_rd',
        'extract_type_id', 'location_id', 'nid', 'sample_size',
        'sex_id', 'site_id', 'year_id'
    ]
    # Use draws to make metrics for uncertainty to
    # be used by CODEm and CoDViz
    # also creates cf_final from mean of draws
    print_log_message("Making metrics for CODEm and CoDViz")
    if dataset_has_redistribution_variance(data_type_id, source):
        df = RedistributionVarianceEstimator.make_codem_codviz_metrics(
            df, pop_df)
        final_cols += ['cf_final_high_rd', 'cf_final_low_rd',
                       'variance_rd_log_dr', 'variance_rd_logit_cf']
    # we did this in the old code-- no cfs over 1 nor below 0
    for cf_col in ['cf_final', 'cf_rd', 'cf_raw', 'cf_corr']:
        df.loc[df[cf_col] > 1, cf_col] = 1
        df.loc[df[cf_col] < 0, cf_col] = 0
    df = df[final_cols]
    return df
def main(nid, extract_type_id, launch_set_id):
    """Read the noise-reduced data, run the final clean, write the output."""
    print_log_message("Reading {} data".format(PHASE_ANTECEDENT))
    df = get_claude_data(
        PHASE_ANTECEDENT, nid=nid, extract_type_id=extract_type_id
    )

    # Resolve the run / version ids this phase should operate against.
    run_ids = [
        int(CONF.get_id(key))
        for key in ('env_run', 'pop_run',
                    'location_set_version', 'cause_set_version')
    ]

    # Noise reduction writes the fraction as 'cf'; downstream expects 'cf_final'.
    df = run_phase(
        df.rename(columns={'cf': 'cf_final'}),
        nid, extract_type_id, *run_ids
    )

    print_log_message(
        "Writing {n} rows of output for launch set {ls}, nid {nid}, extract "
        "{e}".format(n=len(df), ls=launch_set_id, nid=nid, e=extract_type_id)
    )
    write_phase_output(df, PHASE_NAME, nid, extract_type_id, launch_set_id)
# CLI entry point: run_phase_finalclean.py <nid> <extract_type_id> <launch_set_id>
if __name__ == "__main__":
    nid = int(sys.argv[1])
    extract_type_id = int(sys.argv[2])
    launch_set_id = int(sys.argv[3])
    main(nid, extract_type_id, launch_set_id)
| [
"cheth@uw.edu"
] | cheth@uw.edu |
be725ea9e6bae1bf24a18a6c291c9b57be27a15e | 78c144b1341f6b6d791e2949a95963033f27478c | /bauh/gems/arch/mapper.py | 55e05d16ca1c80d439831a6b0744fcb7588a6551 | [
"Zlib"
] | permissive | albanobattistella/bauh | 9b50b9a0262f7a914aeb11456dffe58264cbba7b | e00ae4f05b8c7ffde7407333e55446768eb89cce | refs/heads/master | 2022-08-15T17:13:30.542002 | 2019-11-25T19:37:39 | 2019-11-25T19:37:39 | 223,788,308 | 1 | 0 | NOASSERTION | 2019-11-24T18:10:20 | 2019-11-24T18:10:20 | null | UTF-8 | Python | false | false | 5,010 | py | import re
from datetime import datetime
from bauh.api.abstract.model import PackageStatus
from bauh.api.http import HttpClient
from bauh.gems.arch.model import ArchPackage
URL_PKG_DOWNLOAD = 'https://aur.archlinux.org/{}'
RE_LETTERS = re.compile(r'\.([a-zA-Z]+)-\d+$')
RE_VERSION_SPLIT = re.compile(r'[a-zA-Z]+|\d+|[\.\-_@#]+')
BAUH_PACKAGES = {'bauh', 'bauh-staging'}
RE_SFX = ('r', 're', 'release')
GA_SFX = ('ga', 'ge')
RC_SFX = ('rc',)
BETA_SFX = ('b', 'beta')
AL_SFX = ('alpha', 'alfa')
DEV_SFX = ('dev', 'devel', 'development')
V_SUFFIX_MAP = {s: {'c': sfxs[0], 'p': idx} for idx, sfxs in enumerate([RE_SFX, GA_SFX, RC_SFX, BETA_SFX, AL_SFX, DEV_SFX]) for s in sfxs}
class ArchDataMapper:
    """Maps AUR RPC API payloads onto ArchPackage model instances."""

    def __init__(self, http_client: HttpClient):
        self.http_client = http_client

    def fill_api_data(self, pkg: ArchPackage, package: dict, fill_version: bool = True):
        """Copy the fields of one AUR RPC result dict onto `pkg` in place.

        When fill_version is False the package's existing (installed)
        version/description are kept and only the remaining metadata is set.
        """
        version = package.get('Version')
        if version:
            # Strip an epoch prefix ("epoch:version" -> "version").
            version = version.split(':')
            version = version[0] if len(version) == 1 else version[1]
        pkg.id = package.get('ID')
        pkg.name = package.get('Name')
        if fill_version:
            pkg.version = version
            pkg.latest_version = version
        pkg.description = package.get('Description')
        pkg.package_base = package.get('PackageBase')
        pkg.popularity = package.get('Popularity')
        pkg.votes = package.get('NumVotes')
        pkg.maintainer = package.get('Maintainer')
        pkg.url_download = URL_PKG_DOWNLOAD.format(package['URLPath']) if package.get('URLPath') else None
        # AUR timestamps are Unix epochs; convert to naive local datetimes.
        pkg.first_submitted = datetime.fromtimestamp(package['FirstSubmitted']) if package.get('FirstSubmitted') else None
        pkg.last_modified = datetime.fromtimestamp(package['LastModified']) if package.get('LastModified') else None
        # Suffix-aware comparison is only applied to bauh's own packages.
        pkg.update = self.check_update(pkg.version, pkg.latest_version, check_suffix=pkg.name in BAUH_PACKAGES)

    @staticmethod
    def check_update(version: str, latest_version: str, check_suffix: bool = False) -> bool:
        """Return True when `latest_version` is newer than `version`.

        With check_suffix=True, release-stage suffixes (rc, beta, ...) are
        compared by stability rank before falling back to the token-by-token
        comparison.  NOTE(review): if either argument is empty/None the
        function implicitly returns None (falsy), not False.
        """
        if version and latest_version:
            if check_suffix:
                current_sfx = RE_LETTERS.findall(version)
                latest_sf = RE_LETTERS.findall(latest_version)
                if latest_sf and current_sfx:
                    current_sfx = current_sfx[0]
                    latest_sf = latest_sf[0]
                    current_sfx_data = V_SUFFIX_MAP.get(current_sfx.lower())
                    latest_sfx_data = V_SUFFIX_MAP.get(latest_sf.lower())
                    if current_sfx_data and latest_sfx_data:
                        # Version text before the suffix, e.g. '1.0.' of '1.0.RC-4'.
                        nversion = version.split(current_sfx)[0]
                        nlatest = latest_version.split(latest_sf)[0]
                        if nversion == nlatest:
                            if current_sfx_data['c'] != latest_sfx_data['c']:
                                # Same base version: the more stable stage
                                # (lower rank) counts as the newer release.
                                return latest_sfx_data['p'] < current_sfx_data['p']
                            else:
                                # Same stage: compare the remaining digits
                                # (e.g. the '-4' counter) lexicographically.
                                return ''.join(latest_version.split(latest_sf)) > ''.join(version.split(current_sfx))
                        return nlatest > nversion
            # Generic comparison: token-by-token, numeric where possible.
            latest_split = RE_VERSION_SPLIT.findall(latest_version)
            current_split = RE_VERSION_SPLIT.findall(version)
            for idx in range(len(latest_split)):
                if idx < len(current_split):
                    latest_part = latest_split[idx]
                    current_part = current_split[idx]
                    if latest_part != current_part:
                        try:
                            dif = int(latest_part) - int(current_part)
                            if dif > 0:
                                return True
                            elif dif < 0:
                                return False
                            else:
                                continue
                        except ValueError:
                            # Mixed token types: a numeric token beats a
                            # non-numeric one; otherwise compare as strings.
                            if latest_part.isdigit():
                                return True
                            elif current_part.isdigit():
                                return False
                            else:
                                return latest_part > current_part
            # All shared tokens equal (or latest is a prefix): no update.
            return False

    def fill_package_build(self, pkg: ArchPackage):
        """Download the PKGBUILD text for `pkg` and store it on the model."""
        res = self.http_client.get(pkg.get_pkg_build_url())
        if res and res.status_code == 200 and res.text:
            pkg.pkgbuild = res.text

    def map_api_data(self, apidata: dict, installed: dict, categories: dict) -> ArchPackage:
        """Build an ArchPackage from one AUR RPC dict.

        `installed` maps package name -> local install info; when present,
        the locally installed version/description are kept and the API data
        only fills the remaining fields (fill_version=not data).
        """
        data = installed.get(apidata.get('Name'))
        app = ArchPackage(name=apidata.get('Name'), installed=bool(data), mirror='aur')
        app.status = PackageStatus.LOADING_DATA
        if categories:
            app.categories = categories.get(app.name)
        if data:
            app.version = data.get('version')
            app.description = data.get('description')
        self.fill_api_data(app, apidata, fill_version=not data)
        return app
| [
"vinicius_fmoreira@hotmail.com"
] | vinicius_fmoreira@hotmail.com |
635c356beec5500a27dea54111b21deafa95ba2e | 24977a5bff7b2d8a13796c3dee703afb46bda3d5 | /IntroductionToPythonAndProgrammingBasic-Cisco-master/Model Driven Programmability - DevnNet20/Files Scripts Python/08_parse-json_sol.py | 546b7830842b356931f3285bc20badd263b8afa3 | [] | no_license | chunche95/ProgramacionModernaPython | 139699fd35923406b5cf92d65465738a5ae1d423 | 9da99388e73c29c021a0637be7ac74c6926f0ceb | refs/heads/master | 2023-05-09T18:37:27.845901 | 2021-02-28T11:39:53 | 2021-02-28T11:39:53 | 185,662,309 | 3 | 0 | null | 2023-05-02T18:56:10 | 2019-05-08T18:49:17 | Jupyter Notebook | UTF-8 | Python | false | false | 4,800 | py | #Replace "your_api_key" with your MapQuest API key
import urllib.parse
import requests
main_api = "https://www.mapquestapi.com/directions/v2/route?"
key = "your_api_key"
while True:
orig = input("Starting Location: ")
if orig == "quit" or orig == "q":
break
dest = input("Destination: ")
if dest == "quit" or dest == "q":
break
url = main_api + urllib.parse.urlencode({"key": key, "from":orig, "to":dest})
print("URL: " + (url))
json_data = requests.get(url).json()
json_status = json_data["info"]["statuscode"]
if json_status == 0:
print("API Status: " + str(json_status) + " = A successful route call.\n")
print("Directions from " + (orig) + " to " + (dest))
print("Trip Duration: " + (json_data["route"]["formattedTime"]))
print("Kilometers: " + str("{:.2f}".format((json_data["route"]["distance"])*1.61)))
print("Fuel Used (Ltr): " + str("{:.2f}".format((json_data["route"]["fuelUsed"])*3.78)))
print("=============================================")
for each in json_data["route"]["legs"][0]["maneuvers"]:
print((each["narrative"]) + " (" + str("{:.2f}".format((each["distance"])*1.61) + " km)"))
print("=============================================\n")
elif json_status == 402:
print("\n****************************************************************")
print("Status Code: " + str(json_status) + "; Invalid user inputs for one or both locations.")
print("****************************************************************\n")
else:
print("\n************************************************************************")
print("Status Code: " + str(json_status) + "; Refer to:")
print("https://developer.mapquest.com/documentation/directions-api/status-codes")
print("************************************************************************\n")
"""
Starting Location: Washington
Destination: Baltimore
URL: https://www.mapquestapi.com/directions/v2/route?key=your_api_key&from=Washington&to=Baltimore
API Status: 0 = A successful route call.
Directions from Washington to Baltimore
Trip Duration: 00:49:19
Kilometers: 61.32
Fuel Used (Ltr): 6.24
=============================================
Start out going north on 6th St/US-50 E/US-1 N toward Pennsylvania Ave/US-1 Alt N. (1.28 km)
Turn right onto New York Ave/US-50 E. Continue to follow US-50 E (Crossing into Maryland). (7.51 km)
Take the Balt-Wash Parkway exit on the left toward Baltimore. (0.88 km)
Merge onto MD-295 N. (50.38 km)
Turn right onto W Pratt St. (0.86 km)
Turn left onto S Calvert St/MD-2. (0.43 km)
Welcome to BALTIMORE, MD. (0.00 km)
=============================================
Starting Location: Moscow
Destination: Beijing
URL: https://www.mapquestapi.com/directions/v2/route?key=your_api_key&from=Moscow&to=Beijing
API Status: 0 = A successful route call.
Directions from Moscow to Beijing
Trip Duration: 84:31:10
Kilometers: 7826.83
Fuel Used (Ltr): 793.20
=============================================
Start out going west on Кремлёвская набережная/Kremlin Embankment. (0.37 km)
Turn slight right onto ramp. (0.15 km)
Turn slight right onto Боровицкая площадь. (0.23 km)
[output omitted]
Turn left onto 广场东侧路/E. Guangchang Rd. (0.82 km)
广场东侧路/E. Guangchang Rd becomes 东长安街/E. Chang'an Str. (0.19 km)
Welcome to BEIJING. (0.00 km)
=============================================
Starting Location: Washington
Destination: Beijing
URL: https://www.mapquestapi.com/directions/v2/route?key=your_api_key&from=WashingtonTurn+right+onto+%E5%89%8D%E9%97%A8%E8%A5%BF%E5%A4%A7%E8%A1%97%2FQianmen+West+Street.+%281.01+km%29&to=Beijing
****************************************************************
Status Code: 402; Invalid user inputs for one or both locations.
****************************************************************
Starting Location: Washington
Destination: Balt
URL: https://www.mapquestapi.com/directions/v2/route?key=your_api_key&from=Washington&to=Balt
************************************************************************
Status Code: 602; Refer to:
https://developer.mapquest.com/documentation/directions-api/status-codes
************************************************************************
Starting Location: Washington
Destination:
URL: https://www.mapquestapi.com/directions/v2/route?key=your_api_key&from=Washington&to=
************************************************************************
Staus Code: 611; Refer to:
https://developer.mapquest.com/documentation/directions-api/status-codes
************************************************************************
Starting Location: q
>>>
"""
| [
"paeste95.pb@gmail.com"
] | paeste95.pb@gmail.com |
f720b00c51af60267ee6a57f26e4c29c413ebfb1 | a2dce63dc04f484d1457073610343378656a1ffd | /p90.py | 7b330e887a1cd710fbcc921f7b4dbc7b9b9b01dc | [] | no_license | analaura09/pythongame | 5ece67047095160cdbc56ae3bb14920c787d8d02 | 54c83cf731a384fdb04bc4c3ed0bcf109b03d5ed | refs/heads/main | 2023-03-29T00:35:35.713616 | 2021-03-21T17:53:28 | 2021-03-21T17:53:28 | 348,432,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | aluno = dict()
aluno['nome']=input('nome:')
aluno['media']=float(input(f'media de {aluno["nome"]}:'))
if aluno['media'] >= 7:
aluno['situaçao']='aprovado'
elif 5 <= aluno['media'] < 7:
aluno['situaçao'] ='reprovado'
print('-='*30)
for k,v in aluno.items():
print(f' - {k} é igual a {v}')
| [
"pereira.laura@escolar.ifrn.edu.br"
] | pereira.laura@escolar.ifrn.edu.br |
b65beaa3bbdb3eae8c26ffa844c28fe459936b66 | b5b31c75ce9086872c4097db1130fac1a9b95b5b | /model.py | 5fcc16b3224ea65b7ff467e46a3da99672369c6a | [] | no_license | shmuel19-meet/LocalEat | 261558f9099ff51355fcd61568a2daa86b8a3730 | 642b7926944c8a519733068267e72eea20ff0ad3 | refs/heads/master | 2022-12-11T22:31:40.950583 | 2019-07-24T09:17:26 | 2019-07-24T09:17:26 | 174,370,824 | 0 | 3 | null | 2022-12-08T05:53:58 | 2019-03-07T15:28:23 | JavaScript | UTF-8 | Python | false | false | 2,659 | py | from sqlalchemy import Column, Integer, String, Boolean, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy import create_engine
Base = declarative_base()
class User(Base):
__tablename__ = "User"
id_table = Column(Integer, primary_key = True)
username = Column(String)
phone = Column(Integer)
address = Column(String)
password = Column(String)
cash = Column(Float)
def __repr__(self):
return ("username: {},\n"
"phone: {},\n"
"address: {},\n"
"password: {}, \n"
"cash: {}. \n"
).format(
self.username,
self.phone,
self.address,
self.password,
self.cash)
class Farm(Base):
__tablename__ = "Farm"
id_table = Column(Integer, primary_key = True)
Farm_name = Column(String)
bank_name = Column(String)
bank_account = Column(Integer)
phone = Column(Integer)
address = Column(String)
password = Column(String)
def __repr__(self):
return ("Farm_name: {},\n"
"bank_name: {},\n"
"bank_account: {},\n"
"phone: {},\n"
"address: {},\n"
"password: {}. \n"
).format(
self.Farm_name,
self.bank_name,
self.bank_account,
self.phone,
self.address,
self.password)
class Product(Base):
__tablename__ = "products"
id_table = Column(Integer, primary_key=True)
Type = Column(String)
Owner = Column(String)
cost = Column(Float)
buyer = Column(String)
def __repr__(self):
return ("id : {},\n"
"Type: {},\n"
"Owner: {},\n"
"cost: {},\n"
"buyer: {}.\n"
).format(self.id_table,
self.Type,
self.Owner,
self.cost,
self.buyer)
class Type(Base):
__tablename__ = "Types"
id_table = Column(Integer, primary_key=True)
Name = Column(String)
Img = Column(String)
Min_price = Column(Integer)
Max_price = Column(Integer)
def __repr__(self):
return ("id : {},\n"
"Name: {},\n"
"img link: {},\n"
"min_price: {}.\n"
"max_price: {}.\n"
).format(self.id_table,
self.Name,
self.Img,
self.Min_price,
self.Max_price) | [
"myname21@meet.mit.edu"
] | myname21@meet.mit.edu |
748457e1140e93665890f47c6df0eef0c8d9d539 | af98a6b70ed86a1c6c3c9cd3f07b0934c5ced093 | /src/toll/fixtures/bad/bad.py | 86ea1ac5b02411ae2f9597f2cd241fa08783a226 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-2-Clause"
] | permissive | icemac/toll | 071d97e9992c91ba46effc7f4a990cd2a6f7cd62 | edefe6fa5d1ee3b4cd4c6b57c19b1df84551c33b | refs/heads/master | 2021-01-19T13:08:34.405470 | 2020-09-01T05:34:22 | 2020-09-01T06:35:18 | 88,068,713 | 5 | 2 | BSD-2-Clause | 2020-09-01T06:35:20 | 2017-04-12T15:38:27 | Python | UTF-8 | Python | false | false | 271 | py | import unittest
class Test_Bad(unittest.TestCase):
"""It fails."""
def test_fine__1(self):
self.assertTrue(False)
def test_suite():
"""Create test suite for `python setup.py test`."""
return unittest.TestSuite([unittest.makeSuite(Test_Bad)])
| [
"mh@gocept.com"
] | mh@gocept.com |
c29fe1d1e2ed7f97ac7bce1578ca0ffb86c8868a | a68fcfabacf0e55f690a4416d9f84fd26f9ed18f | /bonus1/exercises/exercise3/exercise3.py | a2623fe1918770b83429a35e37397256ff1231a3 | [
"Apache-2.0"
] | permissive | twin-bridges/nornir_course | b10e12ded7dec43f50bdb4e76f434fb458c3574c | 4a10b472cf01dc94b811d1c06b9d53c84aa68fe9 | refs/heads/master | 2022-07-26T20:54:25.573390 | 2022-07-15T17:38:55 | 2022-07-15T17:38:55 | 185,249,635 | 72 | 25 | Apache-2.0 | 2022-07-15T17:38:56 | 2019-05-06T18:23:59 | Python | UTF-8 | Python | false | false | 432 | py | from nornir import InitNornir
from nornir_utils.plugins.functions import print_result
def netmiko_direct(task):
# Manually create Netmiko connection
net_connect = task.host.get_connection("netmiko", task.nornir.config)
return net_connect.find_prompt()
if __name__ == "__main__":
with InitNornir(config_file="config.yaml") as nr:
agg_result = nr.run(task=netmiko_direct)
print_result(agg_result)
| [
"ktbyers@twb-tech.com"
] | ktbyers@twb-tech.com |
12d1f4c42ce25953d4b200cbd33fb52f4c5abdfe | 0eb599c3bbfa6e5b31516913b88cc9db3a1311ce | /GCJ/GCJ2020_1B_b_interactive_runner.py | f2c57deb3504c247a7fc2e07a54e009f4512fc53 | [] | no_license | Linus-MK/AtCoder | 5b84dc88c2d2773d0f97ed18265d303290da7879 | a587e89a9e0c2ab4d36b09176bcc95e901e14326 | refs/heads/master | 2022-11-25T05:37:12.148722 | 2022-11-17T16:04:10 | 2022-11-17T16:04:10 | 169,840,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,814 | py | # This code can be run as python2 or python3 in most systems.
#
# This is a small program that runs two processes, connecting the stdin of each
# one to the stdout of the other.
# It doesn't perform a lot of checking, so many errors may
# be caught internally by Python (e.g., if your command line has incorrect
# syntax) or not caught at all (e.g., if the judge or solution hangs).
#
# Run this as:
# python interactive_runner.py <cmd_line_judge> -- <cmd_line_solution>
#
# For example, if you have a testing_tool.py in python3 (that takes a single
# integer as a command line parameter) to use as judge -- like one
# downloaded from a problem statement -- and you would run your solution
# in a standalone using one of the following:
# 1. python3 my_solution.py #####
# 2. ./my_solution
# 3. java Solution
# 4. my_solution.exe
# Then you could run the judge and solution together, using this, as:
# 1. python interactive_runner.py python3 testing_tool.py 0 -- python3 my_solution.py #####
# 2. python interactive_runner.py python3 testing_tool.py 0 -- ./my_solution
# 3. python interactive_runner.py python3 testing_tool.py 0 -- java solution
# 4. python interactive_runner.py python3 testing_tool.py 0 -- my_solution.exe
# Notice that the solution in cases 2, 3 and 4 would usually have a
# compilation step before running, which you should run in your usual way
# before using this tool.
#
# This is only intended as a convenient tool to help contestants test solutions
# locally. In particular, it is not identical to the implementation on our
# server, which is more complex.
#
# The standard streams are handled the following way:
# - judge's stdin is connected to the solution's stdout;
# - judge's stdout is connected to the solution's stdin;
# - stderrs of both judge and solution are piped to standard error stream, with
# lines prepended by "judge: " or "sol: " respectively (note, no
# synchronization is done so it's possible for the messages from both programs
# to overlap with each other).
from __future__ import print_function
import sys, subprocess, threading
class SubprocessThread(threading.Thread):
def __init__(self,
args,
stdin_pipe=subprocess.PIPE,
stdout_pipe=subprocess.PIPE,
stderr_prefix=None):
threading.Thread.__init__(self)
self.stderr_prefix = stderr_prefix
self.p = subprocess.Popen(
args, stdin=stdin_pipe, stdout=stdout_pipe, stderr=subprocess.PIPE)
def run(self):
try:
self.pipeToStdErr(self.p.stderr)
self.return_code = self.p.wait()
self.error_message = None
except (SystemError, OSError):
self.return_code = -1
self.error_message = "The process crashed or produced too much output."
# Reads bytes from the stream and writes them to sys.stderr prepending lines
# with self.stderr_prefix.
# We are not reading by lines to guard against the case when EOL is never
# found in the stream.
def pipeToStdErr(self, stream):
new_line = True
while True:
chunk = stream.readline(1024)
if not chunk:
return
chunk = chunk.decode("UTF-8")
if new_line and self.stderr_prefix:
chunk = self.stderr_prefix + chunk
new_line = False
sys.stderr.write(chunk)
if chunk.endswith("\n"):
new_line = True
sys.stderr.flush()
assert sys.argv.count("--") == 1, (
"There should be exactly one instance of '--' in the command line.")
sep_index = sys.argv.index("--")
judge_args = sys.argv[1:sep_index]
sol_args = sys.argv[sep_index + 1:]
t_sol = SubprocessThread(sol_args, stderr_prefix=" sol: ")
t_judge = SubprocessThread(
judge_args,
stdin_pipe=t_sol.p.stdout,
stdout_pipe=t_sol.p.stdin,
stderr_prefix="judge: ")
t_sol.start()
t_judge.start()
t_sol.join()
t_judge.join()
# Print an empty line to handle the case when stderr doesn't print EOL.
print()
print("Judge return code:", t_judge.return_code)
if t_judge.error_message:
print("Judge error message:", t_judge.error_message)
print("Solution return code:", t_sol.return_code)
if t_sol.error_message:
print("Solution error message:", t_sol.error_message)
if t_sol.return_code:
print("A solution finishing with exit code other than 0 (without exceeding "
"time or memory limits) would be interpreted as a Runtime Error "
"in the system.")
elif t_judge.return_code:
print("A solution finishing with exit code 0 (without exceeding time or "
"memory limits) and a judge finishing with exit code other than 0 "
"would be interpreted as a Wrong Answer in the system.")
else:
print("A solution and judge both finishing with exit code 0 (without "
"exceeding time or memory limits) would be interpreted as Correct "
"in the system.")
| [
"13600386+Linus-MK@users.noreply.github.com"
] | 13600386+Linus-MK@users.noreply.github.com |
5d0bfc9c267ed71d3e943012c68336e577a583cd | 671488a42bdb32ebc19ff38343a7699015fa9583 | /CLA/results/compare.py | b240204be91862ddaf07922bd8e63f8888b3b80c | [] | no_license | BML-MultiRobot/Multi-Box-Push | 86bec8b91485d169ec5181d17ecd4948912f4f93 | 8c453b83ddeabe7cb269d0526644c22d82a19bd0 | refs/heads/master | 2023-05-14T23:20:57.542293 | 2021-06-03T19:05:26 | 2021-06-03T19:05:26 | 298,180,234 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,074 | py | import sys, pickle
import matplotlib.pyplot as plt
import os
folders = ['off_policy_cf_ddac', 'off_policy_cf_ddac_2', 'off_policy_IQL']
folders = sorted(folders)
def get_moving_average(lst, resolution):
cumsum, moving_aves = [0], []
for i, x in enumerate(lst, 1):
cumsum.append(cumsum[i - 1] + x)
if i >= resolution:
moving_ave = (cumsum[i] - cumsum[i - resolution]) / resolution
# can do stuff with moving_ave here
moving_aves.append(moving_ave)
else:
# moving_aves.append(cumsum[i] / len(cumsum))
continue
return moving_aves
if __name__ == '__main__':
function = sys.argv[1]
file_name = sys.argv[2]
if len(sys.argv) > 3:
title = sys.argv[3]
if function.lower() == 'graph' or function.lower() == 'graph_ma':
if len(sys.argv) <= 3:
print('input valid title...structure: analyze.py [function] [path] [title]')
sys.exit(0)
for folder in folders:
path = os.path.join(folder, file_name)
with open(path, "rb") as input_file:
data = pickle.load(input_file)
if 'sigma' in folder:
val = folder.split('=')[1][1:]
if type(data) == list:
data = get_moving_average(data, 20) if function.lower()=='graph_ma' else data
plt.plot(range(len(data)), data, label=r'DDAC $\sigma = ' + val + '$')
elif type(data) == np.ndarray:
data = np.array(get_moving_average(data, 20)) if function.lower()=='graph_ma' else data
plt.plot(range(data.shape[0]), data, label=r'$DDAC \sigma = ' + val + '$')
else:
if type(data) == list:
data = get_moving_average(data, 20) if function.lower()=='graph_ma' else data
plt.plot(range(len(data)), data, label=folder)
elif type(data) == np.ndarray:
data = np.array(get_moving_average(data, 20)) if function.lower()=='graph_ma' else data
plt.plot(range(data.shape[0]), data, label=folder)
plt.title(title)
plt.legend()
plt.show()
else:
print('Input valid function: graph or graph_ma')
| [
"austinnguyen517@berkeley.edu"
] | austinnguyen517@berkeley.edu |
4c02231110503dfc680f80c5f0a8855268fb2848 | 1e50f1643376039ca988d909e79f528e01fa1371 | /leetcode/editor/cn/726.原子的数量.py | fb4edc58afe746d833ec1a01367967d1342ffb48 | [] | no_license | mahatmaWM/leetcode | 482a249e56e2121f4896e34c58d9fa44d6d0034b | 4f41dad6a38d3cac1c32bc1f157e20aa14eab9be | refs/heads/master | 2022-09-04T17:53:54.832210 | 2022-08-06T07:29:46 | 2022-08-06T07:29:46 | 224,415,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,645 | py | #
# @lc app=leetcode.cn id=726 lang=python3
#
# [726] 原子的数量
#
# https://leetcode-cn.com/problems/number-of-atoms/description/
#
# algorithms
# Hard (44.85%)
# Likes: 78
# Dislikes: 0
# Total Accepted: 3.5K
# Total Submissions: 7.7K
# Testcase Example: '"H2O"'
#
# 给定一个化学式formula(作为字符串),返回每种原子的数量。
#
# 原子总是以一个大写字母开始,接着跟随0个或任意个小写字母,表示原子的名字。
#
# 如果数量大于 1,原子后会跟着数字表示原子的数量。如果数量等于 1 则不会跟数字。例如,H2O 和 H2O2 是可行的,但 H1O2
# 这个表达是不可行的。
#
# 两个化学式连在一起是新的化学式。例如 H2O2He3Mg4 也是化学式。
#
# 一个括号中的化学式和数字(可选择性添加)也是化学式。例如 (H2O2) 和 (H2O2)3 是化学式。
#
# 给定一个化学式,输出所有原子的数量。格式为:第一个(按字典序)原子的名子,跟着它的数量(如果数量大于
# 1),然后是第二个原子的名字(按字典序),跟着它的数量(如果数量大于 1),以此类推。
#
# 示例 1:
#
#
# 输入:
# formula = "H2O"
# 输出: "H2O"
# 解释:
# 原子的数量是 {'H': 2, 'O': 1}。
#
#
# 示例 2:
#
#
# 输入:
# formula = "Mg(OH)2"
# 输出: "H2MgO2"
# 解释:
# 原子的数量是 {'H': 2, 'Mg': 1, 'O': 2}。
#
#
# 示例 3:
#
#
# 输入:
# formula = "K4(ON(SO3)2)2"
# 输出: "K4N2O14S4"
# 解释:
# 原子的数量是 {'K': 4, 'N': 2, 'O': 14, 'S': 4}。
#
#
# 注意:
#
#
# 所有原子的第一个字母为大写,剩余字母都是小写。
# formula的长度在[1, 1000]之间。
# formula只包含字母、数字和圆括号,并且题目中给定的是合法的化学式。
#
#
#
# @lc code=start
import collections
class Solution:
# 和1106题类似
def __init__(self) -> None:
self.l = 0
def countOfAtoms(self, formula: str) -> str:
ans = ''
items = self._countOfAtoms(formula).items()
for k, v in sorted(items):
ans += k
if v > 1: ans += str(v)
return ans
# 有括号的嵌套定义使用递归最合适,递归处理一对()* 括号以及括号后面的数字
def _countOfAtoms(self, formula):
cnt = collections.defaultdict(int)
while self.l < len(formula):
# 遇到(,开始一次递归,注意指针跳过( 、 )符号
if formula[self.l] == '(':
self.l += 1
tmp_cnt = self._countOfAtoms(formula)
self.l += 1
tmp = self._getCount(formula)
for k, v in tmp_cnt.items():
cnt[k] += v * tmp
# 遇到),说明本轮处理完,return
elif formula[self.l] == ')':
return cnt
# 否则正常的处理元素和次数
else:
name = self._getName(formula)
cnt[name] += self._getCount(formula)
return cnt
# 获取元素名称
def _getName(self, str):
name = ''
while self.l < len(str) and str[self.l].isalpha() and (name == '' or str[self.l].islower()):
name += str[self.l]
self.l += 1
return name
# 获取元素次数
def _getCount(self, str):
cnt = ''
while self.l < len(str) and str[self.l].isdigit():
cnt += str[self.l]
self.l += 1
return 1 if cnt == '' else int(cnt)
# @lc code=end
if __name__ == "__main__":
print(Solution().countOfAtoms(formula='K4(ON(SO3)2)2'))
| [
"chrismwang@tencent.com"
] | chrismwang@tencent.com |
d33dd7b63868126ce7bf4913b02c330c9ff78284 | dde33c083b2923fa4adabcc696cf0ed21a4837ef | /cybercom_queue/util.py | b81bc08bd769dd9fd8764fc4ec0a18ae9353e8de | [
"BSD-3-Clause"
] | permissive | oulib-datacatalog/cybercommons | 28cee16f2dff36c016789b148132fdc27db7ab49 | 2665659228259508123e31fd71fb68f70aa6c048 | refs/heads/main | 2023-06-24T20:01:58.757200 | 2022-09-21T19:39:23 | 2022-09-21T19:39:23 | 251,441,512 | 0 | 10 | BSD-3-Clause | 2023-06-15T19:58:59 | 2020-03-30T22:20:30 | Python | UTF-8 | Python | false | false | 902 | py | __author__ = 'mstacy'
import sys
def trim(docstring):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
| [
"mbstacy@gmail.com"
] | mbstacy@gmail.com |
1694951498da2ad6b95eeae715bc400b8770b999 | bc441bb06b8948288f110af63feda4e798f30225 | /easy_work_service_sdk/model/monitor/alert_range_pb2.pyi | 7c70da75b437636d222d16eee32fe5638966a771 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,270 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from easy_work_service_sdk.model.monitor.alert_event_pb2 import (
AlertEvent as easy_work_service_sdk___model___monitor___alert_event_pb2___AlertEvent,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class AlertRange(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
org = ... # type: builtin___int
key = ... # type: typing___Text
alert_begin_time = ... # type: builtin___int
@property
def first_alert(self) -> easy_work_service_sdk___model___monitor___alert_event_pb2___AlertEvent: ...
def __init__(self,
*,
org : typing___Optional[builtin___int] = None,
key : typing___Optional[typing___Text] = None,
first_alert : typing___Optional[easy_work_service_sdk___model___monitor___alert_event_pb2___AlertEvent] = None,
alert_begin_time : typing___Optional[builtin___int] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> AlertRange: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> AlertRange: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"first_alert",b"first_alert"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"alert_begin_time",b"alert_begin_time",u"first_alert",b"first_alert",u"key",b"key",u"org",b"org"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
b5350998e793f72686b2d5ea68ffbafa347ac10c | 2251e611c24426c1f787a2c58bb1bbf2a5018cf9 | /chcemvediet/apps/inforequests/forms/clarification_response.py | 848580f591bf2a1f03bbb54106019b390594e180 | [] | no_license | gitter-badger/chcemvediet | 68ff6fb1bf9d3c1b4d2c67b7118c42944e176a70 | 661082369495a97d0f8bdf8e5a775f4a06277799 | refs/heads/master | 2020-12-26T18:41:55.615024 | 2015-12-11T01:29:38 | 2015-12-11T01:29:38 | 48,106,138 | 0 | 0 | null | 2015-12-16T11:20:57 | 2015-12-16T11:20:56 | null | UTF-8 | Python | false | false | 4,663 | py | # vim: expandtab
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.sessions.models import Session
from poleno.attachments.forms import AttachmentsField
from poleno.utils.models import after_saved
from poleno.utils.urls import reverse
from poleno.utils.date import local_today
from poleno.utils.forms import CompositeTextField
from poleno.utils.template import render_to_string
from poleno.utils.misc import squeeze
from chcemvediet.apps.wizards.wizard import Step, Wizard
from chcemvediet.apps.inforequests.models import Action
class Main(Step):
template = u'inforequests/clarification_response/main.html'
text_template = u'inforequests/clarification_response/texts/main.html'
form_template = u'main/forms/form_horizontal.html'
global_fields = [u'attachments']
def add_fields(self):
super(Main, self).add_fields()
self.fields[u'content'] = CompositeTextField(
label=_(u'inforequests:clarification_response:Main:content:label'),
template=u'inforequests/clarification_response/forms/content.txt',
context=self.context(),
fields=[
forms.CharField(widget=forms.Textarea(attrs={
u'placeholder':
_(u'inforequests:clarification_response:Main:content:placeholder'),
u'class': u'pln-autosize',
u'cols': u'', u'rows': u'',
})),
],
composite_attrs={
},
)
self.fields[u'attachments'] = AttachmentsField(
label=_(u'inforequests:clarification_response:Main:attachments:label'),
required=False,
attached_to=(
self.wizard.draft,
Session.objects.get(session_key=self.wizard.request.session.session_key),
),
upload_url_func=(
lambda: reverse(u'inforequests:upload_attachment')),
download_url_func=(
lambda a: reverse(u'inforequests:download_attachment', args=[a.pk])),
)
def clean(self):
cleaned_data = super(Main, self).clean()
if self.wizard.branch.inforequest.has_undecided_emails:
msg = _(u'inforequests:clarification_response:Main:error:undecided_emails')
self.add_error(None, msg)
return cleaned_data
def commit(self):
super(Main, self).commit()
@after_saved(self.wizard.draft)
def deferred(draft):
for attachment in self.cleaned_data.get(u'attachments', []):
attachment.generic_object = draft
attachment.save()
def post_transition(self):
res = super(Main, self).post_transition()
if self.is_valid():
res.globals.update({
u'subject': squeeze(render_to_string(
u'inforequests/clarification_response/forms/subject.txt')),
u'content': self.fields[u'content'].finalize(self.cleaned_data[u'content']),
})
return res
class ClarificationResponseWizard(Wizard):
first_step_class = Main
def __init__(self, request, index, branch):
self.inforequest = branch.inforequest
self.branch = branch
self.last_action = branch.last_action
super(ClarificationResponseWizard, self).__init__(request, index)
def get_instance_id(self):
return u'{}-{}'.format(self.__class__.__name__, self.last_action.pk)
def get_step_url(self, step, anchor=u''):
return reverse(u'inforequests:clarification_response',
kwargs=dict(branch=self.branch, step=step)) + anchor
def context(self, extra=None):
res = super(ClarificationResponseWizard, self).context(extra)
res.update({
u'inforequest': self.inforequest,
u'branch': self.branch,
u'last_action': self.last_action,
})
return res
def finish(self):
today = local_today()
action = Action.create(
branch=self.branch,
type=Action.TYPES.CLARIFICATION_RESPONSE,
subject=self.values[u'subject'],
content=self.values[u'content'],
sent_date=today,
legal_date=today,
attachments=self.values[u'attachments'],
)
action.save()
action.send_by_email()
return action.get_absolute_url()
| [
"martin@github.ksp.sk"
] | martin@github.ksp.sk |
f7cf470b5bc308f5ac1729277bacc3471d63f065 | be2a81f03e8a2dac7d356dde7a3ffdcfe3f77e00 | /providers/com/figshare/v2/migrations/0002_favicon.py | b74fa5df61b4fd11cad5cf7c9ff7a40c2fb842f4 | [
"Apache-2.0"
] | permissive | Stevenholloway/SHARE | 4193bbd3ca50765a24bf21c0cc14438175fbb678 | b9759106d12c2ff548bad22c4be8650e9f41e61e | refs/heads/develop | 2021-01-21T19:13:35.205983 | 2017-02-23T14:45:46 | 2017-02-23T14:45:46 | 63,431,390 | 0 | 0 | null | 2016-07-15T15:17:45 | 2016-07-15T15:17:44 | null | UTF-8 | Python | false | false | 459 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-02-01 21:23
from __future__ import unicode_literals
from django.db import migrations
import share.robot
class Migration(migrations.Migration):
dependencies = [
('com.figshare.v2', '0001_initial'),
('share', '0018_store_favicons'),
]
operations = [
migrations.RunPython(
code=share.robot.RobotFaviconMigration('com.figshare.v2'),
),
]
| [
"chriskseto@gmail.com"
] | chriskseto@gmail.com |
6ce4412de399cdb5a234c8a313f398f90e682b5c | a39f7413dcd87bb26319fe032d59cf12d7c69d54 | /dataset/UnbalancedDataset_single_gpu.py | 7abbf65698b94ac3432e0ecf5727032546a27bea | [] | no_license | liangyuandg/cross_modality_ibsr | 8ad937b5475bd5e6b00ad50351706304a962f975 | bb5cefd890f5fa0e15eae6e54d9559f5e8eb94ed | refs/heads/master | 2023-06-24T02:58:25.318170 | 2021-07-27T08:29:27 | 2021-07-27T08:29:27 | 389,904,637 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,105 | py | from torch.utils.data import Dataset
import torchvision.transforms as transforms
import numpy as np
from PIL import Image
import torch
import string
import random
HE_STAT = [[177.24695274/255.0, 124.34710506/255.0, 161.72433061/255.0], [47.7463798/255.0, 55.49126494/255.0, 44.1525292/255.0]]
PH_STAT = [[194.24576912/255.0, 186.99282001/255.0, 200.96679032/255.0], [37.61597558/255.0, 40.52679066/255.0, 38.00598526/255.0]]
class UnbalancedDataset(Dataset):
"""dental dataset for detection.
"""
def __init__(
self,
data_path,
label_path,
repeat_augmentations,
test_mode=False,
):
"""Dataset class.
Args:
data_path.
label_path.
test_mode (bool): test mode.
Returns:
"""
self.test_mode = test_mode
self.repeat_augmentations = repeat_augmentations
self.he_data = np.load(data_path)[0]
self.ph_data = np.load(data_path)[1]
print(len(self.he_data))
if label_path is not None:
self.labels = np.load(label_path)
self.train_he_transform = self.get_train_he_transforms()
self.train_ph_transform = self.get_train_ph_transforms()
self.test_he_transform = self.get_test_he_transforms()
self.test_ph_transform = self.get_test_ph_transforms()
def __len__(self):
return len(self.he_data)
def _rand_another(self):
return np.random.choice(list(range(0, len(self))))
def __getitem__(self, idx):
if self.test_mode:
return self.prepare_test_img(idx)
else:
return self.prepare_train_img(idx)
def get_train_he_transforms(self):
normalize = transforms.Normalize(mean=[177.24695274/255.0, 124.34710506/255.0, 161.72433061/255.0], std=[47.7463798/255.0, 55.49126494/255.0, 44.1525292/255.0])
side = 96
padding = 4
cutout = 0.0625
color_jitter = transforms.ColorJitter(brightness=0.2, contrast=0.4, saturation=0.4, hue=0.1)
rnd_color_jitter = transforms.RandomApply([color_jitter], p=0.8)
# rnd_gray = transforms.RandomGrayscale(p=0.2)
rnd_resizedcrop = transforms.RandomResizedCrop(size=side, scale=(0.8, 1.2), ratio=(0.75, 1.3333333333333333), interpolation=2)
rnd_hflip = transforms.RandomHorizontalFlip(p=0.5)
rnd_vflip = transforms.RandomVerticalFlip(p=0.5)
# rnd_rot = transforms.RandomRotation(10., resample=2)
rnd_trans = transforms.RandomAffine(degrees=10, translate=(0.1, 0.1), scale=None, shear=None, resample=2, fillcolor=0)
# train_transform = transforms.Compose(
# [rnd_resizedcrop, rnd_hflip, rnd_vflip, rnd_rot, rnd_color_jitter, transforms.ToTensor(), normalize]
# )
# train_transform = transforms.Compose(
# [rnd_resizedcrop, rnd_hflip, rnd_vflip, rnd_rot, rnd_color_jitter, transforms.ToTensor()]
# )
train_transform = transforms.Compose(
[rnd_resizedcrop, rnd_hflip, rnd_vflip, rnd_color_jitter, rnd_trans, transforms.ToTensor(), normalize]
)
# train_transform = transforms.Compose(
# [rnd_resizedcrop, rnd_hflip, rnd_vflip, rnd_color_jitter, rnd_trans, transforms.ToTensor()]
# )
return train_transform
def get_train_ph_transforms(self):
normalize = transforms.Normalize(mean=[194.24576912/255.0, 186.99282001/255.0, 200.96679032/255.0], std=[37.61597558/255.0, 40.52679066/255.0, 38.00598526/255.0])
side = 96
# padding = 4
# cutout = 0.0625
color_jitter = transforms.ColorJitter(brightness=0.2, contrast=0.4, saturation=0.4, hue=0.1)
rnd_color_jitter = transforms.RandomApply([color_jitter], p=0.8)
# rnd_gray = transforms.RandomGrayscale(p=0.2)
rnd_resizedcrop = transforms.RandomResizedCrop(size=side, scale=(0.8, 1.2), ratio=(0.75, 1.3333333333333333), interpolation=2)
rnd_hflip = transforms.RandomHorizontalFlip(p=0.5)
rnd_vflip = transforms.RandomVerticalFlip(p=0.5)
# rnd_rot = transforms.RandomRotation(10., resample=2)
rnd_trans = transforms.RandomAffine(degrees=10, translate=(0.1, 0.1), scale=None, shear=None, resample=2, fillcolor=0)
# train_transform = transforms.Compose(
# [rnd_resizedcrop, rnd_hflip, rnd_vflip, rnd_rot, rnd_color_jitter, transforms.ToTensor(), normalize]
# )
# train_transform = transforms.Compose(
# [rnd_resizedcrop, rnd_hflip, rnd_vflip, rnd_rot, rnd_color_jitter, transforms.ToTensor()]
# )
train_transform = transforms.Compose(
[rnd_resizedcrop, rnd_hflip, rnd_vflip, rnd_color_jitter, rnd_trans, transforms.ToTensor(), normalize]
)
# train_transform = transforms.Compose(
# [rnd_resizedcrop, rnd_hflip, rnd_vflip, rnd_color_jitter, rnd_trans, transforms.ToTensor()]
# )
return train_transform
def get_test_he_transforms(self):
normalize = transforms.Normalize(mean=[177.24695274/255.0, 124.34710506/255.0, 161.72433061/255.0], std=[47.7463798/255.0, 55.49126494/255.0, 44.1525292/255.0])
# normalize = transforms.Normalize(mean=[177.24695274, 124.34710506, 161.72433061], std=[47.7463798, 55.49126494, 44.1525292])
test_transform = transforms.Compose([transforms.ToTensor(), normalize])
# test_transform = transforms.Compose([transforms.ToTensor()])
return test_transform
def get_test_ph_transforms(self):
normalize = transforms.Normalize(mean=[194.24576912/255.0, 186.99282001/255.0, 200.96679032/255.0], std=[37.61597558/255.0, 40.52679066/255.0, 38.00598526/255.0])
# normalize = transforms.Normalize(mean=[194.24576912, 186.99282001, 200.96679032], std=[37.61597558, 40.52679066, 38.00598526])
test_transform = transforms.Compose([transforms.ToTensor(), normalize])
# test_transform = transforms.Compose([transforms.ToTensor()])
return test_transform
def prepare_train_img(self, idx):
he_image, ph_image, target = self.he_data[idx], self.ph_data[idx], int(self.labels[idx])
# original image
# temp_data_1 = np.copy(he_image)
# temp_data_2 = np.copy(ph_image)
# letters = string.ascii_lowercase
# name = ''.join(random.choice(letters) for i in range(10))
# work_dir = '/yuanProject/XPath/relationnet_ds96_512_two_task_adam/original/'
# Image.fromarray(temp_data_1).save(work_dir + '{}_1.jpg'.format(name))
# Image.fromarray(temp_data_2).save(work_dir + '{}_2.jpg'.format(name))
he_pic = Image.fromarray(he_image.astype(np.uint8))
ph_pic = Image.fromarray(ph_image.astype(np.uint8))
he_image_list = list()
ph_image_list = list()
count = 0
for _ in range(self.repeat_augmentations):
# tempx = np.transpose(self.train_transform(ph_pic.copy()).cpu().detach().numpy(), (1, 2, 0))
# tempx = (tempx / np.amax(tempx) * 255).astype(np.uint8)
# # original image
# work_dir = '/home/yuan/self-supervised-relational-reasoning/temp/'
# Image.fromarray(tempx).save(work_dir + '{}.jpg'.format(name+'{}'.format(i)))
# import torch
# print(torch.std(self.train_he_transform(he_pic.copy())), torch.mean(self.train_he_transform(he_pic.copy())), torch.std(self.train_ph_transform(ph_pic.copy())), torch.mean(self.train_ph_transform(ph_pic.copy())))
# temp_data_1 = self.train_he_transform(he_pic.copy())
# temp_data_2 = self.train_ph_transform(ph_pic.copy())
# letters = string.ascii_lowercase
# name = ''.join(random.choice(letters) for i in range(10))
# work_dir = '/yuanProject/XPath/relationnet_ds96_512_two_task_adam/augmented/'
# Image.fromarray(((temp_data_1.detach().cpu().numpy() * np.asarray(HE_STAT[1])[:, None, None] + np.asarray(HE_STAT[0])[:, None, None]) * 255.0).astype(np.uint8).transpose((1, 2, 0))).save(work_dir + '{}_{}_1.jpg'.format(name, count))
# Image.fromarray(((temp_data_2.detach().cpu().numpy() * np.asarray(PH_STAT[1])[:, None, None] + np.asarray(PH_STAT[0])[:, None, None]) * 255.0).astype(np.uint8).transpose((1, 2, 0))).save(work_dir + '{}_{}_2.jpg'.format(name, count))
# he_image_list.append(temp_data_1)
# ph_image_list.append(temp_data_2)
# count = count + 1
he_image_list.append(self.train_he_transform(he_pic.copy()))
ph_image_list.append(self.train_ph_transform(ph_pic.copy()))
return he_image, ph_image, he_image_list, ph_image_list, target
def prepare_test_img(self, idx):
he_image, ph_image = self.he_data[idx], self.ph_data[idx]
he_pic = Image.fromarray(he_image.astype(np.uint8))
ph_pic = Image.fromarray(ph_image.astype(np.uint8))
he_image = self.test_he_transform(he_pic.copy())
ph_image = self.test_ph_transform(ph_pic.copy())
return he_image, ph_image
| [
"liangyuandg@g.ucla.edu"
] | liangyuandg@g.ucla.edu |
0d47b387ecdcd63cf06997e6b94745e34488e70f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /WBsxNXnqeFr4es9oh_11.py | 05d4b8426d27ac7a24fed8b4bc62b4249477a61b | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,959 | py | """
In **Clockwise Cipher** , encoding is done by placing message characters in
the corner cells of a square and moving in a clockwise direction.
Create a function that takes an argument `message`, and returns the **encoded
message**.
There are some variations on the rules of encipherment. One version of the
cipher rules are outlined below:
message = "Mubashir Hassan"
clockwise_cipher(message) ➞ "Ms ussahr nHaaib"
**Step 1:** Form a square large enough to fit all the message characters.
Given message can fit in a 4 x 4 square.
**Step 2:** Starting with the top-left corner, place message characters in
the corner cells moving in a clockwise direction. After the first cycle is
complete, continue placing characters in the cells following the last one in
its respective row/column. When the outer cells are filled, continue for the
remaining inner squares:
M| s| | u
---|---|---|---
s| s| a| h
r| | n| H
a| a| i| b
**Step 3:** Return encoded message **Rows-wise** :
eMessage = "Ms ussahr nHaaib"
### Example for a 5 x 5 Square
[ 1 5 9 13 2]
[16 17 21 18 6]
[12 24 25 22 10]
[ 8 20 23 19 14]
[ 4 15 11 7 3]
### Examples
clockwise_cipher("Mubashir Hassan") ➞ "Ms ussahr nHaaib"
clockwise_cipher("Matt MacPherson") ➞ "M ParsoMc nhteat"
clockwise_cipher("Edabit is amazing") ➞ "Eisadng tm i zbia a"
### Notes
* Fill up any unused cells with a space character.
* Message can contain spaces and special characters.
"""
from math import ceil
def clockwise_cipher(message):
n = ceil(len(message)**0.5)
getcoords = lambda n: [[],[[0,0]]][n] if n<2 else \
sum(([[0,i],[i,n-1],[n-1,n-1-i],[n-1-i,0]] for i in range(n-1)),[])+[
[j+1 for j in i] for i in getcoords(n-2)]
coords,grid = getcoords(n),[[" "]*n for _ in range(n)]
for x in range(len(message)):
a,b = coords[x]; grid[a][b] = message[x]
return ''.join(sum(grid,[]))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
76818ccb8ef34285c0545906b6f4749009848e8a | 9b05a0000eaad6ce6edb9fd0cbbfb469d8925e5f | /flybywire_tk/core.py | 8b08f865122228879b824d62fcc6e26ecfd3e45f | [
"MIT"
] | permissive | thomasantony/flybywire-tk | e667f69d1203efb55fe6d9a1df35387280b4dd97 | 9bc1cd04ff7ef3b555e5a7cbaa8f592ea096d2e9 | refs/heads/master | 2020-05-25T11:32:12.686359 | 2017-03-04T07:29:50 | 2017-03-04T07:29:50 | 83,617,806 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,926 | py | """Core module"""
import abc
import asyncio as aio
import collections
import copy
import inspect
import tkinter
from tkinter import N,NE,E,SE,S,SW,W,NW
import dictdiffer
from dictdiffer import diff, patch
from dictdiffer.utils import dot_lookup
from misc import set_interval, clear_interval, AutoScrollbar
import ui
def T(name, content=None, **props):
    """Build a lightweight component-description dict.

    ``name`` is a widget name or a callable component, ``content`` becomes the
    node's ``text`` payload, and remaining keyword arguments are its props.
    """
    return {"_name": name, "text": content, "_props": props}
def parse_component_tree(root_node):
    """
    Traverses and builds the component tree.

    Callable ``_name`` entries are invoked (with the node's text/props) to
    expand user components into widget-description dicts; string names are
    validated against ``ui.available_widgets``.  Child nodes under ``text``
    are parsed recursively.

    Parameters
    ----------
    root_node : dict
        Root component description generated using the T() function

    Returns
    -------
    dict
        A fully-expanded node dict (parsed children shadow the raw ones via
        ChainMap).
    """
    output = {}
    comp_name = root_node['_name']
    comp_props = root_node['_props']
    if callable(comp_name):
        # User component: call it to obtain a widget-description dict.
        if root_node['text'] is not None:
            comp_node = comp_name(text=root_node['text'], **comp_props)
        else:
            comp_node = comp_name(**comp_props)
        try:
            comp_node.on_mount()
        # NOTE(review): bare except silently swallows all errors here; since
        # comp_node is a plain dict for function components, on_mount() always
        # raises AttributeError — confirm whether this hook is ever reachable.
        except:
            pass
    elif comp_name in ui.available_widgets:
        comp_node = root_node
    else:
        raise ValueError('Widget not found : %s' % comp_name)
    subnodes = comp_node['text']
    # Recurse into iterable (non-string) children.
    # NOTE(review): collections.Iterable was removed in Python 3.10; this
    # should be collections.abc.Iterable on modern interpreters.
    if not isinstance(subnodes, str) and isinstance(subnodes, collections.Iterable):
        output['text'] = [parse_component_tree(n) for n in subnodes]
    return dict(collections.ChainMap(output, comp_node))
class FBWApplication(object):
    """The main Application object.

    Wraps a tkinter root window with a scrollable canvas/frame, mounts a
    single root Component, and re-renders the component tree (diffing against
    the previous tree) whenever the component invalidates itself.
    """
    def __init__(self, title="flybywire-tk application", **kw):
        # Extra keyword args are applied to the root window in _build_app().
        self._root = tkinter.Tk()
        self._root.title(title)
        self.kw = kw
        self._root_comp = None
        self._comp_tree = None
        self._old_tree = None
    def invalidate(self):
        # Mark the UI dirty; main_loop() re-renders on the next tick.
        self._dirty = True
    def mount(self, component, **props):
        """Mounts the given component in the application."""
        self._root_comp = component
        self._root_comp.add_observer(self.invalidate)
        self._build_app()
    def _build_app(self):
        """Build the tkinter window with default settings."""
        # create scroll bar
        self.vscrollbar = AutoScrollbar(self._root)
        self.vscrollbar.grid(row=0, column=1, sticky=N+S)
        # create canvas
        self.canvas = tkinter.Canvas(self._root,yscrollcommand=self.vscrollbar.set, bd=5)
        self.canvas.grid(row=0, column=0, sticky=N+S+E+W)
        # configure scroll bar for canvas
        self.vscrollbar.config(command=self.canvas.yview)
        # make the canvas expandable
        self._root.grid_rowconfigure(0, weight=1)
        self._root.grid_columnconfigure(0, weight=1)
        # create frame in canvas
        self._frame = tkinter.Frame(self.canvas)
        self._frame.columnconfigure(0, weight=1)
        self._frame.columnconfigure(1, weight=1)
        self.render()
        # After component creation
        # puts tkinter widget onto canvas
        self.canvas.create_window(0, 0, anchor=NW, window=self._frame, width = int(self.canvas.config()['width'][4])-int(self.vscrollbar.config()['width'][4]))
        # deal with canvas being resized
        def resize_canvas(event):
            self.canvas.create_window(0, 0, anchor=NW, window=self._frame, width = int(event.width)-int(self.vscrollbar.config()['width'][4]))
        self.canvas.bind("<Configure>", resize_canvas)
        # updates geometry management
        self._frame.update_idletasks()
        # set canvas scroll region to all of the canvas
        self.canvas.config(scrollregion=self.canvas.bbox("all"))
        # set minimum window width
        self._root.update()
        self._root.minsize(self._root.winfo_width(), 0)
        self._root.config(**self.kw)
        self.is_running = True
        self._frame.update()
    def render_component(self, node, parent=None):
        # Instantiate the tkinter widget for `node` (and, recursively, its
        # children), caching the widget and its update callback on the node.
        node_cls = ui.available_widgets[node['_name']]
        if parent is None:
            parent = self._frame
        # Create component
        comp_obj, update_fn = node_cls(parent, node['text'], node['_props'])
        comp_obj.pack(side='top')
        # NOTE(review): collections.Iterable was removed in Python 3.10;
        # should be collections.abc.Iterable.
        if not isinstance(node['text'], str) and isinstance(node['text'], collections.Iterable):
            for sub_node in node['text']:
                self.render_component(sub_node, comp_obj)
        # Store component and update fn
        node['_comp_obj'] = comp_obj
        node['_comp_update'] = update_fn
    def render(self):
        # Rebuild the component tree; on first render instantiate widgets,
        # afterwards apply only 'change' diffs via each node's update fn.
        # TODO: Fix it so only changed components are updated
        # for widget in self._frame.winfo_children():
        #     widget.destroy()
        # Recursively build component tree dict
        root_tree = parse_component_tree(self._root_comp())
        if self._old_tree is not None:
            delta = diff(self._old_tree, root_tree, ignore=('_comp_obj','_comp_update'))
            for diff_type, index, data in delta:
                if isinstance(index, str) or len(index) == 1:
                    # Top-level change
                    old_node = self._old_tree
                else:
                    # Sub-node change
                    old_node = dot_lookup(self._old_tree, index[:-1])
                # Update node props
                if diff_type == 'change':
                    update_fn = old_node['_comp_update']
                    new_node = dot_lookup(root_tree, index[:-1])
                    new_node['_comp_update'] = update_fn
                    new_node['_comp_obj'] = old_node['_comp_obj']
                    if update_fn is not None:
                        update_fn(text=new_node['text'], **new_node['_props'])
                    # # Copy component info
                # Update new node in root tree (if new one created)
                # patch([('change', index[:-1], (old_node2, new_node))], root_tree)
        else:
            # NOTE(review): render_list is assigned but never used.
            render_list = [(root_tree, None)]
            self.render_component(root_tree)
        # TODO: Diff tree against existing tree
        # Instantiate or modify components as needed (pack(), update() or destroy())
        self._dirty = False
        self._old_tree = root_tree
    # NOTE(review): @aio.coroutine is deprecated (removed in Python 3.11);
    # the `loop` default is evaluated once at import time and never used.
    @aio.coroutine
    def main_loop(self, loop=aio.get_event_loop()):
        """Run the tkinter event loop asynchronously."""
        self._root_comp.on_mount()
        while self.is_running:
            # start mainloop
            if self._dirty:
                self.render()
            self._root.update_idletasks()
            self._root.update()
            try:
                yield from aio.sleep(.1)
            except aio.CancelledError:
                break
    def start(self):
        """Start the application."""
        loop = aio.get_event_loop()
        def on_closing():
            # Window-close handler: stop main_loop() on the next tick.
            self.is_running = False
        self._root.protocol("WM_DELETE_WINDOW", on_closing)
        try:
            loop.run_until_complete(self.main_loop())
        except KeyboardInterrupt:
            pass
        finally:
            loop.close()
class Component(abc.ABC):
    """Abstract base class defining a UI component.

    Fix: the original used Python-2-style ``__metaclass__ = abc.ABCMeta``,
    which has no effect in Python 3, so ``@abc.abstractmethod`` was never
    enforced.  Subclassing :class:`abc.ABC` restores the intended contract.

    Subclasses implement ``__call__`` to return a component-description dict
    (as built by ``T()``) for their current state.
    """

    def __init__(self):
        # Zero-argument callbacks invoked after every update().
        self.observers = []

    @abc.abstractmethod
    def __call__(self, props):
        """Render the component; subclasses must implement this.

        NOTE(review): concrete subclasses in this file implement
        ``__call__(self)`` without ``props`` — confirm the intended signature.
        """
        raise NotImplementedError()

    def __str__(self):
        return self.__class__.__name__.upper()

    def on_mount(self):
        """Hook called when the component is mounted; default no-op."""
        pass

    def on_unmount(self):
        """Hook called when the component is removed; default no-op."""
        pass

    def add_observer(self, obs):
        """Register a zero-argument callback fired after each update()."""
        self.observers.append(obs)

    def update(self, **new_state):
        """Set attributes from keyword arguments, then notify all observers."""
        for k, v in new_state.items():
            setattr(self, k, v)
        for obs in self.observers:
            obs()
class TimerApp(Component):
    """Component that renders an elapsed-seconds counter, ticking once per second."""
    def __init__(self):
        """Initialize the application."""
        super().__init__()
        self.secondsElapsed = 0
        # Handle returned by set_interval(); needed by clear_interval().
        self.task = None
    def __call__(self):
        """Renders view given application state."""
        # NOTE(review): `count` is assigned but unused.
        count = self.secondsElapsed
        return T(TimerView, count=self.secondsElapsed)
    def tick(self):
        """Increments counter."""
        self.update(secondsElapsed = self.secondsElapsed + 1)
    def on_mount(self):
        """
        Triggers when the component is mounted
        """
        self.task = set_interval(self.tick, 1)
    def on_unmount(self):
        """
        Triggers when the component is removed
        """
        clear_interval(self.task)
class CounterApp(Component):
    """A minimal counter component with increment/decrement buttons."""

    def __init__(self):
        """Start the counter at zero."""
        super().__init__()
        self.count = 0

    def __call__(self):
        """Render the counter view for the current state."""
        children = [
            T('Label', str(self.count)),
            T('Button', '+', command=self.increment),
            T('Button', '-', command=self.decrement),
        ]
        return T('Frame', children)

    def increment(self):
        """Add one to the counter and notify observers."""
        self.update(count=self.count + 1)

    def decrement(self):
        """Subtract one from the counter and notify observers."""
        self.update(count=self.count - 1)

    def on_mount(self):
        """On mount, bump the counter once (triggers an initial re-render)."""
        self.increment()
def TimerView(count=0):
    """Stateless view rendering the elapsed-seconds label for *count*."""
    return T('Label', f'Seconds Elapsed: {count}')
if __name__ == '__main__':
    # Smoke-test component parsing, then launch the demo counter app.
    # Tests for component parsing
    comps = T('Frame', [T(TimerView, count=1337), T(TimerView, count=6969)], align='center')
    out = parse_component_tree(comps)
    # print(out)
    # assert out == {'_name': 'Frame',
    # '_props': {'align': 'center'},
    # 'text': [{'_name': 'Label',
    # '_props': {},
    # 'text': 'Seconds Elapsed: 1337'},
    # {'_name': 'Label',
    # '_props': {},
    # 'text': 'Seconds Elapsed: 6969'}]}
    #
    # comps = T(TimerView, count=1337)
    # assert parse_component_tree(comps) == {'_name': 'Label', 'text': 'Seconds Elapsed: 1337', '_props': {}}
    fbw = FBWApplication()
    # fbw.mount(TimerApp())
    fbw.mount(CounterApp())
    fbw.start()
| [
"tantony@purdue.edu"
] | tantony@purdue.edu |
a974511c8d4f4eea341e3475123871ddf39fcf49 | 24c5c46f1d281fc15de7f6b72a5148ae85f89fb4 | /SRC/demo/imooc/imooc_advanced/文件IO高效处理/part5.py | 95bf4363644347a2b13b050019262fd1169ef12e | [] | no_license | enterpriseih/easyTest | 22d87c7ffe40fb10a07f7c5cdd505f63dd45adc0 | 43b8d294e898f25055c78313cfece2753352c250 | refs/heads/master | 2023-08-23T22:55:14.798341 | 2020-02-11T09:13:43 | 2020-02-11T09:13:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | # 如何访问文件的状态
# 文件的类型
# 文件的访问权限
# 文件的最后访问 修改 节点状态更改时间
# 普通文件的大小
#
# 方法一: 标准库中os模块下的三个系统调用stat,fstat,lstat获取文件状态
# os.stat 不取符号链接
# os.lstat 取符号链接
# os.fstat 需要一个打开的文件描述符
# open().fileno() #获取文件描述符
import os, stat,time
s = os.stat('demo.txt')
print(s.st_mode)
print(bin(s.st_mode))
print(stat.S_ISDIR(s.st_mode)) # 判断状态
print(stat.S_ISREG(s.st_mode))
# 获取文件权限
res=s.st_mode & stat.S_IRUSR #判断标志位 大于0就是真的
print(res)
#获取文件访问时间
print(time.localtime(s.st_atime))
#获取文件大小
res=s.st_size
print(res)
# 方法二:标准库中os.path下的一些函数
res=os.path.isdir('x.txt')
print(res)
res=os.path.islink('x.txt')
print(res)
| [
"yaolihui0506"
] | yaolihui0506 |
de7233c81973b09b53acb3c50b0fd57ce38459e3 | f52997ac7e1b41f34018c3a0028ced8638072b2b | /src/peoplefinder/test/views/test_profile_views.py | 7055716d96542fdced68803c3eae3b488cf6d244 | [
"MIT"
] | permissive | uktrade/digital-workspace-v2 | 49fae1fca819b625c6f6949fb5ce51b89fbcab96 | 7e328d0d55c9aa73be61f476823a743d96e792d0 | refs/heads/main | 2023-09-03T12:03:47.016608 | 2023-09-01T12:07:55 | 2023-09-01T12:07:55 | 232,302,840 | 6 | 0 | MIT | 2023-09-13T15:50:24 | 2020-01-07T10:41:18 | Python | UTF-8 | Python | false | false | 24,240 | py | from dataclasses import dataclass
from typing import Any, Optional
import pytest
from bs4 import BeautifulSoup
from django.contrib.auth.models import Group, Permission
from django.core.management import call_command
from django.db import models
from django.db.utils import IntegrityError
from django.test.client import Client
from django.urls import reverse
from pytest_django.asserts import assertContains
from networks.models import Network
from peoplefinder.forms.profile_edit import (
AdminProfileEditForm,
ContactProfileEditForm,
LocationProfileEditForm,
PersonalProfileEditForm,
SkillsProfileEditForm,
TeamsProfileEditForm,
TeamsProfileEditFormset,
)
from peoplefinder.forms.role import RoleFormsetForm
from peoplefinder.management.commands.create_people_finder_groups import (
PERSON_ADMIN_GROUP_NAME,
TEAM_ADMIN_GROUP_NAME,
)
from peoplefinder.models import (
AdditionalRole,
Grade,
KeySkill,
LearningInterest,
Person,
Profession,
Team,
UkStaffLocation,
Workday,
)
from peoplefinder.services.person import PersonService
from peoplefinder.test.factories import TeamFactory
from peoplefinder.types import EditSections
from user.models import User
from user.test.factories import UserFactory
@dataclass
class State:
    """Per-test bundle: a logged-in test client plus its user, team, and profile."""

    client: Client
    user: User
    team: Team
    person: Person
@pytest.fixture()
def state(db):
    """Build the common test state: a team, a fresh user with a profile, and a
    client logged in as that user.

    Fixes: ``team == None`` → ``team is None`` (PEP 8 / E711 — identity check
    for None), and the redundant ``.all()`` before ``.last()``.
    """
    team = Team.objects.last()
    if team is None:
        team = TeamFactory()
    user = UserFactory()
    user.save()
    person = PersonService().create_user_profile(user)
    client = Client()
    client.force_login(user)
    return State(client=client, person=person, team=team, user=user)
def check_visible_button(state, test_url, button_title, codename):
    """Assert that the link `button_title` (bytes) appears on `test_url` only
    after the user is granted the permission `codename`, and that exactly one
    extra <a> element is rendered."""
    # Without the permission: button absent; record the baseline link count.
    response = state.client.get(test_url)
    assert response.status_code == 200
    assert button_title not in response.content
    soup = BeautifulSoup(response.content, features="html.parser")
    buttons = soup.find_all("a")
    button_len = len(buttons)
    edit_team_perm = Permission.objects.get(codename=codename)
    state.user.user_permissions.add(edit_team_perm)
    # With the permission: button present, one additional link.
    response = state.client.get(test_url)
    assert response.status_code == 200
    assert button_title in response.content
    soup = BeautifulSoup(response.content, features="html.parser")
    buttons = soup.find_all("a")
    assert len(buttons) == button_len + 1
def check_permission(state, view_url, codename):
    """Assert `view_url` returns 403 without the permission `codename` and 200
    once the permission is granted to the test user."""
    response = state.client.get(view_url)
    assert response.status_code == 403
    edit_profile_perm = Permission.objects.get(codename=codename)
    state.user.user_permissions.add(edit_profile_perm)
    state.user.save()
    response = state.client.get(view_url)
    assert response.status_code == 200
def test_edit_profile_personal(state):
    """The 'personal' edit section renders for the profile's own user."""
    edit_profile_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.PERSONAL.value,
        },
    )
    response = state.client.get(edit_profile_url)
    assert response.status_code == 200
def test_edit_profile_contact(state):
    """The 'contact' edit section renders for the profile's own user."""
    edit_profile_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.CONTACT.value,
        },
    )
    response = state.client.get(edit_profile_url)
    assert response.status_code == 200
def test_edit_profile_teams(state):
    """The 'teams' edit section renders for the profile's own user."""
    edit_profile_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.TEAMS.value,
        },
    )
    response = state.client.get(edit_profile_url)
    assert response.status_code == 200
def test_edit_profile_location(state):
    """The 'location' edit section renders for the profile's own user."""
    edit_profile_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.LOCATION.value,
        },
    )
    response = state.client.get(edit_profile_url)
    assert response.status_code == 200
def test_edit_profile_skills(state):
    """The 'skills' edit section renders for the profile's own user."""
    edit_profile_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.SKILLS.value,
        },
    )
    response = state.client.get(edit_profile_url)
    assert response.status_code == 200
def test_edit_profile_admin_no_superuser(state):
    """The 'admin' edit section is forbidden for a non-superuser."""
    edit_profile_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.ADMIN.value,
        },
    )
    response = state.client.get(edit_profile_url)
    assert response.status_code == 403
def test_edit_profile_admin_superuser(state):
    """The 'admin' edit section renders once the user is made a superuser."""
    PersonService.update_groups_and_permissions(
        person=state.person,
        is_person_admin=False,
        is_team_admin=False,
        is_superuser=True,
    )
    edit_profile_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.ADMIN.value,
        },
    )
    response = state.client.get(edit_profile_url)
    assert response.status_code == 200
def test_edit_team_permission(state):
    """Team edit view requires the 'change_team' permission."""
    edit_url = reverse(
        "team-edit",
        kwargs={
            "slug": state.team.slug,
        },
    )
    check_permission(state, edit_url, "change_team")
def test_add_sub_team_permission(state):
    """Add-sub-team view requires the 'add_team' permission."""
    add_url = reverse(
        "team-add-new-subteam",
        kwargs={
            "slug": state.team.slug,
        },
    )
    check_permission(state, add_url, "add_team")
def test_delete_team_permission(state):
    """Team delete view requires the 'delete_team' permission."""
    add_url = reverse(
        "team-delete",
        kwargs={
            "slug": state.team.slug,
        },
    )
    check_permission(state, add_url, "delete_team")
def test_edit_profile_visible(state):
    """A user always sees the 'Edit profile' link on their own profile."""
    view_url = reverse(
        "profile-view",
        kwargs={
            "profile_slug": state.person.slug,
        },
    )
    response = state.client.get(view_url)
    assertContains(response, "Edit profile", html=True)
def test_edit_team_visible_permission(state):
    """The 'Edit team' button appears only with the 'change_team' permission."""
    view_url = reverse(
        "team-view",
        kwargs={
            "slug": state.team.slug,
        },
    )
    check_visible_button(state, view_url, b"Edit team", "change_team")
def test_delete_team_visible_permission(state):
    """The 'Delete team' button appears only with the 'delete_team' permission."""
    view_url = reverse(
        "team-view",
        kwargs={
            "slug": state.team.slug,
        },
    )
    check_visible_button(state, view_url, b"Delete team", "delete_team")
def test_create_sub_team_visible_permission(state):
    """The 'Add new sub-team' button appears only with the 'add_team' permission."""
    view_url = reverse(
        "team-view",
        kwargs={
            "slug": state.team.slug,
        },
    )
    check_visible_button(state, view_url, b"Add new sub-team", "add_team")
def test_team_log_visible_permission(state):
    """The team audit log is shown only to members of the Team Admin group."""
    view_url = reverse(
        "team-view",
        kwargs={
            "slug": state.team.slug,
        },
    )
    # Baseline: no audit log, record the govuk-details element count.
    response = state.client.get(view_url)
    assert response.status_code == 200
    title = b"Audit log"
    assert title not in response.content
    soup = BeautifulSoup(response.content, features="html.parser")
    log_detail = soup.find_all(attrs={"data-module": "govuk-details"})
    log_detail_len = len(log_detail)
    team_admin_group = Group.objects.get(name="Team Admin") # /PS-IGNORE
    state.user.groups.add(team_admin_group)
    # As a team admin: the log appears as one extra details element.
    response = state.client.get(view_url)
    assert response.status_code == 200
    assert title in response.content
    soup = BeautifulSoup(response.content, features="html.parser")
    log_detail = soup.find_all(attrs={"data-module": "govuk-details"})
    assert len(log_detail) == log_detail_len + 1
def test_self_profile_log_visible_permission(state):
    """A user can always see the audit log on their own profile."""
    view_url = reverse(
        "profile-view",
        kwargs={
            "profile_slug": state.person.slug,
        },
    )
    response = state.client.get(view_url)
    assert response.status_code == 200
    title = b"Audit log"
    assert title in response.content
def test_profile_log_visible_permission(state):
    """Another user's audit log is visible only with the 'view_auditlog' permission."""
    other_user = UserFactory(username="other_user", legacy_sso_user_id="other_user")
    other_user.save()
    other_person = PersonService().create_user_profile(other_user)
    view_url = reverse(
        "profile-view",
        kwargs={
            "profile_slug": other_person.slug,
        },
    )
    # Baseline: no audit log on someone else's profile.
    response = state.client.get(view_url)
    assert response.status_code == 200
    title = b"Audit log"
    assert title not in response.content
    soup = BeautifulSoup(response.content, features="html.parser")
    log_detail = soup.find_all(attrs={"data-module": "govuk-details"})
    log_detail_len = len(log_detail)
    view_log_perm = Permission.objects.get(codename="view_auditlog")
    state.user.user_permissions.add(view_log_perm)
    # With the permission: two extra govuk-details elements are rendered.
    response = state.client.get(view_url)
    assert response.status_code == 200
    assert title in response.content
    soup = BeautifulSoup(response.content, features="html.parser")
    log_detail = soup.find_all(attrs={"data-module": "govuk-details"})
    assert len(log_detail) == log_detail_len + 2
def test_profile_detail_view(state):
    """The profile detail page renders for the profile's own user."""
    view_url = reverse(
        "profile-view",
        kwargs={
            "profile_slug": state.person.slug,
        },
    )
    response = state.client.get(view_url)
    assert response.status_code == 200
def test_cannot_be_own_manager(state):
    """Saving a person as their own manager violates a DB constraint."""
    assert state.person.manager is None
    state.person.save()
    state.person.manager = state.person
    with pytest.raises(IntegrityError):
        state.person.save()
def get_payload_value(value) -> Any:
    """Normalise a cleaned-form value for use in a POST payload.

    Model instances become their ``id``; lists/querysets are converted
    element-wise; everything else passes through unchanged.
    """
    obj_id = getattr(value, "id", None)
    if obj_id:
        return obj_id
    if isinstance(value, (list, models.QuerySet)):
        return [get_payload_value(item) for item in value]
    return value
def payload_from_cleaned_data(form) -> dict:
    """Convert a bound form's cleaned_data into a POST-able payload dict.

    Falsy values are dropped (the client POST would reject them), and keys
    are prefixed with the form's prefix when one is set.
    """
    key_prefix = f"{form.prefix}-" if form.prefix else ""
    return {
        f"{key_prefix}{key}": get_payload_value(value)
        for key, value in form.cleaned_data.items()
        if value
    }
def test_profile_edit_personal_view(state):
    """POSTing the personal edit form persists the new name/pronoun fields.

    Fixes: ``is "Jane"`` / ``is "Smith"`` compared identity with string
    literals (a SyntaxWarning on modern Python and only true by interning
    accident) — replaced with ``==``; the final assertions now re-read the
    row from the database instead of trusting in-memory side effects of
    ``form.is_valid()``.
    """
    view_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.PERSONAL.value,
        },
    )
    response = state.client.get(view_url)
    assert response.status_code == 200
    # Preconditions from the profile factory.
    assert state.person.first_name == "Jane"
    assert state.person.last_name == "Smith"
    assert state.person.pronouns is None
    assert state.person.name_pronunciation is None
    form = PersonalProfileEditForm(
        {
            "first_name": "Jane",
            "last_name": "Smith",
            "pronouns": "she/her",
            "name_pronunciation": "Jay-n Smi-th",
        },
        instance=state.person,
    )
    assert form.is_valid()
    # Need to remove items with no value with cleaned data in order that "POST" will work
    payload = payload_from_cleaned_data(form)
    response = state.client.post(view_url, payload)
    assert response.status_code == 302
    assert response.url == view_url
    # Check what the view actually saved, not the in-memory instance.
    state.person.refresh_from_db()
    assert state.person.first_name == "Jane"
    assert state.person.last_name == "Smith"
    assert state.person.pronouns == "she/her"
    assert state.person.name_pronunciation == "Jay-n Smi-th"
def test_profile_edit_contact_view(state):
    """POSTing the contact edit form updates the contact fields."""
    view_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.CONTACT.value,
        },
    )
    response = state.client.get(view_url)
    assert response.status_code == 200
    assert state.person.contact_email is None
    assert state.person.primary_phone_number is None
    assert state.person.secondary_phone_number is None
    form = ContactProfileEditForm(
        {
            "contact_email": "jane.smith@test.com",
            "primary_phone_number": "01234567890",
            "secondary_phone_number": "09876543210",
        },
        instance=state.person,
    )
    assert form.is_valid()
    # Need to remove items with no value with cleaned data in order that "POST" will work
    payload = payload_from_cleaned_data(form)
    response = state.client.post(view_url, payload)
    assert response.status_code == 302
    assert response.url == view_url
    # NOTE(review): these assertions read the in-memory instance mutated by
    # form.is_valid() rather than the database — consider refresh_from_db().
    assert state.person.contact_email == "jane.smith@test.com"
    assert state.person.primary_phone_number == "01234567890"
    assert state.person.secondary_phone_number == "09876543210"
def test_profile_edit_teams_view(state):
    """POSTing the teams edit form (with an empty roles formset) updates grade flags."""
    view_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.TEAMS.value,
        },
    )
    response = state.client.get(view_url)
    assert response.status_code == 200
    # NOTE(review): these contact-field assertions look copy-pasted from the
    # contact test and are unrelated to the teams section.
    assert state.person.contact_email is None
    assert state.person.primary_phone_number is None
    assert state.person.secondary_phone_number is None
    grade = Grade.objects.all().first()
    form = TeamsProfileEditForm(
        {
            "grade": grade,
            "do_not_work_for_dit": True,
        },
        instance=state.person,
    )
    # NOTE(review): is_valid() is called twice; the first call is redundant.
    form.is_valid()
    assert form.is_valid()
    # Need to remove items with no value with cleaned data in order that "POST" will work
    payload = {
        "teams-TOTAL_FORMS": "0",
        "teams-INITIAL_FORMS": "0",
    }
    payload.update(**payload_from_cleaned_data(form))
    response = state.client.post(view_url, payload)
    assert response.status_code == 302
    assert response.url == view_url
    assert state.person.grade == grade
    assert state.person.manager is None
    assert state.person.do_not_work_for_dit is True
def test_profile_edit_teams_formset_view(state):
    """POSTing one role row through the teams formset creates a role for the person."""
    view_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.TEAMS.value,
        },
    )
    response = state.client.get(view_url)
    assert response.status_code == 200
    assert state.person.roles.count() == 0
    form = TeamsProfileEditForm(
        {},
        instance=state.person,
    )
    assert form.is_valid()
    prefix = "teams-0"
    teams_formset_form = RoleFormsetForm(
        {
            f"{prefix}-person": state.person,
            f"{prefix}-team": state.team,
            f"{prefix}-job_title": "Job title",
            f"{prefix}-head_of_team": False,
            f"{prefix}-DELETE": False,
        },
        prefix="teams-0",
    )
    # NOTE(review): is_valid() is called twice; the first call is redundant.
    teams_formset_form.is_valid()
    assert teams_formset_form.is_valid()
    # Need to remove items with no value with cleaned data in order that "POST" will work
    payload = {
        "teams-TOTAL_FORMS": "1",
        "teams-INITIAL_FORMS": "0",
    }
    payload.update(**payload_from_cleaned_data(form))
    payload.update(**payload_from_cleaned_data(teams_formset_form))
    response = state.client.post(view_url, payload)
    assert response.status_code == 302
    assert response.url == view_url
    # The posted row became a persisted role with the submitted values.
    assert state.person.roles.count() == 1
    role = state.person.roles.first()
    assert role.person == state.person
    assert role.team == state.team
    assert role.job_title == "Job title"
    assert role.head_of_team is False
def test_profile_edit_location_view(state):
    """POSTing the location edit form updates office/remote-working fields and workdays."""
    view_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.LOCATION.value,
        },
    )
    response = state.client.get(view_url)
    assert response.status_code == 200
    assert state.person.uk_office_location is None
    assert state.person.remote_working is None
    assert state.person.location_in_building is None
    assert state.person.international_building is None
    assert state.person.workdays.count() == 0
    # Reference data loaded by migrations/fixtures.
    uk_office_location = UkStaffLocation.objects.all().first()
    workday = Workday.objects.all().first()
    form = LocationProfileEditForm(
        {
            "uk_office_location": uk_office_location,
            "remote_working": Person.RemoteWorking.OFFICE_WORKER.value,
            "location_in_building": "3rd floor",
            "international_building": "international",
            "workdays": [workday],
        },
        instance=state.person,
    )
    assert form.is_valid()
    # Need to remove items with no value with cleaned data in order that "POST" will work
    payload = payload_from_cleaned_data(form)
    response = state.client.post(view_url, payload)
    assert response.status_code == 302
    assert response.url == view_url
    assert state.person.uk_office_location == uk_office_location
    assert state.person.remote_working == Person.RemoteWorking.OFFICE_WORKER
    assert state.person.location_in_building == "3rd floor"
    assert state.person.international_building == "international"
    assert state.person.workdays.count() == 1
    assert state.person.workdays.first() == workday
def test_profile_edit_skills_view(state):
    """POSTing the skills edit form updates skills, languages, interests and roles."""
    view_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.SKILLS.value,
        },
    )
    response = state.client.get(view_url)
    assert response.status_code == 200
    # Preconditions: all skills-related fields empty on a fresh profile.
    assert state.person.key_skills.count() == 0
    assert state.person.other_key_skills is None
    assert state.person.fluent_languages is None
    assert state.person.intermediate_languages is None
    assert state.person.learning_interests.count() == 0
    assert state.person.other_learning_interests is None
    assert state.person.networks.count() == 0
    assert state.person.professions.count() == 0
    assert state.person.additional_roles.count() == 0
    assert state.person.other_additional_roles is None
    assert state.person.previous_experience is None
    key_skills = [KeySkill.objects.all().first()]
    learning_interests = [LearningInterest.objects.all().first()]
    # NOTE(review): networks submission is disabled here (see commented lines
    # below) — confirm whether network editing is still expected to work.
    networks = []
    # networks = [Network.objects.all().first()]
    professions = [Profession.objects.all().first()]
    additional_roles = [AdditionalRole.objects.all().first()]
    form = SkillsProfileEditForm(
        {
            "key_skills": key_skills,
            "other_key_skills": "Other key skills",
            "fluent_languages": "French",
            "intermediate_languages": "Italian",
            "learning_interests": learning_interests,
            "other_learning_interests": "Other learning interests",
            "networks": networks,
            "professions": professions,
            "additional_roles": additional_roles,
            "other_additional_roles": "Other additional roles",
            "previous_experience": "Previous experience",
        },
        instance=state.person,
    )
    assert form.is_valid()
    # Need to remove items with no value with cleaned data in order that "POST" will work
    payload = payload_from_cleaned_data(form)
    response = state.client.post(view_url, payload)
    assert response.status_code == 302
    assert response.url == view_url
    assert state.person.key_skills.count() == 1
    assert state.person.other_key_skills == "Other key skills"
    assert state.person.fluent_languages == "French"
    assert state.person.intermediate_languages == "Italian"
    assert state.person.learning_interests.count() == 1
    assert state.person.other_learning_interests == "Other learning interests"
    assert state.person.networks.count() == 0
    # assert state.person.networks.count() == 1
    assert state.person.professions.count() == 1
    assert state.person.additional_roles.count() == 1
    assert state.person.other_additional_roles == "Other additional roles"
    assert state.person.previous_experience == "Previous experience"
def test_profile_edit_admin_view(state):
    """POSTing the admin edit form toggles group membership and superuser status.

    Fixes: PEP 8 / E712 boolean comparisons (``== True`` / ``== False``)
    replaced with direct truth tests.
    """
    PersonService.update_groups_and_permissions(
        person=state.person,
        is_person_admin=False,
        is_team_admin=False,
        is_superuser=True,
    )
    view_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.ADMIN.value,
        },
    )
    response = state.client.get(view_url)
    assert response.status_code == 200
    # Precondition: superuser with neither admin group membership.
    assert state.person.user.is_superuser
    assert not state.person.user.groups.filter(name=PERSON_ADMIN_GROUP_NAME).exists()
    assert not state.person.user.groups.filter(name=TEAM_ADMIN_GROUP_NAME).exists()
    form = AdminProfileEditForm(
        {
            "is_person_admin": True,
            "is_team_admin": True,
            "is_superuser": False,
        },
        instance=state.person,
        request_user=state.user,
    )
    assert form.is_valid()
    # Need to remove items with no value with cleaned data in order that "POST" will work
    payload = payload_from_cleaned_data(form)
    response = state.client.post(view_url, payload)
    assert response.status_code == 302
    assert response.url == view_url
    # Re-read from the database: groups flipped on, superuser flipped off.
    state.person.user.refresh_from_db()
    assert state.person.user.groups.filter(name=PERSON_ADMIN_GROUP_NAME).exists()
    assert state.person.user.groups.filter(name=TEAM_ADMIN_GROUP_NAME).exists()
    assert not state.person.user.is_superuser
def test_user_admin_no_superuser(state):
    """A plain user does not see the 'Administer profile' link."""
    PersonService.update_groups_and_permissions(
        person=state.person,
        is_person_admin=False,
        is_team_admin=False,
        is_superuser=False,
    )
    view_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.PERSONAL.value,
        },
    )
    response = state.client.get(view_url)
    assert response.status_code == 200
    soup = BeautifulSoup(response.content, features="html.parser")
    assert not soup.find(
        lambda tag: tag.name == "a" and "Administer profile" in tag.text
    )
def test_user_admin_with_superuser(state):
    """A superuser sees the 'Administer profile' link."""
    PersonService.update_groups_and_permissions(
        person=state.person,
        is_person_admin=False,
        is_team_admin=False,
        is_superuser=True,
    )
    view_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.PERSONAL.value,
        },
    )
    response = state.client.get(view_url)
    assert response.status_code == 200
    soup = BeautifulSoup(response.content, features="html.parser")
    assert soup.find(lambda tag: tag.name == "a" and "Administer profile" in tag.text)
def test_user_admin_no_superuser_but_team_person_admin(state):
    """Admin-group membership alone (without superuser) does not expose the link."""
    PersonService.update_groups_and_permissions(
        person=state.person,
        is_person_admin=True,
        is_team_admin=True,
        is_superuser=False,
    )
    view_url = reverse(
        "profile-edit-section",
        kwargs={
            "profile_slug": state.person.slug,
            "edit_section": EditSections.PERSONAL.value,
        },
    )
    response = state.client.get(view_url)
    assert response.status_code == 200
    soup = BeautifulSoup(response.content, features="html.parser")
    assert not soup.find(
        lambda tag: tag.name == "a" and "Administer profile" in tag.text
    )
class TestProfileUpdateUserView:
    """Tests for the view that re-associates a profile with a different user."""
    def _update_user(self, client, profile, new_user):
        # POST the username of the new owner to the profile-update-user view.
        return client.post(
            reverse("profile-update-user", kwargs={"profile_slug": profile.slug}),
            {"username": new_user.username},
        )
    def test_swap_user(self, normal_user, state):
        """Moving Jane's profile onto John leaves Jane with no profile."""
        john = normal_user
        john_profile = john.profile
        jane = state.user
        jane_profile = jane.profile
        assert john.profile == john_profile
        self._update_user(state.client, jane.profile, john)
        john.refresh_from_db()
        jane.refresh_from_db()
        assert john.profile == jane_profile
        assert not hasattr(jane, "profile")
| [
"noreply@github.com"
] | uktrade.noreply@github.com |
7534586b26049b6a1df685966ea721bd74d2fd71 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/86/usersdata/236/58743/submittedfiles/pico.py | abe26e0bc7e8432854b8628ab529927705d3a298 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | # -*- coding: utf-8 -*-
def pico(lista):
    """Print 'S' if `lista` is a "peak" sequence, else print 'N'.

    A peak sequence strictly increases, then strictly decreases, with both
    parts non-empty and together covering the whole list.

    Fix: the original defined helper functions that were never called and
    then read `cont1`/`cont2`, which were never bound in this scope, so every
    call raised NameError.  This rewrite implements the intended check.
    (Assumption: both a rising and a falling part are required — a purely
    monotone list is not a peak.  TODO confirm against the exercise spec.)

    Returns the printed character ('S' or 'N') for convenience.
    """
    n = len(lista)
    i = 1
    subida = 0
    # Count strictly increasing steps from the start.
    while i < n and lista[i] > lista[i - 1]:
        i = i + 1
        subida = subida + 1
    descida = 0
    # Count strictly decreasing steps from the peak onward.
    while i < n and lista[i] < lista[i - 1]:
        i = i + 1
        descida = descida + 1
    # A peak uses every adjacent pair exactly once and has both slopes.
    if subida > 0 and descida > 0 and (subida + descida) == (n - 1):
        resultado = 'S'
    else:
        resultado = 'N'
    print(resultado)
    return resultado
# Read n numbers from the user and report whether they form a peak sequence.
n = int(input('Digite a quantidade de elementos da lista: '))
A=[]
for i in range (1,n+1,1):
    numero= float(input('numero:'))
    A.append (numero)
pico(A)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
ddcdc27572a19ef988a11713433e4beec21812b3 | 3665ef60ba2cde4c13a8d192b8779a2f43dd9980 | /python/ray/tests/kubernetes_e2e/test_k8s_operator_scaling.py | 1d2511685df5490cf9f6750448cdd343a5b8495f | [
"MIT",
"Apache-2.0"
] | permissive | QPC-database/amazon-ray | 45783aa27977d7afda0bfa1b076be414f5dd1328 | 55aa4cac02a412b96252aea4e8c3f177a28324a1 | refs/heads/main | 2023-06-12T17:07:49.202429 | 2021-07-08T17:58:32 | 2021-07-08T17:58:32 | 384,644,975 | 1 | 0 | Apache-2.0 | 2021-07-10T08:05:02 | 2021-07-10T08:05:02 | null | UTF-8 | Python | false | false | 6,571 | py | """
Tests scaling behavior of Kubernetes operator.
(1) Start a cluster with minWorkers = 30 and verify scale-up
(2) Edit minWorkers to 0, verify scale-down
(3) Submit a task requiring 14 workers using Ray client
(4) Verify scale-up, task execution, and scale down.
"""
import copy
import kubernetes
import subprocess
import sys
import tempfile
import time
import unittest
import pytest
import yaml
import ray
from test_k8s_operator_basic import client_connect_to_k8s
from test_k8s_operator_basic import get_crd_path
from test_k8s_operator_basic import get_component_config_path
from test_k8s_operator_basic import retry_until_true
from test_k8s_operator_basic import wait_for_pods
from test_k8s_operator_basic import IMAGE
from test_k8s_operator_basic import PULL_POLICY
from test_k8s_operator_basic import NAMESPACE
def submit_scaling_job(num_tasks):
    """Submit ``num_tasks`` one-CPU Ray tasks, verify the cluster scales up
    to one pod per task, then block until all tasks finish and check their
    results.
    """

    @ray.remote(num_cpus=1)
    def f(i):
        # Hold the CPU long enough for the autoscaler to observe demand.
        time.sleep(60)
        return i

    print(">>>Submitting tasks with Ray client.")
    futures = [f.remote(i) for i in range(num_tasks)]

    print(">>>Verifying scale-up.")
    # Expect as many pods as tasks.
    # (each Ray pod has 1 CPU)
    wait_for_pods(num_tasks)

    print(">>>Waiting for task output.")
    task_output = ray.get(futures, timeout=360)

    # Fixed: the original message concatenated to "Tasks did notcomplete..."
    # (missing space between the two string literals).
    assert task_output == list(range(num_tasks)), \
        "Tasks did not complete with expected output."
@retry_until_true
def wait_for_operator():
    """Return True once `kubectl get pods` shows a running ray-operator pod.

    Retried by the ``retry_until_true`` decorator until it succeeds.
    """
    listing = subprocess.check_output("kubectl get pods", shell=True).decode()
    return any(
        "ray-operator" in row and "Running" in row
        for row in listing.splitlines()
    )
class KubernetesScaleTest(unittest.TestCase):
    def test_scaling(self):
        """End-to-end autoscaling check.

        Phases: (1) scale-up driven by minWorkers=30, with a second small
        same-named cluster in another namespace; (2) scale-down after editing
        minWorkers to 0; (3) scale-up/down driven by Ray task submission over
        the Ray client port.
        """
        with tempfile.NamedTemporaryFile("w+") as example_cluster_file, \
                tempfile.NamedTemporaryFile("w+") as example_cluster_file2, \
                tempfile.NamedTemporaryFile("w+") as operator_file:

            # Load the stock operator and example-cluster configs shipped
            # with the repo.
            example_cluster_config_path = get_component_config_path(
                "example_cluster.yaml")
            operator_config_path = get_component_config_path(
                "operator_cluster_scoped.yaml")

            operator_config = list(
                yaml.safe_load_all(open(operator_config_path).read()))
            example_cluster_config = yaml.safe_load(
                open(example_cluster_config_path).read())

            # Set image and pull policy
            podTypes = example_cluster_config["spec"]["podTypes"]
            pod_specs = [operator_config[-1]["spec"]["template"]["spec"]] + [
                podType["podConfig"]["spec"] for podType in podTypes
            ]
            for pod_spec in pod_specs:
                pod_spec["containers"][0]["image"] = IMAGE
                pod_spec["containers"][0]["imagePullPolicy"] = PULL_POLICY

            # Config set-up for this test.
            example_cluster_config["spec"]["maxWorkers"] = 100
            example_cluster_config["spec"]["idleTimeoutMinutes"] = 1
            worker_type = podTypes[1]
            # Make sure we have the right type
            assert "worker" in worker_type["name"]
            worker_type["maxWorkers"] = 100
            # Key for the first part of this test:
            worker_type["minWorkers"] = 30

            # Config for a small cluster with the same name to be launched
            # in another namespace.
            example_cluster_config2 = copy.deepcopy(example_cluster_config)
            example_cluster_config2["spec"]["podTypes"][1]["minWorkers"] = 1

            # Test overriding default client port.
            example_cluster_config["spec"]["headServicePorts"] = [{
                "name": "client",
                "port": 10002,
                "targetPort": 10001
            }]

            yaml.dump(example_cluster_config, example_cluster_file)
            yaml.dump(example_cluster_config2, example_cluster_file2)
            yaml.dump_all(operator_config, operator_file)

            files = [example_cluster_file, operator_file]
            for file in files:
                file.flush()

            print(">>>Creating operator.")
            cmd = f"kubectl apply -f {operator_file.name}"
            subprocess.check_call(cmd, shell=True)

            # Test creating operator before CRD.
            print(">>>Waiting for Ray operator to enter running state.")
            wait_for_operator()

            print(">>>Creating RayCluster CRD.")
            cmd = f"kubectl apply -f {get_crd_path()}"
            subprocess.check_call(cmd, shell=True)
            # Takes a bit of time for CRD to register.
            time.sleep(10)

            # Start a 30-pod cluster.
            print(">>>Starting a cluster.")
            cd = f"kubectl -n {NAMESPACE} apply -f {example_cluster_file.name}"
            subprocess.check_call(cd, shell=True)

            print(">>>Starting a cluster with same name in another namespace")
            # Assumes a namespace called {NAMESPACE}2 has been created.
            cd = f"kubectl -n {NAMESPACE}2 apply -f "\
                f"{example_cluster_file2.name}"
            subprocess.check_call(cd, shell=True)

            # Check that autoscaling respects minWorkers by waiting for
            # 32 pods in one namespace and 2 pods in the other.
            # NOTE(review): the comment above says 32 but the check below
            # waits for 31 (1 head + 30 workers) — confirm which is intended.
            print(">>>Waiting for pods to join cluster.")
            wait_for_pods(31)
            wait_for_pods(2, namespace=f"{NAMESPACE}2")

            # Check scale-down.
            print(">>>Decreasing min workers to 0.")
            example_cluster_edit = copy.deepcopy(example_cluster_config)
            # Set minWorkers to 0:
            example_cluster_edit["spec"]["podTypes"][1]["minWorkers"] = 0
            yaml.dump(example_cluster_edit, example_cluster_file)
            example_cluster_file.flush()
            cm = f"kubectl -n {NAMESPACE} apply -f {example_cluster_file.name}"
            subprocess.check_call(cm, shell=True)

            print(">>>Sleeping for a minute while workers time-out.")
            time.sleep(60)
            print(">>>Verifying scale-down.")
            wait_for_pods(1)

            # Scale-up/down driven by actual Ray workload, over the
            # overridden client port configured above.
            with client_connect_to_k8s(port="10002"):
                # Test scale up and scale down after task submission.
                submit_scaling_job(num_tasks=15)
                print(">>>Sleeping for a minute while workers time-out.")
                time.sleep(60)
                print(">>>Verifying scale-down.")
                wait_for_pods(1)
if __name__ == "__main__":
    # Load the local kubeconfig, then run this module's tests under pytest.
    kubernetes.config.load_kube_config()
    sys.exit(pytest.main(["-sv", __file__]))
| [
"noreply@github.com"
] | QPC-database.noreply@github.com |
cdda202c4def7eaeb7606d6be5eee7deea8ba86d | bd8532378ad2a61240faaa7be8ef44c60c055a2a | /rabona/data/leagues/Calcio B/Bari/Bari.py | cf848ea6f2baeef94433eab77e69f1ccb6c6dea5 | [] | no_license | nosoyyo/rabona | 278a9dfe158e342261343b211fb39b911e993803 | b0af3ab5806675fbf81b038633a74943118a67bb | refs/heads/master | 2020-03-16T06:56:55.277293 | 2018-05-30T11:45:51 | 2018-05-30T11:45:51 | 132,565,989 | 2 | 1 | null | 2018-05-30T11:45:52 | 2018-05-08T06:44:11 | Python | UTF-8 | Python | false | false | 11,218 | py | club_info = {'club_url': 'https://www.futbin.com///18/leagues/Calcio%20B?page=1&club=1848', 'club_logo': 'https://cdn.futbin.com/content/fifa18/img/clubs/1848.png', 'club_name': 'Bari'}
players = {}
players['Brienza'] = {'player_url': 'https://www.futbin.com//18/player/1687/Franco Brienza', 'player_name': 'Franco Brienza', 'player_rating': '75', 'player_shortname': 'Brienza', 'player_position': 'RW', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/137199.png?v=2'}
players['Oikonomou'] = {'player_url': 'https://www.futbin.com//18/player/16667/Marios Oikonomou', 'player_name': 'Marios Oikonomou', 'player_rating': '74', 'player_shortname': 'Oikonomou', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/22.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/216523.png?v=2'}
players['Marrone'] = {'player_url': 'https://www.futbin.com//18/player/2839/Luca Marrone', 'player_name': 'Luca Marrone', 'player_rating': '73', 'player_shortname': 'Marrone', 'player_position': 'CDM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/194753.png?v=2'}
players['Micai'] = {'player_url': 'https://www.futbin.com//18/player/3100/Alessandro Micai', 'player_name': 'Alessandro Micai', 'player_rating': '73', 'player_shortname': 'Micai', 'player_position': 'GK', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/224977.png?v=2'}
players['Gyömbér'] = {'player_url': 'https://www.futbin.com//18/player/3631/Norbert Gyömbér', 'player_name': 'Norbert Gyömbér', 'player_rating': '72', 'player_shortname': 'Gyömbér', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/43.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/220061.png?v=2'}
players['Floro Flores'] = {'player_url': 'https://www.futbin.com//18/player/3167/Antonio Floro Flores', 'player_name': 'Antonio Floro Flores', 'player_rating': '72', 'player_shortname': 'Floro Flores', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/34824.png?v=2'}
players['Kozák'] = {'player_url': 'https://www.futbin.com//18/player/3362/Libor Kozák', 'player_name': 'Libor Kozák', 'player_rating': '72', 'player_shortname': 'Kozák', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/12.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/188871.png?v=2'}
players['Cassani'] = {'player_url': 'https://www.futbin.com//18/player/3185/Mattia Cassani', 'player_name': 'Mattia Cassani', 'player_rating': '72', 'player_shortname': 'Cassani', 'player_position': 'LB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/137380.png?v=2'}
players['Galano'] = {'player_url': 'https://www.futbin.com//18/player/4160/Cristian Galano', 'player_name': 'Cristian Galano', 'player_rating': '71', 'player_shortname': 'Galano', 'player_position': 'RW', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/203672.png?v=2'}
players['Martinho'] = {'player_url': 'https://www.futbin.com//18/player/4090/Raphael Martinho', 'player_name': 'Raphael Martinho', 'player_rating': '71', 'player_shortname': 'Martinho', 'player_position': 'LM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/54.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/199205.png?v=2'}
players['Basha'] = {'player_url': 'https://www.futbin.com//18/player/3990/Migjen Basha', 'player_name': 'Migjen Basha', 'player_rating': '71', 'player_shortname': 'Basha', 'player_position': 'CM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/1.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/189779.png?v=2'}
players['Nenê'] = {'player_url': 'https://www.futbin.com//18/player/4622/Nenê', 'player_name': 'Nenê', 'player_rating': '70', 'player_shortname': 'Nenê', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/54.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/178264.png?v=2'}
players['Greco'] = {'player_url': 'https://www.futbin.com//18/player/4532/Leandro Greco', 'player_name': 'Leandro Greco', 'player_rating': '70', 'player_shortname': 'Greco', 'player_position': 'CM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/144511.png?v=2'}
players['Andrada'] = {'player_url': 'https://www.futbin.com//18/player/17200/Federico Andrada', 'player_name': 'Federico Andrada', 'player_rating': '69', 'player_shortname': 'Andrada', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/52.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/208158.png?v=2'}
players['Tonucci'] = {'player_url': 'https://www.futbin.com//18/player/5567/Denis Tonucci', 'player_name': 'Denis Tonucci', 'player_rating': '69', 'player_shortname': 'Tonucci', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/184512.png?v=2'}
players['D'Elia'] = {'player_url': "https://www.futbin.com//18/player/5732/Salvatore D'Elia", 'player_name': "Salvatore D'Elia", 'player_rating': '69', 'player_shortname': "D'Elia", 'player_position': 'LB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/201408.png?v=2'}
players['Morleo'] = {'player_url': 'https://www.futbin.com//18/player/6359/Archimede Morleo', 'player_name': 'Archimede Morleo', 'player_rating': '68', 'player_shortname': 'Morleo', 'player_position': 'LB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/192823.png?v=2'}
players['Tello'] = {'player_url': 'https://www.futbin.com//18/player/6770/Andres Tello', 'player_name': 'Andres Tello', 'player_rating': '68', 'player_shortname': 'Tello', 'player_position': 'CM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/56.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/222573.png?v=2'}
players['Busellato'] = {'player_url': 'https://www.futbin.com//18/player/7864/Massimiliano Busellato', 'player_name': 'Massimiliano Busellato', 'player_rating': '67', 'player_shortname': 'Busellato', 'player_position': 'CDM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/205663.png?v=2'}
players['Salzano'] = {'player_url': 'https://www.futbin.com//18/player/8691/Aniello Salzano', 'player_name': 'Aniello Salzano', 'player_rating': '66', 'player_shortname': 'Salzano', 'player_position': 'CM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/224805.png?v=2'}
players['Improta'] = {'player_url': 'https://www.futbin.com//18/player/8408/Riccardo Improta', 'player_name': 'Riccardo Improta', 'player_rating': '66', 'player_shortname': 'Improta', 'player_position': 'RW', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/210833.png?v=2'}
players['Sabelli'] = {'player_url': 'https://www.futbin.com//18/player/8330/Stefano Sabelli', 'player_name': 'Stefano Sabelli', 'player_rating': '66', 'player_shortname': 'Sabelli', 'player_position': 'RB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/206159.png?v=2'}
players['Fiamozzi'] = {'player_url': 'https://www.futbin.com//18/player/8409/Riccardo Fiamozzi', 'player_name': 'Riccardo Fiamozzi', 'player_rating': '66', 'player_shortname': 'Fiamozzi', 'player_position': 'RM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/210880.png?v=2'}
players['Capradossi'] = {'player_url': 'https://www.futbin.com//18/player/9364/Elio Capradossi', 'player_name': 'Elio Capradossi', 'player_rating': '65', 'player_shortname': 'Capradossi', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/215376.png?v=2'}
players['Cissé'] = {'player_url': 'https://www.futbin.com//18/player/8989/Karamoko Cissé', 'player_name': 'Karamoko Cissé', 'player_rating': '65', 'player_shortname': 'Cissé', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/118.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/178150.png?v=2'}
players['Henderson'] = {'player_url': 'https://www.futbin.com//18/player/17395/Liam Henderson', 'player_name': 'Liam Henderson', 'player_rating': '64', 'player_shortname': 'Henderson', 'player_position': 'CM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/42.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/219952.png?v=2'}
players['Empereur'] = {'player_url': 'https://www.futbin.com//18/player/16804/Empereur', 'player_name': 'Empereur', 'player_rating': '63', 'player_shortname': 'Empereur', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/54.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/227705.png?v=2'}
players['Petriccione'] = {'player_url': 'https://www.futbin.com//18/player/11847/Jacopo Petriccione', 'player_name': 'Jacopo Petriccione', 'player_rating': '62', 'player_shortname': 'Petriccione', 'player_position': 'CDM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/225381.png?v=2'}
players['Scalera'] = {'player_url': 'https://www.futbin.com//18/player/12930/Giuseppe Scalera', 'player_name': 'Giuseppe Scalera', 'player_rating': '60', 'player_shortname': 'Scalera', 'player_position': 'RB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/225015.png?v=2'}
players['De Lucia'] = {'player_url': 'https://www.futbin.com//18/player/14923/Victor De Lucia', 'player_name': 'Victor De Lucia', 'player_rating': '53', 'player_shortname': 'De Lucia', 'player_position': 'GK', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/27.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/225521.png?v=2'}
| [
"oyyoson@gmail.com"
] | oyyoson@gmail.com |
8a7738f0d32537b2f0994883b51fa8ae1b0e8c30 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-hilens/huaweicloudsdkhilens/v3/model/delete_secret_response.py | aa4df97a980ce2ea5cf944f3cadf638e76878e29 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,211 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteSecretResponse(SdkResponse):
    """Response model for the delete-secret operation.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'secret': 'SecretId'
    }

    attribute_map = {
        'secret': 'secret'
    }

    def __init__(self, secret=None):
        """DeleteSecretResponse

        The model defined in huaweicloud sdk

        :param secret: Identifier of the secret this response refers to.
        :type secret: :class:`huaweicloudsdkhilens.v3.SecretId`
        """

        super(DeleteSecretResponse, self).__init__()

        self._secret = None
        self.discriminator = None

        if secret is not None:
            self.secret = secret

    @property
    def secret(self):
        """Gets the secret of this DeleteSecretResponse.

        :return: The secret of this DeleteSecretResponse.
        :rtype: :class:`huaweicloudsdkhilens.v3.SecretId`
        """
        return self._secret

    @secret.setter
    def secret(self, secret):
        """Sets the secret of this DeleteSecretResponse.

        :param secret: The secret of this DeleteSecretResponse.
        :type secret: :class:`huaweicloudsdkhilens.v3.SecretId`
        """
        self._secret = secret

    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested
        models and masking attributes listed in ``sensitive_list``."""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Legacy Python 2 branch: restore setdefaultencoding so non-ASCII
            # attributes serialize without UnicodeDecodeError.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeleteSecretResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
0c498980bf25b8dfbd71c2f2472b890eb46bed3f | ac33e7a30131db58f0e72c9bf1f79cd34a38d335 | /manufacturing/doctype/stability_product_specification/stability_product_specification.py | 1292b97230dcb653d9bc46cc130feb66b19ebaa7 | [] | no_license | mbhavesh95863/erpnext | 395d545292c67cc5d6d7be3029d03245c754d984 | d6c490e4a404235abe9b4d541de1bbb53ba32949 | refs/heads/master | 2020-03-26T20:03:45.620397 | 2018-08-19T12:46:43 | 2018-08-19T12:46:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import cint, cstr, flt
class StabilityProductSpecification(Document):
	def autoname(self):
		"""Assign a sequential document name SPS/<item_code>/NNN and the
		matching SPSTP/<item_code>/NNN identifier."""
		existing = frappe.db.sql_list(
			"""select name from `tabStability Product Specification` where item_code=%s""",
			self.item_code)
		if existing:
			# Names look like SPS/<item_code>/NNN (possibly with a cancelled
			# suffix); strip everything up to and including the item code,
			# then take the trailing number.
			suffixes = [full.split(self.item_code)[-1][1:] for full in existing]
			numbers = [cint(part.split('/')[-1]) for part in suffixes]
			idx = max(numbers) + 1
		else:
			idx = 1
		self.name = 'SPS/' + self.item_code + ('/%.3i' % idx)
		self.spstp = 'SPSTP/' + self.item_code + ('/%.3i' % idx)
"erpnextdeveloper1@gmail.com"
] | erpnextdeveloper1@gmail.com |
5da051244616584e88c4cd12cafd5e1ee72871da | 8f88c31a3218646e61d9891b230a289621066faa | /rcbfp/apps.py | 77601995314f45e92d5605b7bca8b30a790d5f1f | [] | no_license | jorgec/rcbfp-backend-web | e72fdc203b4ab22127f82255faec63ba6a28703f | 5472a82538494fe897d5379e844dcbb0d0d8f041 | refs/heads/master | 2021-01-26T11:54:19.672898 | 2020-02-22T07:36:36 | 2020-02-22T07:36:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | # Application definition
INSTALLED_APPS = [
]
INSTALLED_APPS += [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize'
]
# Logging and debugging
INSTALLED_APPS += [
'debug_toolbar',
]
INSTALLED_APPS += [
'django_extensions',
'django_filters'
]
INSTALLED_APPS += [
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
]
# Utilities
INSTALLED_APPS += [
]
# Template Tags
INSTALLED_APPS += [
]
# Common Scaffold
INSTALLED_APPS += [
'crispy_forms',
'phonenumber_field',
]
# Core
INSTALLED_APPS += [
'locations',
'datesdim',
'accounts',
'profiles',
'buildings',
'business',
'checklists',
'incidents',
'admin_dashboards',
] | [
"markjungersaniva@gmail.com"
] | markjungersaniva@gmail.com |
8b5b3e59563bc105597e183f6fea7f1ef9ddadf0 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /055_modules/001_modules/examples/Python 3 Most Nessesary/12.2.Listing 12.15. The contents of the main program.py | 47eabaf9e133a17462e0bf428724e32c448b96d1 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 366 | py | # -*- coding: utf-8 -*-
from module1 import *
from module2 import *
import module1, module2
print(s) # Выведет: "Значение из модуля module2"
print(module1.s) # Выведет: "Значение из модуля module1"
print(module2.s) # Выведет: "Значение из модуля module2"
input() | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
7f23b5c699cad4f4a7f30661ea327d0201a9dfe6 | bd87d8947878ccb2f5b720e70a22493b00868fd3 | /bee/01basics/basics.py | 5c8af0d21a92dd0a4306103a5277afc532240e41 | [] | no_license | damiansp/completePython | 4cbf12ef682a1d4a5498f77e407dc02e44a7d7ac | 3f5e2f14d79c93df5147b82d901190c054535158 | refs/heads/master | 2023-09-01T20:50:03.444440 | 2023-08-28T00:27:57 | 2023-08-28T00:27:57 | 99,197,610 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | #!/usr/bin/env python3
import cmath # complex math
# Getting Input from User
meaning = input('The meaning of life: ')
print('%s, is it?' %meaning)
x = int(input('x: ')) # 3 only?
y = int(input('y: '))
print('xy = %d' %(x * y))
# cmath and Complex Numbers
print(cmath.sqrt(-1))
print((1 + 3j) * (9 + 4j))
#name = raw_input('What is your name? > ') # 2 only
#print('Why, hello, ' + name, + '!')
# String Representations
print("Hello, world!")
print(repr("Hello, world!"))
#print(str(10000L)) # error in 3
#print(repr(10000L)) # error in 3
temp = 42
print('The temperature is ' + str(temp))
# print('The temperature is ' + `temp`) # 2 only
print('The temperature is ' + repr(temp))
| [
"damiansp@gmail.com"
] | damiansp@gmail.com |
ba87068732a9123c6844d2c7a4f6cf2818a6a686 | 06ae8168b7067c8f77f06a48a22d158af1657651 | /teafound/migrations/0004_auto_20210411_1314.py | 76b6eb5f48409d932a3ab3028003f36582b7e115 | [] | no_license | Jiangjao/teaProject | 61e3cab41fab4b1aa8d2b1cfd6c6337c01196497 | 9f14d59d974bf82158a43d19c42b977b393857d2 | refs/heads/master | 2023-08-12T11:38:56.561815 | 2021-10-11T06:30:17 | 2021-10-11T06:30:17 | 347,795,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | # Generated by Django 3.0 on 2021-04-11 05:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0 on 2021-04-11; applies on top of
    # 0003_auto_20210330_1509.

    dependencies = [
        ('teafound', '0003_auto_20210330_1509'),
    ]

    operations = [
        # New optional file-upload field on Chemistry.
        migrations.AddField(
            model_name='chemistry',
            name='entryname',
            field=models.FileField(blank=True, null=True, upload_to='images/', verbose_name='pictureUsedNext'),
        ),
        # Relax CodeImages.cid to allow NULL/blank values.
        migrations.AlterField(
            model_name='codeimages',
            name='cid',
            field=models.IntegerField(blank=True, null=True),
        ),
        # Cap Resource.name at 200 characters.
        migrations.AlterField(
            model_name='resource',
            name='name',
            field=models.CharField(max_length=200),
        ),
    ]
| [
"918718278@qq.com"
] | 918718278@qq.com |
698a82d67584b20bbeec34810819d8c8af499121 | 3e405caed737ae72c504bbbaa542d03fd886d8cc | /aplot/colors.py | dffd075fe20290f47e0fdc4b85da6576c207fdab | [] | no_license | alvarolopez/aplot | 89611309698ccd78edba4a31b9e0a29389c67571 | e881ab70f74e0ab3999e177ea1f350da614f88f6 | refs/heads/master | 2016-08-12T05:51:06.255050 | 2015-12-17T09:45:21 | 2016-01-17T11:42:46 | 48,102,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | from matplotlib import cm
from matplotlib import pyplot
import numpy
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
gray = "#777777"
base = tableau20[0]
colormap_name = 'YlOrRd'
colormap = cm.YlOrRd
colormap_array = cm.YlOrRd
pyplot.rcParams['image.cmap'] = 'YlOrRd'
| [
"aloga@ifca.unican.es"
] | aloga@ifca.unican.es |
638aca7dfce5519e601786e1b869cfa33cd48f09 | 650d8fb0348989e919359bbbc7f4b40888f38b87 | /case_management/cpbns/doctype/participant_identified/test_participant_identified.py | c02d04e916af98a2083f73e05ec349bbdadac616 | [
"MIT"
] | permissive | worldkingpradeep/case_management | 4f0daa467f3a1c83be0cc431307298e31282a5ee | d39af5f49614672fc30ac6019eb0772c28de3df3 | refs/heads/master | 2023-03-18T05:09:13.797475 | 2021-01-23T15:27:22 | 2021-01-23T15:27:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Akram Mutaher and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestParticipantIdentified(unittest.TestCase):
    # Placeholder test case for the "Participant Identified" doctype;
    # no tests implemented yet.
    pass
| [
"you@example.com"
] | you@example.com |
4b13b7e42c2daec731d0176f7ae8c407716eeb47 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/servicenetworking/azure-mgmt-servicenetworking/azure/mgmt/servicenetworking/operations/_operations.py | aec0cfc6052ac274273da1b0d2196dae4a48717c | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 6,505 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
# Callback applied to each response: receives the pipeline response, the
# deserialized object and the response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Module-level serializer shared by the request builders below; client-side
# validation is disabled for generated code.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
    """Build the GET request that lists the Microsoft.ServiceNetworking
    provider operations."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # API version: explicit kwarg wins, then any value already in the query
    # string, then the hard-coded default.
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2023-05-01-preview"))
    accept = headers.pop("Accept", "application/json")

    url = kwargs.pop("template_url", "/providers/Microsoft.ServiceNetworking/operations")
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.servicenetworking.ServiceNetworkingMgmtClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Injected by the generated client: pipeline client, configuration,
        # and the (de)serializers, either positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
        """List the operations for the provider.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Operation or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.servicenetworking.models.Operation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        # Pagination helper: builds the first request, or a follow-up request
        # from the service-supplied next_link.
        def prepare_request(next_link=None):
            if not next_link:

                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        # Pagination helper: pulls (next_link, items) out of one page.
        def extract_data(pipeline_response):
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        # Pagination helper: runs one request through the pipeline and maps
        # error status codes onto azure-core exceptions.
        def get_next(next_link=None):
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    # URL template consumed by prepare_request above.
    list.metadata = {"url": "/providers/Microsoft.ServiceNetworking/operations"}
| [
"noreply@github.com"
] | Azure.noreply@github.com |
087e778dd0a985adefd0d54e25b7977edd159a2a | 737728a38690e2e31c4b4c1a998fae923502cf54 | /Python/20058_마법사상어와파이어스톰.py | ea777152f74b546b0f3419128405a6e1d4dc89d3 | [] | no_license | chaeonee/baekjoon | 528c300f15f7f88a4c608a46e7b82aa6cf325a76 | 90da231f7134ab10a3649d4038da3ad6d631de45 | refs/heads/master | 2023-06-27T03:17:54.908553 | 2021-07-26T07:06:53 | 2021-07-26T07:06:53 | 220,909,690 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | import sys
from collections import deque
sys.setrecursionlimit(100000)
def checkIce(N, ice):
    """Apply one melting pass to an N x N ice grid and return the new grid.

    A cell loses one unit of ice when fewer than 3 of its 4 orthogonal
    neighbours currently contain ice (cells at the border simply have fewer
    neighbours).  Neighbour counts are taken from the *input* grid, so all
    cells melt simultaneously; the input grid is left untouched.
    """
    NEIGHBORS = ((-1, 0), (1, 0), (0, -1), (0, 1))
    melted = [row[:] for row in ice]
    for r in range(N):
        for c in range(N):
            if melted[r][c] <= 0:
                continue  # empty cells never change
            frozen_adjacent = sum(
                1
                for dr, dc in NEIGHBORS
                if 0 <= r + dr < N and 0 <= c + dc < N and ice[r + dr][c + dc] > 0
            )
            if frozen_adjacent < 3:
                melted[r][c] -= 1
    return melted
def rotate(N, m, ice):
    """Rotate every m x m sub-grid of `ice` 90 degrees clockwise (in place),
    then run one melting pass and return the resulting grid."""
    snapshot = [row[:] for row in ice]
    for top in range(0, N, m):
        for left in range(0, N, m):
            # Clockwise rotation of one block:
            # destination (r, c) takes its value from source (m-1-c, r).
            for r in range(m):
                for c in range(m):
                    ice[top + r][left + c] = snapshot[top + (m - 1 - c)][left + r]
    return checkIce(N, ice)
# ---- Firestorm simulation driver (BOJ 20058) ----
# Input: grid exponent N and spell count Q, then the 2^N x 2^N grid, then Q spell levels.
N, Q = list(map(int,sys.stdin.readline().split()))
N = 2**N  # actual board side length
ice = [list(map(int,sys.stdin.readline().split())) for _ in range(N)]
magic = list(map(int,sys.stdin.readline().split()))

for m in magic:
    m = 2**m  # spell level L rotates sub-grids of side 2^L
    ice = rotate(N,m,ice)

# After all spells: report total remaining ice and the size (cell count) of
# the largest 4-connected chunk of ice, found via BFS flood fill.
ice_size, total_ice = 0, 0
c_dir = [[-1,0],[1,0],[0,-1],[0,1]]
for i in range(N):
    for j in range(N):
        if not ice[i][j]:
            continue
        s = 1
        total_ice += ice[i][j]
        ice[i][j] = 0  # zeroing doubles as the "visited" mark (grid is consumed here)
        q = deque([[i,j]])
        while q:
            x, y = q.popleft()
            for dx, dy in c_dir:
                dx += x
                dy += y
                if 0 <= dx < N and 0 <= dy < N and ice[dx][dy]:
                    s += 1
                    total_ice += ice[dx][dy]
                    q.append([dx,dy])
                    ice[dx][dy] = 0
        ice_size = max(s, ice_size)

print(total_ice)
print(ice_size)
| [
"noreply@github.com"
] | chaeonee.noreply@github.com |
d748c30874202ee7e8e9b3514ac9dd4995de7cdc | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/program/database/map/AddressMap.pyi | a114bde92b49b6dbdd7d5b50b9dabf7c1e7cf4bf | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,301 | pyi | from typing import List
import ghidra.program.database.map
import ghidra.program.model.address
import ghidra.program.model.lang
import ghidra.program.util
import java.lang
class AddressMap(object):
    """
    Address map interface add methods need by the program database implementation to manage its address map.
    NOTE: Objects implementing this interface are not intended for use outside of the
    ghidra.program.database packages.
    """
    # NOTE(review): this is a generated type stub (.pyi) for Ghidra's scripting API --
    # method bodies are `...` placeholders, and Python-2/Jython types (`long`,
    # `unicode`) are used deliberately; do not "modernize" them.

    # Sentinel key meaning "no database key exists for this address".
    INVALID_ADDRESS_KEY: long = -0x1L

    def decodeAddress(self, value: long) -> ghidra.program.model.address.Address:
        """
        Returns the address that was used to generate the given long key. (If the image base was
        moved, then a different address is returned unless the value was encoded using the
        "absoluteEncoding" method. If the program's default address space is segmented (i.e., SegmentedAddressSpace).
        the address returned will be always be normalized to defined segmented memory blocks if possible.
        @param value the long value to convert to an address.
        """
        ...

    def deleteOverlaySpace(self, name: unicode) -> None:
        """
        Delete the specified overlay space from this address map.
        @param name overlay space name (must be unique among all space names within this map)
        @throws IOException
        """
        ...

    def equals(self, __a0: object) -> bool: ...

    def findKeyRange(self, __a0: List[object], __a1: ghidra.program.model.address.Address) -> int: ...

    def getAbsoluteEncoding(self, addr: ghidra.program.model.address.Address, create: bool) -> long:
        """
        Get the database key associated with the given absolute address.
        This key uniquely identifies an absolute location within the program.
        If the requested key does not exist and create is false, INVALID_ADDRESS_KEY
        will be returned. Note that nothing should ever be stored using the returned key unless
        create is true.
        @param addr the address for which to get a database key.
        @param create true if a new key may be generated
        @return the database key for the given address or INVALID_ADDRESS_KEY if
        create is false and one does not exist for the specified addr.
        """
        ...

    def getAddressFactory(self) -> ghidra.program.model.address.AddressFactory:
        """
        Returns the address factory associated with this map.
        Null may be returned if map not associated with a specific address factory.
        """
        ...

    def getClass(self) -> java.lang.Class: ...

    def getImageBase(self) -> ghidra.program.model.address.Address:
        """
        Returns the current image base setting.
        """
        ...

    def getKey(self, addr: ghidra.program.model.address.Address, create: bool) -> long:
        """
        Get the database key associated with the given relative address.
        This key uniquely identifies a relative location within the program.
        If the program's image base is moved to another address, this key will map to a new
        address that is the same distance to the new base as the old address was to the old base.
        If the requested key does not exist and create is false, INVALID_ADDRESS_KEY
        will be returned. Note that nothing should ever be stored using the returned key unless
        create is true.
        @param addr the address for which to get a database key.
        @param create true if a new key may be generated
        @return the database key for the given address or INVALID_ADDRESS_KEY if
        create is false and one does not exist for the specified addr.
        """
        ...

    @overload
    def getKeyRanges(self, set: ghidra.program.model.address.AddressSetView, create: bool) -> List[ghidra.program.model.address.KeyRange]:
        """
        Generates a properly ordered list of database key ranges for a
        a specified address set. If absolute encodings are requested,
        only memory addresses will be included.
        @param set address set or null for all real address.
        @param create true if a new keys may be generated, otherwise returned
        key-ranges will be limited to those already defined.
        @return "sorted" list of KeyRange objects
        """
        ...

    @overload
    def getKeyRanges(self, set: ghidra.program.model.address.AddressSetView, absolute: bool, create: bool) -> List[ghidra.program.model.address.KeyRange]:
        """
        Generates a properly ordered list of database key ranges for a
        a specified address set. If absolute encodings are requested,
        only memory addresses will be included.
        @param set address set or null for all real address.
        @param absolute if true, absolute key encodings are returned, otherwise
        standard/relocatable address key encodings are returned.
        @param create true if a new keys may be generated, otherwise returned
        key-ranges will be limited to those already defined.
        @return "sorted" list of KeyRange objects
        """
        ...

    @overload
    def getKeyRanges(self, start: ghidra.program.model.address.Address, end: ghidra.program.model.address.Address, create: bool) -> List[ghidra.program.model.address.KeyRange]:
        """
        Generates a properly ordered list of database key ranges for a
        a specified address range. If absolute encodings are requested,
        only memory addresses will be included. Returned key ranges are
        generally intended for read-only operations since new keys will
        never be generated. The returned key ranges will correspond
        to those key ranges which have previously been created within
        the specified address range and may represent a much smaller subset
        of addresses within the specified range.
        @param start minimum address of range
        @param end maximum address of range
        @param create true if a new keys may be generated, otherwise returned
        key-ranges will be limited to those already defined.
        @return "sorted" list of KeyRange objects
        """
        ...

    @overload
    def getKeyRanges(self, start: ghidra.program.model.address.Address, end: ghidra.program.model.address.Address, absolute: bool, create: bool) -> List[ghidra.program.model.address.KeyRange]:
        """
        Generates a properly ordered list of database key ranges for a
        a specified address range. If absolute encodings are requested,
        only memory addresses will be included.
        @param start minimum address of range
        @param end maximum address of range
        @param absolute if true, absolute key encodings are returned, otherwise
        standard/relocatable address key encodings are returned.
        @param create true if a new keys may be generated, otherwise returned
        key-ranges will be limited to those already defined.
        @return "sorted" list of KeyRange objects
        """
        ...

    def getModCount(self) -> int:
        """
        Returns a modification number that always increases when the address map base table has
        changed.
        """
        ...

    def getOldAddressMap(self) -> ghidra.program.database.map.AddressMap:
        """
        Returns an address map capable of decoding old address encodings.
        """
        ...

    def hasSameKeyBase(self, addrKey1: long, addrKey2: long) -> bool:
        """
        Returns true if the two address keys share a common key base and can be
        used within a single key-range.
        @param addrKey1
        @param addrKey2
        """
        ...

    def hashCode(self) -> int: ...

    def invalidateCache(self) -> None:
        """
        Clears any cached values.
        @throws IOException
        """
        ...

    def isKeyRangeMax(self, addrKey: long) -> bool:
        """
        Returns true if the specified addrKey is the maximum key within
        its key-range.
        @param addrKey
        """
        ...

    def isKeyRangeMin(self, addrKey: long) -> bool:
        """
        Returns true if the specified addrKey is the minimum key within
        its key-range.
        @param addrKey
        """
        ...

    def isUpgraded(self) -> bool:
        """
        Returns true if this address map has been upgraded.
        """
        ...

    def notify(self) -> None: ...

    def notifyAll(self) -> None: ...

    def renameOverlaySpace(self, oldName: unicode, newName: unicode) -> None:
        """
        Rename an existing overlay space.
        @param oldName old overlay name
        @param newName new overlay name (must be unique among all space names within this map)
        @throws IOException
        """
        ...

    def setImageBase(self, base: ghidra.program.model.address.Address) -> None:
        """
        Sets the image base, effectively changing the mapping between addresses and longs.
        @param base the new base address.
        """
        ...

    def setLanguage(self, newLanguage: ghidra.program.model.lang.Language, addrFactory: ghidra.program.model.address.AddressFactory, translator: ghidra.program.util.LanguageTranslator) -> None:
        """
        Converts the current base addresses to addresses compatible with the new language.
        @param newLanguage the new language to use.
        @param addrFactory the new AddressFactory.
        @param translator translates address spaces from the old language to the new language.
        """
        ...

    def toString(self) -> unicode: ...

    @overload
    def wait(self) -> None: ...

    @overload
    def wait(self, __a0: long) -> None: ...

    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...

    # Property mirrors of the getter/setter methods above (generated for
    # Pythonic attribute access).
    @property
    def addressFactory(self) -> ghidra.program.model.address.AddressFactory: ...

    @property
    def imageBase(self) -> ghidra.program.model.address.Address: ...

    @imageBase.setter
    def imageBase(self, value: ghidra.program.model.address.Address) -> None: ...

    @property
    def modCount(self) -> int: ...

    @property
    def oldAddressMap(self) -> ghidra.program.database.map.AddressMap: ...

    @property
    def upgraded(self) -> bool: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
f0726feaad28d68f162f4eb3e242c62833307ecb | cdb186ad49bba1406c81f634b936e73f8cb04009 | /ABC/120/d2.py | 59601ef036cf61ce1bab54e68b795dbf3a972c45 | [] | no_license | ToshikiShimizu/AtCoder | 9e46f5581f2c1f5149ce1394d61d652cda6256a3 | 41fe6408c20c59bbf1b5d7ee9db2e132f48ad1ac | refs/heads/master | 2023-07-26T22:45:51.965088 | 2023-07-10T14:11:35 | 2023-07-10T14:11:35 | 148,154,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,617 | py | class UnionFind:
def __init__(self, n):
self.nodes = n
self.parents = [i for i in range(n)]
self.sizes = [1] * n
self.rank = [0] * n
def find(self, i): # どの集合に属しているか(根ノードの番号)
if self.parents[i] == i:
return i
else:
self.parents[i] = self.find(self.parents[i]) # 経路圧縮
return self.parents[i]
def unite(self, i, j): # 二つの集合を併合
pi = self.find(i)
pj = self.find(j)
if pi != pj:
if self.rank[pi] < self.rank[pj]:
self.sizes[pj] += self.sizes[pi]
self.parents[pi] = pj
else:
self.sizes[pi] += self.sizes[pj]
self.parents[pj] = pi
if self.rank[pi] == self.rank[pj]:
self.rank[pi] += 1
def same(self, i, j): # 同じ集合に属するかを判定
return self.find(i)==self.find(j)
def get_parents(self): # 根ノードの一覧を取得
for n in range(self.nodes): # findで経路圧縮する
self.find(n)
return self.parents
def size(self, i):
p = self.find(i)
return self.sizes[p]
# ---- Driver: process M bridge removals offline, in reverse, as unions ----
N, M = map(int, input().split())
AB = []
B = []  # NOTE(review): not referenced below
for m in range(M):
    a, b = map(int, input().split())
    AB.append((a-1,b-1))

ans = []
score = N * (N-1) // 2  # with every bridge gone, all N*(N-1)/2 pairs are disconnected
uf = UnionFind(N)
for a, b in AB[::-1]:
    ans.append(score)
    if not uf.same(a,b):
        # Merging two components connects size(a) * size(b) previously
        # disconnected pairs, so the "inconvenience" drops by that much.
        score -= uf.size(a) * uf.size(b)
        uf.unite(a,b)

# ans was built in reverse removal order; print per original bridge order.
for score in ans[::-1]:
    print(score)
| [
"tamreff3290@gmail.com"
] | tamreff3290@gmail.com |
d26d7340ae7caa87d85733eb2be12086b2f83ad4 | 8aca00af527752d527303d0f5260e818a5f49d27 | /VAE/Vanilla_VAE/train.py | 0b4eae02aed7f9355a5ead20b6769a92f30142cb | [] | no_license | tobby2002/generative_models | 2b01b7110b9f7cc258742694eb912b4b087d8571 | aee760bf2d281512e2b52c273e12daa3314f77f9 | refs/heads/master | 2021-01-21T04:26:53.828509 | 2017-06-29T08:56:02 | 2017-06-29T08:56:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | import argparse
import os
from solver import Solver
# from torch.backends import cudnn
from data_loader import mnist_data_loader
from configs import get_config
def main(config):
    """Build the MNIST data pipeline and a Solver from `config`, then train.

    Prints the config and total dataset size before building the model.
    """
    data_loader = mnist_data_loader(
        image_path=config.image_path,
        batch_size=config.batch_size,
        train=True)

    solver = Solver(config, data_loader=data_loader, is_train=True)

    print(config)
    print(f'\nTotal data size: {solver.total_data_size}\n')

    solver.build_model()
    solver.train()
if __name__ == '__main__':
    # Get Configuration
    parser = argparse.ArgumentParser()
    # TODO: add arguments
    kwargs = parser.parse_args()

    # Namespace => dictionary
    kwargs = vars(kwargs)
    config = get_config(**kwargs)

    # Ensure the output directories exist before training writes into them.
    for path in [config.image_path, config.image_log_path, config.save_dir]:
        if not os.path.isdir(path):
            os.mkdir(path)

    main(config)
| [
"heythisischo@gmail.com"
] | heythisischo@gmail.com |
a7fa75c779672492052164329d3bed0f821ea051 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03141/s588799336.py | ebd887e107d7cc9e8aef470dc771fa270d4b87c5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | import sys
def solve():
    """Read N dishes (a_i, b_i) from stdin and print the final score.

    Two players alternate turns (turn index even -> score += a of the taken
    dish; odd -> score -= b).  Each turn greedily takes the unused dish with
    the largest a+b; two pre-sorted copies (keyed by a+b, carrying a or b and
    the original index) plus a shared `used` array implement the greedy in
    O(N log N).
    """
    input = sys.stdin.readline
    N = int(input())
    T, A = [None] * N, [None] * N   # T: (a+b, a, idx); A: (a+b, b, idx)
    used = [False] * N
    for i in range(N):
        a, b = map(int, input().split())
        T[i] = (a + b, a, i)
        A[i] = (a + b, b, i)
    T.sort(reverse = True)
    A.sort(reverse = True)
    hapiness = 0
    ti, ai = 0, 0
    for i in range(N):
        if i % 2 == 0:
            # First player's turn: skip dishes already taken, score +a.
            while used[T[ti][2]]: ti += 1
            hapiness += T[ti][1]
            used[T[ti][2]] = True
            ti += 1
        else:
            # Second player's turn: skip dishes already taken, score -b.
            while used[A[ai][2]]: ai += 1
            hapiness -= A[ai][1]
            used[A[ai][2]] = True
            ai += 1
    print(hapiness)
    return 0

if __name__ == "__main__":
    solve()
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
041e74a93a32e8ecf62389e69580bde92a4029be | bd28f8a8dbcf7f2b4be3bcc0c0e656009191d379 | /predict_nn/ranlp/rsr/mi/lstm.py | 7b46f1ff48c11e224884dc9848aef46c174b4aa0 | [
"MIT"
] | permissive | nicolay-r/attitudes-extraction-ds | e2e5f9218408514ca1f3eff5edf88771e2f368ee | 49a82843e6adbca35321aaaa08d05532e953a0fc | refs/heads/master | 2022-08-30T04:51:14.133899 | 2020-05-28T11:06:01 | 2020-05-28T11:06:01 | 197,908,649 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | #!/usr/bin/python
import sys
sys.path.append('../../../../')
from predict_nn.ranlp.rsr.config import TEST_ON_EPOCHS, MI_CONTEXTS_PER_OPINION
from networks.ranlp.io_rsr import RaNLPConfTaskRuSentRelIO
from networks.mimlre.base import MIMLRE
from networks.ranlp.model_mimlre import RaNLPConfTaskMIMLREModel
from networks.mimlre.configuration.base import MIMLRESettings
from networks.context.architectures.rnn import RNN
from networks.context.configurations.rnn import RNNConfig, CellTypes
from predict_nn.ranlp.mi_names import ModelNames
import predict_nn.ranlp.utils as utils
def modify_settings(settings):
    """Tune the MIMLRE experiment config in place for this run.

    Sets contexts-per-opinion, switches the context RNN cell to LSTM, and
    fixes the hidden size at 128.
    """
    assert(isinstance(settings, MIMLRESettings))
    assert(isinstance(settings.ContextSettings, RNNConfig))
    settings.modify_contexts_per_opinion(MI_CONTEXTS_PER_OPINION)
    settings.ContextSettings.set_cell_type(CellTypes.LSTM)
    settings.ContextSettings.modify_hidden_size(128)
if __name__ == "__main__":
utils.run_cv_testing(model_name=ModelNames.MI_RNN,
create_network=lambda: MIMLRE(context_network=RNN()),
create_config=lambda: MIMLRESettings(context_settings=RNNConfig()),
create_io=RaNLPConfTaskRuSentRelIO,
create_model=RaNLPConfTaskMIMLREModel,
modify_settings_callback=modify_settings,
test_on_epochs=TEST_ON_EPOCHS,
cancel_training_by_cost=False)
| [
"kolyarus@yandex.ru"
] | kolyarus@yandex.ru |
fd9a87cff6770478db8de311c0a2d82fd99f9919 | 2fb1ab65258d724ea869f8684db444cff0464d7e | /gym_copter/rendering/twod.py | acda7172d4bdd17a17f2e325d07b814f584b8bf7 | [
"MIT"
] | permissive | coletta1/gym-copter | 3cf80dd710da8222b91b8714e6fc78540faa684a | 841a8ea2c5343e289bc06efd6ad49a75ba14cccd | refs/heads/master | 2023-02-13T19:45:01.352539 | 2021-01-07T17:06:58 | 2021-01-07T17:06:58 | 327,072,254 | 0 | 0 | MIT | 2021-01-07T14:07:19 | 2021-01-05T17:34:03 | null | UTF-8 | Python | false | false | 6,088 | py | '''
2D quadcopter rendering
Copyright (C) 2020 Simon D. Levy
MIT License
'''
from gym.envs.classic_control import rendering
import Box2D
from Box2D.b2 import fixtureDef, polygonShape
class TwoDRenderer:
    """Box2D-backed 2D quadcopter renderer using gym's classic_control viewer."""

    # Viewer size in pixels and world scale (pixels per world unit).
    VIEWPORT_W, VIEWPORT_H = 600, 400
    SCALE = 30.0

    GROUND_Z = 3.33      # world-unit height of the ground line
    GEAR_HEIGHT = 0.85   # vertical offset so the gear rests on the ground

    # Vertex lists in pixel units; divided by SCALE when fixtures are built.
    LANDER_POLY = [(-14, +17),
                   (-17, 0),
                   (-17, -10),
                   (+17, -10),
                   (+17, 0),
                   (+14, +17)]  # NOTE(review): not referenced within this class

    HULL_POLY = [(-30, 0),
                 (-4, +4),
                 (+4, +4),
                 (+30, 0),
                 (+4, -14),
                 (-4, -14)]

    LEG_X, LEG_Y, LEG_W, LEG_H = 12, -7, 3, 20
    MOTOR_X, MOTOR_Y, MOTOR_W, MOTOR_H = 25, 7, 4, 5
    BLADE_X, BLADE_Y, BLADE_W, BLADE_H = 25, 8, 20, 2

    # RGB colors with components in [0, 1].
    SKY_COLOR = 0.5, 0.8, 1.0
    GROUND_COLOR = 0.5, 0.7, 0.3
    VEHICLE_COLOR = 1.0, 1.0, 1.0
    MOTOR_COLOR = 0.5, 0.5, 0.5
    PROP_COLOR = 0.0, 0.0, 0.0
    OUTLINE_COLOR = 0.0, 0.0, 0.0

    def __init__(self):
        self.viewer = rendering.Viewer(self.VIEWPORT_W, self.VIEWPORT_H)
        self.viewer.set_bounds(0,
                               self.VIEWPORT_W/self.SCALE,
                               0,
                               self.VIEWPORT_H/self.SCALE)

        self.world = Box2D.b2World()

        # One dynamic body whose fixtures are, in order: hull, two legs,
        # two motors, and four prop blades (indices 0..8, see _show_fixture).
        self.lander = self.world.CreateDynamicBody(
            fixtures=[
                fixtureDef(shape=polygonShape(vertices=[(x/self.SCALE,
                                                         y/self.SCALE)
                                                        for x, y in poly]),
                           density=0.0)
                for poly in [self.HULL_POLY,
                             self._leg_poly(-1),
                             self._leg_poly(+1),
                             self._motor_poly(+1),
                             self._motor_poly(-1),
                             self._blade_poly(+1, -1),
                             self._blade_poly(+1, +1),
                             self._blade_poly(-1, -1),
                             self._blade_poly(-1, +1)]
            ]
        )

        # By showing props periodically, we can emulate prop rotation
        self.props_visible = 0

    def close(self):
        """Close the viewer and destroy the Box2D body."""
        self.viewer.close()
        self.world.DestroyBody(self.lander)
        self.lander = None

    def render(self, pose, spinning):
        """Draw background and the vehicle at `pose` (x, y, angle)."""
        # Draw ground as background.  Coordinates here are in pixel units
        # while the viewer bounds are world units, so the rectangle simply
        # overshoots the visible area -- harmless overdraw.
        self.viewer.draw_polygon(
            [(0, 0),
             (self.VIEWPORT_W, 0),
             (self.VIEWPORT_W, self.VIEWPORT_H),
             (0, self.VIEWPORT_H)],
            color=self.GROUND_COLOR)

        # Draw sky from the ground line upward.
        self.viewer.draw_polygon(
            [(0, self.GROUND_Z),
             (self.VIEWPORT_W, self.GROUND_Z),
             (self.VIEWPORT_W, self.VIEWPORT_H),
             (0, self.VIEWPORT_H)],
            color=self.SKY_COLOR)

        # Set copter pose to values from Lander2D.step(), negating for
        # coordinate conversion
        self.lander.position = (pose[0] + self.VIEWPORT_W/self.SCALE/2,
                                -pose[1] + self.GROUND_Z + self.GEAR_HEIGHT)
        self.lander.angle = -pose[2]

        # Draw copter (legs, hull, motors).
        self._show_fixture(1, self.VEHICLE_COLOR)
        self._show_fixture(2, self.VEHICLE_COLOR)
        self._show_fixture(0, self.VEHICLE_COLOR)
        self._show_fixture(3, self.MOTOR_COLOR)
        self._show_fixture(4, self.MOTOR_COLOR)

        # Simulate spinning props by alternating show/hide:
        # while spinning, blades are drawn 2 of every 3 frames.
        if not spinning or self.props_visible:
            for k in range(5, 9):
                self._show_fixture(k, self.PROP_COLOR)

        self.props_visible = (not spinning or ((self.props_visible + 1) % 3))

    def complete(self, mode):
        """Finish the frame; returns an RGB array when mode == 'rgb_array'."""
        return self.viewer.render(return_rgb_array=mode == 'rgb_array')

    def _show_fixture(self, index, color):
        # Draw one body fixture as a filled polygon plus a thin outline.
        fixture = self.lander.fixtures[index]
        trans = fixture.body.transform
        path = [trans*v for v in fixture.shape.vertices]
        self.viewer.draw_polygon(path, color=color)
        path.append(path[0])
        self.viewer.draw_polyline(path, color=self.OUTLINE_COLOR, linewidth=1)

    def _blade_poly(self, x, w):
        # Diamond-shaped blade; x = side (+/-1), w = sweep direction (+/-1).
        return [
            (x*self.BLADE_X, self.BLADE_Y),
            (x*self.BLADE_X+w*self.BLADE_W/2, self.BLADE_Y+self.BLADE_H),
            (x*self.BLADE_X+w*self.BLADE_W, self.BLADE_Y),
            (x*self.BLADE_X+w*self.BLADE_W/2, self.BLADE_Y-self.BLADE_H),
        ]

    def _motor_poly(self, x):
        # Rectangular motor pod on side x (+/-1).
        return [
            (x*self.MOTOR_X, self.MOTOR_Y),
            (x*self.MOTOR_X+self.MOTOR_W, self.MOTOR_Y),
            (x*self.MOTOR_X+self.MOTOR_W, self.MOTOR_Y-self.MOTOR_H),
            (x*self.MOTOR_X, self.MOTOR_Y-self.MOTOR_H)
        ]

    def _leg_poly(self, x):
        # Rectangular landing leg on side x (+/-1).
        return [
            (x*self.LEG_X, self.LEG_Y),
            (x*self.LEG_X+self.LEG_W, self.LEG_Y),
            (x*self.LEG_X+self.LEG_W, self.LEG_Y-self.LEG_H),
            (x*self.LEG_X, self.LEG_Y-self.LEG_H)
        ]
class TwoDLanderRenderer(TwoDRenderer):
    """2D renderer that additionally marks the landing zone with two flags
    at +/- landing_radius around the viewport center."""

    FLAG_COLOR = 0.8, 0.0, 0.0

    def __init__(self, landing_radius):
        TwoDRenderer.__init__(self)
        self.landing_radius = landing_radius  # world-unit half-width of the landing zone

    def render(self, mode, pose, spinning):
        TwoDRenderer.render(self, pose, spinning)

        # Draw flags: a white pole plus a red triangular pennant on each side.
        for d in [-1, +1]:
            flagy1 = self.GROUND_Z
            flagy2 = flagy1 + 50/self.SCALE
            x = d*self.landing_radius + self.VIEWPORT_W/self.SCALE/2
            self.viewer.draw_polyline([(x, flagy1), (x, flagy2)],
                                      color=(1, 1, 1))
            self.viewer.draw_polygon([(x, flagy2),
                                      (x, flagy2-10/self.SCALE),
                                      (x + 25/self.SCALE,
                                       flagy2-5/self.SCALE)],
                                     color=self.FLAG_COLOR)

        return TwoDRenderer.complete(self, mode)
| [
"simon.d.levy@gmail.com"
] | simon.d.levy@gmail.com |
8892a90c709b1fad91ec73e656344c01c93f3649 | 2ac169bf1294df6069db95e5362df2376844faec | /urllib3/backends/trio_backend.py | 0c2b016fe82561fd0a99cff7d4de0705657c6d91 | [
"MIT"
] | permissive | merrellb/urllib3 | a9000632f54a68cb1f4bd9ee9618e39e89f538b1 | dd9d52eb1e69227bb02d6a0fcbc2771e4c4e54fd | refs/heads/bleach-spike | 2021-01-16T19:37:28.619347 | 2017-08-04T21:51:38 | 2017-08-04T21:51:38 | 100,185,249 | 0 | 0 | null | 2017-08-13T15:15:17 | 2017-08-13T15:15:16 | null | UTF-8 | Python | false | false | 2,740 | py | import trio
from . import LoopAbort
from ._util import is_readable
class TrioBackend:
    """Connection factory for running urllib3 on the trio event loop."""

    async def connect(self, host, port, source_address=None,
                      socket_options=None):
        """Open a TCP stream to (host, port) and wrap it in a TrioSocket."""
        if source_address is not None:
            # You can't really combine source_address= and happy eyeballs
            # (can we get rid of source_address? or at least make it a source
            # ip, no port?)
            raise NotImplementedError(
                "trio backend doesn't support setting source_address")

        tcp_stream = await trio.open_tcp_stream(host, port)
        for level, optname, value in socket_options:
            tcp_stream.setsockopt(level, optname, value)
        return TrioSocket(tcp_stream)
# XX it turns out that we don't need SSLStream to be robustified against
# cancellation, but we probably should do something to detect when the stream
# has been broken by cancellation (e.g. a timeout) and make is_readable return
# True so the connection won't be reused.
class TrioSocket:
    """Adapter presenting urllib3's socket interface on top of a trio Stream."""

    def __init__(self, stream):
        self._stream = stream

    async def start_tls(self, server_hostname, ssl_context):
        """Wrap the current stream in TLS and return a new TrioSocket."""
        wrapped = trio.ssl.SSLStream(
            self._stream, ssl_context,
            server_hostname=server_hostname,
            https_compatible=True)
        return TrioSocket(wrapped)

    def getpeercert(self, binary=False):
        return self._stream.getpeercert(binary=binary)

    async def receive_some(self):
        # NOTE(review): BUFSIZE is not defined or imported in this module's
        # visible imports -- confirm it is provided elsewhere in the package.
        return await self._stream.receive_some(BUFSIZE)

    # BUG FIX: this method was declared without `self` (so calling it as a
    # method bound the instance to `produce_bytes`), and `receiver` read an
    # undefined name `stream`; both now use the instance's `self._stream`.
    async def send_and_receive_for_a_while(self, produce_bytes, consume_bytes):
        """Run a sender and a receiver concurrently until LoopAbort is raised.

        `produce_bytes` is awaited for outgoing chunks (None ends sending);
        each incoming chunk is handed to `consume_bytes`.
        """
        async def sender():
            while True:
                outgoing = await produce_bytes()
                if outgoing is None:
                    break
                await self._stream.send_all(outgoing)

        async def receiver():
            while True:
                incoming = await self._stream.receive_some(BUFSIZE)
                consume_bytes(incoming)

        try:
            async with trio.open_nursery() as nursery:
                nursery.spawn(sender)
                nursery.spawn(receiver)
        except LoopAbort:
            pass

    def forceful_close(self):
        self._stream.forceful_close()

    def is_readable(self):
        # This is a bit of a hack, but I can't think of a better API that trio
        # *could* provide, since what we want to check here is such an odd
        # thing.
        sock_stream = self._stream
        # Strip off SSLStream wrappings
        while hasattr(sock_stream, "transport_stream"):
            sock_stream = sock_stream.transport_stream
        sock = sock_stream.socket
        return is_readable(sock)  # module-level helper from ._util

    def set_readable_watch_state(self, enabled):
        # No-op for trio: readability is polled on demand via is_readable().
        pass
| [
"njs@pobox.com"
] | njs@pobox.com |
bae510a110217c38f7a9d01c90829e9b968dd4c0 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/W_w_Mgt_to_C_focus_div/ch032/Tcrop_s255_p20_j15/pyr_2s/L5/step09_2side_L5.py | 7bcb0377bf4eb4ba36eab7dde3ed6cbf06544d8d | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,714 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
from tkinter import S
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_W_w_M_to_Cx_Cy_combine import W_w_M_to_Cx_Cy
from step08_b_use_G_generate_0_util import Tight_crop
from step09_c_train_step import Train_step_W_w_M_to_Cx_Cy
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
use_what_gen_op = W_w_M_to_Cx_Cy( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 0) )
use_what_train_step = Train_step_W_w_M_to_Cx_Cy( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 15) )
use_hid_ch = 32
import time
start_time = time.time()
###############################################################################################################################################################################################
###############################################################################################################################################################################################
########################################################### Block1
### Block1
#########################################################################################
pyramid_1side_1__2side_0 = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
pyramid_1side_1__2side_1 = [2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]
pyramid_1side_2__2side_0 = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1]
pyramid_1side_2__2side_1 = [2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2]
pyramid_1side_2__2side_2 = [2, 2, 0, 0, 0, 0, 0, 0, 0, 2, 2]
pyramid_1side_3__2side_0 = [1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1]
pyramid_1side_3__2side_1 = [2, 1, 1, 0, 0, 0, 0, 0, 1, 1, 2]
pyramid_1side_3__2side_2 = [2, 2, 1, 0, 0, 0, 0, 0, 1, 2, 2]
pyramid_1side_3__2side_3 = [2, 2, 2, 0, 0, 0, 0, 0, 2, 2, 2]
pyramid_1side_4__2side_0 = [1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1]
pyramid_1side_4__2side_1 = [2, 1, 1, 1, 0, 0, 0, 1, 1, 1, 2]
pyramid_1side_4__2side_2 = [2, 2, 1, 1, 0, 0, 0, 1, 1, 2, 2]
pyramid_1side_4__2side_3 = [2, 2, 2, 1, 0, 0, 0, 1, 2, 2, 2]
pyramid_1side_4__2side_4 = [2, 2, 2, 2, 0, 0, 0, 2, 2, 2, 2]
pyramid_1side_5__2side_0 = [1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1]
pyramid_1side_5__2side_1 = [2, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2]
pyramid_1side_5__2side_2 = [2, 2, 1, 1, 1, 0, 1, 1, 1, 2, 2]
pyramid_1side_5__2side_3 = [2, 2, 2, 1, 1, 0, 1, 1, 2, 2, 2]
pyramid_1side_5__2side_4 = [2, 2, 2, 2, 1, 0, 1, 2, 2, 2, 2]
pyramid_1side_5__2side_5 = [2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2]
pyramid_1side_6__2side_0 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
pyramid_1side_6__2side_1 = [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2]
pyramid_1side_6__2side_2 = [2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 2]
pyramid_1side_6__2side_3 = [2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2]
pyramid_1side_6__2side_4 = [2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2]
pyramid_1side_6__2side_5 = [2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2]
pyramid_1side_6__2side_6 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
#########################################################################################
# One KModel builder per pyramid layout above: a depth-5 flow_unet2 with 3x3
# "valid"-padded convolutions, sigmoid output activation, single output
# channel and a 2**14 channel upper bound.  The remaining hyper-parameters
# (use_hid_ch, use_what_gen_op, use_what_train_step) are module-level names
# defined earlier in the file — not visible here; confirm before reuse.
ch032_pyramid_1side_1__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_1__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_5, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_5, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=5, out_ch=1, d_amount=2, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
#########################################################################################
###############################################################################################################################################################################################
# Smoke test: build one representative model and push a single all-zero
# 512x512 grayscale tensor through its generator.
if(__name__ == "__main__"):
    import numpy as np
    # NOTE(review): `time` and `start_time` must be defined earlier in the
    # (much longer) file for this line to work — confirm.
    print("build_model cost time:", time.time() - start_time)
    data = np.zeros(shape=(1, 512, 512, 1), dtype=np.float32)
    use_model = ch032_pyramid_1side_4__2side_2
    use_model = use_model.build()
    result = use_model.generator(data)
    print(result[0].shape)
    from kong_util.tf_model_util import Show_model_weights
    Show_model_weights(use_model.generator)
    use_model.generator.summary()
    print(use_model.model_describe)
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
e75e43698a4d7ee8fab67e0ead070703180f9d66 | d83cd7bfec9c71ef3186546fd7423082415fca39 | /src/old/forums->JSON.py | 7b786d4da19838744f6d985dd71366cc646cf927 | [] | no_license | mac389/phikal | e139d7e20e6cda0fcedca9eb4a5a5ff397f5f49a | b5162a3bab7320ed3d67bb6b7c9c3a3f03c2ba5a | refs/heads/master | 2021-01-15T15:36:37.568339 | 2016-09-04T18:18:23 | 2016-09-04T18:18:23 | 56,150,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | import json, os
db = {}
WRITE = 'wb'
# Slurp every file in ./forums into db as {filename: {'text': contents}}.
# The directory path is computed once instead of on every iteration.
forums_dir = os.path.join(os.getcwd(), 'forums')
for filename in os.listdir(forums_dir):
	db[filename] = {}
	# Use a context manager so each file handle is closed promptly
	# (the original leaked handles via bare open(...).read()).
	with open(os.path.join(forums_dir, filename)) as forum_file:
		db[filename]['text'] = forum_file.read()
json.dump(db,open('db.json',WRITE)) | [
"mac389@gmail.com"
] | mac389@gmail.com |
eaa24b780fa830d12b2c79f659cfa00efdfda6ca | c2be187155aabf59a4c0d3f5065bc26239c0b827 | /master_category/converters/template_to_googlemanufacturer.py | b2cf214fc975e92734e471245aef63c2ddb66c20 | [] | no_license | dankCodeNugs/tmtext | 1d6c54f79399bfa5e6f3905c0f72ba0be59d8d0d | 8e2d834775f440def7f57294674b8109b46ee191 | refs/heads/master | 2023-03-16T08:49:38.456929 | 2016-12-20T19:45:54 | 2016-12-20T19:45:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,363 | py | import os
from lxml import html
import csv
import xlrd
from helper import check_extension, logging_info, write_to_file, convert_xls_file
#-------- define column names ------------
# Header names expected in the input spreadsheet; used to locate each
# column's index in generate_google_manufacturer_xml.
UPC = 'UPC'
MPN = 'MPN'
BRAND = 'Brand Name'
TITLE = 'Item Name'
GTIN = 'GTIN'
DESC = 'Description'
L_DESC = 'Long Description'
#-----------------------------------------
def convert_upc_to_gtin(upc):
    """Convert a UPC cell value (str/unicode or xlrd float) to a GTIN string.

    12-14 digit codes pass through unchanged; 8-digit codes (UPC-E / EAN-8)
    are zero-padded to 12 digits.  Any other length — and any unsupported
    input type — yields u''.
    """
    s_upc = u''
    # NOTE: `unicode` keeps this Python-2 compatible like the rest of the
    # file; the branch is only evaluated for non-str inputs.
    if type(upc) == str or type(upc) == unicode:
        s_upc = upc
    elif type(upc) == float:
        # xlrd delivers numeric cells as floats; render without a decimal.
        s_upc = u'%.f' % upc
    gtin_code = u''
    # BUG FIX: the original tested `len <= 14 or len >= 12`, which is true
    # for every length and therefore accepted any string as a GTIN.
    if 12 <= len(s_upc) <= 14:
        gtin_code = s_upc
    if len(s_upc) == 8:
        gtin_code = u'0000' + s_upc
    return gtin_code
def generate_bullets(desc):
    """Extract bullet-point strings from an HTML product description.

    Tries the known markup layouts in order — Amazon feature-bullet list
    (CSV feed), plain ``<ul><li>`` long description (Amazon), then
    ``<p>`` paragraphs (Walmart) — and returns the non-empty, stripped
    texts of the first layout that matches.  Returns [] for an empty
    description and ``[desc]`` when no layout matches.
    """
    if desc == '':
        return []
    tree_description = html.fromstring(desc)
    # One XPath per source layout; the original repeated the identical
    # try/return sequence three times.
    candidate_xpaths = [
        #--------- Description / CSV (skip bullets marked hidden)
        "//*[contains(@id,'feature-bullets')]//ul/li[not(contains(@class,'hidden'))]",
        #--------- Long Description / Amazon
        "//ul/li",
        #--------- Long Description / Walmart
        "//p",
    ]
    for xpath in candidate_xpaths:
        tree_bullets = tree_description.xpath(xpath)
        try:
            bullet_points = [b.text_content().strip() for b in tree_bullets if b.text_content().strip() != '']
        except Exception:
            bullet_points = []
            logging_info('Bullet parse error')
        # A matching layout wins even when every bullet turned out empty,
        # mirroring the original early-return behaviour.
        if len(tree_bullets) > 0:
            return bullet_points
    return [desc]
def parse_xls_value(val):
    """Normalise a raw xlrd cell value.

    xlrd hands numeric cells back as floats; those are rendered as
    integer-style strings (rounded, no decimal part).  Everything else is
    returned untouched.
    """
    return '%.f' % val if type(val) == float else val
def generate_google_manufacturer_xml(template_env, input_file):
    """Render a Google Manufacturer Center XML feed from a .csv/.xls export.

    Reads product rows from *input_file*, maps the known columns (see the
    module-level column-name constants) into item dicts, and renders them
    through the ``GoogleManufacturer.html`` Jinja template.  Progress and
    errors are reported via ``logging_info``; nothing is returned.
    """
    available_extensions = ['.csv', '.xls']
    items = []
    context = {}
    if not check_extension(input_file, available_extensions):
        logging_info('The file extension should be %s.'
                     % (','.join(available_extensions)), 'ERROR')
        return
    try:
        name, file_extension = os.path.splitext(input_file)
        ci = {}
        # ci maps a known column name to its index in the sheet, e.g.
        # {'MPN': 3, 'Brand Name': 0, ...}; -1 means "column not present".
        if file_extension == '.csv':
            with open(input_file, 'rU') as csvfile:
                reader = csv.reader(csvfile)
                for idx, item in enumerate(reader):
                    if idx == 0:
                        for i, c in enumerate(item):
                            ci[c] = i
                        # BUG FIX: default every known column to -1 so the
                        # `ci[X] > -1` guards below behave as documented;
                        # the original raised KeyError (aborting the whole
                        # file) whenever any expected column was missing.
                        for col_name in (UPC, MPN, BRAND, TITLE, GTIN, DESC, L_DESC):
                            ci.setdefault(col_name, -1)
                    else:
                        data = {
                            'id': item[ci[MPN]] if ci[MPN] > -1 else '',
                            'brand': item[ci[BRAND]] if ci[BRAND] > -1 else '',
                            'title': item[ci[TITLE]] if ci[TITLE] > -1 else '',
                            'gtin': item[ci[GTIN]] if ci[GTIN] > -1 else '',
                            'mpn': item[ci[MPN]] if ci[MPN] > -1 else '',
                            'description': item[ci[DESC]] if ci[DESC] > -1 else '',
                            'bullet_points': item[ci[L_DESC]] if ci[L_DESC] > -1 else '',
                        }
                        data['bullet_points'] = generate_bullets(data['bullet_points'])
                        # Fall back to deriving the GTIN from the UPC column.
                        if data['gtin'] == '':
                            if ci[UPC] > -1 and item[ci[UPC]] != '':
                                data['gtin'] = convert_upc_to_gtin(item[ci[UPC]])
                        items.append(data)
        else:  # .xls file
            logging_info('START CONVERSION')
            # xlrd cannot read xls files generated by PHPExcel, so the file
            # is converted first (see helper.convert_xls_file).
            input_file_c = convert_xls_file(input_file)
            logging_info('END CONVERSION')
            if input_file_c == '':
                raise Exception('Could not convert xml file')
            wb = xlrd.open_workbook(filename=input_file_c)
            s_names = wb.sheet_names()
            for sn in s_names:
                item_sheet = wb.sheet_by_name(sn)
                for idx, row in enumerate(item_sheet.get_rows()):
                    if idx == 0:
                        for i, c in enumerate(row):
                            ci[c.value] = i
                        # Same missing-column guard as in the CSV branch.
                        for col_name in (UPC, MPN, BRAND, TITLE, GTIN, DESC, L_DESC):
                            ci.setdefault(col_name, -1)
                    else:
                        data = {
                            'id': parse_xls_value(row[ci[MPN]].value) if ci[MPN] > -1 else '',
                            'brand': row[ci[BRAND]].value if ci[BRAND] > -1 else '',
                            'title': row[ci[TITLE]].value if ci[TITLE] > -1 else '',
                            'gtin': parse_xls_value(row[ci[GTIN]].value) if ci[GTIN] > -1 else '',
                            'mpn': parse_xls_value(row[ci[MPN]].value) if ci[MPN] > -1 else '',
                            'description': row[ci[DESC]].value if ci[DESC] > -1 else '',
                            'bullet_points': row[ci[L_DESC]].value if ci[L_DESC] > -1 else '',
                        }
                        data['bullet_points'] = generate_bullets(data['bullet_points'])
                        if data['gtin'] == '':
                            if ci[UPC] > -1 and row[ci[UPC]] != '':
                                data['gtin'] = convert_upc_to_gtin(row[ci[UPC]].value)
                        items.append(data)
    except Exception as e:
        logging_info(str(e), 'ERROR')
        return
    context['items'] = items
    template = template_env.get_template('GoogleManufacturer.html')
    output_content = template.render(context).encode('utf-8')
    filename = write_to_file(output_content)
    logging_info(filename, 'RESULT_FILE')
logging_info('google-manufacturer.xml', 'FILE_NAME') | [
"life.long.learner127@outlook.com"
] | life.long.learner127@outlook.com |
80a4b87e6030c74944a0822f7667bbfb7fd38fa7 | d5a5ee613027429ed654dbae11d3a18af6c379f8 | /timber_modisette/pythonDjango/dojosecrets/apps/dojo_secrets/models.py | 5ffb2d167ea093833f5a5a773c29443a4868d1b3 | [] | no_license | husainun/python_april_2017 | 1c3d04792bfe50f57f0f3238c06dca49c605f4b1 | 633f3451ae99eaa94a97fdf2647de38db03e49be | refs/heads/master | 2020-05-15T09:22:06.794597 | 2017-06-13T15:35:22 | 2017-06-13T15:35:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | from __future__ import unicode_literals
from django.db import models
import re
# Create your models here.
class UserManager(models.Manager):
    """Custom manager for :class:`User` providing registration validation."""

    def validateUser(self, post):
        """Validate a registration POST payload.

        Returns ``(is_valid, errors)`` where *errors* is the list of
        human-readable problems found, in fixed check order.
        NOTE(review): the email regex ``\\w+\\@+\\w+\\.\\w+`` accepts repeated
        '@' characters — confirm that is intentional.
        """
        errors = []
        if len(post.get('first_name')) == 0:
            errors.append("Must enter valid first name")
        if len(post.get('last_name')) == 0:
            errors.append("Must enter valid last name")
        if not re.search(r'\w+\@+\w+\.\w+', post.get('email')):
            errors.append("Please enter a valid email address")
        # Reject duplicate registrations for an existing email address.
        if User.objects.filter(email=post.get('email')).first() != None:
            errors.append("email already exists")
        if len(post.get('password')) < 6:
            errors.append("please enter a password of at least 6 characters")
        if post.get('password') != post.get('cf_password'):
            errors.append("passwords do not match")
        # Valid exactly when no check appended an error message.
        return (len(errors) == 0, errors)
class User(models.Model):
    """Registered site user (validated via UserManager.validateUser)."""
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    email = models.EmailField(max_length=200)
    # NOTE(review): the password (and its confirmation) are stored as plain
    # CharFields — no hashing is visible here; confirm this is intended.
    password = models.CharField(max_length=100)
    cf_password = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = UserManager()
class Post(models.Model):
    """A user-authored post; `likes` is a many-to-many of liking users."""
    content = models.TextField(max_length=1000)
    user = models.ForeignKey(User, related_name='posts')
    likes = models.ManyToManyField(User, related_name='liked_posts')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
| [
"mister.modistette@gmail.com"
] | mister.modistette@gmail.com |
7b903bffae937420e3d65bd50a9afd654f013f43 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/A/abresler/super_simple_twitter_scraper_8.py | f3b967c560a5cccaf86c33bbefe8ff74a186e952 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,230 | py | ###################################################################################
# Super Simple Twitter Scraper
###################################################################################
import scraperwiki
import simplejson
import urllib2
import csv
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
# Search parameters for the Twitter search API.
# NOTE(review): http://search.twitter.com/search.json is the legacy v1
# endpoint, which has since been retired — the scrape will fail today.
QUERY = 'korean and delicious'
RESULTS_PER_PAGE = '100'
LANGUAGE = ''
NUM_PAGES = 10
# Fetch each result page and persist tweets to the ScraperWiki SQLite
# store, keyed on the tweet id.  (Python 2 code — note the print statements.)
for page in range(1, NUM_PAGES+1):
    base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
        % (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
    try:
        results_json = simplejson.loads(scraperwiki.scrape(base_url))
        for result in results_json['results']:
            data = {}
            data['id'] = result['id']
            data['text'] = result['text']
            data['from_user'] = result['from_user']
            print data['from_user'], data['text']
            scraperwiki.sqlite.save(["id"], data)
    # NOTE(review): bare `except:` swallows every failure, including
    # KeyboardInterrupt; `except Exception:` would be safer.
    except:
        print 'Oh dear, failed to scrape %s' % base_url
###################################################################################
# Super Simple Twitter Scraper
###################################################################################
import scraperwiki
import simplejson
import urllib2
import csv
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
# Verbatim duplicate of the scraper code earlier in this file — the whole
# script body is repeated, so the same pages are scraped twice per run.
QUERY = 'korean and delicious'
RESULTS_PER_PAGE = '100'
LANGUAGE = ''
NUM_PAGES = 10
for page in range(1, NUM_PAGES+1):
    base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
        % (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
    try:
        results_json = simplejson.loads(scraperwiki.scrape(base_url))
        for result in results_json['results']:
            data = {}
            data['id'] = result['id']
            data['text'] = result['text']
            data['from_user'] = result['from_user']
            print data['from_user'], data['text']
            scraperwiki.sqlite.save(["id"], data)
    # NOTE(review): bare `except:` — same concern as the first copy.
    except:
        print 'Oh dear, failed to scrape %s' % base_url
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
af0251879607882ceeffe19420e0ab47b8a7b0c4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03804/s510368755.py | 7c3e304c260b97d80771fc571df678da7b42baf3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | n, m = map(int, input().split())
# Grid pattern matching: `a` is an n x n grid of characters and `b` an
# m x m grid (n and m were read on the preceding input line); print "Yes"
# iff b occurs as a contiguous sub-grid of a, else "No".
a = [input() for _ in range(n)]
b = [input() for _ in range(m)]
ans = 'No'
# Try every possible top-left offset (x, y) of b inside a.
for x in range(0, n - m + 1):
    for y in range(0, n - m + 1):
        check = True
        for i in range(m):
            for j in range(m):
                if a[i+x][j+y] != b[i][j]:
                    check = False
                    break
            if check == False:
                break
        # First full match wins; the remaining y offsets are skipped.
        if check == True:
            ans = 'Yes'
            break
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e88c846218f4fd40e81aa568be49a6bde82586ad | 1626e16760c9c5b5dc9bd7c345871c716d5ffd99 | /Problems/0800_0899/0861_Score_After_Flipping_Matrix/Project_Python3/Score_After_Flipping_Matrix.py | 528676c4fee1db599a5bb4afb91bf8dba6f59098 | [] | no_license | NobuyukiInoue/LeetCode | 94ddb19e63cb8d0775cdc13f311fe90c87a1d718 | 3f0ffd519404165fd1a735441b212c801fd1ad1e | refs/heads/master | 2023-09-01T07:38:50.939942 | 2023-08-23T09:51:17 | 2023-08-23T09:51:17 | 158,100,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,136 | py | # coding: utf-8
import os
import sys
import time
from typing import List, Dict, Tuple
class Solution:
    """LeetCode 861: maximise the matrix score after row/column flips."""

    def matrixScore(self, A: List[List[int]]) -> int:
        """Return the best achievable sum of the rows read as binary numbers.

        Strategy: flip every row whose leading bit is 0 (the most
        significant bit dominates the row's value), then flip each
        remaining column so that the majority value is 1.
        """
        rows = len(A)
        cols = len(A[0])
        # After the row flips, every leading bit is 1.
        total = rows << (cols - 1)
        for col in range(1, cols):
            # Ones in this column once the row flips are applied: a cell
            # keeps its value when its row started with 1, else inverts.
            ones = sum(row[col] if row[0] == 1 else row[col] ^ 1 for row in A)
            best = ones if ones >= rows - ones else rows - ones
            total += best << (cols - 1 - col)
        return total
def printGrid(title, grid):
    """Pretty-print a 2-D integer grid in the ``name = [ [..],[..] ]`` layout.

    The first row is prefixed with " [", subsequent rows with ",[", and the
    whole listing is closed by a final "]" line.
    """
    print("{0} = [".format(title))
    for row_idx, row in enumerate(grid):
        prefix = " [" if row_idx == 0 else ",["
        body = ",".join("{0:d}".format(cell) for cell in row)
        print(prefix + body + "]")
    print("]")
def printResult(title, result):
    """Print each element of *result* on its own line between bracket lines."""
    print("{0} = [".format(title))
    for element in result:
        print(element)
    print("]")
def main():
    """CLI entry: read the test-data file named in argv[1] and run each case.

    Usage: ``python <script> <testdata.txt>``.  Blank lines are skipped;
    every other line is handed to loop_main as one test case.
    """
    argv = sys.argv
    argc = len(argv)
    if argc < 2:
        print("Usage: python {0} <testdata.txt>".format(argv[0]))
        exit(0)
    if not os.path.exists(argv[1]):
        print("{0} not found...".format(argv[1]))
        exit(0)
    # Use a context manager so the data file is closed deterministically
    # (the original leaked the handle).
    with open(argv[1], "r") as testDataFile:
        lines = testDataFile.readlines()
    for temp in lines:
        temp = temp.strip()
        if temp == "":
            continue
        print("args = {0}".format(temp))
        loop_main(temp)
    # print("Hit Return to continue...")
    # input()
def loop_main(temp):
    """Parse one ``[[..],[..]]`` test line, solve it, and report the timing."""
    # Strip whitespace, quotes and the outer "[[ ... ]]" wrapper, leaving
    # rows separated by "],[".
    cleaned = temp.replace(" ", "").replace("\"", "").replace("[[", "").replace("]]", "").rstrip()
    A = []
    for row_text in cleaned.split("],["):
        A.append([int(col) for col in row_text.split(",")])
    printGrid("A", A)
    solver = Solution()
    started = time.time()
    result = solver.matrixScore(A)
    elapsed = time.time() - started
    print("result = {0:d}".format(result))
    print("Execute time ... : {0:f}[s]\n".format(elapsed))
# Script entry point: run the CLI driver only when executed directly.
if __name__ == "__main__":
    main()
| [
"gx3n-inue@asahi-net.or.jp"
] | gx3n-inue@asahi-net.or.jp |
256dba10a1f9e4655c2d1f4b3024442dc6b1f1f1 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /sts_write_3/role-with-saml_assume.py | bd754cf1c2a82bb42705ba084e7a21c5716a333e | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_three_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sts/assume-role-with-saml.html
# CLI glue for `aws sts assume-role-with-saml`: delegates to the shared
# write_three_parameter helper with the three required parameter names.
if __name__ == '__main__':
    """
    """
    # Help text shown to the user describing the three required parameters.
    parameter_display_string = """
    # role-arn : The Amazon Resource Name (ARN) of the role that the caller is assuming.
    # principal-arn : The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the IdP.
    # saml-assertion : The base-64 encoded SAML authentication response provided by the IdP.
    For more information, see Configuring a Relying Party and Adding Claims in the IAM User Guide .
    """
    add_option_dict = {}
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    write_three_parameter("sts", "assume-role-with-saml", "role-arn", "principal-arn", "saml-assertion", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
e328506e24b558eac711bd04cb331a655324fecc | ad99cf70a4ab84e0eb3971718147869c639e8976 | /backend/menu/api/v1/viewsets.py | 31633f37a05075e119710cd97bb2f559e1407d28 | [] | no_license | crowdbotics-apps/ellensburg-delivery-22553 | 60d3caf6ed99543a0e8362546f33b0d1bb07c825 | 3f7a5d1d4af602445a2715ca06fc3e327b9297da | refs/heads/master | 2023-01-12T09:07:08.250685 | 2020-11-12T08:14:21 | 2020-11-12T08:14:21 | 312,209,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | from rest_framework import authentication
from menu.models import Category, Country, Item, ItemVariant, Review
from .serializers import (
CategorySerializer,
CountrySerializer,
ItemSerializer,
ItemVariantSerializer,
ReviewSerializer,
)
from rest_framework import viewsets
class ItemViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoint for Item records (session or token auth)."""
    serializer_class = ItemSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Item.objects.all()
class ReviewViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoint for Review records (session or token auth)."""
    serializer_class = ReviewSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Review.objects.all()
class CountryViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoint for Country records (session or token auth)."""
    serializer_class = CountrySerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Country.objects.all()
class ItemVariantViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoint for ItemVariant records (session or token auth)."""
    serializer_class = ItemVariantSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = ItemVariant.objects.all()
class CategoryViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoint for Category records (session or token auth)."""
    serializer_class = CategorySerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Category.objects.all()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
4053ac7ddf4fecc6860ced92c9c946507d847eae | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2019_06_01/aio/operations/_blob_inventory_policies_operations.py | 19a2da9aeddfb198f43eed42c911a969c9b400bf | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 21,916 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._blob_inventory_policies_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BlobInventoryPoliciesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.storage.v2019_06_01.aio.StorageManagementClient`'s
:attr:`blob_inventory_policies` attribute.
"""
models = _models
    def __init__(self, *args, **kwargs) -> None:
        """Wire up the plumbing handed over by the service client.

        Accepts (client, config, serializer, deserializer) either
        positionally or as keywords.  NOTE: AutoRest-generated code —
        manual edits will be lost on regeneration.
        """
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        account_name: str,
        blob_inventory_policy_name: Union[str, _models.BlobInventoryPolicyName],
        **kwargs: Any
    ) -> _models.BlobInventoryPolicy:
        """Gets the blob inventory policy associated with the specified storage account.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param blob_inventory_policy_name: The name of the storage account blob inventory policy. It
         should always be 'default'. "default" Required.
        :type blob_inventory_policy_name: str or
         ~azure.mgmt.storage.v2019_06_01.models.BlobInventoryPolicyName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobInventoryPolicy or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2019_06_01.models.BlobInventoryPolicy
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # NOTE: AutoRest-generated code — manual edits will be lost on
        # regeneration.  Map auth/lookup status codes to core exception
        # types; callers may extend/override the mapping via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # The api-version and the response-shaping callback are overridable.
        api_version: Literal["2019-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-06-01"))
        cls: ClsType[_models.BlobInventoryPolicy] = kwargs.pop("cls", None)
        # Build the ARM GET request from the path/query parameters; the URL
        # template comes from get.metadata below.
        request = build_get_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            blob_inventory_policy_name=blob_inventory_policy_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # Anything other than 200 is surfaced as an HttpResponseError, with
        # the service's ErrorResponse body attached when deserializable.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("BlobInventoryPolicy", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    # URL template consumed by build_get_request via self.get.metadata["url"].
    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}"
    }
    # Typing-only overload: *properties* supplied as a BlobInventoryPolicy model, which the
    # implementation serializes to JSON. The runtime body lives in the @distributed_trace_async
    # create_or_update below.
    @overload
    async def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        blob_inventory_policy_name: Union[str, _models.BlobInventoryPolicyName],
        properties: _models.BlobInventoryPolicy,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.BlobInventoryPolicy:
        """Sets the blob inventory policy to the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param blob_inventory_policy_name: The name of the storage account blob inventory policy. It
         should always be 'default'. "default" Required.
        :type blob_inventory_policy_name: str or
         ~azure.mgmt.storage.v2019_06_01.models.BlobInventoryPolicyName
        :param properties: The blob inventory policy set to a storage account. Required.
        :type properties: ~azure.mgmt.storage.v2019_06_01.models.BlobInventoryPolicy
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobInventoryPolicy or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2019_06_01.models.BlobInventoryPolicy
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Typing-only overload: *properties* supplied as a raw IO stream that is sent as the request
    # body unmodified. The runtime body lives in the @distributed_trace_async create_or_update
    # below.
    @overload
    async def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        blob_inventory_policy_name: Union[str, _models.BlobInventoryPolicyName],
        properties: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.BlobInventoryPolicy:
        """Sets the blob inventory policy to the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param blob_inventory_policy_name: The name of the storage account blob inventory policy. It
         should always be 'default'. "default" Required.
        :type blob_inventory_policy_name: str or
         ~azure.mgmt.storage.v2019_06_01.models.BlobInventoryPolicyName
        :param properties: The blob inventory policy set to a storage account. Required.
        :type properties: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobInventoryPolicy or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2019_06_01.models.BlobInventoryPolicy
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        blob_inventory_policy_name: Union[str, _models.BlobInventoryPolicyName],
        properties: Union[_models.BlobInventoryPolicy, IO],
        **kwargs: Any
    ) -> _models.BlobInventoryPolicy:
        """Sets the blob inventory policy to the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param blob_inventory_policy_name: The name of the storage account blob inventory policy. It
         should always be 'default'. "default" Required.
        :type blob_inventory_policy_name: str or
         ~azure.mgmt.storage.v2019_06_01.models.BlobInventoryPolicyName
        :param properties: The blob inventory policy set to a storage account. Is either a
         BlobInventoryPolicy type or a IO type. Required.
        :type properties: ~azure.mgmt.storage.v2019_06_01.models.BlobInventoryPolicy or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobInventoryPolicy or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2019_06_01.models.BlobInventoryPolicy
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Translate common failure status codes to azure-core exception types; any mapping
        # the caller supplies via the "error_map" keyword takes precedence.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2019-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-06-01"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.BlobInventoryPolicy] = kwargs.pop("cls", None)
        content_type = content_type or "application/json"
        # A raw IO/bytes payload is forwarded as-is; a model instance is serialized to JSON.
        _json = None
        _content = None
        if isinstance(properties, (IO, bytes)):
            _content = properties
        else:
            _json = self._serialize.body(properties, "BlobInventoryPolicy")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            blob_inventory_policy_name=blob_inventory_policy_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.create_or_update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # 200 is the only success status handled for this operation; everything else is
        # mapped through error_map and raised as an HttpResponseError.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("BlobInventoryPolicy", pipeline_response)
        # A caller-supplied cls hook receives the raw pipeline response and may transform it.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}"
    }
    @distributed_trace_async
    async def delete(  # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        account_name: str,
        blob_inventory_policy_name: Union[str, _models.BlobInventoryPolicyName],
        **kwargs: Any
    ) -> None:
        """Deletes the blob inventory policy associated with the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param blob_inventory_policy_name: The name of the storage account blob inventory policy. It
         should always be 'default'. "default" Required.
        :type blob_inventory_policy_name: str or
         ~azure.mgmt.storage.v2019_06_01.models.BlobInventoryPolicyName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Translate common failure status codes to azure-core exception types; any mapping
        # the caller supplies via the "error_map" keyword takes precedence.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2019-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-06-01"))
        cls: ClsType[None] = kwargs.pop("cls", None)
        request = build_delete_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            blob_inventory_policy_name=blob_inventory_policy_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.delete.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # Success responses for delete are 200 and 204 (no body); no payload is deserialized.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Implicitly returns None when no cls hook is supplied.
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}"
    }
    @distributed_trace
    def list(
        self, resource_group_name: str, account_name: str, **kwargs: Any
    ) -> AsyncIterable["_models.BlobInventoryPolicy"]:
        """Gets the blob inventory policy associated with the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BlobInventoryPolicy or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2019_06_01.models.BlobInventoryPolicy]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2019-06-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-06-01"))
        cls: ClsType[_models.ListBlobInventoryPolicy] = kwargs.pop("cls", None)
        # Translate common failure status codes to azure-core exception types; any mapping
        # the caller supplies via the "error_map" keyword takes precedence.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        # Builds the first-page request, or reconstructs a GET from a continuation next_link,
        # re-applying the client's api-version to the link's query string.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserializes one page into (continuation_token, items); the token is always None
        # here, so iteration stops after the single returned page.
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ListBlobInventoryPolicy", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return None, AsyncList(list_of_elem)
        # Fetches and validates a single page of results through the client pipeline.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    list.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies"
    }
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
1cd6b93f4df40c4cfb55a5fbad37af872ccaa04d | 032e08b21990b83b046cf2feab8bd02c6cb81563 | /src/cadence/analysis/AnalysisStage.py | 26800ce80149c30379c792436add9904e22077a9 | [] | no_license | satello/Cadence | 362f1be6f4565d5e7ffd4530e6cc6b4a955b5fac | c795ed7cfab512ad340ff88c8c0e67237ac2dfc5 | refs/heads/master | 2020-12-25T00:38:26.639588 | 2015-03-12T18:29:13 | 2015-03-12T18:29:13 | 30,662,959 | 0 | 0 | null | 2015-02-11T18:34:18 | 2015-02-11T18:34:18 | null | UTF-8 | Python | false | false | 14,198 | py | # AnalysisStage.py
# (C)2014
# Scott Ernst
from __future__ import print_function, absolute_import, unicode_literals, division
from PyPDF2.merger import PdfFileMerger
from PyPDF2.pdf import PdfFileReader
from pyaid.config.ConfigsDict import ConfigsDict
from pyaid.time.TimeUtils import TimeUtils
try:
import matplotlib.pyplot as plt
except Exception:
plt = None
#*************************************************************************************************** AnalysisStage
class AnalysisStage(object):
    """ The base class for creating analysis stages, which are distinct pieces of analysis carried
        out within the scope of an AnalyzerBase instance that owns the stage. Subclasses customize
        behavior either by overriding the _analyze* hook methods or by passing the equivalent
        callbacks to the constructor. """

#===================================================================================================
#                                                                                       C L A S S

#___________________________________________________________________________________________________ __init__
    def __init__(self, key, owner, label =None, **kwargs):
        """Creates a new instance of AnalysisStage.

            key:   Identification key used by the owning AnalyzerBase to reference this stage.
            owner: The AnalyzerBase instance that owns this stage.
            label: Optional display label for log output; defaults to the class name.

            Optional keyword arguments are traversal callbacks: analyze, pre, post, sitemap,
            series, trackway and track. The analyze/sitemap/series/trackway callbacks can veto
            deeper traversal by returning a falsy value. """
        # The AnalyzerBase object that owns this stage
        self.owner = owner
        self._key = key
        # Key-based storage for dynamic data accumulated during the analysis process
        self._cache = ConfigsDict()
        self._label = label if label else self.__class__.__name__
        self._startTime = None
        self._analyzeCallback = kwargs.get('analyze')
        self._preStageCallback = kwargs.get('pre')
        self._postStageCallback = kwargs.get('post')
        self._sitemapCallback = kwargs.get('sitemap')
        self._seriesCallback = kwargs.get('series')
        self._trackwayCallback = kwargs.get('trackway')
        self._trackCallback = kwargs.get('track')

#===================================================================================================
#                                                                                   G E T / S E T

#___________________________________________________________________________________________________ GS: key
    @property
    def key(self):
        """ The identification key for this analyzer stage, which is how the AnalyzerBase
            references this stage. """
        return self._key

#___________________________________________________________________________________________________ GS: index
    @property
    def index(self):
        """ The integer index of this stage within its AnalyzerBase owner, or -1 when the stage
            is not registered with (or has no) owner. """
        try:
            return self.owner.stages.index(self)
        except Exception:
            return -1

#___________________________________________________________________________________________________ GS: cache
    @property
    def cache(self):
        """ A DataCache instance used to easily store dynamic key-based data during the analysis
            process. """
        return self._cache

#___________________________________________________________________________________________________ GS: logger
    @property
    def logger(self):
        """ The Logger instance for writing all analysis process information. This logger
            instance is owned by the AnalyzerBase and shared across stages. """
        return self.owner.logger

#___________________________________________________________________________________________________ GS: plot
    @property
    def plot(self):
        """ A convenience reference to Matplotlib's PyPlot module, or None when Matplotlib failed
            to import. Included here so Analyzers do not have to handle failed Matplotlib loading
            internally. """
        return plt

#===================================================================================================
#                                                                                     P U B L I C

#___________________________________________________________________________________________________ getPath
    def getPath(self, *args, **kwargs):
        """ Convenience method for creating paths relative to the output root path for this
            Analyzer. """
        return self.owner.getPath(*args, **kwargs)

#___________________________________________________________________________________________________ getTempFilePath
    def getTempFilePath(self, name =None, extension =None, *args):
        """ Used to create a temporary file path within this instance's temporary folder.
            Any file on this path will be automatically removed at the end of the analysis
            process.

            [name] :: String :: None
                The desired file name for the desired file within the temporary directory. If no
                name is specified, a name will be created automatically using the current time
                (microsecond) and a 16 digit random code for a very low probability of collisions.

            [extension] :: String :: None
                Specifies the extension to add to this file. The file name is not altered if no
                extension is specified.

            [*args] :: [String] :: []
                A list of relative folder prefixes in which the file should reside. """
        # NOTE(review): combining keyword arguments with a trailing *args expansion binds the
        # *args values positionally first; if the owner's method shares this signature, passing
        # any prefix args raises a TypeError -- confirm against AnalyzerBase.getTempFilePath.
        return self.owner.getTempFilePath(name=name, extension=extension, *args)

#___________________________________________________________________________________________________ getTempPath
    def getTempPath(self, *args, **kwargs):
        """ Creates a path relative to this instance's root temporary path. Uses the
            FileUtils.createPath() format for args and kwargs. """
        return self.owner.getTempPath(*args, **kwargs)

#___________________________________________________________________________________________________ analyze
    def analyze(self):
        """ Executes the analysis process for this stage, which consists largely of calling the
            analysis hook methods in their specified order. """
        self._startTime = TimeUtils.getNowDatetime()
        self._writeHeader()
        self._preAnalyze()
        self._analyze()
        self._postAnalyze()
        self._writeFooter()

#___________________________________________________________________________________________________ mergePdfs
    def mergePdfs(self, paths, fileName =None):
        """ Takes a list of paths to existing PDF files and merges them into a single pdf with
            the given file name.

            [fileName] :: String :: None
                The name of the file to be written. If not specified, a file name will be created
                using the name of this class. """
        merger = PdfFileMerger()
        sources = []
        try:
            for p in paths:
                # open() replaces the Python-2-only file() builtin used previously; the handles
                # must stay open until merger.write() completes, so they are tracked and closed
                # in the finally block (the original implementation leaked all of them).
                source = open(p, 'rb')
                sources.append(source)
                merger.append(PdfFileReader(source))
            if not fileName:
                fileName = '%s-Report.pdf' % self.__class__.__name__
            with open(self.getPath(fileName), 'wb') as target:
                merger.write(target)
        finally:
            for source in sources:
                source.close()

#===================================================================================================
#                                                                                 P R O T E C T E D

#___________________________________________________________________________________________________ _writeHeader
    def _writeHeader(self):
        """ Method for writing the logging header for this stage. This is the first method called
            during the analysis process to denote in the log file that the following output was
            created by this stage. """
        self.logger.write('\n' + 80*'*')
        self.logger.write('\n'.join([
            '[STARTED]: %s ANALYSIS STAGE' % self._label.upper(),
            'Run on %s' % TimeUtils.toZuluFormat(self._startTime).replace('T', ' at ')]
            + self._getHeaderArgs()))

#___________________________________________________________________________________________________ _getHeaderArgs
    # noinspection PyMethodMayBeStatic
    def _getHeaderArgs(self):
        """ A hook method used to add information to the log header for this stage. """
        return []

#___________________________________________________________________________________________________ _preAnalyze
    def _preAnalyze(self):
        """ A hook method called just before starting the analysis process. """
        if self._preStageCallback:
            self._preStageCallback(self)

#___________________________________________________________________________________________________ _analyze
    def _analyze(self):
        """ The core method in the analysis process. Unless overridden directly or by callback,
            this method will iterate through the sitemaps in the database and call the
            _analyzeSitemap() method on each one. """
        if not self._analyzeCallback or self._analyzeCallback(self):
            for sitemap in self.owner.getSitemaps():
                self._analyzeSitemap(sitemap)

#___________________________________________________________________________________________________ _analyzeSitemap
    def _analyzeSitemap(self, sitemap):
        """ Iterates over each trackway within the specified sitemap and calls the
            _analyzeTrackway() method on each one. A sitemap callback returning a falsy value
            skips the sitemap entirely.

            sitemap :: Tracks_SiteMap
                The sitemap model instance to analyze. """
        if self._sitemapCallback and not self._sitemapCallback(self, sitemap):
            return
        for tw in self.owner.getTrackways(sitemap):
            self._analyzeTrackway(tw, sitemap)

#___________________________________________________________________________________________________ _analyzeTrackway
    def _analyzeTrackway(self, trackway, sitemap):
        """ Iterates over each track series in the trackway and calls the _analyzeTrackSeries()
            method on each one that is ready for analysis.

            trackway :: Tracks_Trackway
                The trackway instance to analyze.

            sitemap :: Tracks_SiteMap
                The sitemap in which this trackway resides. """
        if self._trackwayCallback and not self._trackwayCallback(self, trackway, sitemap):
            return
        for key, series in self.owner.getTrackwaySeries(trackway).items():
            if series.isReady:
                self._analyzeTrackSeries(series, trackway, sitemap)

#___________________________________________________________________________________________________ _analyzeTrackSeries
    def _analyzeTrackSeries(self, series, trackway, sitemap):
        """ Iterates over the tracks within the track series and calls the _analyzeTrack() method
            on each one.

            series :: TrackSeries
                The TrackSeries instance to analyze.

            trackway :: Tracks_Trackway
                The trackway instance in which the track series resides.

            sitemap :: Tracks_Sitemap
                The sitemap in which the track series resides. """
        if self._seriesCallback and not self._seriesCallback(self, series, trackway, sitemap):
            return
        for t in series.tracks:
            self._analyzeTrack(t, series, trackway, sitemap)

#___________________________________________________________________________________________________ _analyzeTrack
    def _analyzeTrack(self, track, series, trackway, sitemap):
        """ Analyzes the specified track. By default nothing happens unless a trackCallback has
            been specified. This method should be otherwise overridden to analyze each track
            according to the analysis requirements.

            track :: Tracks_Track
                The track to analyze

            series :: TrackSeries
                The TrackSeries instance in which the track resides.

            trackway :: Tracks_Trackway
                The trackway instance in which the track resides.

            sitemap :: Tracks_Sitemap
                The sitemap in which the track resides. """
        if self._trackCallback:
            self._trackCallback(self, track, series, trackway, sitemap)

#___________________________________________________________________________________________________ _postAnalyze
    def _postAnalyze(self):
        """ A hook method called when the analysis process completes. """
        if self._postStageCallback:
            self._postStageCallback(self)

#___________________________________________________________________________________________________ _writeFooter
    def _writeFooter(self):
        """ The final method called in the analysis process, which writes the final information
            about the analysis stage to the log file for reference. This includes basic operational
            information about performance by default. """
        elapsed = TimeUtils.getElapsedTime(
            startDateTime=self._startTime,
            endDateTime=TimeUtils.getNowDatetime(),
            toUnit=TimeUtils.MILLISECONDS)
        self.logger.write('\n' + 80*'*')
        self.logger.write('\n'.join([
            '[COMPLETE]: %s ANALYSIS STAGE' % self._label.upper(),
            'Elapsed Time: %s' % TimeUtils.toPrettyElapsedTime(elapsed)] + self._getFooterArgs()))

#___________________________________________________________________________________________________ _getFooterArgs
    # noinspection PyMethodMayBeStatic
    def _getFooterArgs(self):
        """ Specifies additional arguments to be written to the log file as part of the analysis
            footer. This method returns an empty list by default. """
        return []

#===================================================================================================
#                                                                                 I N T R I N S I C

#___________________________________________________________________________________________________ __repr__
    def __repr__(self):
        return self.__str__()

#___________________________________________________________________________________________________ __str__
    def __str__(self):
        return '<%s>' % self.__class__.__name__
| [
"swernst@gmail.com"
] | swernst@gmail.com |
437b658890538d63a1cdda99d98e43a01604f019 | fff4e1431391956292afe5514b03f81035a3f402 | /GAN/GAN.py | b6e5893f41e42b03e882097c4acf7ad3f796facb | [] | no_license | recomdDN/DL_practice | b1d0172bc91a735a32ae01379965247bc9b71a91 | 5addd8629533b7547a62f68c518138af48e174f2 | refs/heads/master | 2020-04-16T23:02:12.324842 | 2019-02-18T09:29:53 | 2019-02-18T09:29:53 | 165,995,401 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 6,430 | py | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
from skimage.io import imsave
import os
import shutil
# MNIST image geometry; the networks operate on flattened 28x28 = 784 pixel vectors.
img_width = 28
img_height = 28
img_size = img_height * img_width
to_train = True
to_restore = False
output_path = 'output'
# total number of training epochs: 500
max_epoch = 500
# hidden-layer widths used by the generator (z -> h1 -> h2 -> img) and, mirrored,
# by the discriminator (img -> h2 -> h1 -> 1)
h1_size = 150
h2_size = 300
# dimensionality of the generator's noise input z
z_size = 100
batch_size = 256
def build_generator(z_prior):
    """Build the generator network: z (z_size) -> h1_size -> h2_size -> img_size, tanh output.

    Returns the generated-image tensor together with the list of trainable variables so
    the optimizer can be restricted to generator parameters only.
    """
    # Layer 1: noise vector -> first hidden layer (ReLU).
    weight_1 = tf.Variable(tf.truncated_normal([z_size, h1_size], stddev=0.1), name='g_w1', dtype=tf.float32)
    bias_1 = tf.Variable(tf.zeros([h1_size]), name='g_b1', dtype=tf.float32)
    layer_1 = tf.nn.relu(tf.matmul(z_prior, weight_1) + bias_1)
    # Layer 2: first -> second hidden layer (ReLU).
    weight_2 = tf.Variable(tf.truncated_normal([h1_size, h2_size], stddev=0.1), name='g_w2', dtype=tf.float32)
    bias_2 = tf.Variable(tf.zeros([h2_size]), name='g_b2', dtype=tf.float32)
    layer_2 = tf.nn.relu(tf.matmul(layer_1, weight_2) + bias_2)
    # Output layer: second hidden layer -> flattened image, squashed into [-1, 1] by tanh.
    weight_3 = tf.Variable(tf.truncated_normal([h2_size, img_size], stddev=0.1), name='g_w3', dtype=tf.float32)
    bias_3 = tf.Variable(tf.zeros([img_size]), name='g_b3', dtype=tf.float32)
    logits = tf.matmul(layer_2, weight_3) + bias_3
    x_generate = tf.nn.tanh(logits)
    g_params = [weight_1, bias_1, weight_2, bias_2, weight_3, bias_3]
    return x_generate, g_params
def build_discriminator(x_data, x_generated, keep_prob):
    """Build the discriminator network: img_size -> h2_size -> h1_size -> 1 with dropout.

    Real and generated batches are scored in a single forward pass; the sigmoid output
    is then split back into per-source probability tensors. Returns
    (y_data, y_generated, d_params) where d_params lists the trainable variables.
    """
    # Stack the real and the generated images so one forward pass scores both batches.
    x_in = tf.concat([x_data, x_generated], 0)
    w1 = tf.Variable(tf.truncated_normal([img_size, h2_size], stddev=0.1), name='d_w1', dtype=tf.float32)
    b1 = tf.Variable(tf.zeros([h2_size]), name='d_b1', dtype=tf.float32)
    h1 = tf.nn.dropout(tf.nn.relu(tf.matmul(x_in, w1) + b1), keep_prob)
    w2 = tf.Variable(tf.truncated_normal([h2_size, h1_size], stddev=0.1), name='d_w2', dtype=tf.float32)
    b2 = tf.Variable(tf.zeros([h1_size]), name='d_b2', dtype=tf.float32)
    h2 = tf.nn.dropout(tf.nn.relu(tf.matmul(h1, w2) + b2), keep_prob)
    # Consistency fix: every other weight matrix in this file is initialized with
    # stddev=0.1; w3 previously omitted the argument and silently fell back to the
    # tf.truncated_normal default of stddev=1.0.
    w3 = tf.Variable(tf.truncated_normal([h1_size, 1], stddev=0.1), name='d_w3', dtype=tf.float32)
    b3 = tf.Variable(tf.zeros([1]), name='d_b3', dtype=tf.float32)
    h3 = tf.matmul(h2, w3) + b3
    # tf.slice(inputs, begin, size): h3 has shape [2 * batch_size, 1]. Rows
    # [0, batch_size) score the real images, rows [batch_size, end) score the
    # generated ones (a size of -1 means "through the end of that dimension").
    y_data = tf.nn.sigmoid(tf.slice(h3, [0, 0], [batch_size, -1], name=None))
    y_generated = tf.nn.sigmoid(tf.slice(h3, [batch_size, 0], [-1, -1], name=None))
    d_params = [w1, b1, w2, b2, w3, b3]
    return y_data, y_generated, d_params
def show_result(batch_res, fname, grid_size=(8, 8), grid_pad=5):
    """Tile up to grid_size[0] * grid_size[1] generated samples into one image and save it.

    batch_res values are expected in [-1, 1] (the generator's tanh range); they are
    rescaled to [0, 255] uint8. The previous default grid_size of (0, 0) produced
    negative grid dimensions and made np.zeros raise on every default call -- which is
    exactly how train() invokes this function -- so the default is now an 8x8 sheet.
    """
    # Map tanh range [-1, 1] -> [0, 1] and restore the 2-D image shape per sample.
    batch_res = 0.5 * batch_res.reshape((batch_res.shape[0], img_height, img_width)) + 0.5
    img_h, img_w = batch_res.shape[1], batch_res.shape[2]
    grid_h = img_h * grid_size[0] + grid_pad * (grid_size[0] - 1)
    grid_w = img_w * grid_size[1] + grid_pad * (grid_size[1] - 1)
    img_grid = np.zeros((grid_h, grid_w), dtype=np.uint8)
    for i, res in enumerate(batch_res):
        if i >= grid_size[0] * grid_size[1]:
            break
        img = (res * 255).astype(np.uint8)
        # The row advances after every grid_size[1] (column count) images; the original
        # divided by grid_size[0], which is only correct for square grids.
        row = (i // grid_size[1]) * (img_h + grid_pad)
        col = (i % grid_size[1]) * (img_w + grid_pad)
        img_grid[row:row + img_h, col:col + img_w] = img
    imsave(fname, img_grid)
def train():
    """Run the adversarial training loop.

    Builds the generator/discriminator graph, alternates a discriminator update and a
    generator update on each MNIST mini-batch, writes per-epoch sample image grids into
    the output directory and checkpoints the session after every epoch.
    """
    # load data (MNIST is downloaded into MNIST_data/ on first use)
    mnist = input_data.read_data_sets('MNIST_data',one_hot=True)
    x_data = tf.placeholder(tf.float32,[None,img_size],name='x_data')
    z_prior = tf.placeholder(tf.float32,[None,z_size],name='z_prior')
    keep_prob = tf.placeholder(tf.float32,name='keep_prob')
    global_step = tf.Variable(0,name="global_step",trainable=False)
    x_generated,g_params = build_generator(z_prior)
    y_data,y_generated,d_params = build_discriminator(x_data,x_generated,keep_prob)
    # Non-saturating GAN losses: D maximizes log D(x) + log(1 - D(G(z))),
    # G maximizes log D(G(z)). Each optimizer only updates its own variable list.
    d_loss =-( tf.log(y_data) + tf.log(1-y_generated))
    g_loss = -(tf.log(y_generated))
    optimizer = tf.train.AdamOptimizer(0.0001)
    d_trainer= optimizer.minimize(d_loss,var_list=d_params)
    g_trainer = optimizer.minimize(g_loss,var_list=g_params)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        if to_restore:
            chkpt_fname = tf.train.latest_checkpoint(output_path)
            saver.restore(sess, chkpt_fname)
        else:
            # Fresh run: wipe any previous output directory before recreating it.
            if os.path.exists(output_path):
                shutil.rmtree(output_path)
            os.mkdir(output_path)
        # Fixed noise batch reused every epoch so the sample sheets are comparable over time.
        z_sample_val = np.random.normal(0,1,size=(batch_size,z_size)).astype(np.float32)
        steps = 60000 / batch_size
        for i in range(sess.run(global_step),max_epoch):
            for j in np.arange(steps):
                print("epoch:%s, iter:%s" % (i, j))
                # Each iteration loads one mini-batch of 256 training samples and runs one
                # train step (translated from the original Chinese comment).
                x_value, _ = mnist.train.next_batch(batch_size)
                # Rescale pixels to [-1, 1] to match the generator's tanh output range
                # (assumes MNIST batches arrive in [0, 1] -- the default for read_data_sets).
                x_value = 2 * x_value.astype(np.float32) - 1
                z_value = np.random.normal(0, 1, size=(batch_size, z_size)).astype(np.float32)
                # Discriminator update (the original comment mislabeled this as the generator step).
                sess.run(d_trainer,
                         feed_dict={x_data: x_value, z_prior: z_value, keep_prob: 0.7})
                # Generator update; j % 1 == 0 is always true, so this runs every iteration.
                if j % 1 == 0:
                    sess.run(g_trainer,
                             feed_dict={x_data: x_value, z_prior: z_value, keep_prob: 0.7})
            # End of epoch: render samples from the fixed noise and from fresh random noise.
            x_gen_val = sess.run(x_generated, feed_dict={z_prior: z_sample_val})
            show_result(x_gen_val, "output/sample{0}.jpg".format(i))
            z_random_sample_val = np.random.normal(0, 1, size=(batch_size, z_size)).astype(np.float32)
            x_gen_val = sess.run(x_generated, feed_dict={z_prior: z_random_sample_val})
            show_result(x_gen_val, "output/random_sample{0}.jpg".format(i))
            sess.run(tf.assign(global_step, i + 1))
            saver.save(sess, os.path.join(output_path, "model"), global_step=global_step)
train() | [
"593476874@qq.com"
] | 593476874@qq.com |
821a86f82974e6e107d894164c3d8173485f4d89 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/4/i2h.py | 3e5a207560c3554044fa5574641787eded6de80e | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    """Print the words wrapped in standalone double-quote tokens.

    *lineRemaining* is a list of whitespace-split tokens. When the first and last
    tokens are both '"', the tokens between them are printed joined by single spaces;
    exactly two quote tokens print an empty line. Anything else is ignored.

    Fixes: the original used Python-2-only print statements (a SyntaxError on
    Python 3) and raised IndexError on an empty token list.
    """
    # Guard: an empty token list previously crashed on lineRemaining[0].
    if not lineRemaining:
        return
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print: drop the opening and closing quote tokens
            lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
        else:
            # Only the two quote tokens themselves: emit a blank line.
            print()
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'i2H':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
d3cc50f4cabcb9c5c39980932845050fda43a332 | 4a1336d6d3de077aab654bd315ddeb4cf261256f | /spider/headerchange/headerchange/spiders/headervalidation.py | 8ced065d76f34ea2e30825c27194917c00b92c1b | [
"Apache-2.0"
] | permissive | barneyElDinosaurio/base_function | e84be6da0fc2681421cefdbb6792dfda74031fce | d4ecbd4e8dfe43ed9c02ac430ce4598bfca0fc25 | refs/heads/master | 2020-04-04T20:35:48.477181 | 2018-11-05T10:34:34 | 2018-11-05T10:34:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | # -*- coding: utf-8 -*-
import scrapy
from headerchange.user_agents import agents
import random
import json
class HeadervalidationSpider(scrapy.Spider):
    """Spider that requests httpbin.org/ip repeatedly to inspect what the
    remote endpoint sees (useful for validating header/proxy middleware)."""

    name = 'headervalidation'

    def start_requests(self):
        # Fire five identical requests; dont_filter bypasses the dupe filter
        # so all of them are actually scheduled.
        target = 'http://httpbin.org/ip'
        for _ in range(5):
            yield scrapy.Request(url=target, dont_filter=True)

    def parse(self, response):
        # Dump the raw response body between separator rulers.
        ruler = '*' * 20
        print(ruler)
        print(response.text)
        print(ruler)
| [
"jinweizsu@gmail.com"
] | jinweizsu@gmail.com |
43f1604b8b228c0f2c7e723d31a650b9d2e13cc0 | e20978855798f958a460b318c247d80515edbf8e | /Node.py | c1eb13f840cd772767fb1d467922f602bdb64d46 | [] | no_license | hanifsarwary/ThesisALgo | 846df062524fc62718e6bf45aafa2ce895d98dfd | 01184740c139099055e290bb9a14454b389f8bd9 | refs/heads/master | 2020-07-12T07:54:09.153225 | 2019-09-09T13:05:16 | 2019-09-09T13:05:16 | 204,759,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | class Node:
def __init__(self, name, parent=None, children=None, is_mendatory=None, is_alternative=None, is_or=None):
    # name: feature name; parent: parent Node (None for the root);
    # children: child Nodes.  The three flags mark the relationship kind
    # (mandatory / alternative / or).  Note: the 'mendatory' spelling is
    # part of the public attribute name and is kept as-is.
    self.name = name
    self.parent = parent
    self.children = children
    self.is_mendatory = is_mendatory
    self.is_alternative = is_alternative
    self.is_or = is_or
def __str__(self):
stri = self.name +" child of " + self.parent.name
if self.is_mendatory:
stri+= " it is mandetory"
elif self.is_alternative:
stri += " it is alternative"
elif self.is_or:
stri += " it is or"
return stri | [
"mianhanif13@gmail.com"
] | mianhanif13@gmail.com |
185854112af879ccfa14120f7425b4b1445cb903 | 91c204ce2b5c8793effdcb5a8e958536e4fa253f | /main/migrations/0020_cult_date_created.py | c41a1a0d1d962a8225847ae9fd4a990af925f662 | [
"MIT"
] | permissive | send2cloud/opencult.com | eb20bdecbaee741e43831cbeb430fec73d45947f | 22888ab7f66d577ec8940c8b1f45a1412509389b | refs/heads/master | 2021-04-26T23:21:58.349125 | 2018-03-04T19:53:08 | 2018-03-04T19:53:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # Generated by Django 2.0.1 on 2018-02-10 15:37
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django (2018-02-10): adds a creation timestamp to
    # the Cult model.  Do not hand-edit the operations; Django's migration
    # state detection depends on them.

    dependencies = [
        ('main', '0019_auto_20180210_1402'),
    ]

    operations = [
        migrations.AddField(
            model_name='cult',
            name='date_created',
            # timezone.now as default backfills existing rows at migrate time.
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
| [
"theodorekeloglou@gmail.com"
] | theodorekeloglou@gmail.com |
f7d6097ad8e36722971a2dee7fe0e8bb7ba02e76 | ed2ea3c4bb8809b8aad0977f193f265478696a25 | /ldap_sync/concepts/__init__.py | f0428145b5ba089ae875216a6ae92016a3e406fa | [
"Apache-2.0"
] | permissive | agdsn/pycroft | 4d40f0f2b61ec48fb1ed9df7f5227e59e5e2b946 | 9f3abb5dc1a7dd54c577af37d5004dd2204739cd | refs/heads/develop | 2023-08-31T10:52:15.658921 | 2023-08-27T12:53:37 | 2023-08-27T12:53:37 | 56,711,411 | 21 | 15 | Apache-2.0 | 2023-09-05T00:51:57 | 2016-04-20T18:36:36 | Python | UTF-8 | Python | false | false | 257 | py | # Copyright (c) 2022. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
"""
ldap_sync.concepts
~~~~~~~~~~~~~~~~~~
"""
| [
"lukas.juhrich@agdsn.de"
] | lukas.juhrich@agdsn.de |
be6728759401ccad5796401b07e2d4c3d04d2771 | b2f4d7275b407b56309d8e5ede3b1c75a18c7871 | /MxOnline/apps/courses/migrations/0006_bannercorse.py | 11d3e518cb1f0fde38dc95280b2de831c0269532 | [] | no_license | renlei-great/MxOnline | 9157459cea21bb20379dbe5d285f4aecac026090 | 01454796473cf2fbb10be315e8340bda715159b2 | refs/heads/master | 2021-01-09T14:47:54.530942 | 2020-05-09T16:37:33 | 2020-05-09T16:37:33 | 242,342,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | # Generated by Django 2.2 on 2020-03-20 21:41
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django: creates BannerCorse as a proxy over
    # courses.Course (no new table) so banner courses get a separate
    # admin entry.  Do not hand-edit; Django compares migration state.

    dependencies = [
        ('courses', '0005_course_is_banner'),
    ]

    operations = [
        migrations.CreateModel(
            name='BannerCorse',
            # Proxy models add no fields of their own.
            fields=[
            ],
            options={
                'verbose_name': '课程轮播',
                'verbose_name_plural': '课程轮播',
                'proxy': True,  # shares Course's database table
                'indexes': [],
                'constraints': [],
            },
            bases=('courses.course',),
        ),
    ]
| [
"1415977534@qq.com"
] | 1415977534@qq.com |
e9dd6b17803be564c025bd5076ef9dfa8fbff2ec | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/79/usersdata/157/43823/submittedfiles/serie1.py | 876643f429b71443f76ea7727c5a88460859f3ce | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | n=int(input('Informe um valor:'))
# Accumulates an alternating series over i = 1..n (n is read above):
# odd i adds i**2, even i subtracts i/(i**2) (== 1/i).
# NOTE(review): the original odd branch was `s=s+(i**2))` — a stray ')'
# made the file unparseable.  The minimal fix below keeps `i**2`, but the
# even branch suggests the intended term may have been i/(i**2); confirm
# against the exercise statement.
s = 0
for i in range(1, n + 1):
    if i % 2 == 1:
        s = s + (i ** 2)
    else:
        s = s - (i / (i ** 2))
print('%.5f' % s)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
21e6f7a73ae5b4b0a60276196bd64a2f0b5af5f4 | c3082eb2adc43b311dd3c9ff16fd3ed9df85f266 | /python/examples/basics/converting_to_int_via_float.py | e4a380c6955ace2a5bc7d9e1ebeb9c41f61efb5d | [] | no_license | szabgab/slides | 78818c7138331b3ba9e221c81da3678a46efe9b3 | 63bba06678554db737602f2fbcd6510c36037e8a | refs/heads/main | 2023-08-31T07:13:51.536711 | 2023-08-29T13:17:59 | 2023-08-29T13:17:59 | 122,212,527 | 87 | 69 | null | 2023-05-19T06:55:11 | 2018-02-20T14:57:03 | Python | UTF-8 | Python | false | false | 385 | py | a = "2.1"
# Convert the numeric string `a` (defined above) to int by going through
# float first; int() cannot parse "2.1" directly.
b = float(a)   # str -> float
c = int(b)     # float -> int (truncates toward zero)
print(c)  # 2

# Show the type at each conversion step.
print(type(a))  # <class 'str'>
print(type(b))  # <class 'float'>
print(type(c))  # <class 'int'>

# The same conversion as a single expression.
d = int(float(a))
print(d)  # 2
print(type(d))  # <class 'int'>

# int(float(...)) also accepts floats and integer-looking strings directly.
print(int(float(2.1)))  # 2
print(int(float("2")))  # 2
print(int(float(2)))    # 2
| [
"gabor@szabgab.com"
] | gabor@szabgab.com |
11905ea741213e9e24040446860c1f72c54dec7a | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/jedi/inference/flow_analysis.py | 89bfe578dfcb4dd2770c6218ed32d374639910fb | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 4,583 | py | from typing import Dict, Optional
from jedi.parser_utils import get_flow_branch_keyword, is_scope, get_parent_scope
from jedi.inference.recursion import execution_allowed
from jedi.inference.helpers import is_big_annoying_library
class Status:
    """Tri-state reachability value (reachable / unreachable / unsure).

    Every instance registers itself in ``lookup_table`` so a plain
    True/False/None can be mapped back to its singleton.
    """

    # Maps True/False/None to the corresponding singleton instance.
    lookup_table: Dict[Optional[bool], 'Status'] = {}

    def __init__(self, value: Optional[bool], name: str) -> None:
        self._value = value
        self._name = name
        # Self-register so Status.lookup_table[value] finds this instance.
        Status.lookup_table[value] = self

    def invert(self):
        # REACHABLE <-> UNREACHABLE; uncertainty stays uncertain.
        if self is REACHABLE:
            return UNREACHABLE
        if self is UNREACHABLE:
            return REACHABLE
        return UNSURE

    def __and__(self, other):
        # Any uncertainty is contagious; otherwise both must be truthy.
        if self is UNSURE or other is UNSURE:
            return UNSURE
        if self._value and other._value:
            return REACHABLE
        return UNREACHABLE

    def __repr__(self):
        return f'<{type(self).__name__}: {self._name}>'
# Module-level singletons; constructing them populates Status.lookup_table,
# so lookup_table[True] is REACHABLE, [False] is UNREACHABLE, [None] UNSURE.
REACHABLE = Status(True, 'reachable')
UNREACHABLE = Status(False, 'unreachable')
UNSURE = Status(None, 'unsure')
def _get_flow_scopes(node):
    # Walk upwards from `node`, yielding each enclosing flow node
    # (if/try/while/...) until the first real scope — or the tree root —
    # is reached.
    current = get_parent_scope(node, include_flows=True)
    while current is not None and not is_scope(current):
        yield current
        current = get_parent_scope(current, include_flows=True)
def reachability_check(context, value_scope, node, origin_scope=None):
    """Decide whether `node` is reachable when looked up from `origin_scope`.

    Returns one of REACHABLE / UNREACHABLE / UNSURE.  Flow analysis is
    skipped entirely (UNSURE) for known-problematic libraries or when it
    is disabled on the inference state.
    """
    if is_big_annoying_library(context) \
            or not context.inference_state.flow_analysis_enabled:
        return UNSURE

    first_flow_scope = get_parent_scope(node, include_flows=True)

    if origin_scope is not None:
        origin_flow_scopes = list(_get_flow_scopes(origin_scope))
        node_flow_scopes = list(_get_flow_scopes(node))

        branch_matches = True
        # Compare the branch keywords (if/else/except/...) within the
        # innermost flow scope shared by `node` and `origin_scope`.
        for flow_scope in origin_flow_scopes:
            if flow_scope in node_flow_scopes:
                node_keyword = get_flow_branch_keyword(flow_scope, node)
                origin_keyword = get_flow_branch_keyword(flow_scope, origin_scope)
                branch_matches = node_keyword == origin_keyword
                if flow_scope.type == 'if_stmt':
                    if not branch_matches:
                        # Different branches of the same `if` never see
                        # each other's names.
                        return UNREACHABLE
                elif flow_scope.type == 'try_stmt':
                    if not branch_matches and origin_keyword == 'else' \
                            and node_keyword == 'except':
                        return UNREACHABLE
                if branch_matches:
                    break

        # Direct parents get resolved, we filter scopes that are separate
        # branches. This makes sense for autocompletion and static analysis.
        # For actual Python it doesn't matter, because we're talking about
        # potentially unreachable code.

        # e.g. `if 0:` would cause all name lookup within the flow make
        # unaccessible. This is not a "problem" in Python, because the code is
        # never called. In Jedi though, we still want to infer types.
        while origin_scope is not None:
            if first_flow_scope == origin_scope and branch_matches:
                return REACHABLE
            origin_scope = origin_scope.parent

    return _break_check(context, value_scope, first_flow_scope, node)
def _break_check(context, value_scope, flow_scope, node):
    """Walk flow scopes from `node` outwards up to `value_scope`, combining
    the reachability status of each level with `&` (see Status.__and__).
    """
    reachable = REACHABLE
    if flow_scope.type == 'if_stmt':
        if flow_scope.is_node_after_else(node):
            # Node is in the `else` branch: it runs only if every preceding
            # test fails, hence the invert() after scanning the tests.
            for check_node in flow_scope.get_test_nodes():
                reachable = _check_if(context, check_node)
                if reachable in (REACHABLE, UNSURE):
                    break
            reachable = reachable.invert()
        else:
            # Node is guarded by exactly one test; evaluate that test.
            flow_node = flow_scope.get_corresponding_test_node(node)
            if flow_node is not None:
                reachable = _check_if(context, flow_node)
    elif flow_scope.type in ('try_stmt', 'while_stmt'):
        # try/while bodies may or may not execute; no static answer.
        return UNSURE

    # Only reachable branches need to be examined further.
    if reachable in (UNREACHABLE, UNSURE):
        return reachable

    if value_scope != flow_scope and value_scope != flow_scope.parent:
        # Not yet at the target scope: recurse one flow level outwards.
        flow_scope = get_parent_scope(flow_scope, include_flows=True)
        return reachable & _break_check(context, value_scope, flow_scope, node)
    else:
        return reachable
def _check_if(context, node):
    # Infer a flow-test expression and map its truthiness onto a Status.
    with execution_allowed(context.inference_state, node) as allowed:
        if not allowed:
            # Recursion guard tripped: give up instead of looping forever.
            return UNSURE

        inferred = context.infer_node(node)
        truth_values = {value.py__bool__() for value in inferred}
        if len(truth_values) != 1:
            # No answer, or conflicting answers -> undecidable.
            return UNSURE
        return Status.lookup_table[truth_values.pop()]
| [
"davidycliao@gmail.com"
] | davidycliao@gmail.com |
9a79fa81dfbdd295757923dad43e8a3a4edf1a0c | 022104aa2456429356bdd26c701a2949381a83cf | /build/campero/campero_common/campero_pad/catkin_generated/pkg.develspace.context.pc.py | f99ca17b449a9e448c76c1b55c6efcc0f936148a | [] | no_license | nachocz/campero_ws | 204f313d5fbdb81d1f7cc568341a1170ddd2b4cf | f2b09f96165166c0e867e3f5f3dcd092dbac1c1b | refs/heads/master | 2023-02-02T03:25:56.603172 | 2020-12-11T11:28:42 | 2020-12-11T11:28:42 | 320,539,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Generated by catkin from pkg.context.pc.in at configure time; values are
# substituted by CMake — do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
# CMake encodes lists as ';'-separated strings; an empty string must become
# an empty list, hence the guarded splits below.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/pc-campero2/campero_ws/devel/include".split(';') if "/home/pc-campero2/campero_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "robotnik_msgs;roscpp;sensor_msgs;geometry_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "campero_pad"
PROJECT_SPACE_DIR = "/home/pc-campero2/campero_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"nachocz@gmail.com"
] | nachocz@gmail.com |
5f4b5bcbabc535fe3049ed7f7871e3f932a445bf | a54f78f026c937b5a8a31180024496748169db91 | /nibabel/cmdline/convert.py | c0bc8f212eb0bea113f9dc2e612a04df9b450b1b | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"PDDL-1.0"
] | permissive | nipy/nibabel | 7017e29ee9e3e93d1085d9032c32f6d922b0e43d | 8fea2a8e50aaf4d8b0d4bfff7a21b132914120ee | refs/heads/master | 2023-08-22T07:12:46.167323 | 2023-08-06T23:46:30 | 2023-08-06T23:46:30 | 791,352 | 544 | 239 | NOASSERTION | 2023-09-08T19:10:32 | 2010-07-22T16:28:30 | Python | UTF-8 | Python | false | false | 2,233 | py | #!python
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""
Convert neuroimaging file to new parameters
"""
import argparse
import warnings
from pathlib import Path
import nibabel as nib
def _get_parser():
    """Return command-line argument parser."""
    parser = argparse.ArgumentParser(description=__doc__)

    # Positional arguments: input and output volumes.
    parser.add_argument('infile', help='Neuroimaging volume to convert')
    parser.add_argument('outfile', help='Name of output file')

    # Optional conversion controls.
    parser.add_argument(
        '--out-dtype', action='store', help='On-disk data type; valid argument to numpy.dtype()'
    )
    parser.add_argument(
        '--image-type',
        action='store',
        help='Name of NiBabel image class to create, e.g. Nifti1Image. '
        'If specified, will be used prior to setting dtype. If unspecified, '
        'a new image like `infile` will be created and converted to a type '
        'matching the extension of `outfile`.',
    )
    parser.add_argument(
        '-f',
        '--force',
        action='store_true',
        help='Overwrite output file if it exists, and ignore warnings if possible',
    )
    parser.add_argument(
        '-V', '--version', action='version', version=f'{parser.prog} {nib.__version__}'
    )
    return parser
def main(args=None):
    """Main program function."""
    opts = _get_parser().parse_args(args)
    source_img = nib.load(opts.infile)

    # Refuse to clobber an existing output unless --force was given.
    if not opts.force and Path(opts.outfile).exists():
        raise FileExistsError(f'Output file exists: {opts.outfile}')

    # An explicit --image-type wins; otherwise keep the input image's class.
    klass = getattr(nib, opts.image_type) if opts.image_type else source_img.__class__
    out_img = klass.from_image(source_img)

    if opts.out_dtype:
        try:
            out_img.set_data_dtype(opts.out_dtype)
        except Exception as e:
            if not opts.force:
                raise
            # --force downgrades dtype errors to warnings.
            warnings.warn(f'Ignoring error: {e!r}')

    nib.save(out_img, opts.outfile)
| [
"markiewicz@stanford.edu"
] | markiewicz@stanford.edu |
38d18892148e084929ca4f3658094b983819c601 | 2581fbdc72887143376a8f9d8f0da0f1508b9cdf | /Flask/08-Social-Blog-Project/11-Blog-Posts-Views/puppycompanyblog/users/picture_handler.py | ecac9e972d79f099fdad038fbc6ed8fac83e338a | [
"Apache-2.0"
] | permissive | Sandy1811/python-for-all | 6e8a554a336b6244af127c7bcd51d36018b047d9 | fdb6878d93502773ba8da809c2de1b33c96fb9a0 | refs/heads/master | 2022-05-16T02:36:47.676560 | 2019-08-16T08:35:42 | 2019-08-16T08:35:42 | 198,479,841 | 1 | 0 | Apache-2.0 | 2022-03-11T23:56:32 | 2019-07-23T17:39:38 | Jupyter Notebook | UTF-8 | Python | false | false | 611 | py | import os
# pip install pillow
from PIL import Image
from flask import url_for, current_app
def add_profile_pic(pic_upload, username):
    """Resize an uploaded profile picture and store it under static/profile_pics.

    The file is saved as '<username>.<ext>' (extension taken from the upload's
    filename) and shrunk in place to at most 200x200 pixels, preserving
    aspect ratio.  Returns the stored filename so the caller can persist it
    on the user model.
    """
    filename = pic_upload.filename
    # Keep the upload's extension type, e.g. .jpg or .png.
    ext_type = filename.split('.')[-1]
    storage_filename = str(username) + '.' + ext_type

    # BUG FIX: the original joined the single component 'static\profile_pics',
    # embedding a literal backslash that only works on Windows; join each
    # path component separately so the path is portable.
    filepath = os.path.join(current_app.root_path, 'static', 'profile_pics', storage_filename)

    # Play around with this size.
    output_size = (200, 200)

    # Open the picture, downscale it in place, and save it.
    pic = Image.open(pic_upload)
    pic.thumbnail(output_size)
    pic.save(filepath)

    return storage_filename
| [
"sndp1811@gmail.com"
] | sndp1811@gmail.com |
04f8120a4657332ab632b812bfbf975295baebcb | a867b1c9da10a93136550c767c45e0d8c98f5675 | /AZ_GFG_DP_EditDistance.py | 4ed89d71dbcfd5e1a552f7b0b1dc754bda561f19 | [] | no_license | Omkar02/FAANG | f747aacc938bf747129b8ff35b6648fb265d95b6 | ee9b245aa83ea58aa67954ab96442561dbe68d06 | refs/heads/master | 2023-03-25T19:45:08.153403 | 2021-03-28T07:13:08 | 2021-03-28T07:13:08 | 280,783,785 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | # import __main__ as main
# from Helper.TimerLogger import CodeTimeLogging
# fileName = main.__file__
# fileName = fileName.split('\\')[-1]
# CodeTimeLogging(Flag='F', filename=fileName, Tag='Dynamic-Programing', Difficult='Medium')
# Counts total recursive calls; instrumentation only.
cnt = [0]


def editDist(stringOne, stringTwo, n, m, cache):
    """Return the Levenshtein edit distance between the first n characters
    of stringOne and the first m characters of stringTwo, memoised in
    `cache` (a dict keyed by (n, m)).
    """
    cnt[0] += 1
    current = (n, m)
    if current in cache:
        return cache[current]

    # BUG FIX: when one string is exhausted the distance is the remaining
    # length of the other (that many inserts/removes), not 0 as the
    # original `if not n or not m: return 0` claimed.
    if n == 0:
        return m
    if m == 0:
        return n

    if stringOne[n - 1] == stringTwo[m - 1]:
        # Last characters match: no extra cost for this pair.
        cache[current] = editDist(stringOne, stringTwo, n - 1, m - 1, cache)
    else:
        cache[current] = 1 + min(editDist(stringOne, stringTwo, n, m - 1, cache),      # Insert
                                 editDist(stringOne, stringTwo, n - 1, m, cache),      # Remove
                                 editDist(stringOne, stringTwo, n - 1, m - 1, cache))  # Replace
    return cache[current]
# Demo run: "geek" -> "gesek" differs by a single insertion, and cnt shows
# how many recursive calls the memoised search made.
stringOne = "geek"
stringTwo = "gesek"
print(editDist(stringOne, stringTwo, len(stringOne), len(stringTwo), {}))
print(cnt)
| [
"omkarjoshi4031@live.com"
] | omkarjoshi4031@live.com |
46ca3e2a6be53b2c21d526a65d215a4c498382a9 | 15a7af91ff82773a4eb9818f0dd6fafa5bbf9ff6 | /15_examples_os_file/os_write.py | ff2507f2414ee308329d4cc83c4c2b42c4b085e2 | [] | no_license | hooj0/python-examples | 6d13579d2a7e6424bdc7f7be061d72e27ab8cddd | 1fe2960a710dec60ab640370eee0d60c8222523e | refs/heads/master | 2022-07-03T09:31:36.586102 | 2022-06-14T07:59:07 | 2022-06-14T07:59:07 | 109,703,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | #!/usr/bin/env python3
# encoding: utf-8
# @author: hoojo
# @email: hoojo_@126.com
# @github: https://github.com/hooj0
# @create date: 2018-04-01 17:42:04
# @copyright by hoojo@2018
# @changelog Added python3 `os file -> write` example


import os

'''
Overview
    os.write() writes a byte string to file descriptor fd and returns the
    number of bytes actually written.  Available on Unix.
Syntax
    os.write(fd, str)
Parameters
    fd  -- file descriptor.
    str -- the bytes to write.
Return value
    The number of bytes actually written.
'''

# Open the demo file for read/write, creating it if necessary.
fd = os.open("/tmp/foo.txt", os.O_RDWR | os.O_CREAT)

# BUG FIX: on Python 3 os.write() requires a bytes-like object; the original
# passed a str (and shadowed the `str` builtin), raising TypeError at run
# time.  Use a bytes literal and a non-shadowing name instead.
data = b"new content"
ret = os.write(fd, data)

# Print the number of bytes written.
print("写入的位数为: ")
print(ret)
print("写入成功")

# Close the file descriptor.
os.close(fd)
print("关闭文件成功!!")
"hoojo@qq.com"
] | hoojo@qq.com |
262a8e23089bd6a0dfaee5b541632f4457a6a6ce | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-sas/aliyunsdksas/request/v20181203/DescribeBackupMachineStatusRequest.py | 31df152996c75da13dd980f4fd00cf333d1f23e5 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 1,829 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class DescribeBackupMachineStatusRequest(RpcRequest):
    """Auto-generated RPC request for the Sas API (version 2018-12-03),
    action DescribeBackupMachineStatus.  Getters/setters map one-to-one
    onto query parameters; do not hand-edit generated SDK code."""

    def __init__(self):
        RpcRequest.__init__(self, 'Sas', '2018-12-03', 'DescribeBackupMachineStatus')
        self.set_method('POST')
        # Wire up endpoint resolution only when the installed SDK core
        # exposes these attributes.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_PolicyVersion(self):  # String
        return self.get_query_params().get('PolicyVersion')

    def set_PolicyVersion(self, PolicyVersion):  # String
        self.add_query_param('PolicyVersion', PolicyVersion)

    def get_Uuid(self):  # String
        return self.get_query_params().get('Uuid')

    def set_Uuid(self, Uuid):  # String
        self.add_query_param('Uuid', Uuid)

    def get_PolicyId(self):  # Long
        return self.get_query_params().get('PolicyId')

    def set_PolicyId(self, PolicyId):  # Long
        self.add_query_param('PolicyId', PolicyId)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.